Commit 7cb838fb authored by Manuel Günther
Browse files

Added more debug output during experiments

parent 2356d0ae
......@@ -72,6 +72,7 @@ def test_verify_local():
'-e', os.path.join(dummy_dir, 'extractor.py'),
'-a', os.path.join(dummy_dir, 'algorithm.py'),
'--zt-norm',
'-vvv',
'-s', 'test_local',
'--temp-directory', test_dir,
'--result-directory', test_dir
......@@ -91,6 +92,7 @@ def test_verify_resources():
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_resource',
'--temp-directory', test_dir,
'--result-directory', test_dir
......@@ -110,6 +112,7 @@ def test_verify_commandline():
'-e', 'bob.bio.base.test.dummy.extractor.DummyExtractor()',
'-a', 'bob.bio.base.test.dummy.algorithm.DummyAlgorithm()',
'--zt-norm',
'-vvv',
'-s', 'test_commandline',
'--temp-directory', test_dir,
'--result-directory', test_dir
......@@ -132,6 +135,7 @@ def test_verify_parallel():
'-e', 'bob.bio.base.test.dummy.extractor.DummyExtractor()',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_parallel',
'--temp-directory', test_dir,
'--result-directory', test_dir,
......@@ -153,6 +157,7 @@ def test_verify_compressed():
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_compressed',
'--temp-directory', test_dir,
'--result-directory', test_dir,
......@@ -173,6 +178,7 @@ def test_verify_calibrate():
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_calibrate',
'--temp-directory', test_dir,
'--result-directory', test_dir,
......@@ -193,6 +199,7 @@ def test_verify_fileset():
'-e', 'bob.bio.base.test.dummy.extractor.DummyExtractor()',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_fileset',
'--temp-directory', test_dir,
'--result-directory', test_dir
......@@ -217,6 +224,7 @@ def test_verify_filelist():
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'-vvv',
'-s', 'test_filelist',
'--temp-directory', test_dir,
'--result-directory', test_dir
......@@ -267,6 +275,7 @@ def test_evaluate():
'--roc', plots[0],
'--det', plots[1],
'--cmc', plots[2],
'-vvv',
]
# execute the script
......@@ -342,7 +351,7 @@ def test_grid_search():
'-T', test_dir,
'-R', test_dir,
'-w', 'Job.txt',
'-l', '4', '-L', '-1', '-vv',
'-l', '4', '-L', '-1', '-vvv',
'--', '--imports', 'bob.io.image',
'--dry-run'
]
......@@ -398,7 +407,8 @@ def test_scripts():
'-a', annotation_file,
'-p', 'dummy',
'-o', preprocessed_file,
'-c', preprocessed_image
'-c', preprocessed_image,
'-vvv',
]
preprocess(parameters)
......@@ -414,6 +424,7 @@ def test_scripts():
'-e', 'dummy',
'-E', extractor_file,
'-o', extracted_file,
'-vvv',
]
extract(parameters)
......@@ -427,7 +438,8 @@ def test_scripts():
'-a', 'dummy',
'-P', projector_file,
'-E', enroller_file,
'-o', model_file
'-o', model_file,
'-vvv',
]
enroll(parameters)
......@@ -441,6 +453,7 @@ def test_scripts():
'-a', 'dummy',
'-P', projector_file,
'-E', enroller_file,
'-vvv',
]
score(parameters)
......
......@@ -107,6 +107,7 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
projected_file = str(projected_files[i])
if not utils.check_file(projected_file, force, 1000):
logger.debug("... Projecting features for file '%s'", feature_file)
# load feature
feature = extractor.read_feature(feature_file)
# project feature
......@@ -115,6 +116,9 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
bob.io.base.create_directories_safe(os.path.dirname(projected_file))
algorithm.write_feature(projected, projected_file)
else:
logger.debug("... Skipping feature file '%s' since projected file '%s' exists", feature_file, projected_file)
def train_enroller(algorithm, extractor, force = False):
......@@ -225,6 +229,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
# Removes old file if required
if not utils.check_file(model_file, force, 1000):
enroll_files = fs.enroll_files(model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
# load all files into memory
enroll_features = [reader.read_feature(str(enroll_file)) for enroll_file in enroll_files]
......@@ -234,6 +239,10 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
bob.io.base.create_directories_safe(os.path.dirname(model_file))
algorithm.write_model(model, model_file)
else:
logger.debug("... Skipping model file '%s' since it exists", model_file)
# T-Norm-Models
if 'T' in types and compute_zt_norm:
for group in groups:
......@@ -251,6 +260,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
# Removes old file if required
if not utils.check_file(t_model_file, force, 1000):
t_enroll_files = fs.t_enroll_files(t_model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
logger.debug("... Enrolling T-model from %d features to file '%s'", len(t_enroll_files), t_model_file)
# load all files into memory
t_enroll_features = [reader.read_feature(str(t_enroll_file)) for t_enroll_file in t_enroll_files]
......@@ -259,3 +269,5 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
# save model
bob.io.base.create_directories_safe(os.path.dirname(t_model_file))
algorithm.write_model(t_model, t_model_file)
else:
logger.debug("... Skipping T-model file '%s' since it exists", t_model_file)
......@@ -97,6 +97,7 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
feature_file = str(feature_files[i])
if not utils.check_file(feature_file, force, 1000):
logger.debug("... Extracting features for data file '%s'", data_file)
# load data
data = preprocessor.read_data(data_file)
# extract feature
......@@ -104,6 +105,9 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
# write feature
bob.io.base.create_directories_safe(os.path.dirname(feature_file))
extractor.write_feature(feature, feature_file)
else:
logger.debug("... Skipping preprocessed data '%s' since feature file '%s' exists", data_file, feature_file)
def read_features(file_names, extractor, split_by_client = False):
"""read_features(file_names, extractor, split_by_client = False) -> extracted
......
......@@ -51,10 +51,11 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
# iterate over the selected files
for i in index_range:
preprocessed_data_file = str(preprocessed_data_files[i])
file_name = str(data_files[i])
# check for existence
if not utils.check_file(preprocessed_data_file, force, 1000):
file_name = data_files[i]
logger.debug("... Processing original data file '%s'", file_name)
data = preprocessor.read_original_data(file_name)
# get the annotations; might be None
......@@ -63,12 +64,16 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
# call the preprocessor
preprocessed_data = preprocessor(data, annotations)
if preprocessed_data is None:
logger.error("Preprocessing of file %s was not successful", str(file_name))
logger.error("Preprocessing of file '%s' was not successful", file_name)
# write the data
bob.io.base.create_directories_safe(os.path.dirname(preprocessed_data_file))
preprocessor.write_data(preprocessed_data, preprocessed_data_file)
else:
logger.debug("... Skipping original data file '%s' since preprocessed data '%s' exists", file_name, preprocessed_data_file)
def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
"""read_preprocessed_data(file_names, preprocessor, split_by_client = False) -> preprocessed
......
Supports Markdown
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment