Commit b37e16a3 authored by André Anjos's avatar André Anjos 💬
Browse files

Improves verbosity for preprocessing, extraction and enrollment; Adds more verbosity for scoring (closes #98)

Improves verbosity for preprocessing, extraction and enrollment; Adds more verbosity for scoring (closes #98)
parent 90f80ff8
Pipeline #14110 passed with stages
in 10 minutes and 13 seconds
......@@ -123,7 +123,8 @@ def project(algorithm, extractor, groups = None, indices = None, allow_missing_f
if not utils.check_file(projected_file, force,
algorithm.min_projected_file_size):
logger.debug("... Projecting features for file '%s'", feature_file)
logger.debug("... Projecting features for file '%s' (%d/%d)",
feature_file, index_range.index(i)+1, len(index_range))
# create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
bob.io.base.create_directories_safe(os.path.dirname(projected_file))
# load feature
......@@ -256,7 +257,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.info("- Enrollment: splitting of index range %s", str(indices))
logger.info("- Enrollment: enrolling models of group '%s'", group)
for model_id in model_ids:
for pos, model_id in enumerate(model_ids):
# Path to the model
model_file = fs.model_file(model_id, group)
......@@ -271,7 +272,9 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.debug("... Skipping model file %s since no feature file could be found", model_file)
continue
logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
logger.debug("... Enrolling model '%d' from %d feature(s) to "
"file '%s' (%d/%d)", model_id, len(enroll_files), model_file,
pos+1, len(model_ids))
bob.io.base.create_directories_safe(os.path.dirname(model_file))
# load all files into memory
......
......@@ -112,7 +112,8 @@ def extract(extractor, preprocessor, groups=None, indices = None, allow_missing_
if not utils.check_file(feature_file, force,
extractor.min_feature_file_size):
logger.debug("... Extracting features for data file '%s'", data_file)
logger.debug("... Extracting features for data file '%s' (%d/%d)",
data_file, index_range.index(i)+1, len(index_range))
# create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
bob.io.base.create_directories_safe(os.path.dirname(feature_file))
# load data
......
......@@ -67,7 +67,8 @@ def preprocess(preprocessor, groups = None, indices = None, allow_missing_files
# check for existence
if not utils.check_file(preprocessed_data_file, force,
preprocessor.min_preprocessed_file_size):
logger.debug("... Processing original data file '%s'", file_name)
logger.debug("... Processing original data file '%s' (%d/%d)", file_name,
index_range.index(i)+1, len(index_range))
data = preprocessor.read_original_data(file_object, original_directory, original_extension)
# create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
......
......@@ -131,9 +131,11 @@ def _scores_a(algorithm, reader, model_ids, group, compute_zt_norm, force, write
logger.info("- Scoring: computing scores for group '%s'", group)
# Computes the raw scores for each model
for model_id in model_ids:
for pos, model_id in enumerate(model_ids):
# test if the file is already there
score_file = fs.a_file(model_id, group) if compute_zt_norm else fs.no_norm_file(model_id, group)
logger.debug("... Scoring model '%s' at '%s' (%d/%d)", model_id, score_file,
pos+1, len(model_ids))
if utils.check_file(score_file, force):
logger.warn("Score file '%s' already exists.", score_file)
else:
......@@ -166,9 +168,11 @@ def _scores_b(algorithm, reader, model_ids, group, force, allow_missing_files):
logger.info("- Scoring: computing score matrix B for group '%s'", group)
# Loads the models
for model_id in model_ids:
for pos, model_id in enumerate(model_ids):
# test if the file is already there
score_file = fs.b_file(model_id, group)
logger.debug("... Scoring model '%s' at '%s' (%d/%d)", model_id,
score_file, pos+1, len(model_ids))
if utils.check_file(score_file, force):
logger.warn("Score file '%s' already exists.", score_file)
else:
......@@ -191,9 +195,11 @@ def _scores_c(algorithm, reader, t_model_ids, group, force, allow_missing_files)
logger.info("- Scoring: computing score matrix C for group '%s'", group)
# Computes the raw scores for the T-Norm model
for t_model_id in t_model_ids:
for pos, t_model_id in enumerate(t_model_ids):
# test if the file is already there
score_file = fs.c_file(t_model_id, group)
logger.debug("... Scoring model '%s' at '%s' (%d/%d)", t_model_id,
score_file, pos+1, len(t_model_ids))
if utils.check_file(score_file, force):
logger.warn("Score file '%s' already exists.", score_file)
else:
......@@ -219,9 +225,11 @@ def _scores_d(algorithm, reader, t_model_ids, group, force, allow_missing_files)
z_probe_ids = [z_probe_object.client_id for z_probe_object in z_probe_objects]
# Loads the T-Norm models
for t_model_id in t_model_ids:
for pos, t_model_id in enumerate(t_model_ids):
# test if the file is already there
score_file = fs.d_file(t_model_id, group)
logger.debug("... Scoring model '%s' at '%s' (%d/%d)", t_model_id,
score_file, pos+1, len(t_model_ids))
same_score_file = fs.d_same_value_file(t_model_id, group)
if utils.check_file(score_file, force) and utils.check_file(same_score_file, force):
logger.warn("score files '%s' and '%s' already exist.", score_file, same_score_file)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment