Commit b279347d authored by Manuel Günther

Implemented command line option to properly handle files for which the preprocessing fails -- resulting scores will be NaN
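For orientation, this is how the new option is meant to be driven end-to-end -- a minimal sketch mirroring the test_verify_missing test added below; the sub-directory name and the temp/result paths are placeholders:

from bob.bio.base.script.verify import main

# With --allow-missing-files, files whose preprocessing fails are skipped in all
# later stages instead of aborting the run, and their scores are written as NaN.
main([
    '-d', 'dummy',
    '-p', 'bob.bio.base.test.dummy.preprocessor.DummyPreprocessor(return_none=True)',
    '-e', 'dummy',
    '-a', 'dummy',
    '--allow-missing-files',
    '-vs', 'missing_demo',                  # illustrative sub-directory
    '--temp-directory', '/tmp/bobtest',     # placeholder paths
    '--result-directory', '/tmp/bobtest',
    '--imports', 'bob.bio.base.test.dummy',
])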
parent 6cb749d8
......@@ -252,6 +252,7 @@ def execute(args):
args.preprocessor,
groups = tools.groups(args),
indices = tools.indices(fs.original_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_preprocessing_jobs),
allow_missing_files = args.allow_missing_files,
force = args.force)
# train the feature extractor
......@@ -259,6 +260,7 @@ def execute(args):
tools.train_extractor(
args.extractor,
args.preprocessor,
allow_missing_files = args.allow_missing_files,
force = args.force)
# extract the features
......@@ -268,6 +270,7 @@ def execute(args):
args.preprocessor,
groups = tools.groups(args),
indices = tools.indices(fs.preprocessed_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_extraction_jobs),
allow_missing_files = args.allow_missing_files,
force = args.force)
# train the feature projector
......@@ -275,6 +278,7 @@ def execute(args):
tools.train_projector(
args.algorithm,
args.extractor,
allow_missing_files = args.allow_missing_files,
force = args.force)
# project the features
......@@ -284,6 +288,7 @@ def execute(args):
args.extractor,
groups = tools.groups(args),
indices = tools.indices(fs.preprocessed_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_projection_jobs),
allow_missing_files = args.allow_missing_files,
force = args.force)
# train the model enroller
......@@ -291,6 +296,7 @@ def execute(args):
tools.train_enroller(
args.algorithm,
args.extractor,
allow_missing_files = args.allow_missing_files,
force = args.force)
# enroll the models
......@@ -303,6 +309,7 @@ def execute(args):
indices = tools.indices(fs.model_ids(args.group), None if args.grid is None else args.grid.number_of_enrollment_jobs),
groups = [args.group],
types = ['N'],
allow_missing_files = args.allow_missing_files,
force = args.force)
else:
......@@ -313,6 +320,7 @@ def execute(args):
indices = tools.indices(fs.t_model_ids(args.group), None if args.grid is None else args.grid.number_of_enrollment_jobs),
groups = [args.group],
types = ['T'],
allow_missing_files = args.allow_missing_files,
force = args.force)
# compute scores
......@@ -325,6 +333,7 @@ def execute(args):
groups = [args.group],
types = [args.score_type],
force = args.force,
allow_missing_files = args.allow_missing_files,
write_compressed = args.write_compressed_score_files)
elif args.score_type in ['C', 'D']:
......@@ -335,12 +344,14 @@ def execute(args):
groups = [args.group],
types = [args.score_type],
force = args.force,
allow_missing_files = args.allow_missing_files,
write_compressed = args.write_compressed_score_files)
else:
tools.zt_norm(
groups = [args.group],
write_compressed = args.write_compressed_score_files)
write_compressed = args.write_compressed_score_files,
allow_missing_files = args.allow_missing_files)
# concatenate
elif args.sub_task == 'concatenate':
......
from bob.bio.base.preprocessor import Preprocessor
class DummyPreprocessor (Preprocessor):
def __init__(self, **kwargs):
def __init__(self, return_none=False, **kwargs):
Preprocessor.__init__(self)
self.return_none = return_none
def __call__(self, data, annotation):
"""Does nothing, simply converts the data type of the data, ignoring any annotation."""
if self.return_none:
return None
return data
preprocessor = DummyPreprocessor()
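The new return_none switch gives tests a deterministic way to simulate a failing preprocessor; a quick illustration of the behavior (instantiated as in the test parameters below):

# returns None for every input, which the pipeline now treats as a failed file
failing = DummyPreprocessor(return_none=True)
assert failing('raw data', None) is None
# the default instance still passes the data through unchanged
assert DummyPreprocessor()('raw data', None) == 'raw data'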
......@@ -63,7 +63,7 @@ def _verify(parameters, test_dir, sub_dir, ref_modifier="", score_modifier=('sco
shutil.rmtree(test_dir)
def test_verify_local():
def test_verify_config():
test_dir = tempfile.mkdtemp(prefix='bobtest_')
# define dummy parameters
parameters = [
......@@ -72,14 +72,14 @@ def test_verify_local():
'-e', os.path.join(dummy_dir, 'extractor.py'),
'-a', os.path.join(dummy_dir, 'algorithm.py'),
'--zt-norm',
'-vs', 'test_local',
'-vs', 'test_config',
'--temp-directory', test_dir,
'--result-directory', test_dir
]
print (bob.bio.base.tools.command_line(parameters))
_verify(parameters, test_dir, 'test_local')
_verify(parameters, test_dir, 'test_config')
def test_verify_resources():
......@@ -91,6 +91,7 @@ def test_verify_resources():
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'--allow-missing-files',
'-vs', 'test_resource',
'--temp-directory', test_dir,
'--result-directory', test_dir,
......@@ -113,7 +114,8 @@ def test_verify_commandline():
'--zt-norm',
'-vs', 'test_commandline',
'--temp-directory', test_dir,
'--result-directory', test_dir
'--result-directory', test_dir,
'--imports', 'bob.bio.base.test.dummy'
]
print (bob.bio.base.tools.command_line(parameters))
......@@ -133,13 +135,14 @@ def test_verify_parallel():
'-e', 'bob.bio.base.test.dummy.extractor.DummyExtractor()',
'-a', 'dummy',
'--zt-norm',
'--allow-missing-files',
'-vs', 'test_parallel',
'--temp-directory', test_dir,
'--result-directory', test_dir,
'-g', 'bob.bio.base.grid.Grid(grid_type = "local", number_of_parallel_processes = 2, scheduler_sleep_time = 0.1)',
'-G', test_database, '--run-local-scheduler', '--stop-on-failure',
'-D', 'success',
'--import', 'bob.io.image',
'--imports', 'bob.io.image', 'bob.bio.base.test.dummy',
'--preferred-package', 'bob.bio.base'
]
......@@ -202,7 +205,8 @@ def test_verify_fileset():
'-vs', 'test_fileset',
'--temp-directory', test_dir,
'--result-directory', test_dir,
'--preferred-package', 'bob.bio.base'
'--preferred-package', 'bob.bio.base',
'--imports', 'bob.bio.base.test.dummy'
]
print (bob.bio.base.tools.command_line(parameters))
......@@ -261,6 +265,51 @@ def test_verify_filelist():
shutil.rmtree(test_dir)
def test_verify_missing():
try:
import bob.db.verification.filelist
except ImportError:
raise SkipTest("Skipping test since bob.db.verification.filelist is not available")
test_dir = tempfile.mkdtemp(prefix='bobtest_')
# define dummy parameters
parameters = [
'-d', 'dummy',
'-p', 'bob.bio.base.test.dummy.preprocessor.DummyPreprocessor(return_none=True)',
'-e', 'dummy',
'-a', 'dummy',
'--zt-norm',
'--allow-missing-files',
'-vs', 'test_missing',
'--temp-directory', test_dir,
'--result-directory', test_dir,
'--preferred-package', 'bob.bio.base',
'--imports', 'bob.bio.base.test.dummy'
]
print (bob.bio.base.tools.command_line(parameters))
try:
from bob.bio.base.script.verify import main
main(parameters)
# assert that the score file exists
score_files = [os.path.join(test_dir, 'test_missing', 'Default', norm, 'scores-dev') for norm in ('nonorm', 'ztnorm')]
assert os.path.exists(score_files[0]), "Score file %s does not exist" % score_files[0]
assert os.path.exists(score_files[1]), "Score file %s does not exist" % score_files[1]
# assert that all scores are NaN
for i in (0,1):
# load scores
a, b = bob.measure.load.split_four_column(score_files[i])
assert numpy.all(numpy.isnan(a))
assert numpy.all(numpy.isnan(b))
finally:
shutil.rmtree(test_dir)
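For reference, the scores-dev files checked above use bob.measure's four-column text format (claimed client id, real client id, probe label, score), and split_four_column splits the lines into negatives and positives. With an always-failing preprocessor and --allow-missing-files, every score column simply reads nan; ids and labels here are illustrative:

1 1 probe_1 nan
1 2 probe_2 nan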
def test_fusion():
# tests that the fuse_scores script is doing something useful
test_dir = tempfile.mkdtemp(prefix='bobtest_')
......
......@@ -9,7 +9,7 @@ from .extractor import read_features
from .. import utils
def train_projector(algorithm, extractor, force = False):
def train_projector(algorithm, extractor, allow_missing_files = False, force = False):
"""Trains the feature projector using extracted features of the ``'world'`` group, if the algorithm requires projector training.
This function should only be called when the ``algorithm`` actually requires projector training.
......@@ -25,6 +25,9 @@ def train_projector(algorithm, extractor, force = False):
extractor : :py:class:`bob.bio.base.extractor.Extractor` or derived
The extractor, used for reading the training data.
allow_missing_files : bool
If set to ``True``, extracted files that are not found are silently ignored during training.
force : bool
If given, the projector file is regenerated, even if it already exists.
"""
......@@ -42,7 +45,7 @@ def train_projector(algorithm, extractor, force = False):
# train projector
logger.info("- Projection: loading training data")
train_files = fs.training_list('extracted', 'train_projector', arrange_by_client = algorithm.split_training_features_by_client)
train_features = read_features(train_files, extractor, algorithm.split_training_features_by_client)
train_features = read_features(train_files, extractor, algorithm.split_training_features_by_client, allow_missing_files)
if algorithm.split_training_features_by_client:
logger.info("- Projection: training projector '%s' using %d identities: ", fs.projector_file, len(train_files))
else:
......@@ -53,7 +56,7 @@ def train_projector(algorithm, extractor, force = False):
def project(algorithm, extractor, groups = None, indices = None, force = False):
def project(algorithm, extractor, groups = None, indices = None, allow_missing_files = False, force = False):
"""Projects the features for all files of the database.
The given ``algorithm`` is used to project all features required for the current experiment.
......@@ -77,6 +80,9 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
If specified, only the features for the given index range ``range(begin, end)`` should be projected.
This is usually given when parallel threads are executed.
allow_missing_files : bool
If set to ``True``, extracted files that are not found are silently ignored.
force : bool
If given, files are regenerated, even if they already exist.
"""
......@@ -106,6 +112,14 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
feature_file = feature_files[i]
projected_file = projected_files[i]
if not os.path.exists(feature_file):
if allow_missing_files:
logger.debug("... Cannot find extracted feature file %s; skipping", feature_file)
continue
else:
logger.error("Cannot find extracted feature file %s", feature_file)
if not utils.check_file(projected_file, force, 1000):
logger.debug("... Projecting features for file '%s'", feature_file)
# create output directory before reading the data file (sometimes required when relative directories, especially ones containing '..', are specified)
......@@ -122,7 +136,7 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
def train_enroller(algorithm, extractor, force = False):
def train_enroller(algorithm, extractor, allow_missing_files = False, force = False):
"""Trains the model enroller using the extracted or projected features, depending on your setup of the algorithm.
This function should only be called when the ``algorithm`` actually requires enroller training.
......@@ -139,6 +153,9 @@ def train_enroller(algorithm, extractor, force = False):
extractor : :py:class:`bob.bio.base.extractor.Extractor` or derived
The extractor, used for reading the training data, if unprojected features are used for enroller training.
allow_missing_files : bool
If set to ``True``, extracted files that are not found are silently ignored during training.
force : bool
If given, the enroller file is regenerated, even if it already exists.
"""
......@@ -163,7 +180,7 @@ def train_enroller(algorithm, extractor, force = False):
# load training data
train_files = fs.training_list('projected' if algorithm.use_projected_features_for_enrollment else 'extracted', 'train_enroller', arrange_by_client = True)
logger.info("- Enrollment: loading %d enroller training files", len(train_files))
train_features = read_features(train_files, reader, True)
train_features = read_features(train_files, reader, True, allow_missing_files)
# perform training
logger.info("- Enrollment: training enroller '%s' using %d identities", fs.enroller_file, len(train_features))
......@@ -171,7 +188,7 @@ def train_enroller(algorithm, extractor, force = False):
def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev', 'eval'], types = ['N', 'T'], force = False):
def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev', 'eval'], types = ['N', 'T'], allow_missing_files = False, force = False):
"""Enroll the models for the given groups, eventually for both models and T-Norm-models.
This function uses the extracted or projected features to compute the models, depending on your setup of the given ``algorithm``.
......@@ -199,6 +216,10 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
groups : some of ``('dev', 'eval')``
The list of groups, for which models should be enrolled.
allow_missing_files : bool
If set to ``True``, extracted or projected files that are not found are silently ignored.
If none of the enroll files are found, no model file will be written.
force : bool
If given, files are regenerated, even if they already exist.
"""
......@@ -230,6 +251,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
# Removes old file if required
if not utils.check_file(model_file, force, 1000):
enroll_files = fs.enroll_files(model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
if allow_missing_files:
enroll_files = utils.filter_missing_files(enroll_files)
if not enroll_files:
logger.debug("... Skipping model file %s since no feature file could be found", model_file)
continue
logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
bob.io.base.create_directories_safe(os.path.dirname(model_file))
......@@ -261,6 +289,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
# Removes old file if required
if not utils.check_file(t_model_file, force, 1000):
t_enroll_files = fs.t_enroll_files(t_model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
if allow_missing_files:
t_enroll_files = utils.filter_missing_files(t_enroll_files)
if not t_enroll_files:
logger.debug("... Skipping T-model file %s since no feature file could be found", t_model_file)
continue
logger.debug("... Enrolling T-model from %d features to file '%s'", len(t_enroll_files), t_model_file)
bob.io.base.create_directories_safe(os.path.dirname(t_model_file))
......
......@@ -135,6 +135,8 @@ def command_line_parser(description=__doc__, exclude_resources_from=[]):
help = 'Performs score calibration after the scores are computed.')
flag_group.add_argument('-z', '--zt-norm', action='store_true',
help = 'Enable the computation of ZT norms')
flag_group.add_argument('-A', '--allow-missing-files', action='store_true',
help = "If given, missing files will not stop the processing; this is helpful if not all files of the database can be processed; missing scores will be NaN.")
flag_group.add_argument('-r', '--parallel', type=int,
help = 'This flag is a shortcut for running the commands on the local machine with the given amount of parallel threads; equivalent to --grid bob.bio.base.grid.Grid("local", number_of_parallel_threads=X) --run-local-scheduler --stop-on-failure.')
......
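Since the new option is a plain store_true flag, args.allow_missing_files defaults to False, so existing behavior is unchanged unless -A/--allow-missing-files is passed; a self-contained sketch of that semantics:

import argparse

parser = argparse.ArgumentParser()
# mirrors the flag registered above
parser.add_argument('-A', '--allow-missing-files', action='store_true',
                    help='Do not stop on missing files; missing scores will be NaN.')
assert parser.parse_args(['-A']).allow_missing_files is True
assert parser.parse_args([]).allow_missing_files is False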
......@@ -8,7 +8,7 @@ from .FileSelector import FileSelector
from .preprocessor import read_preprocessed_data
from .. import utils
def train_extractor(extractor, preprocessor, force = False):
def train_extractor(extractor, preprocessor, allow_missing_files = False, force = False):
"""Trains the feature extractor using preprocessed data of the ``'world'`` group, if the feature extractor requires training.
This function should only be called when the ``extractor`` actually requires training.
......@@ -24,6 +24,9 @@ def train_extractor(extractor, preprocessor, force = False):
preprocessor : :py:class:`bob.bio.base.preprocessor.Preprocessor` or derived
The preprocessor, used for reading the preprocessed data.
allow_missing_files : bool
If set to ``True``, preprocessed data files that are not found are silently ignored during training.
force : bool
If given, the extractor file is regenerated, even if it already exists.
"""
......@@ -41,7 +44,7 @@ def train_extractor(extractor, preprocessor, force = False):
bob.io.base.create_directories_safe(os.path.dirname(fs.extractor_file))
# read training files
train_files = fs.training_list('preprocessed', 'train_extractor', arrange_by_client = extractor.split_training_data_by_client)
train_data = read_preprocessed_data(train_files, preprocessor, extractor.split_training_data_by_client)
train_data = read_preprocessed_data(train_files, preprocessor, extractor.split_training_data_by_client, allow_missing_files)
if extractor.split_training_data_by_client:
logger.info("- Extraction: training extractor '%s' using %d identities:", fs.extractor_file, len(train_files))
else:
......@@ -51,7 +54,7 @@ def train_extractor(extractor, preprocessor, force = False):
def extract(extractor, preprocessor, groups=None, indices = None, force = False):
def extract(extractor, preprocessor, groups=None, indices = None, allow_missing_files = False, force = False):
"""Extracts features from the preprocessed data using the given extractor.
The given ``extractor`` is used to extract all features required for the current experiment.
......@@ -75,6 +78,9 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
If specified, only the features for the given index range ``range(begin, end)`` should be extracted.
This is usually given when parallel threads are executed.
allow_missing_files : bool
If set to ``True``, preprocessed data files that are not found are silently ignored.
force : bool
If given, files are regenerated, even if they already exist.
"""
......@@ -96,6 +102,13 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
data_file = data_files[i]
feature_file = feature_files[i]
if not os.path.exists(data_file):
if allow_missing_files:
logger.debug("... Cannot find preprocessed data file %s; skipping", data_file)
continue
else:
logger.error("Cannot find preprocessed data file %s", data_file)
if not utils.check_file(feature_file, force, 1000):
logger.debug("... Extracting features for data file '%s'", data_file)
# create output directory before reading the data file (sometimes required when relative directories, especially ones containing '..', are specified)
......@@ -110,7 +123,7 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
logger.debug("... Skipping preprocessed data '%s' since feature file '%s' exists", data_file, feature_file)
def read_features(file_names, extractor, split_by_client = False):
def read_features(file_names, extractor, split_by_client = False, allow_missing_files = False):
"""read_features(file_names, extractor, split_by_client = False) -> extracted
Reads the extracted features from ``file_names`` using the given ``extractor``.
......@@ -128,11 +141,16 @@ def read_features(file_names, extractor, split_by_client = False):
split_by_client : bool
Indicates if the given ``file_names`` are split into groups.
allow_missing_files : bool
If set to ``True``, extracted files that are not found are silently ignored.
**Returns:**
extracted : [object] or [[object]]
The list of extracted features, in the same order as in the ``file_names``.
"""
file_names = utils.filter_missing_files(file_names, split_by_client, allow_missing_files)
if split_by_client:
return [[extractor.read_feature(f) for f in client_files] for client_files in file_names]
else:
......
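The utils.filter_missing_files helper itself is not part of this diff; judging from its call sites (three positional arguments here in read_features, single-argument calls in enroll above and _scores below), it plausibly behaves like this hypothetical sketch:

import os

def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):
    # hypothetical reconstruction -- the real implementation lives in bob.bio.base.utils
    if not allow_missing_files:
        # filtering disabled: return the list untouched
        return file_names
    if split_by_client:
        # keep only the existing files inside each client's sub-list
        return [[f for f in client_files if os.path.exists(f)] for client_files in file_names]
    return [f for f in file_names if os.path.exists(f)]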
......@@ -7,7 +7,8 @@ logger = logging.getLogger("bob.bio.base")
from .FileSelector import FileSelector
from .. import utils
def preprocess(preprocessor, groups = None, indices = None, force = False):
def preprocess(preprocessor, groups = None, indices = None, allow_missing_files = False, force = False):
"""Preprocesses the original data of the database with the given preprocessor.
The given ``preprocessor`` is used to preprocess all data required for the current experiment.
......@@ -26,6 +27,9 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
If specified, only the data for the given index range ``range(begin, end)`` should be preprocessed.
This is usually given when parallel threads are executed.
allow_missing_files : bool
If set to ``True``, files for which the preprocessor returns ``None`` are silently ignored.
force : bool
If given, files are regenerated, even if they already exist.
"""
......@@ -66,7 +70,11 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
# call the preprocessor
preprocessed_data = preprocessor(data, annotations)
if preprocessed_data is None:
logger.error("Preprocessing of file '%s' was not successful", file_name)
if allow_missing_files:
logger.debug("... Processing original data file '%s' was not successful", file_name)
else:
logger.error("Preprocessing of file '%s' was not successful", file_name)
continue
# write the data
preprocessor.write_data(preprocessed_data, preprocessed_data_file)
......@@ -76,7 +84,7 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
def read_preprocessed_data(file_names, preprocessor, split_by_client = False, allow_missing_files = False):
"""read_preprocessed_data(file_names, preprocessor, split_by_client = False) -> preprocessed
Reads the preprocessed data from ``file_names`` using the given preprocessor.
......@@ -94,11 +102,16 @@ def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
split_by_client : bool
Indicates if the given ``file_names`` are split into groups.
allow_missing_files : bool
If set to ``True``, preprocessed data files that are not found are silently ignored.
**Returns:**
preprocessed : [object] or [[object]]
The list of preprocessed data, in the same order as in the ``file_names``.
"""
file_names = utils.filter_missing_files(file_names, split_by_client, allow_missing_files)
if split_by_client:
return [[preprocessor.read_data(f) for f in client_files] for client_files in file_names]
else:
......
......@@ -13,13 +13,17 @@ from .FileSelector import FileSelector
from .extractor import read_features
from .. import utils
def _scores(algorithm, model, probes):
def _scores(algorithm, model, probes, allow_missing_files):
"""Compute scores for the given model and a list of probes.
"""
# the file selector object
fs = FileSelector.instance()
# the scores to be computed
scores = numpy.ndarray((1,len(probes)), 'float64')
# the scores to be computed; initialized with NaN
scores = numpy.full((1,len(probes)), numpy.nan, numpy.float64)
if allow_missing_files and model is None:
# if we have no model, all scores are undefined
return scores
# Loops over the probe sets
for i, probe_element in enumerate(probes):
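The switch from numpy.ndarray to numpy.full above is significant: numpy.ndarray((1, n), 'float64') returns uninitialized memory, whereas numpy.full guarantees that every score starts out as NaN, i.e., "undefined" until a probe is actually scored:

import numpy

scores = numpy.full((1, 3), numpy.nan, numpy.float64)
assert numpy.all(numpy.isnan(scores))  # all probes start as undefined
scores[0, 1] = 0.5                     # only computed scores overwrite the NaN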
......@@ -27,9 +31,17 @@ def _scores(algorithm, model, probes):
assert isinstance(probe_element, list)
# read probe from probe_set
probe = [algorithm.read_probe(probe_file) for probe_file in probe_element]
if allow_missing_files:
probe = utils.filter_missing_files(probe)
if not probe:
# we keep the NaN score
continue
# compute score
scores[0,i] = algorithm.score_for_multiple_probes(model, probe)
else:
if allow_missing_files and not os.path.exists(probe_element):
# we keep the NaN score
continue
# read probe
probe = algorithm.read_probe(probe_element)
# compute score
......@@ -84,7 +96,7 @@ def _close_written(score_file, f, write_compressed):
f.close()
def _save_scores(score_file, scores, probe_objects, client_id, write_compressed=False):
def _save_scores(score_file, scores, probe_objects, client_id, write_compressed):
"""Saves the scores of one model into a text file that can be interpreted by :py:func:`bob.measure.load.split_four_column`."""
assert len(probe_objects) == scores.shape[1]
......@@ -98,7 +110,7 @@ def _save_scores(score_file, scores, probe_objects, client_id, write_compressed=
_close_written(score_file, f, write_compressed)
def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed=False):
def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed, allow_missing_files):
"""Computes A scores for the models with the given model_ids. If ``compute_zt_norm = False``, these are the only scores that are actually computed."""
# the file selector object
fs = FileSelector.instance()
......@@ -117,11 +129,15 @@ def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compres
else:
# get probe files that are required for this model
current_probe_objects = fs.probe_objects_for_model(model_id, group)
model = algorithm.read_model(fs.model_file(model_id, group))
model_file = fs.model_file(model_id, group)
if allow_missing_files and not os.path.exists(model_file):
model = None
else:
model = algorithm.read_model(model_file)
# get the probe files
current_probe_files = fs.get_paths(current_probe_objects, 'projected' if algorithm.performs_projection else 'extracted')
# compute scores
a = _scores(algorithm, model, current_probe_files)
a = _scores(algorithm, model, current_probe_files, allow_missing_files)
if compute_zt_norm:
# write A matrix only when you want to compute zt norm afterwards
......@@ -131,7 +147,7 @@ def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compres
_save_scores(fs.no_norm_file(model_id, group), a, current_probe_objects, fs.client_id(model_id, group), write_compressed)
def _scores_b(algorithm, model_ids, group, force):
def _scores_b(algorithm, model_ids, group, force, allow_missing_files):
"""Computes B scores for the given model ids."""
# the file selector object
fs = FileSelector.instance()
......@@ -149,11 +165,15 @@ def _scores_b(algorithm, model_ids, group, force):
if utils.check_file(score_file, force):
logger.warn("Score file '%s' already exists.", score_file)
else:
model = algorithm.read_model(fs.model_file(model_id, group))
b = _scores(algorithm, model, z_probe_files)
model_file = fs.model_file(model_id, group)
if allow_missing_files and not os.path.exists(model_file):
model = None
else:
model = algorithm.read_model(model_file)
b = _scores(algorithm, model, z_probe_files, allow_missing_files)
bob.io.base.save(b, score_file, True)
def _scores_c(algorithm, t_model_ids, group, force):
def _scores_c(algorithm, t_model_ids, group, force, allow_missing_files):
"""Computes C scores for the given t-norm model ids."""
# the file selector object
fs = FileSelector.instance()
......@@ -171,11 +191,15 @@ def _scores_c(algorithm, t_model_ids, group, force):
if utils.check_file(score_file, force):
logger.warn("Score file '%s' already exists.", score_file)
else:
t_model = algorithm.read_model(fs.t_model_file(t_model_id, group))
c = _scores(algorithm, t_model, probe_files)
t_model_file = fs.t_model_file(t_model_id, group)
if allow_missing_files and not os.path.exists(t_model_file):
t_model = None
else:
t_model = algorithm.read_model(t_model_file)
c = _scores(algorithm, t_model, probe_files, allow_missing_files)