Commit 9b6c3d8a authored by Manuel Günther
Browse files

Removed str() conversion of file names as file.make_path() is supposed to return str

parent d9750158
......@@ -362,9 +362,8 @@ class DatabaseBob (Database):
**Returns:**
paths : [str] or [[str]]
paths : [str]
The paths extracted for the files, in the same order.
If this database provides file sets, a list of lists of file names is returned, one sub-list for each file set.
"""
return self.database.original_file_names(files, self.check_existence)
......
......@@ -103,8 +103,8 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
logger.info("- Projection: projecting %d features from directory '%s' to directory '%s'", len(index_range), fs.directories['extracted'], fs.directories['projected'])
# extract the features
for i in index_range:
feature_file = str(feature_files[i])
projected_file = str(projected_files[i])
feature_file = feature_files[i]
projected_file = projected_files[i]
if not utils.check_file(projected_file, force, 1000):
logger.debug("... Projecting features for file '%s'", feature_file)
......@@ -224,7 +224,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.info("- Enrollment: enrolling models of group '%s'", group)
for model_id in model_ids:
# Path to the model
model_file = str(fs.model_file(model_id, group))
model_file = fs.model_file(model_id, group)
# Removes old file if required
if not utils.check_file(model_file, force, 1000):
......@@ -232,7 +232,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
# load all files into memory
enroll_features = [reader.read_feature(str(enroll_file)) for enroll_file in enroll_files]
enroll_features = [reader.read_feature(enroll_file) for enroll_file in enroll_files]
model = algorithm.enroll(enroll_features)
# save the model
......@@ -255,7 +255,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.info("- Enrollment: enrolling T-models of group '%s'", group)
for t_model_id in t_model_ids:
# Path to the model
t_model_file = str(fs.t_model_file(t_model_id, group))
t_model_file = fs.t_model_file(t_model_id, group)
# Removes old file if required
if not utils.check_file(t_model_file, force, 1000):
......@@ -263,7 +263,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
logger.debug("... Enrolling T-model from %d features to file '%s'", len(t_enroll_files), t_model_file)
# load all files into memory
t_enroll_features = [reader.read_feature(str(t_enroll_file)) for t_enroll_file in t_enroll_files]
t_enroll_features = [reader.read_feature(t_enroll_file) for t_enroll_file in t_enroll_files]
t_model = algorithm.enroll(t_enroll_features)
# save model
......
......@@ -93,8 +93,8 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
logger.info("- Extraction: extracting %d features from directory '%s' to directory '%s'", len(index_range), fs.directories['preprocessed'], fs.directories['extracted'])
for i in index_range:
data_file = str(data_files[i])
feature_file = str(feature_files[i])
data_file = data_files[i]
feature_file = feature_files[i]
if not utils.check_file(feature_file, force, 1000):
logger.debug("... Extracting features for data file '%s'", data_file)
......@@ -133,6 +133,6 @@ def read_features(file_names, extractor, split_by_client = False):
The list of extracted features, in the same order as in the ``file_names``.
"""
if split_by_client:
return [[extractor.read_feature(str(f)) for f in client_files] for client_files in file_names]
return [[extractor.read_feature(f) for f in client_files] for client_files in file_names]
else:
return [extractor.read_feature(str(f)) for f in file_names]
return [extractor.read_feature(f) for f in file_names]
......@@ -50,8 +50,8 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
# iterate over the selected files
for i in index_range:
preprocessed_data_file = str(preprocessed_data_files[i])
file_name = str(data_files[i])
preprocessed_data_file = preprocessed_data_files[i]
file_name = data_files[i]
# check for existence
if not utils.check_file(preprocessed_data_file, force, 1000):
......@@ -99,6 +99,6 @@ def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
The list of preprocessed data, in the same order as in the ``file_names``.
"""
if split_by_client:
return [[preprocessor.read_data(str(f)) for f in client_files] for client_files in file_names]
return [[preprocessor.read_data(f) for f in client_files] for client_files in file_names]
else:
return [preprocessor.read_data(str(f)) for f in file_names]
return [preprocessor.read_data(f) for f in file_names]
......@@ -26,7 +26,7 @@ def _scores(algorithm, model, probes):
if fs.uses_probe_file_sets():
assert isinstance(probe_element, list)
# read probe from probe_set
probe = [algorithm.read_probe(str(probe_file)) for probe_file in probe_element]
probe = [algorithm.read_probe(probe_file) for probe_file in probe_element]
# compute score
scores[0,i] = algorithm.score_for_multiple_probes(model, probe)
else:
......@@ -244,7 +244,7 @@ def compute_scores(algorithm, compute_zt_norm, force = False, indices = None, gr
fs = FileSelector.instance()
# load the projector and the enroller, if needed
if algorithm.requires_projector_training:
if algorithm.requires_projector_training:
algorithm.load_projector(fs.projector_file)
algorithm.load_enroller(fs.enroller_file)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment