From 6674ddb01c4115ff05e33d0f3ed29fe68c1204a5 Mon Sep 17 00:00:00 2001
From: Manuel Gunther <siebenkopf@googlemail.com>
Date: Thu, 28 Jan 2016 15:21:18 -0700
Subject: [PATCH] Create output directories before reading input files to avoid
 read failures in some cases

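The reordering follows the pattern sketched below (a minimal illustration,
not code taken from this patch; project_one and its file-path arguments are
hypothetical): the output directory is created first, so that a relative
output path, in particular one containing "..", already resolves to an
existing directory before any input file is read.

    import os
    import bob.io.base

    def project_one(algorithm, extractor, feature_file, projected_file):
        # create the output directory before touching the input ...
        bob.io.base.create_directories_safe(os.path.dirname(projected_file))
        # ... then read, project and write as before
        feature = extractor.read_feature(feature_file)
        projected = algorithm.project(feature)
        algorithm.write_feature(projected, projected_file)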
---
 bob/bio/base/tools/algorithm.py    | 7 ++++---
 bob/bio/base/tools/extractor.py    | 3 ++-
 bob/bio/base/tools/preprocessor.py | 3 ++-
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/bob/bio/base/tools/algorithm.py b/bob/bio/base/tools/algorithm.py
index 6fdaf318..01a526de 100644
--- a/bob/bio/base/tools/algorithm.py
+++ b/bob/bio/base/tools/algorithm.py
@@ -108,12 +108,13 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
 
     if not utils.check_file(projected_file, force, 1000):
       logger.debug("... Projecting features for file '%s'", feature_file)
+      # create the output directory before reading the data file; this is sometimes required when relative directories (especially ones containing "..") are specified
+      bob.io.base.create_directories_safe(os.path.dirname(projected_file))
       # load feature
       feature = extractor.read_feature(feature_file)
       # project feature
       projected = algorithm.project(feature)
       # write it
-      bob.io.base.create_directories_safe(os.path.dirname(projected_file))
       algorithm.write_feature(projected, projected_file)
 
     else:
@@ -230,13 +231,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
         if not utils.check_file(model_file, force, 1000):
           enroll_files = fs.enroll_files(model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
           logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
+          bob.io.base.create_directories_safe(os.path.dirname(model_file))
 
           # load all files into memory
           enroll_features = [reader.read_feature(enroll_file) for enroll_file in enroll_files]
 
           model = algorithm.enroll(enroll_features)
           # save the model
-          bob.io.base.create_directories_safe(os.path.dirname(model_file))
           algorithm.write_model(model, model_file)
 
         else:
@@ -261,13 +262,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
         if not utils.check_file(t_model_file, force, 1000):
           t_enroll_files = fs.t_enroll_files(t_model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
           logger.debug("... Enrolling T-model from %d features to file '%s'", len(t_enroll_files), t_model_file)
+          bob.io.base.create_directories_safe(os.path.dirname(t_model_file))
 
           # load all files into memory
           t_enroll_features = [reader.read_feature(t_enroll_file) for t_enroll_file in t_enroll_files]
 
           t_model = algorithm.enroll(t_enroll_features)
           # save model
-          bob.io.base.create_directories_safe(os.path.dirname(t_model_file))
           algorithm.write_model(t_model, t_model_file)
         else:
           logger.debug("... Skipping T-model file '%s' since it exists", t_model_file)
diff --git a/bob/bio/base/tools/extractor.py b/bob/bio/base/tools/extractor.py
index 08a659cd..384a9991 100644
--- a/bob/bio/base/tools/extractor.py
+++ b/bob/bio/base/tools/extractor.py
@@ -98,12 +98,13 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
 
     if not utils.check_file(feature_file, force, 1000):
       logger.debug("... Extracting features for data file '%s'", data_file)
+      # create the output directory before reading the data file; this is sometimes required when relative directories (especially ones containing "..") are specified
+      bob.io.base.create_directories_safe(os.path.dirname(feature_file))
       # load data
       data = preprocessor.read_data(data_file)
       # extract feature
       feature = extractor(data)
       # write feature
-      bob.io.base.create_directories_safe(os.path.dirname(feature_file))
       extractor.write_feature(feature, feature_file)
     else:
       logger.debug("... Skipping preprocessed data '%s' since feature file '%s' exists", data_file, feature_file)
diff --git a/bob/bio/base/tools/preprocessor.py b/bob/bio/base/tools/preprocessor.py
index 07aa4b71..8cf350a2 100644
--- a/bob/bio/base/tools/preprocessor.py
+++ b/bob/bio/base/tools/preprocessor.py
@@ -57,6 +57,8 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
     if not utils.check_file(preprocessed_data_file, force, 1000):
       logger.debug("... Processing original data file '%s'", file_name)
       data = preprocessor.read_original_data(file_name)
+      # create the output directory early; this is sometimes required when relative directories (especially ones containing "..") are specified
+      bob.io.base.create_directories_safe(os.path.dirname(preprocessed_data_file))
 
       # get the annotations; might be None
       annotations = fs.get_annotations(annotation_list[i])
@@ -67,7 +69,6 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
         logger.error("Preprocessing of file '%s' was not successful", file_name)
 
       # write the data
-      bob.io.base.create_directories_safe(os.path.dirname(preprocessed_data_file))
       preprocessor.write_data(preprocessed_data, preprocessed_data_file)
 
     else:
-- 
GitLab