diff --git a/bob/bio/base/script/verify.py b/bob/bio/base/script/verify.py
index 861706efc1b7f4cde9be5d01cd404e07bd4a9be6..cd80550a23cb90103894a13308df3631f6df4035 100644
--- a/bob/bio/base/script/verify.py
+++ b/bob/bio/base/script/verify.py
@@ -252,6 +252,7 @@ def execute(args):
         args.preprocessor,
         groups = tools.groups(args),
         indices = tools.indices(fs.original_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_preprocessing_jobs),
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # train the feature extractor
@@ -259,6 +260,7 @@ def execute(args):
     tools.train_extractor(
         args.extractor,
         args.preprocessor,
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # extract the features
@@ -268,6 +270,7 @@ def execute(args):
         args.preprocessor,
         groups = tools.groups(args),
         indices = tools.indices(fs.preprocessed_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_extraction_jobs),
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # train the feature projector
@@ -275,6 +278,7 @@ def execute(args):
     tools.train_projector(
         args.algorithm,
         args.extractor,
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # project the features
@@ -284,6 +288,7 @@ def execute(args):
         args.extractor,
         groups = tools.groups(args),
         indices = tools.indices(fs.preprocessed_data_list(groups=tools.groups(args)), None if args.grid is None else args.grid.number_of_projection_jobs),
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # train the model enroller
@@ -291,6 +296,7 @@ def execute(args):
     tools.train_enroller(
         args.algorithm,
         args.extractor,
+        allow_missing_files = args.allow_missing_files,
         force = args.force)
 
   # enroll the models
@@ -303,6 +309,7 @@ def execute(args):
           indices = tools.indices(fs.model_ids(args.group), None if args.grid is None else args.grid.number_of_enrollment_jobs),
           groups = [args.group],
           types = ['N'],
+          allow_missing_files = args.allow_missing_files,
           force = args.force)
 
     else:
@@ -313,6 +320,7 @@ def execute(args):
           indices = tools.indices(fs.t_model_ids(args.group), None if args.grid is None else args.grid.number_of_enrollment_jobs),
           groups = [args.group],
           types = ['T'],
+          allow_missing_files = args.allow_missing_files,
           force = args.force)
 
   # compute scores
@@ -325,6 +333,7 @@ def execute(args):
           groups = [args.group],
           types = [args.score_type],
           force = args.force,
+          allow_missing_files = args.allow_missing_files,
           write_compressed = args.write_compressed_score_files)
 
     elif args.score_type in ['C', 'D']:
@@ -335,12 +344,14 @@ def execute(args):
           groups = [args.group],
           types = [args.score_type],
           force = args.force,
+          allow_missing_files = args.allow_missing_files,
           write_compressed = args.write_compressed_score_files)
 
     else:
       tools.zt_norm(
           groups = [args.group],
-          write_compressed = args.write_compressed_score_files)
+          write_compressed = args.write_compressed_score_files,
+          allow_missing_files = args.allow_missing_files)
 
   # concatenate
   elif args.sub_task == 'concatenate':
diff --git a/bob/bio/base/test/dummy/preprocessor.py b/bob/bio/base/test/dummy/preprocessor.py
index 9584d9fd34562decdb63e9d058105200a8dee1f5..fd32e3ce9776efa53e1b638070688b866094b376 100644
--- a/bob/bio/base/test/dummy/preprocessor.py
+++ b/bob/bio/base/test/dummy/preprocessor.py
@@ -1,11 +1,14 @@
 from bob.bio.base.preprocessor import Preprocessor
 
 class DummyPreprocessor (Preprocessor):
-  def __init__(self, **kwargs):
+  def __init__(self, return_none=False, **kwargs):
     Preprocessor.__init__(self)
+    self.return_none = return_none
 
   def __call__(self, data, annotation):
     """Does nothing, simply converts the data type of the data, ignoring any annotation."""
+    if self.return_none:
+      return None
     return data
 
 preprocessor = DummyPreprocessor()
diff --git a/bob/bio/base/test/test_scripts.py b/bob/bio/base/test/test_scripts.py
index 2f755da0777f57e779732c30fc28c07016d4b44e..b7d2bfbc6282dfa15a83f00ca34a1598ee34594e 100644
--- a/bob/bio/base/test/test_scripts.py
+++ b/bob/bio/base/test/test_scripts.py
@@ -63,7 +63,7 @@ def _verify(parameters, test_dir, sub_dir, ref_modifier="", score_modifier=('sco
     shutil.rmtree(test_dir)
 
 
-def test_verify_local():
+def test_verify_config():
   test_dir = tempfile.mkdtemp(prefix='bobtest_')
   # define dummy parameters
   parameters = [
@@ -72,14 +72,14 @@ def test_verify_local():
       '-e', os.path.join(dummy_dir, 'extractor.py'),
       '-a', os.path.join(dummy_dir, 'algorithm.py'),
       '--zt-norm',
-      '-vs', 'test_local',
+      '-vs', 'test_config',
       '--temp-directory', test_dir,
       '--result-directory', test_dir
   ]
 
   print (bob.bio.base.tools.command_line(parameters))
 
-  _verify(parameters, test_dir, 'test_local')
+  _verify(parameters, test_dir, 'test_config')
 
 
 def test_verify_resources():
@@ -91,6 +91,7 @@ def test_verify_resources():
       '-e', 'dummy',
       '-a', 'dummy',
       '--zt-norm',
+      '--allow-missing-files',
       '-vs', 'test_resource',
       '--temp-directory', test_dir,
       '--result-directory', test_dir,
@@ -113,7 +114,8 @@ def test_verify_commandline():
       '--zt-norm',
       '-vs', 'test_commandline',
       '--temp-directory', test_dir,
-      '--result-directory', test_dir
+      '--result-directory', test_dir,
+      '--imports', 'bob.bio.base.test.dummy'
   ]
 
   print (bob.bio.base.tools.command_line(parameters))
@@ -133,13 +135,14 @@ def test_verify_parallel():
       '-e', 'bob.bio.base.test.dummy.extractor.DummyExtractor()',
       '-a', 'dummy',
       '--zt-norm',
+      '--allow-missing-files',
       '-vs', 'test_parallel',
       '--temp-directory', test_dir,
       '--result-directory', test_dir,
       '-g', 'bob.bio.base.grid.Grid(grid_type = "local", number_of_parallel_processes = 2, scheduler_sleep_time = 0.1)',
       '-G', test_database, '--run-local-scheduler', '--stop-on-failure',
       '-D', 'success',
-      '--import', 'bob.io.image',
+      '--imports', 'bob.io.image', 'bob.bio.base.test.dummy',
       '--preferred-package', 'bob.bio.base'
   ]
 
@@ -202,7 +205,8 @@ def test_verify_fileset():
       '-vs', 'test_fileset',
       '--temp-directory', test_dir,
       '--result-directory', test_dir,
-      '--preferred-package', 'bob.bio.base'
+      '--preferred-package', 'bob.bio.base',
+      '--imports', 'bob.bio.base.test.dummy'
   ]
 
   print (bob.bio.base.tools.command_line(parameters))
@@ -261,6 +265,51 @@ def test_verify_filelist():
     shutil.rmtree(test_dir)
 
 
+def test_verify_missing():
+  try:
+    import bob.db.verification.filelist
+  except ImportError:
+    raise SkipTest("Skipping test since bob.db.verification.filelist is not available")
+  test_dir = tempfile.mkdtemp(prefix='bobtest_')
+  # define dummy parameters
+  parameters = [
+      '-d', 'dummy',
+      '-p', 'bob.bio.base.test.dummy.preprocessor.DummyPreprocessor(return_none=True)',
+      '-e', 'dummy',
+      '-a', 'dummy',
+      '--zt-norm',
+      '--allow-missing-files',
+      '-vs', 'test_missing',
+      '--temp-directory', test_dir,
+      '--result-directory', test_dir,
+      '--preferred-package', 'bob.bio.base',
+      '--imports', 'bob.bio.base.test.dummy'
+  ]
+
+  print (bob.bio.base.tools.command_line(parameters))
+
+  try:
+    from bob.bio.base.script.verify import main
+    main(parameters)
+
+    # assert that the score file exists
+    score_files = [os.path.join(test_dir, 'test_missing', 'Default', norm, 'scores-dev') for norm in ('nonorm', 'ztnorm')]
+    assert os.path.exists(score_files[0]), "Score file %s does not exist" % score_files[0]
+    assert os.path.exists(score_files[1]), "Score file %s does not exist" % score_files[1]
+
+    # assert that all scores are NaN
+
+    for i in (0,1):
+      # load scores
+      a, b = bob.measure.load.split_four_column(score_files[i])
+
+      assert numpy.all(numpy.isnan(a))
+      assert numpy.all(numpy.isnan(b))
+
+  finally:
+    shutil.rmtree(test_dir)
+
+
 def test_fusion():
   # tests that the fuse_scores script is doing something useful
   test_dir = tempfile.mkdtemp(prefix='bobtest_')
diff --git a/bob/bio/base/tools/algorithm.py b/bob/bio/base/tools/algorithm.py
index 01a526deca9a70b1b98e642d5da9bdd0cac2c98b..e8e7c8375e40c6cf87e9cd0a44dd8c4851bac73a 100644
--- a/bob/bio/base/tools/algorithm.py
+++ b/bob/bio/base/tools/algorithm.py
@@ -9,7 +9,7 @@ from .extractor import read_features
 from .. import utils
 
 
-def train_projector(algorithm, extractor, force = False):
+def train_projector(algorithm, extractor, allow_missing_files = False, force = False):
   """Trains the feature projector using extracted features of the ``'world'`` group, if the algorithm requires projector training.
 
   This function should only be called, when the ``algorithm`` actually requires projector training.
@@ -25,6 +25,9 @@ def train_projector(algorithm, extractor, force = False):
   extractor : py:class:`bob.bio.base.extractor.Extractor` or derived
     The extractor, used for reading the training data.
 
+  allow_missing_files : bool
+    If set to ``True``, extracted files that are not found are silently ignored during training.
+
   force : bool
     If given, the projector file is regenerated, even if it already exists.
   """
@@ -42,7 +45,7 @@ def train_projector(algorithm, extractor, force = False):
     # train projector
     logger.info("- Projection: loading training data")
     train_files = fs.training_list('extracted', 'train_projector', arrange_by_client = algorithm.split_training_features_by_client)
-    train_features = read_features(train_files, extractor, algorithm.split_training_features_by_client)
+    train_features = read_features(train_files, extractor, algorithm.split_training_features_by_client, allow_missing_files)
     if algorithm.split_training_features_by_client:
       logger.info("- Projection: training projector '%s' using %d identities: ", fs.projector_file, len(train_files))
     else:
@@ -53,7 +56,7 @@ def train_projector(algorithm, extractor, force = False):
 
 
 
-def project(algorithm, extractor, groups = None, indices = None, force = False):
+def project(algorithm, extractor, groups = None, indices = None, allow_missing_files = False, force = False):
   """Projects the features for all files of the database.
 
   The given ``algorithm`` is used to project all features required for the current experiment.
@@ -77,6 +80,9 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
     If specified, only the features for the given index range ``range(begin, end)`` should be projected.
     This is usually given, when parallel threads are executed.
 
+  allow_missing_files : bool
+    If set to ``True``, extracted files that are not found are silently ignored.
+
   force : bool
     If given, files are regenerated, even if they already exist.
   """
@@ -106,6 +112,13 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
     feature_file = feature_files[i]
     projected_file = projected_files[i]
 
+    if not os.path.exists(feature_file):
+      if allow_missing_files:
+        logger.debug("... Cannot find extracted feature file %s; skipping", feature_file)
+        continue
+      else:
+        logger.error("Cannot find extracted feature file %s", feature_file)
+
     if not utils.check_file(projected_file, force, 1000):
       logger.debug("... Projecting features for file '%s'", feature_file)
       # create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
@@ -122,7 +136,7 @@ def project(algorithm, extractor, groups = None, indices = None, force = False):
 
 
 
-def train_enroller(algorithm, extractor, force = False):
+def train_enroller(algorithm, extractor, allow_missing_files = False, force = False):
   """Trains the model enroller using the extracted or projected features, depending on your setup of the algorithm.
 
   This function should only be called, when the ``algorithm`` actually requires enroller training.
@@ -139,6 +153,9 @@ def train_enroller(algorithm, extractor, force = False):
   extractor : py:class:`bob.bio.base.extractor.Extractor` or derived
     The extractor, used for reading the training data, if unprojected features are used for enroller training.
 
+  allow_missing_files : bool
+    If set to ``True``, extracted files that are not found are silently ignored during training.
+
   force : bool
     If given, the enroller file is regenerated, even if it already exists.
   """
@@ -163,7 +180,7 @@ def train_enroller(algorithm, extractor, force = False):
     # load training data
     train_files = fs.training_list('projected' if algorithm.use_projected_features_for_enrollment else 'extracted', 'train_enroller', arrange_by_client = True)
     logger.info("- Enrollment: loading %d enroller training files", len(train_files))
-    train_features = read_features(train_files, reader, True)
+    train_features = read_features(train_files, reader, True, allow_missing_files)
 
     # perform training
     logger.info("- Enrollment: training enroller '%s' using %d identities", fs.enroller_file, len(train_features))
@@ -171,7 +188,7 @@ def train_enroller(algorithm, extractor, force = False):
 
 
 
-def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev', 'eval'], types = ['N', 'T'], force = False):
+def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev', 'eval'], types = ['N', 'T'], allow_missing_files = False, force = False):
   """Enroll the models for the given groups, eventually for both models and T-Norm-models.
      This function uses the extracted or projected features to compute the models, depending on your setup of the given ``algorithm``.
 
@@ -199,6 +216,10 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
   groups : some of ``('dev', 'eval')``
     The list of groups, for which models should be enrolled.
 
+  allow_missing_files : bool
+    If set to ``True``, extracted or projected files that are not found are silently ignored.
+    If none of the enroll files are found, no model file will be written.
+
   force : bool
     If given, files are regenerated, even if they already exist.
   """
@@ -230,6 +251,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
         # Removes old file if required
         if not utils.check_file(model_file, force, 1000):
           enroll_files = fs.enroll_files(model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
+
+          if allow_missing_files:
+            enroll_files = utils.filter_missing_files(enroll_files)
+            if not enroll_files:
+              logger.debug("... Skipping model file %s since no feature file could be found", model_file)
+              continue
+
           logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
           bob.io.base.create_directories_safe(os.path.dirname(model_file))
 
@@ -261,6 +289,13 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
         # Removes old file if required
         if not utils.check_file(t_model_file, force, 1000):
           t_enroll_files = fs.t_enroll_files(t_model_id, group, 'projected' if algorithm.use_projected_features_for_enrollment else 'extracted')
+
+          if allow_missing_files:
+            t_enroll_files = utils.filter_missing_files(t_enroll_files)
+            if not t_enroll_files:
+              logger.debug("... Skipping T-model file %s since no feature file could be found", t_model_file)
+              continue
+
           logger.debug("... Enrolling T-model from %d features to file '%s'", len(t_enroll_files), t_model_file)
           bob.io.base.create_directories_safe(os.path.dirname(t_model_file))
 
diff --git a/bob/bio/base/tools/command_line.py b/bob/bio/base/tools/command_line.py
index 495f5865e96be8e43e5712b3e216aade1f9d2862..d584083abbfd28691adbd4333e16316fc7f68cd4 100644
--- a/bob/bio/base/tools/command_line.py
+++ b/bob/bio/base/tools/command_line.py
@@ -135,6 +135,8 @@ def command_line_parser(description=__doc__, exclude_resources_from=[]):
       help = 'Performs score calibration after the scores are computed.')
   flag_group.add_argument('-z', '--zt-norm', action='store_true',
       help = 'Enable the computation of ZT norms')
+  flag_group.add_argument('-A', '--allow-missing-files', action='store_true',
+      help = "If given, missing files will not stop the processing; this is helpful if not all files of the database can be processed; missing scores will be NaN.")
   flag_group.add_argument('-r', '--parallel', type=int,
       help = 'This flag is a shortcut for running the commands on the local machine with the given amount of parallel threads; equivalent to --grid bob.bio.base.grid.Grid("local", number_of_parallel_threads=X) --run-local-scheduler --stop-on-failure.')
 
diff --git a/bob/bio/base/tools/extractor.py b/bob/bio/base/tools/extractor.py
index 00d0e93243c3dd5ae54fc93da73bf9011e001509..30a19cbe1758cfe7694c8eb9f4c3fc52fabb055b 100644
--- a/bob/bio/base/tools/extractor.py
+++ b/bob/bio/base/tools/extractor.py
@@ -8,7 +8,7 @@ from .FileSelector import FileSelector
 from .preprocessor import read_preprocessed_data
 from .. import utils
 
-def train_extractor(extractor, preprocessor, force = False):
+def train_extractor(extractor, preprocessor, allow_missing_files = False, force = False):
   """Trains the feature extractor using preprocessed data of the ``'world'`` group, if the feature extractor requires training.
 
   This function should only be called, when the ``extractor`` actually requires training.
@@ -24,6 +24,9 @@ def train_extractor(extractor, preprocessor, force = False):
   preprocessor : py:class:`bob.bio.base.preprocessor.Preprocessor` or derived
     The preprocessor, used for reading the preprocessed data.
 
+  allow_missing_files : bool
+    If set to ``True``, preprocessed data files that are not found are silently ignored during training.
+
   force : bool
     If given, the extractor file is regenerated, even if it already exists.
   """
@@ -41,7 +44,7 @@ def train_extractor(extractor, preprocessor, force = False):
     bob.io.base.create_directories_safe(os.path.dirname(fs.extractor_file))
     # read training files
     train_files = fs.training_list('preprocessed', 'train_extractor', arrange_by_client = extractor.split_training_data_by_client)
-    train_data = read_preprocessed_data(train_files, preprocessor, extractor.split_training_data_by_client)
+    train_data = read_preprocessed_data(train_files, preprocessor, extractor.split_training_data_by_client, allow_missing_files)
     if extractor.split_training_data_by_client:
       logger.info("- Extraction: training extractor '%s' using %d identities:", fs.extractor_file, len(train_files))
     else:
@@ -51,7 +54,7 @@ def train_extractor(extractor, preprocessor, force = False):
 
 
 
-def extract(extractor, preprocessor, groups=None, indices = None, force = False):
+def extract(extractor, preprocessor, groups=None, indices = None, allow_missing_files = False, force = False):
   """Extracts features from the preprocessed data using the given extractor.
 
   The given ``extractor`` is used to extract all features required for the current experiment.
@@ -75,6 +78,9 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
     If specified, only the features for the given index range ``range(begin, end)`` should be extracted.
     This is usually given, when parallel threads are executed.
 
+  allow_missing_files : bool
+    If set to ``True``, preprocessed data files that are not found are silently ignored.
+
   force : bool
     If given, files are regenerated, even if they already exist.
   """
@@ -96,6 +102,13 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
     data_file = data_files[i]
     feature_file = feature_files[i]
 
+    if not os.path.exists(data_file):
+      if allow_missing_files:
+        logger.debug("... Cannot find preprocessed data file %s; skipping", data_file)
+        continue
+      else:
+        logger.error("Cannot find preprocessed data file %s", data_file)
+
     if not utils.check_file(feature_file, force, 1000):
       logger.debug("... Extracting features for data file '%s'", data_file)
       # create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
@@ -110,7 +123,7 @@ def extract(extractor, preprocessor, groups=None, indices = None, force = False)
       logger.debug("... Skipping preprocessed data '%s' since feature file '%s' exists", data_file, feature_file)
 
 
-def read_features(file_names, extractor, split_by_client = False):
-  """read_features(file_names, extractor, split_by_client = False) -> extracted
+def read_features(file_names, extractor, split_by_client = False, allow_missing_files = False):
+  """read_features(file_names, extractor, split_by_client = False, allow_missing_files = False) -> extracted
 
   Reads the extracted features from ``file_names`` using the given ``extractor``.
@@ -128,11 +141,16 @@ def read_features(file_names, extractor, split_by_client = False):
   split_by_client : bool
     Indicates if the given ``file_names`` are split into groups.
 
+  allow_missing_files : bool
+    If set to ``True``, extracted files that are not found are silently ignored.
+
   **Returns:**
 
   extracted : [object] or [[object]]
     The list of extracted features, in the same order as in the ``file_names``.
   """
+  file_names = utils.filter_missing_files(file_names, split_by_client, allow_missing_files)
+
   if split_by_client:
     return [[extractor.read_feature(f) for f in client_files] for client_files in file_names]
   else:
diff --git a/bob/bio/base/tools/preprocessor.py b/bob/bio/base/tools/preprocessor.py
index 8cf350a2f13879d0b33dbb05be5ed0c8f5627f85..d28b2852b47f4601351b5484b32f19e3d558f15a 100644
--- a/bob/bio/base/tools/preprocessor.py
+++ b/bob/bio/base/tools/preprocessor.py
@@ -7,7 +7,8 @@ logger = logging.getLogger("bob.bio.base")
 from .FileSelector import FileSelector
 from .. import utils
 
-def preprocess(preprocessor, groups = None, indices = None, force = False):
+
+def preprocess(preprocessor, groups = None, indices = None, allow_missing_files = False, force = False):
   """Preprocesses the original data of the database with the given preprocessor.
 
   The given ``preprocessor`` is used to preprocess all data required for the current experiment.
@@ -26,6 +27,9 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
     If specified, only the data for the given index range ``range(begin, end)`` should be preprocessed.
     This is usually given, when parallel threads are executed.
 
+  allow_missing_files : bool
+    If set to ``True``, files for which the preprocessor returns ``None`` are silently ignored.
+
   force : bool
     If given, files are regenerated, even if they already exist.
   """
@@ -66,7 +70,11 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
       # call the preprocessor
       preprocessed_data = preprocessor(data, annotations)
       if preprocessed_data is None:
-        logger.error("Preprocessing of file '%s' was not successful", file_name)
+        if allow_missing_files:
+          logger.debug("... Processing original data file '%s' was not successful", file_name)
+        else:
+          logger.error("Preprocessing of file '%s' was not successful", file_name)
+        continue
 
       # write the data
       preprocessor.write_data(preprocessed_data, preprocessed_data_file)
@@ -76,7 +84,7 @@ def preprocess(preprocessor, groups = None, indices = None, force = False):
 
 
 
-def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
-  """read_preprocessed_data(file_names, preprocessor, split_by_client = False) -> preprocessed
+def read_preprocessed_data(file_names, preprocessor, split_by_client = False, allow_missing_files = False):
+  """read_preprocessed_data(file_names, preprocessor, split_by_client = False, allow_missing_files = False) -> preprocessed
 
   Reads the preprocessed data from ``file_names`` using the given preprocessor.
@@ -94,11 +102,16 @@ def read_preprocessed_data(file_names, preprocessor, split_by_client = False):
   split_by_client : bool
     Indicates if the given ``file_names`` are split into groups.
 
+  allow_missing_files : bool
+    If set to ``True``, preprocessed data files that are not found are silently ignored.
+
   **Returns:**
 
   preprocessed : [object] or [[object]]
     The list of preprocessed data, in the same order as in the ``file_names``.
   """
+  file_names = utils.filter_missing_files(file_names, split_by_client, allow_missing_files)
+
   if split_by_client:
     return [[preprocessor.read_data(f) for f in client_files] for client_files in file_names]
   else:
diff --git a/bob/bio/base/tools/scoring.py b/bob/bio/base/tools/scoring.py
index b2a537f098812b53f2bacd40066eaef36afe0f0f..ab240af8a80f930d3338792c14fee830a888863e 100644
--- a/bob/bio/base/tools/scoring.py
+++ b/bob/bio/base/tools/scoring.py
@@ -13,13 +13,17 @@ from .FileSelector import FileSelector
 from .extractor import read_features
 from .. import utils
 
-def _scores(algorithm, model, probes):
+def _scores(algorithm, model, probes, allow_missing_files):
   """Compute scores for the given model and a list of probes.
   """
   # the file selector object
   fs = FileSelector.instance()
-  # the scores to be computed
-  scores = numpy.ndarray((1,len(probes)), 'float64')
+  # the scores to be computed; initialized with NaN
+  scores = numpy.full((1,len(probes)), numpy.nan, numpy.float64)
+
+  if allow_missing_files and model is None:
+    # if we have no model, all scores are undefined
+    return scores
 
   # Loops over the probe sets
   for i, probe_element in enumerate(probes):
@@ -27,9 +31,17 @@ def _scores(algorithm, model, probes):
       assert isinstance(probe_element, list)
+      if allow_missing_files:
+        probe_element = utils.filter_missing_files(probe_element)
+        if not probe_element:
+          # we keep the NaN score
+          continue
       # read probe from probe_set
       probe = [algorithm.read_probe(probe_file) for probe_file in probe_element]
       # compute score
       scores[0,i] = algorithm.score_for_multiple_probes(model, probe)
     else:
+      if allow_missing_files and not os.path.exists(probe_element):
+        # we keep the NaN score
+        continue
       # read probe
       probe = algorithm.read_probe(probe_element)
       # compute score
@@ -84,7 +96,7 @@ def _close_written(score_file, f, write_compressed):
   f.close()
 
 
-def _save_scores(score_file, scores, probe_objects, client_id, write_compressed=False):
+def _save_scores(score_file, scores, probe_objects, client_id, write_compressed):
   """Saves the scores of one model into a text file that can be interpreted by :py:func:`bob.measure.load.split_four_column`."""
   assert len(probe_objects) == scores.shape[1]
 
@@ -98,7 +110,7 @@ def _save_scores(score_file, scores, probe_objects, client_id, write_compressed=
   _close_written(score_file, f, write_compressed)
 
 
-def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed=False):
+def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed, allow_missing_files):
   """Computes A scores for the models with the given model_ids. If ``compute_zt_norm = False``, these are the only scores that are actually computed."""
   # the file selector object
   fs = FileSelector.instance()
@@ -117,11 +129,15 @@ def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compres
     else:
       # get probe files that are required for this model
       current_probe_objects = fs.probe_objects_for_model(model_id, group)
-      model = algorithm.read_model(fs.model_file(model_id, group))
+      model_file = fs.model_file(model_id, group)
+      if allow_missing_files and not os.path.exists(model_file):
+        model = None
+      else:
+        model = algorithm.read_model(model_file)
       # get the probe files
       current_probe_files = fs.get_paths(current_probe_objects, 'projected' if algorithm.performs_projection else 'extracted')
       # compute scores
-      a = _scores(algorithm, model, current_probe_files)
+      a = _scores(algorithm, model, current_probe_files, allow_missing_files)
 
       if compute_zt_norm:
         # write A matrix only when you want to compute zt norm afterwards
@@ -131,7 +147,7 @@ def _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compres
       _save_scores(fs.no_norm_file(model_id, group), a, current_probe_objects, fs.client_id(model_id, group), write_compressed)
 
 
-def _scores_b(algorithm, model_ids, group, force):
+def _scores_b(algorithm, model_ids, group, force, allow_missing_files):
   """Computes B scores for the given model ids."""
   # the file selector object
   fs = FileSelector.instance()
@@ -149,11 +165,15 @@ def _scores_b(algorithm, model_ids, group, force):
     if utils.check_file(score_file, force):
       logger.warn("Score file '%s' already exists.", score_file)
     else:
-      model = algorithm.read_model(fs.model_file(model_id, group))
-      b = _scores(algorithm, model, z_probe_files)
+      model_file = fs.model_file(model_id, group)
+      if allow_missing_files and not os.path.exists(model_file):
+        model = None
+      else:
+        model = algorithm.read_model(model_file)
+      b = _scores(algorithm, model, z_probe_files, allow_missing_files)
       bob.io.base.save(b, score_file, True)
 
-def _scores_c(algorithm, t_model_ids, group, force):
+def _scores_c(algorithm, t_model_ids, group, force, allow_missing_files):
   """Computes C scores for the given t-norm model ids."""
   # the file selector object
   fs = FileSelector.instance()
@@ -171,11 +191,15 @@ def _scores_c(algorithm, t_model_ids, group, force):
     if utils.check_file(score_file, force):
       logger.warn("Score file '%s' already exists.", score_file)
     else:
-      t_model = algorithm.read_model(fs.t_model_file(t_model_id, group))
-      c = _scores(algorithm, t_model, probe_files)
+      t_model_file = fs.t_model_file(t_model_id, group)
+      if allow_missing_files and not os.path.exists(t_model_file):
+        t_model = None
+      else:
+        t_model = algorithm.read_model(t_model_file)
+      c = _scores(algorithm, t_model, probe_files, allow_missing_files)
       bob.io.base.save(c, score_file, True)
 
-def _scores_d(algorithm, t_model_ids, group, force):
+def _scores_d(algorithm, t_model_ids, group, force, allow_missing_files):
   """Computes D scores for the given t-norm model ids. Both the D matrix and the D-samevalue matrix are written."""
   # the file selector object
   fs = FileSelector.instance()
@@ -197,8 +221,12 @@ def _scores_d(algorithm, t_model_ids, group, force):
     if utils.check_file(score_file, force) and utils.check_file(same_score_file, force):
       logger.warn("score files '%s' and '%s' already exist.", score_file, same_score_file)
     else:
-      t_model = algorithm.read_model(fs.t_model_file(t_model_id, group))
-      d = _scores(algorithm, t_model, z_probe_files)
+      t_model_file = fs.t_model_file(t_model_id, group)
+      if allow_missing_files and not os.path.exists(t_model_file):
+        t_model = None
+      else:
+        t_model = algorithm.read_model(t_model_file)
+      d = _scores(algorithm, t_model, z_probe_files, allow_missing_files)
       bob.io.base.save(d, score_file, True)
 
       t_client_id = [fs.client_id(t_model_id, group, True)]
@@ -206,7 +234,7 @@ def _scores_d(algorithm, t_model_ids, group, force):
       bob.io.base.save(d_same_value_tm, same_score_file, True)
 
 
-def compute_scores(algorithm, compute_zt_norm, force = False, indices = None, groups = ['dev', 'eval'], types = ['A', 'B', 'C', 'D'], write_compressed = False):
+def compute_scores(algorithm, compute_zt_norm, indices = None, groups = ['dev', 'eval'], types = ['A', 'B', 'C', 'D'], write_compressed = False, allow_missing_files = False, force = False):
   """Computes the scores for the given groups.
 
   This function computes all scores for the experiment, and writes them to files, one per model.
@@ -218,9 +246,6 @@ def compute_scores(algorithm, compute_zt_norm, force = False, indices = None, gr
   algorithm : py:class:`bob.bio.base.algorithm.Algorithm` or derived
     The algorithm, used for enrolling model and writing them to file.
 
-  force : bool
-    If given, files are regenerated, even if they already exist.
-
   compute_zt_norm : bool
     If set to ``True``, also ZT-norm scores are computed.
 
@@ -239,12 +264,18 @@ def compute_scores(algorithm, compute_zt_norm, force = False, indices = None, gr
 
   write_compressed : bool
     If enabled, score files are compressed as ``.tar.bz2`` files.
+
+  allow_missing_files : bool
+    If set to ``True``, model and probe files that are not found will produce ``NaN`` scores.
+
+  force : bool
+    If given, score files are regenerated, even if they already exist.
   """
   # the file selector object
   fs = FileSelector.instance()
 
   # load the projector and the enroller, if needed
-  if algorithm.requires_projector_training:
+  if algorithm.performs_projection:
     algorithm.load_projector(fs.projector_file)
   algorithm.load_enroller(fs.enroller_file)
 
@@ -261,26 +292,26 @@ def compute_scores(algorithm, compute_zt_norm, force = False, indices = None, gr
 
     # compute A scores
     if 'A' in types:
-      _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed)
+      _scores_a(algorithm, model_ids, group, compute_zt_norm, force, write_compressed, allow_missing_files)
 
     if compute_zt_norm:
       # compute B scores
       if 'B' in types:
-        _scores_b(algorithm, model_ids, group, force)
+        _scores_b(algorithm, model_ids, group, force, allow_missing_files)
 
       # compute C scores
       if 'C' in types:
-        _scores_c(algorithm, t_model_ids, group, force)
+        _scores_c(algorithm, t_model_ids, group, force, allow_missing_files)
 
       # compute D scores
       if 'D' in types:
-        _scores_d(algorithm, t_model_ids, group, force)
+        _scores_d(algorithm, t_model_ids, group, force, allow_missing_files)
 
 
 
 def _c_matrix_split_for_model(selected_probe_objects, all_probe_objects, all_c_scores):
   """Helper function to sub-select the c-scores in case not all probe files were used to compute A scores."""
-  c_scores_for_model = numpy.ndarray((all_c_scores.shape[0], len(selected_probe_objects)), numpy.float64)
+  c_scores_for_model = numpy.empty((all_c_scores.shape[0], len(selected_probe_objects)), numpy.float64)
   selected_index = 0
   for all_index in range(len(all_probe_objects)):
     if selected_index < len(selected_probe_objects) and selected_probe_objects[selected_index].id == all_probe_objects[all_index].id:
@@ -338,7 +369,7 @@ def _scores_d_normalize(t_model_ids, group):
 
 
 
-def zt_norm(groups = ['dev', 'eval'], write_compressed=False):
+def zt_norm(groups = ['dev', 'eval'], write_compressed = False, allow_missing_files = False):
   """Computes ZT-Norm using the previously generated A, B, C, D and D-samevalue matrix files.
 
   This function computes the ZT-norm scores for all model ids for all desired groups and writes them into files defined by the :py:class:`bob.bio.base.tools.FileSelector`.
@@ -351,6 +382,10 @@ def zt_norm(groups = ['dev', 'eval'], write_compressed=False):
 
   write_compressed : bool
     If enabled, score files are compressed as ``.tar.bz2`` files.
+
+  allow_missing_files : bool
+    Currently, this option is only provided for completeness.
+    ``NaN`` scores are not yet handled correctly.
   """
   # the file selector object
   fs = FileSelector.instance()
@@ -366,10 +401,10 @@ def zt_norm(groups = ['dev', 'eval'], write_compressed=False):
     # and normalize it
     _scores_d_normalize(t_model_ids, group)
 
-
     # load D matrices only once
     d = bob.io.base.load(fs.d_matrix_file(group))
     d_same_value = bob.io.base.load(fs.d_same_value_matrix_file(group)).astype(bool)
+    error_log_done = False
     # Loops over the model ids
     for model_id in model_ids:
       # Loads probe files to get information about the type of access
@@ -381,6 +416,12 @@ def zt_norm(groups = ['dev', 'eval'], write_compressed=False):
       c = bob.io.base.load(fs.c_file_for_model(model_id, group))
 
       # compute zt scores
+      if allow_missing_files:
+        # TODO: handle NaN scores, i.e., when allow_missing_files is enabled
+        if not error_log_done and any(numpy.any(numpy.isnan(x)) for x in (a,b,c,d,d_same_value)):
+          logger.error("There are NaN scores inside one of the score files for group %s; ZT-Norm will not work", group)
+          error_log_done = True
+
       zt_scores = bob.learn.em.ztnorm(a, b, c, d, d_same_value)
 
       # Saves to text file
@@ -401,7 +442,7 @@ def _concat(score_files, output, write_compressed):
 
 
 
-def concatenate(compute_zt_norm, groups = ['dev', 'eval'], write_compressed=False):
+def concatenate(compute_zt_norm, groups = ['dev', 'eval'], write_compressed = False):
   """Concatenates all results into one (or two) score files per group.
 
   Score files, which were generated per model, are concatenated into a single score file, which can be interpreter by :py:func:`bob.measure.load.split_four_column`.
@@ -443,6 +484,10 @@ def calibrate(compute_zt_norm, groups = ['dev', 'eval'], prior = 0.5, write_comp
   For ZT-norm scores, the calibration is performed independently, if enabled.
   The names of the calibrated score files that should be written are obtained from the :py:class:`bob.bio.base.tools.FileSelector`.
 
+  .. note::
+     All ``NaN`` scores in the development set are silently ignored.
+     This might raise an error if **all** scores are ``NaN``.
+
   **Parameters:**
 
   compute_zt_norm : bool
@@ -452,6 +497,9 @@ def calibrate(compute_zt_norm, groups = ['dev', 'eval'], prior = 0.5, write_comp
     The list of groups, for which score files should be calibrated.
     The first of the given groups is used to train the logistic regression parameters, while the calibration is performed for all given groups.
 
+  prior : float
+    The prior that is passed to :py:class:`bob.learn.linear.CGLogRegTrainer` for training the calibration.
+
   write_compressed : bool
     If enabled, calibrated score files are compressed as ``.tar.bz2`` files.
   """
@@ -469,8 +517,9 @@ def calibrate(compute_zt_norm, groups = ['dev', 'eval'], prior = 0.5, write_comp
     training_scores = list(bob.measure.load.split_four_column(training_score_file))
     for i in (0,1):
       h = numpy.array(training_scores[i])
-      h.shape = (len(training_scores[i]), 1)
-      training_scores[i] = h
+      # remove NaNs
+      h = h[~numpy.isnan(h)]
+      training_scores[i] = h[:,numpy.newaxis]
     # train the LLR
     llr_machine = llr_trainer.train(training_scores[0], training_scores[1])
     del training_scores
diff --git a/bob/bio/base/utils/io.py b/bob/bio/base/utils/io.py
index b53a5d10f0bf0c42ffc4629dc2d72e0b9ec03c6a..1e2851bcf9d79e092936e1177c9f9438e2b9131d 100644
--- a/bob/bio/base/utils/io.py
+++ b/bob/bio/base/utils/io.py
@@ -6,6 +6,28 @@ logger = logging.getLogger("bob.bio.base")
 
 import bob.io.base
 
+def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):
+  """This function filters out files that do not exist, but only if ``allow_missing_files`` is set to ``True``, otherwise the list of ``file_names`` is returned unaltered."""
+
+  if not allow_missing_files:
+    return file_names
+
+  if split_by_client:
+    # filter out missing files and empty clients
+    existing_files = [[f for f in client_files if os.path.exists(f)] for client_files in file_names]
+    existing_files = [client_files for client_files in existing_files if client_files]
+  else:
+    # filter out missing files
+    existing_files = [f for f in file_names if os.path.exists(f)]
+  return existing_files
+
+
 def check_file(filename, force, expected_file_size = 1):
   """Checks if the file with the given ``filename`` exists and has size greater or equal to ``expected_file_size``.
-  If the file is to small, **or** if the ``force`` option is set to ``True``, the file is removed.
+  If the file is too small, **or** if the ``force`` option is set to ``True``, the file is removed.
@@ -85,7 +101,6 @@ def close_compressed(filename, hdf5_file, compression_type='bz2', create_link=Fa
   os.remove(hdf5_file_name)
 
 
-
 def load_compressed(filename, compression_type='bz2'):
   """Extracts the data to a temporary HDF5 file using HDF5 and reads its contents.
   Note that, though the file name is .hdf5, it contains compressed data!
diff --git a/doc/experiments.rst b/doc/experiments.rst
index 77dbf262dc46867285daf38f1dcb9d79b1e36616..b01e259709478efa25d2c6fb0198996c3183c81e 100644
--- a/doc/experiments.rst
+++ b/doc/experiments.rst
@@ -228,8 +228,40 @@ If the ZT-norm is enabled, two sets of scores will be computed, and they will be
 Other Arguments
 ---------------
 
+Calibration
+~~~~~~~~~~~
+
 For some applications it is interesting to get calibrated scores.
 Simply add the ``--calibrate-scores`` option and another set of score files will be created by training the score calibration on the scores of the ``'dev'`` group and execute it to all available groups.
-The scores will be located at the same directory as the **nonorm** and **ztnorm** scores, and the file names are **calibrated-dev** (and **calibrated-eval** if applicable) .
+The scores will be located at the same directory as the **nonorm** and **ztnorm** scores, and the file names are **calibrated-dev** (and **calibrated-eval** if applicable).
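+
+For example, calibration is enabled by simply appending the option to an otherwise unchanged command line (a sketch; the ``dummy`` resources are the ones used in the test suite):
+
+.. code-block:: sh
+
+   verify.py -d dummy -p dummy -e dummy -a dummy -vs calibration-test --calibrate-scores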
+
+Unsuccessful Preprocessing
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In some cases, the preprocessor is not able to preprocess the data (e.g., for face image processing the face detector might not detect the face).
+If you expect such cases to happen, you might want to use the ``--allow-missing-files`` option.
+When this option is enabled, missing files will be handled correctly throughout the whole processing chain (see the example below), i.e.:
+
+* the data file is not used during training (in any step of the processing tool chain)
+* preprocessed data is not written
+* feature extraction is not performed for that file
+* the file is exempt from model enrollment; if no enrollment file is found for a model, no model file is written
+* if either the model or the probe file is not found, the corresponding score will be ``NaN``.
+  If several probe files are combined into one score, missing probe files will be ignored; if none of the probe files are found, the score is ``NaN``.
+
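+For example, a dummy experiment that tolerates such failures can be run as follows (a sketch re-using the ``dummy`` resources from the test suite; replace them with your actual setup):
+
+.. code-block:: sh
+
+   verify.py -d dummy -p dummy -e dummy -a dummy -vs missing-files-test --allow-missing-files
+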
+.. warning::
+   At the moment, combining the ``--allow-missing-files`` and ``--zt-norm`` options might result in unexpected behavior, as the ZT-Norm computation does not handle ``NaN`` values appropriately.
 
 .. include:: links.rst