diff --git a/bob/bio/base/tools/algorithm.py b/bob/bio/base/tools/algorithm.py
index 8cd0010538f0c17351c329be33571b0a14598a56..0447df71cbc0a8331bf799b81bfc3aafc19210d9 100644
--- a/bob/bio/base/tools/algorithm.py
+++ b/bob/bio/base/tools/algorithm.py
@@ -123,7 +123,8 @@ def project(algorithm, extractor, groups = None, indices = None, allow_missing_f
 
     if not utils.check_file(projected_file, force,
                             algorithm.min_projected_file_size):
-      logger.debug("... Projecting features for file '%s'", feature_file)
+      logger.debug("... Projecting features for file '%s' (%d/%d)",
+          feature_file, index_range.index(i)+1, len(index_range))
       # create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
       bob.io.base.create_directories_safe(os.path.dirname(projected_file))
       # load feature
@@ -256,7 +257,7 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
         logger.info("- Enrollment: splitting of index range %s", str(indices))
 
       logger.info("- Enrollment: enrolling models of group '%s'", group)
-      for model_id in model_ids:
+      for pos, model_id in enumerate(model_ids):
         # Path to the model
         model_file = fs.model_file(model_id, group)
 
@@ -271,7 +272,9 @@ def enroll(algorithm, extractor, compute_zt_norm, indices = None, groups = ['dev
               logger.debug("... Skipping model file %s since no feature file could be found", model_file)
               continue
 
-          logger.debug("... Enrolling model from %d features to file '%s'", len(enroll_files), model_file)
+          logger.debug("... Enrolling model '%d' from %d feature(s) to "
+              "file '%s' (%d/%d)", model_id, len(enroll_files), model_file,
+              pos+1, len(model_ids))
           bob.io.base.create_directories_safe(os.path.dirname(model_file))
 
           # load all files into memory
diff --git a/bob/bio/base/tools/extractor.py b/bob/bio/base/tools/extractor.py
index 7a0b62e2cf30ac55d33ba9a507961978aa10c88c..281314677f7a5e8766fcbe8ee92986c91414de64 100644
--- a/bob/bio/base/tools/extractor.py
+++ b/bob/bio/base/tools/extractor.py
@@ -112,7 +112,8 @@ def extract(extractor, preprocessor, groups=None, indices = None, allow_missing_
 
     if not utils.check_file(feature_file, force,
                             extractor.min_feature_file_size):
-      logger.debug("... Extracting features for data file '%s'", data_file)
+      logger.debug("... Extracting features for data file '%s' (%d/%d)",
+          data_file, index_range.index(i)+1, len(index_range))
       # create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
       bob.io.base.create_directories_safe(os.path.dirname(feature_file))
       # load data
diff --git a/bob/bio/base/tools/preprocessor.py b/bob/bio/base/tools/preprocessor.py
index 1f7374cf98a3d9135035eabfb76fd140491094d2..83eafab8953940e3a668c398e6b4e6166fa5cccf 100644
--- a/bob/bio/base/tools/preprocessor.py
+++ b/bob/bio/base/tools/preprocessor.py
@@ -67,7 +67,8 @@ def preprocess(preprocessor, groups = None, indices = None, allow_missing_files
     # check for existence
     if not utils.check_file(preprocessed_data_file, force,
                             preprocessor.min_preprocessed_file_size):
-      logger.debug("... Processing original data file '%s'", file_name)
+      logger.debug("... Processing original data file '%s' (%d/%d)", file_name,
+          index_range.index(i)+1, len(index_range))
 
       data = preprocessor.read_original_data(file_object, original_directory, original_extension)
       # create output directory before reading the data file (is sometimes required, when relative directories are specified, especially, including a .. somewhere)
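
Note: the projector, extractor, and preprocessor hunks above derive the
progress counter with index_range.index(i), which rescans the sequence on
every iteration (quadratic cost over the whole loop). Below is a minimal
sketch of an equivalent enumerate()-based loop, assuming index_range is the
same sequence the surrounding loop iterates over and data_files maps an
index to a path; both names are illustrative, not taken from the patch:

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    logger = logging.getLogger(__name__)

    # Illustrative stand-ins for the patch's index_range / file list.
    data_files = ["a.hdf5", "b.hdf5", "c.hdf5"]
    index_range = range(len(data_files))

    for pos, i in enumerate(index_range):
        file_name = data_files[i]
        # enumerate() yields the position directly, avoiding the O(n)
        # index_range.index(i) lookup done in the hunks above.
        logger.debug("... Processing original data file '%s' (%d/%d)",
                     file_name, pos + 1, len(index_range))
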
diff --git a/bob/bio/base/tools/scoring.py b/bob/bio/base/tools/scoring.py
index 778b4f46e530c56fc86e9be642c4218fb0a556c6..1649add9826561f8c13a9336c9538154958a6773 100644
--- a/bob/bio/base/tools/scoring.py
+++ b/bob/bio/base/tools/scoring.py
@@ -131,9 +131,11 @@ def _scores_a(algorithm, reader, model_ids, group, compute_zt_norm, force, write
     logger.info("- Scoring: computing scores for group '%s'", group)
 
   # Computes the raw scores for each model
-  for model_id in model_ids:
+  for pos, model_id in enumerate(model_ids):
     # test if the file is already there
     score_file = fs.a_file(model_id, group) if compute_zt_norm else fs.no_norm_file(model_id, group)
+    logger.debug("... Scoring model '%s' at '%s' (%d/%d)", model_id, score_file,
+        pos+1, len(model_ids))
     if utils.check_file(score_file, force):
       logger.warn("Score file '%s' already exists.", score_file)
     else:
@@ -166,9 +168,11 @@ def _scores_b(algorithm, reader, model_ids, group, force, allow_missing_files):
   logger.info("- Scoring: computing score matrix B for group '%s'", group)
 
   # Loads the models
-  for model_id in model_ids:
+  for pos, model_id in enumerate(model_ids):
     # test if the file is already there
     score_file = fs.b_file(model_id, group)
+    logger.debug("... Scoring model '%s' at '%s' (%d/%d)", model_id,
+        score_file, pos+1, len(model_ids))
     if utils.check_file(score_file, force):
       logger.warn("Score file '%s' already exists.", score_file)
     else:
@@ -191,9 +195,11 @@ def _scores_c(algorithm, reader, t_model_ids, group, force, allow_missing_files)
   logger.info("- Scoring: computing score matrix C for group '%s'", group)
 
   # Computes the raw scores for the T-Norm model
-  for t_model_id in t_model_ids:
+  for pos, t_model_id in enumerate(t_model_ids):
     # test if the file is already there
     score_file = fs.c_file(t_model_id, group)
+    logger.debug("... Scoring model '%s' at '%s' (%d/%d)", t_model_id,
+        score_file, pos+1, len(t_model_ids))
     if utils.check_file(score_file, force):
       logger.warn("Score file '%s' already exists.", score_file)
     else:
@@ -219,9 +225,11 @@ def _scores_d(algorithm, reader, t_model_ids, group, force, allow_missing_files)
   z_probe_ids = [z_probe_object.client_id for z_probe_object in z_probe_objects]
 
   # Loads the T-Norm models
-  for t_model_id in t_model_ids:
+  for pos, t_model_id in enumerate(t_model_ids):
     # test if the file is already there
     score_file = fs.d_file(t_model_id, group)
+    logger.debug("... Scoring model '%s' at '%s' (%d/%d)", t_model_id,
+        score_file, pos+1, len(t_model_ids))
     same_score_file = fs.d_same_value_file(t_model_id, group)
     if utils.check_file(score_file, force) and utils.check_file(same_score_file, force):
       logger.warn("score files '%s' and '%s' already exist.", score_file, same_score_file)