diff --git a/bob/bio/base/pipelines/vanilla_biometrics/annotated_legacy.py b/bob/bio/base/pipelines/vanilla_biometrics/annotated_legacy.py
index 3013f87dedb50202a588d32c03fd5e33f84d5653..34d35a50753ec41e0b056dda5f4300ebed659b29 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/annotated_legacy.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/annotated_legacy.py
@@ -274,7 +274,7 @@ class SampleLoaderAnnotated(SampleLoader):
                     try:
                         # preprocessing is required, and checkpointing, do it now
                         data = func(s.data, annotations=s.annotations)
-                    except:
+                    except TypeError:  # func does not accept the annotations kwarg
                         data = func(s.data)
 
 
@@ -294,15 +294,16 @@ class SampleLoaderAnnotated(SampleLoader):
                 # because we are checkpointing, we return a DelayedSample
                 # instead of normal (preloaded) sample. This allows the next
                 # phase to avoid loading it would it be unnecessary (e.g. next
                 # phase is already check-pointed)
                 reader = (
                     getattr(func, "read_data")
                     if hasattr(func, "read_data")
                     else getattr(func, "read_feature")
                 )
+                reader = reader.__func__ # The reader object might not be picklable
                 samples.append(
                     DelayedSample(
-                        functools.partial(reader, candidate), parent=s
+                        functools.partial(reader, None, candidate), parent=s
                     )
                 )
         else:
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/blocks.py b/bob/bio/base/pipelines/vanilla_biometrics/blocks.py
index c53483fb74a1765b3ab641a6573cec1e1a17bdc6..0e0c9b7f7a6c34a4adb5f98307a51735b5ca27da 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/blocks.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/blocks.py
@@ -107,8 +107,9 @@ class SampleLoader:
                     if hasattr(func, "read_data")
                     else getattr(func, "read_feature")
                 )
+                reader = reader.__func__ # The reader object might not be picklable
                 samples.append(
-                    DelayedSample(functools.partial(reader, candidate), parent=s)
+                    DelayedSample(functools.partial(reader, None, candidate), parent=s)
                 )
         else:
             # if checkpointing is not required, load the data and preprocess it
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/legacy.py b/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
index 961e2c27e6444f35d7c262028027da43d87df73b..c8bbb4ef62a94c64857f4819b39790cc7e119c0c 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
@@ -9,7 +9,8 @@ import functools
 
 import bob.io.base
+import numpy
 from bob.pipelines.sample.sample import DelayedSample, SampleSet, Sample
 
 
 class DatabaseConnector:
     """Wraps a bob.bio.base database and generates conforming samples
@@ -316,17 +316,17 @@ class AlgorithmAdaptor:
                 self.load()
                 if self.model.requires_projector_training:
                     return self.model.enroll(
-                        [self.model.project(s.data) for s in k.samples]
+                        numpy.array([self.model.project(s.data) for s in k.samples])
                     )
                 else:
-                    return [s.data for s in k.samples]
+                    return self.model.enroll(numpy.array([s.data for s in k.samples]))
 
             def write_enrolled(self, k, path):
                 self.model.write_model(k, path)
 
         model = _CachedModel(self.algorithm, path)
 
         retval = []
         for k in references:
             if checkpoint is not None:
                 candidate = os.path.join(os.path.join(checkpoint, k.path + ".hdf5"))
@@ -334,7 +334,7 @@ class AlgorithmAdaptor:
                     # create new checkpoint
                     bob.io.base.create_directories_safe(os.path.dirname(candidate))
                     enrolled = model.enroll(k)
                     model.model.write_model(enrolled, candidate)
                 retval.append(
                     DelayedSample(
                         functools.partial(model.model.read_model, candidate), parent=k
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py b/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
index da9cb4ff066457762ebec0b9135261ace3749614..6c9214cbf3f0fe20dcae380765987e3e74d7ba07 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
@@ -108,7 +108,7 @@ def biometric_pipeline(
     ## Create biometric samples
     biometric_references = create_biometric_reference(background_model,references,loader,algorithm,npartitions,checkpoints)
 
     ## Scores all probes
     return compute_scores(background_model, biometric_references, probes, loader, algorithm, npartitions, checkpoints)
 
 
@@ -354,12 +354,13 @@ def compute_scores(
     db = dask.bag.from_sequence(probes, npartitions=npartitions)
     db = db.map_partitions(loader, checkpoints.get("probes", {}))
 
+
     ## TODO: Here, we are sending all computed biometric references to all
     ## probes.  It would be more efficient if only the models related to each
     ## probe are sent to the probing split.  An option would be to use caching
     ## and allow the ``score`` function above to load the required data from
     ## the disk, directly.  A second option would be to generate named delays
     ## for each model and then associate them here.
     all_references = dask.delayed(list)(references)
     return db.map_partitions(algorithm.score, all_references, background_model)