diff --git a/bob/bio/base/config/extractor/linearize.py b/bob/bio/base/config/extractor/linearize.py
index ddc02165757ec3ed8a66d43d0d59d8f3ce6edaf5..7d452d6e3363c553ecd9f069982e0269b3f0166c 100644
--- a/bob/bio/base/config/extractor/linearize.py
+++ b/bob/bio/base/config/extractor/linearize.py
@@ -3,4 +3,4 @@
 import bob.bio.base
 
 # Linearization of the data to a vector, no data type specified
-extractor = bob.bio.base.extractor.Linearize
+extractor = bob.bio.base.extractor.Linearize()
diff --git a/bob/bio/base/mixins/__init__.py b/bob/bio/base/mixins/__init__.py
index 62acd81c13d30ee5eb3adb9526d466727af2bfda..729af155f8f3f72cd1e3805f4a6efe40e2787dd7 100644
--- a/bob/bio/base/mixins/__init__.py
+++ b/bob/bio/base/mixins/__init__.py
@@ -1,2 +1,2 @@
 from .linearize import Linearize, SampleLinearize, CheckpointSampleLinearize
-#from .pca import CheckpointSamplePCA, SamplePCA
+from .pca import CheckpointSamplePCA, SamplePCA
diff --git a/bob/bio/base/mixins/legacy.py b/bob/bio/base/mixins/legacy.py
index 616bdf43d683cb69a6d4ad85407c091b627b86e9..a1c523dea8b7e175de10abb61e6d1dbc4f199d79 100644
--- a/bob/bio/base/mixins/legacy.py
+++ b/bob/bio/base/mixins/legacy.py
@@ -83,12 +83,23 @@ class LegacyProcessorMixin(TransformerMixin):
 
     def transform(self, X):
 
-        X = check_array(X, allow_nd=True)
-
         # Instantiates and do the "real" transform
         if self.instance is None:
             self.instance = self.callable()
-        return [self.instance(x) for x in X]
+
+        if isinstance(X[0], dict):
+            # Handling annotations if it's the case
+            retval = []
+            for x in X:
+                data = x["data"]
+                annotations = x["annotations"]
+
+                retval.append(self.instance(data, annotations=annotations))
+            return retval
+
+        else:
+            X = check_array(X, allow_nd=True)
+            return [self.instance(x) for x in X]
 
 
 from bob.pipelines.mixins import CheckpointMixin, SampleMixin
diff --git a/bob/bio/base/mixins/pca.py b/bob/bio/base/mixins/pca.py
index 83e7bf47baef37ad611c8b53f3e88fa0f8ce6400..1412188f66aa8b111d39b741093486cf5bf780c8 100644
--- a/bob/bio/base/mixins/pca.py
+++ b/bob/bio/base/mixins/pca.py
@@ -7,7 +7,7 @@
 TODO: This should be deployed in bob.pipelines
 """
 
-from bob.pipelines.processor import CheckpointMixin, SampleMixin
+from bob.pipelines.mixins import CheckpointMixin, SampleMixin
 from sklearn.base import TransformerMixin
 from sklearn.decomposition import PCA
 import numpy
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/database.py b/bob/bio/base/pipelines/vanilla_biometrics/database.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py b/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
index fe346a6aa4e0cf20649cbebb24e1367174ffd4d5..9b32ae55f66ca498b1d0cbf88692b842fc9e6406 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/pipeline.py
@@ -12,6 +12,9 @@ import dask.bag
 import dask.delayed
 from bob.pipelines.sample import samplesets_to_samples
 
+import logging
+logger = logging.getLogger(__name__)
+
 
 def biometric_pipeline(
     background_model_samples,
@@ -20,16 +23,21 @@ def biometric_pipeline(
     extractor,
     biometric_algorithm,
 ):
+    logger.info(f" >> Vanilla Biometrics: Training background model with pipeline {extractor}")
 
     ## Training background model (fit will return even if samples is ``None``,
     ## in which case we suppose the algorithm is not trainable in any way)
     extractor = train_background_model(background_model_samples, extractor)
 
+    logger.info(f" >> Creating biometric references with the biometric algorithm {biometric_algorithm}")
+
     ## Create biometric samples
     biometric_references = create_biometric_reference(
         biometric_reference_samples, extractor, biometric_algorithm
     )
 
+    logger.info(f" >> Computing scores with the biometric algorithm {biometric_algorithm}")
+
     ## Scores all probes
     return compute_scores(
         probe_samples, biometric_references, extractor, biometric_algorithm
@@ -38,7 +46,6 @@ def biometric_pipeline(
 
 def train_background_model(background_model_samples, extractor):
 
-    # TODO: Maybe here is supervised
     X, y = samplesets_to_samples(background_model_samples)
 
     extractor = extractor.fit(X, y=y)
diff --git a/bob/bio/base/script/vanilla_biometrics.py b/bob/bio/base/script/vanilla_biometrics.py
index f8e17b31f5ec48d7e7ae3c480dcc8d8787d84b9f..b58204d2bf9fcbe4a51386fb993ee7352dbb6dd1 100644
--- a/bob/bio/base/script/vanilla_biometrics.py
+++ b/bob/bio/base/script/vanilla_biometrics.py
@@ -171,6 +171,8 @@ def vanilla_biometrics(
 
         with open(os.path.join(output,f"scores-{g}"), "w") as f:
             biometric_references = database.references(group=g)
+
+            logger.info(f"Running vanilla biometrics for group {g}")
 
             result = biometric_pipeline(
                 database.background_model_samples(),
diff --git a/bob/bio/base/test/test_processor.py b/bob/bio/base/test/test_processor.py
deleted file mode 100644
index fc06fcf88667731438379b7ca69c085a3091ba1f..0000000000000000000000000000000000000000
--- a/bob/bio/base/test/test_processor.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-
-from bob.pipelines.sample import Sample, SampleSet, DelayedSample
-import os
-import numpy
-import tempfile
-from sklearn.utils.validation import check_is_fitted
-
-from bob.bio.base.processor import Linearize, SampleLinearize, CheckpointSampleLinearize
-def test_linearize_processor():
-    
-    ## Test the transformer only
-    transformer = Linearize()
-    X = numpy.zeros(shape=(10,10))
-    X_tr = transformer.transform(X)
-    assert X_tr.shape == (100,)
-
-
-    ## Test wrapped in to a Sample
-    sample = Sample(X, key="1")
-    transformer = SampleLinearize()
-    X_tr = transformer.transform([sample])
-    assert X_tr[0].data.shape == (100,)
-
-    ## Test checkpoint    
-    with tempfile.TemporaryDirectory() as d:
-        transformer = CheckpointSampleLinearize(features_dir=d)
-        X_tr =  transformer.transform([sample])
-        assert X_tr[0].data.shape == (100,)
-        assert os.path.exists(os.path.join(d, "1.h5"))
-
-
-from bob.bio.base.processor import SamplePCA, CheckpointSamplePCA
-def test_pca_processor():
-    
-    ## Test wrapped in to a Sample
-    X = numpy.random.rand(100,10)
-    samples = [Sample(data, key=str(i)) for i, data in enumerate(X)]
-
-    # fit
-    n_components = 2
-    estimator = SamplePCA(n_components=n_components)
-    estimator = estimator.fit(samples)
-    
-    # https://scikit-learn.org/stable/modules/generated/sklearn.utils.validation.check_is_fitted.html
-    assert check_is_fitted(estimator, "n_components_") is None
-    
-    # transform
-    samples_tr = estimator.transform(samples)
-    assert samples_tr[0].data.shape == (n_components,)
-    
-
-    ## Test Checkpoining
-    with tempfile.TemporaryDirectory() as d:        
-        model_path = os.path.join(d, "model.pkl")
-        estimator = CheckpointSamplePCA(n_components=n_components, features_dir=d, model_path=model_path)
-
-        # fit
-        estimator = estimator.fit(samples)
-        assert check_is_fitted(estimator, "n_components_") is None
-        assert os.path.exists(model_path)
-        
-        # transform
-        samples_tr = estimator.transform(samples)
-        assert samples_tr[0].data.shape == (n_components,)        
-        assert os.path.exists(os.path.join(d, samples_tr[0].key+".h5"))
diff --git a/bob/bio/base/test/test_vanilla_biometrics.py b/bob/bio/base/test/test_vanilla_biometrics.py
index 082622427618c41da91354d016a3ae016d95a121..60e0ac4027c6e7535496db6a4b88b02d284974df 100644
--- a/bob/bio/base/test/test_vanilla_biometrics.py
+++ b/bob/bio/base/test/test_vanilla_biometrics.py
@@ -9,9 +9,6 @@ import tempfile
 from sklearn.utils.validation import check_is_fitted
 
 
-#from bob.bio.base.processor import Linearize, SampleLinearize, CheckpointSampleLinearize
-
-
 class DummyDatabase:
 
     def __init__(self, delayed=False, n_references=10, n_probes=10, dim=10, one_d = True):
@@ -61,7 +58,8 @@ class DummyDatabase:
         return probes
 
 
-from bob.bio.base.pipelines.vanilla_biometrics.comparator import DistanceComparator
+from bob.bio.base.pipelines.vanilla_biometrics.biometric_algorithm import Distance
+import itertools
 def test_distance_comparator():
     
     n_references = 10
@@ -70,40 +68,15 @@ def test_distance_comparator():
     database = DummyDatabase(delayed=False, n_references=n_references, n_probes=n_probes, dim=10, one_d = True)
     references = database.references()    
     probes = database.probes()
-
-    pass
-
-    comparator = DistanceComparator()
-    references = comparator.enroll_samples(references)
+
+    comparator = Distance()
+    references = comparator._enroll_samples(references)
     assert len(references)== n_references
     assert references[0].data.shape == (dim,)
 
     probes = database.probes()
-    scores = comparator.score_samples(probes, references)
+    scores = comparator._score_samples(probes, references)
+    scores = list(itertools.chain(*scores))
     
     assert len(scores) == n_probes*n_references
     assert len(scores[0].samples)==n_references
-    
-    
-
-    ## Test the transformer only
-    #transformer = Linearize()
-    #X = numpy.zeros(shape=(10,10))
-    #X_tr = transformer.transform(X)
-    #assert X_tr.shape == (100,)
-
-
-    ## Test wrapped in to a Sample
-    #sample = Sample(X, key="1")
-    #transformer = SampleLinearize()
-    #X_tr = transformer.transform([sample])
-    #assert X_tr[0].data.shape == (100,)
-
-    ## Test checkpoint    
-    #with tempfile.TemporaryDirectory() as d:
-        #transformer = CheckpointSampleLinearize(features_dir=d)
-        #X_tr =  transformer.transform([sample])
-        #assert X_tr[0].data.shape == (100,)
-        #assert os.path.exists(os.path.join(d, "1.h5"))
-
-