diff --git a/bob/bio/face/algorithm/GaborJet.py b/bob/bio/face/algorithm/GaborJet.py
index 54879038e63d6171a80bf53887958191d87f1463..3430552633e349b8af47db927c9c2ee940946bfa 100644
--- a/bob/bio/face/algorithm/GaborJet.py
+++ b/bob/bio/face/algorithm/GaborJet.py
@@ -296,7 +296,6 @@ class GaborJet(Algorithm):
         return graph_scoring(local_scores)
 
     def score_for_multiple_models(self, models, probe):
-
         self._check_feature(probe)
         [self._check_feature(m) for model in models for m in model]
 
diff --git a/bob/bio/face/embeddings/facenet_sanderberg.py b/bob/bio/face/embeddings/facenet_sanderberg.py
index ada60509026b3017e4f9d3f17ee375ef08807981..e0a48dc2c648960b2a3f520b73e9592d28c7d031 100644
--- a/bob/bio/face/embeddings/facenet_sanderberg.py
+++ b/bob/bio/face/embeddings/facenet_sanderberg.py
@@ -193,10 +193,7 @@ class FaceNetSanderberg(TransformerMixin, BaseEstimator):
             features = self.session.run(self.embeddings, feed_dict=feed_dict)
             return features
         
-        if isinstance(X, list):
-            return [_transform(i) for i in X]
-        else:
-            return _transform(X)
+        return [_transform(i) for i in X]
 
     @staticmethod
     def get_modelpath():
diff --git a/bob/bio/face/embeddings/tensorflow_compat_v1.py b/bob/bio/face/embeddings/tensorflow_compat_v1.py
index 295307313608c4359baab61d5541e5a27052de45..216d1cc84377b191ce5f43f3257eeefa12ac5c11 100644
--- a/bob/bio/face/embeddings/tensorflow_compat_v1.py
+++ b/bob/bio/face/embeddings/tensorflow_compat_v1.py
@@ -81,10 +81,7 @@ class TensorflowCompatV1(TransformerMixin, BaseEstimator):
 
             return self.session.run(self.embedding, feed_dict={self.input_tensor: data},)
 
-        if isinstance(X, SampleBatch):
-            return [_transform(x) for x in X]
-        else:
-            return _transform(X)
+        return [_transform(x) for x in X]
 
     def load_model(self):
         import tensorflow as tf
diff --git a/bob/bio/face/extractor/DCTBlocks.py b/bob/bio/face/extractor/DCTBlocks.py
index e74d1b1eea09fc140567716438e769d7314c537b..31a78a5910bb8a20d8f69b014defec0f67de250e 100644
--- a/bob/bio/face/extractor/DCTBlocks.py
+++ b/bob/bio/face/extractor/DCTBlocks.py
@@ -128,10 +128,7 @@ class DCTBlocks(TransformerMixin, BaseEstimator):
             # Computes DCT features
             return self.dct_features(image)
 
-        if isinstance(X, SampleBatch):
-            return [_extract(x) for x in X]
-        else:
-            return _extract(X)
+        return [_extract(x) for x in X]
 
     def _more_tags(self):
         return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/extractor/LGBPHS.py b/bob/bio/face/extractor/LGBPHS.py
index 8f592e01eb5dc3758e1660f7df82e068b16943c3..05559395175aac96ed83b2abbe0122cacc2f3d20 100644
--- a/bob/bio/face/extractor/LGBPHS.py
+++ b/bob/bio/face/extractor/LGBPHS.py
@@ -274,11 +274,7 @@ class LGBPHS(TransformerMixin, BaseEstimator):
             # return the concatenated list of all histograms
             return self._sparsify(lgbphs_array)
 
-
-        if isinstance(X, SampleBatch):
-            return [_extract(x) for x in X]
-        else:
-            return _extract(X)
+        return [_extract(x) for x in X]
 
     def __getstate__(self):
         d = self.__dict__.copy()
diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 6b22a1e3abd0451a2b3f6e5cb5901395960fe2cb..e19f34c8aac8d99a1968b567c39980aeb97e6c04 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -300,8 +300,7 @@ class FaceCrop(Base):
                 The cropped face.
             """
 
-        def _crop(image, annot):
-
+        def _crop(image, annot):
             # if annotations are missing and cannot do anything else return None.
             if (
                 not self.is_annotations_valid(annot)
@@ -335,15 +334,10 @@ class FaceCrop(Base):
             # crop face
             return self.data_type(self.crop_face(image, annot))
 
-        if isinstance(X, SampleBatch):
-
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
-
+        if annotations is None:
+            return [_crop(data, None) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
 
     def __getstate__(self):
diff --git a/bob/bio/face/preprocessor/FaceDetect.py b/bob/bio/face/preprocessor/FaceDetect.py
index e6d883aa14eb1dcaa73f853c788d3ffe71829e89..479a0eab61e96cb160c7f5bef498392ece2e5813 100644
--- a/bob/bio/face/preprocessor/FaceDetect.py
+++ b/bob/bio/face/preprocessor/FaceDetect.py
@@ -224,17 +224,10 @@ class FaceDetect(Base):
             # convert data type
             return self.data_type(image)
 
-
-        if isinstance(X, SampleBatch):
-
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
-
+        if annotations is None:
+            return [_crop(data, None) for data in X]
         else:
-            return _crop(X, annotations)
-
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
     def __getstate__(self):
         d = dict(self.__dict__)
diff --git a/bob/bio/face/preprocessor/HistogramEqualization.py b/bob/bio/face/preprocessor/HistogramEqualization.py
index b4df2d80df61eedd8cee345b8b8caa3dd6f7e308..8679242ce41f675bb40c955e004dfc984bbde5bd 100644
--- a/bob/bio/face/preprocessor/HistogramEqualization.py
+++ b/bob/bio/face/preprocessor/HistogramEqualization.py
@@ -96,15 +96,12 @@ class HistogramEqualization(Base):
         def _crop(image, annotations):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.equalize_histogram(image)
             return self.data_type(image)
 
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data, None) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
diff --git a/bob/bio/face/preprocessor/INormLBP.py b/bob/bio/face/preprocessor/INormLBP.py
index eb9bb2754430ad248fc9a64a429ff907bb9d2ba7..1b0d89446cc961fdb3315cf2d8407d33fb9484b4 100644
--- a/bob/bio/face/preprocessor/INormLBP.py
+++ b/bob/bio/face/preprocessor/INormLBP.py
@@ -120,19 +120,14 @@ class INormLBP(Base):
         def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations=annotations)
+                image = self.cropper.transform([image], annotations=[annotations])[0]
             image = self.lbp_extractor(image)
             return self.data_type(image)
                 
-        if isinstance(X, SampleBatch):
-
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
-
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
     def __getstate__(self):
         d = dict(self.__dict__)
diff --git a/bob/bio/face/preprocessor/SelfQuotientImage.py b/bob/bio/face/preprocessor/SelfQuotientImage.py
index ffbf05240f75811fddc04450a3f8d70f63cb477c..2698e628d6534773796489f486670364f4feb023 100644
--- a/bob/bio/face/preprocessor/SelfQuotientImage.py
+++ b/bob/bio/face/preprocessor/SelfQuotientImage.py
@@ -93,18 +93,14 @@ class SelfQuotientImage(Base):
         def _crop(image, annotations):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.self_quotient(image)
             return self.data_type(image)
 
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data, None) for data in X]
         else:
-            return _crop(X, annotations)
-
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
     def __getstate__(self):
         d = dict(self.__dict__)
diff --git a/bob/bio/face/preprocessor/TanTriggs.py b/bob/bio/face/preprocessor/TanTriggs.py
index f788339e2568ba5085e3479b0800e0f9d73e7436..0df05470083c9a2c37002a7100344e2dffb72e0d 100644
--- a/bob/bio/face/preprocessor/TanTriggs.py
+++ b/bob/bio/face/preprocessor/TanTriggs.py
@@ -110,18 +110,15 @@ class TanTriggs(Base):
         def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.tan_triggs(image)
 
             return self.data_type(image)
 
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
 
     def __getstate__(self):
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index effdcdfafe43dfc4d3fe6c19ca4ee37e77863a22..c645d01cce252d3ba85bcbca5c36077b8076268f 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -62,7 +62,7 @@ def run_baseline(baseline, samples_for_training=[]):
     probes = get_fake_sample_set(purpose="probe")
 
     # Regular pipeline
     pipeline = load_resource(baseline, "pipeline")
     scores = pipeline(samples_for_training, biometric_references, probes)
     assert len(scores) == 1
     assert len(scores[0]) == 1
@@ -72,7 +72,7 @@ def run_baseline(baseline, samples_for_training=[]):
 
         checkpoint_pipeline = checkpoint_vanilla_biometrics(
             copy.deepcopy(pipeline), base_dir=d
         )
         checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
         assert len(checkpoint_scores) == 1
         assert len(checkpoint_scores[0]) == 1
diff --git a/bob/bio/face/test/test_embeddings.py b/bob/bio/face/test/test_embeddings.py
index 780021507b089a9cd151075b2e91244060adde8c..316423881b873de657c29d13a3f4f22b41fd0fd1 100644
--- a/bob/bio/face/test/test_embeddings.py
+++ b/bob/bio/face/test/test_embeddings.py
@@ -13,14 +13,13 @@ def test_facenet():
     transformer = FaceNetSanderberg()
     # Raw data
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
 
     # Sample Batch
     sample = Sample(data)
     transformer_sample = wrap(["sample"], transformer)
-    output = [s.data for s in transformer_sample.transform([sample])][0]
-
+    output = [s.data for s in transformer_sample.transform([sample])][0]
     assert output.size == 128, output.shape
 
 
@@ -36,7 +35,7 @@ def test_idiap_inceptionv2_msceleb():
     np.random.seed(10)
     transformer = InceptionResnetv2_MsCeleb()
     data = (np.random.rand(3, 160, 160) * 255).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
 
     # Sample Batch
@@ -54,7 +53,7 @@ def test_idiap_inceptionv2_casia():
     np.random.seed(10)
     transformer = InceptionResnetv2_CasiaWebFace()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
 
     # Sample Batch
@@ -71,7 +70,7 @@ def test_idiap_inceptionv1_msceleb():
     np.random.seed(10)
     transformer = InceptionResnetv1_MsCeleb()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
 
     # Sample Batch
@@ -88,7 +87,7 @@ def test_idiap_inceptionv1_casia():
     np.random.seed(10)
     transformer = InceptionResnetv1_CasiaWebFace()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
 
     # Sample Batch
@@ -109,7 +108,7 @@ def test_arface_insight_tf():
     np.random.seed(10)
     transformer = ArcFace_InsightFaceTF()
     data = np.random.rand(3, 112, 112).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 512, output.shape
 
     # Sample Batch
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
index ef02c367e56f8017fabb797f653651a13c0a74db..30936866027d58884bb4e6c236eea39d866e7f37 100644
--- a/bob/bio/face/test/test_extractors.py
+++ b/bob/bio/face/test/test_extractors.py
@@ -78,7 +78,7 @@ def test_dct_blocks():
     dct = bob.bio.face.extractor.DCTBlocks(8, (0, 0), 15)
 
     # extract features
-    feature = dct.transform(data)
+    feature = dct.transform([data])[0]
     assert feature.ndim == 2
     # feature dimension is one lower than the block size, since blocks are normalized by default
     assert feature.shape == (80, 14)
@@ -158,7 +158,7 @@ def test_lgbphs():
     )
 
     # extract feature
-    feature = lgbphs.transform(data)
+    feature = lgbphs.transform([data])[0]
     assert feature.ndim == 2
 
     reference = pkg_resources.resource_filename(
@@ -175,7 +175,7 @@ def test_lgbphs():
         gabor_sigma=math.sqrt(2.0) * math.pi,
         use_gabor_phases=True,
     )
-    feature = lgbphs.transform(data)
+    feature = lgbphs.transform([data])[0]
     assert feature.ndim == 1
 
     reference = pkg_resources.resource_filename(
diff --git a/bob/bio/face/test/test_preprocessors.py b/bob/bio/face/test/test_preprocessors.py
index 301cce0c90d3a2dc0ce01b0b76d903cbae2e21cf..4c88442af87e75f0c0936033f3a753efb1082db2 100644
--- a/bob/bio/face/test/test_preprocessors.py
+++ b/bob/bio/face/test/test_preprocessors.py
@@ -118,7 +118,8 @@ def test_face_crop():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/cropped.hdf5"
     )
-    ref_image = _compare(cropper.transform(image, annotation), reference)
+    ref_image = _compare(cropper.transform([image], [annotation]), reference)
+
 
     # test the preprocessor with fixed eye positions (which correspond to th ones
     fixed_cropper = bob.bio.face.preprocessor.FaceCrop(
@@ -126,12 +127,12 @@ def test_face_crop():
         cropper.cropped_positions,
         fixed_positions={"reye": annotation["reye"], "leye": annotation["leye"]},
     )
     # result must be identical to the original face cropper (same eyes are used)
-    _compare(fixed_cropper.transform(image), reference)
+    _compare(fixed_cropper.transform([image]), reference)
 
     # check color cropping
     cropper.channel = "rgb"
-    cropped = cropper.transform(image, annotation)
+    cropped = cropper.transform([image], [annotation])[0]
     assert cropped.ndim == 3
     assert cropped.shape[0] == 3
     assert cropped.shape[1:] == ref_image.shape
@@ -142,7 +143,7 @@ def test_face_crop():
     # test a ValueError is raised if eye annotations are swapped
     try:
         annot = dict(reye=annotation["leye"], leye=annotation["reye"])
-        cropper.transform(image, annot)
+        cropper.transform([image], [annot])
         assert 0, "FaceCrop did not raise a ValueError for swapped eye annotations"
     except ValueError:
         pass
@@ -171,7 +172,7 @@ def test_face_detect():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/detected.hdf5"
     )
-    _compare(cropper.transform(image, annotation), reference)
+    _compare(cropper.transform([image], [annotation]), reference)
     assert abs(cropper.quality - 39.209601948013685) < 1e-5
 
     # execute face detector with flandmark
@@ -182,7 +183,7 @@ def test_face_detect():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/flandmark.hdf5"
     )
-    _compare(cropper.transform(image, annotation), reference)
+    _compare(cropper.transform([image], [annotation]), reference)
     assert abs(cropper.quality - 39.209601948013685) < 1e-5
 
 
@@ -203,7 +204,7 @@ def test_tan_triggs():
 
     # execute face cropper
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/tan_triggs_cropped.hdf5"
         )
@@ -214,7 +215,7 @@ def test_tan_triggs():
     assert preprocessor.cropper is None
     # result must be identical to the original face cropper (same eyes are used)
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/tan_triggs_none.hdf5"
         )
@@ -247,7 +248,7 @@ def test_inorm_lbp():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/inorm_lbp_cropped.hdf5"
         )
@@ -288,7 +289,7 @@ def test_heq():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/histogram_cropped.hdf5"
         ),
@@ -331,7 +332,7 @@ def test_sqi():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/self_quotient_cropped.hdf5"
         )
diff --git a/bob/bio/face/test/test_transformers.py b/bob/bio/face/test/test_transformers.py
index 97abda34c38d33c540fcce1884358218c45fa52b..4f9e63e81b10e918c7741a95cd0ad5f051680001 100644
--- a/bob/bio/face/test/test_transformers.py
+++ b/bob/bio/face/test/test_transformers.py
@@ -77,19 +77,16 @@ def test_gabor_graph():
     transformer = load_resource("gabor_graph", "transformer")
 
     fake_sample = get_fake_sample()
-
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
-
-    assert len(transformed_sample.data) == 80
+    assert len(transformed_sample.data) == 400
 
 
-def test_lgbphs():    
+def test_lgbphs():
     transformer = load_resource("lgbphs", "transformer")
 
     fake_sample = get_fake_sample()
 
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
-
-    assert transformed_sample.data.shape == (2, 44014)
+    assert transformed_sample.data.shape == (2, 220267)