diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 8a4178861658156e1a579db4e4daa673ae5f69a8..5aaa202b0e192fd22286f9eba8bc514ef8a352a2 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -10,11 +10,11 @@ import logging
 from .Base import Base
 from bob.bio.base.preprocessor import Preprocessor
 
-logger = logging.getLogger('bob.bio.face')
+logger = logging.getLogger("bob.bio.face")
 
 
-class FaceCrop (Base):
-  """Crops the face according to the given annotations.
+class FaceCrop(Base):
+    """Crops the face according to the given annotations.
 
   This class is designed to perform a geometric normalization of the face based
   on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`. Usually,
@@ -106,58 +106,72 @@ class FaceCrop (Base):
     such as ``color_channel`` or ``dtype``.
   """
 
-  def __init__(
-      self,
-      cropped_image_size,
-      cropped_positions,
-      fixed_positions=None,
-      mask_sigma=None,
-      mask_neighbors=5,
-      mask_seed=None,
-      annotator=None,
-      allow_upside_down_normalized_faces=False,
-      **kwargs
-  ):
-
-    Base.__init__(self, **kwargs)
-
-    # call base class constructor
-    Preprocessor.__init__(
+    def __init__(
         self,
-        cropped_image_size=cropped_image_size,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        mask_sigma=mask_sigma,
-        mask_neighbors=mask_neighbors,
-        mask_seed=mask_seed
-    )
-
-    # check parameters
-    assert len(cropped_positions) == 2
-    if fixed_positions:
-      assert len(fixed_positions) == 2
-
-    # copy parameters
-    self.cropped_image_size = cropped_image_size
-    self.cropped_positions = cropped_positions
-    self.cropped_keys = sorted(cropped_positions.keys())
-    self.fixed_positions = fixed_positions
-    self.mask_sigma = mask_sigma
-    self.mask_neighbors = mask_neighbors
-    self.mask_rng = bob.core.random.mt19937(
-        mask_seed) if mask_seed is not None else bob.core.random.mt19937()
-    self.annotator = annotator
-    self.allow_upside_down_normalized_faces = allow_upside_down_normalized_faces
-
-    # create objects required for face cropping
-    self.cropper = bob.ip.base.FaceEyesNorm(
-        crop_size=cropped_image_size,
-        right_eye=cropped_positions[self.cropped_keys[0]],
-        left_eye=cropped_positions[self.cropped_keys[1]])
-    self.cropped_mask = numpy.ndarray(cropped_image_size, numpy.bool)
-
-  def crop_face(self, image, annotations=None):
-    """Crops the face.
+        cropped_image_size,
+        cropped_positions,
+        fixed_positions=None,
+        mask_sigma=None,
+        mask_neighbors=5,
+        mask_seed=None,
+        annotator=None,
+        allow_upside_down_normalized_faces=False,
+        **kwargs
+    ):
+
+        Base.__init__(self, **kwargs)
+
+        # call base class constructor
+        Preprocessor.__init__(
+            self,
+            cropped_image_size=cropped_image_size,
+            cropped_positions=cropped_positions,
+            fixed_positions=fixed_positions,
+            mask_sigma=mask_sigma,
+            mask_neighbors=mask_neighbors,
+            mask_seed=mask_seed,
+        )
+
+        # check parameters
+        assert len(cropped_positions) == 2
+        if fixed_positions:
+            assert len(fixed_positions) == 2
+
+        # copy parameters
+        self.cropped_image_size = cropped_image_size
+        self.cropped_positions = cropped_positions
+        self.cropped_keys = sorted(cropped_positions.keys())
+        self.fixed_positions = fixed_positions
+        self.mask_sigma = mask_sigma
+        self.mask_neighbors = mask_neighbors
+        self.mask_seed = mask_seed
+        self.annotator = annotator
+        self.allow_upside_down_normalized_faces = allow_upside_down_normalized_faces
+
+        # create the cropped mask; the FaceEyesNorm cropper itself is built
+        # in _init_non_pickables below
+        self.cropped_mask = numpy.ndarray(cropped_image_size, bool)
+
+        self._init_non_pickables()
+
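+    # The mask RNG and the FaceEyesNorm cropper are C++-backed objects that
+    # cannot be pickled; they are built here and rebuilt after unpickling
+    # (see __setstate__ below).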
+    def _init_non_pickables(self):
+        self.mask_rng = (
+            bob.core.random.mt19937(self.mask_seed)
+            if self.mask_seed is not None
+            else bob.core.random.mt19937()
+        )
+        self.cropper = bob.ip.base.FaceEyesNorm(
+            crop_size=self.cropped_image_size,
+            right_eye=self.cropped_positions[self.cropped_keys[0]],
+            left_eye=self.cropped_positions[self.cropped_keys[1]],
+        )
+
+    def crop_face(self, image, annotations=None):
+        """Crops the face.
     Executes the face cropping on the given image and returns the cropped
     version of it.
 
@@ -180,77 +194,96 @@ class FaceCrop (Base):
     ValueError
         If the annotations is None.
     """
-    if self.fixed_positions is not None:
-      annotations = self.fixed_positions
-    if annotations is None:
-      raise ValueError(
-          "Cannot perform image cropping since annotations are not given, and "
-          "no fixed annotations are specified.")
-
-    assert isinstance(annotations, dict)
-    if not all(k in annotations for k in self.cropped_keys):
-      raise ValueError(
-          "At least one of the expected annotations '%s' are not given "
-          "in '%s'." % (self.cropped_keys, annotations.keys()))
-
-    reye = self.cropped_keys[0]
-    leye = self.cropped_keys[1]
-    reye_desired_width = self.cropped_positions[reye][1]
-    leye_desired_width = self.cropped_positions[leye][1]
-    right_eye = annotations[reye]
-    left_eye = annotations[leye]
-    if not self.allow_upside_down_normalized_faces:
-      if (reye_desired_width > leye_desired_width and right_eye[1] < left_eye[1]) or \
-         (reye_desired_width < leye_desired_width and right_eye[1] > left_eye[1]):
-        raise ValueError(
-            "Looks like {leye} and {reye} in annotations: {annot} are swapped. "
-            "This will make the normalized face upside down (compared to the original "
-            "image). Most probably your annotations are wrong. Otherwise, you can set "
-            "the ``allow_upside_down_normalized_faces`` parameter to "
-            "True.".format(leye=leye, reye=reye, annot=annotations))
-
-    # create output
-    mask = numpy.ones(image.shape[-2:], dtype=numpy.bool)
-    shape = self.cropped_image_size if image.ndim == 2 else [
-        image.shape[0]] + list(self.cropped_image_size)
-    cropped_image = numpy.zeros(shape)
-    self.cropped_mask[:] = False
-
-    # perform the cropping
-    self.cropper(
-        image,  # input image
-        mask,   # full input mask
-        cropped_image,  # cropped image
-        self.cropped_mask,  # cropped mask
-        # position of first annotation, usually right eye
-        right_eye=right_eye,
-        # position of second annotation, usually left eye
-        left_eye=left_eye,
-    )
-
-    if self.mask_sigma is not None:
-      # extrapolate the mask so that pixels outside of the image original image
-      # region are filled with border pixels
-      if cropped_image.ndim == 2:
-        bob.ip.base.extrapolate_mask(
-            self.cropped_mask, cropped_image, self.mask_sigma,
-            self.mask_neighbors, self.mask_rng)
-      else:
-        [bob.ip.base.extrapolate_mask(
-            self.cropped_mask, cropped_image_channel, self.mask_sigma,
-            self.mask_neighbors, self.mask_rng)
-         for cropped_image_channel in cropped_image]
-
-    return cropped_image
-
-  def is_annotations_valid(self, annotations):
-    if not annotations:
-      return False
-    # check if the required keys are available
-    return all(key in annotations for key in self.cropped_keys)
-
-  def __call__(self, image, annotations=None):
-    """Aligns the given image according to the given annotations.
+        if self.fixed_positions is not None:
+            annotations = self.fixed_positions
+        if annotations is None:
+            raise ValueError(
+                "Cannot perform image cropping since annotations are not given, and "
+                "no fixed annotations are specified."
+            )
+
+        assert isinstance(annotations, dict)
+        if not all(k in annotations for k in self.cropped_keys):
+            raise ValueError(
+                "At least one of the expected annotations '%s' are not given "
+                "in '%s'." % (self.cropped_keys, annotations.keys())
+            )
+
+        reye = self.cropped_keys[0]
+        leye = self.cropped_keys[1]
+        reye_desired_width = self.cropped_positions[reye][1]
+        leye_desired_width = self.cropped_positions[leye][1]
+        right_eye = annotations[reye]
+        left_eye = annotations[leye]
+        if not self.allow_upside_down_normalized_faces:
+            if (
+                reye_desired_width > leye_desired_width and right_eye[1] < left_eye[1]
+            ) or (
+                reye_desired_width < leye_desired_width and right_eye[1] > left_eye[1]
+            ):
+                raise ValueError(
+                    "Looks like {leye} and {reye} in annotations: {annot} are swapped. "
+                    "This will make the normalized face upside down (compared to the original "
+                    "image). Most probably your annotations are wrong. Otherwise, you can set "
+                    "the ``allow_upside_down_normalized_faces`` parameter to "
+                    "True.".format(leye=leye, reye=reye, annot=annotations)
+                )
+
+        # create output
+        mask = numpy.ones(image.shape[-2:], dtype=bool)
+        shape = (
+            self.cropped_image_size
+            if image.ndim == 2
+            else [image.shape[0]] + list(self.cropped_image_size)
+        )
+        cropped_image = numpy.zeros(shape)
+        self.cropped_mask[:] = False
+
+        # perform the cropping
+        self.cropper(
+            image,  # input image
+            mask,  # full input mask
+            cropped_image,  # cropped image
+            self.cropped_mask,  # cropped mask
+            # position of first annotation, usually right eye
+            right_eye=right_eye,
+            # position of second annotation, usually left eye
+            left_eye=left_eye,
+        )
+
+        if self.mask_sigma is not None:
+            # extrapolate the mask so that pixels outside of the original
+            # image region are filled with border pixels
+            if cropped_image.ndim == 2:
+                bob.ip.base.extrapolate_mask(
+                    self.cropped_mask,
+                    cropped_image,
+                    self.mask_sigma,
+                    self.mask_neighbors,
+                    self.mask_rng,
+                )
+            else:
+                for cropped_image_channel in cropped_image:
+                    bob.ip.base.extrapolate_mask(
+                        self.cropped_mask,
+                        cropped_image_channel,
+                        self.mask_sigma,
+                        self.mask_neighbors,
+                        self.mask_rng,
+                    )
+
+        return cropped_image
+
+    def is_annotations_valid(self, annotations):
+        if not annotations:
+            return False
+        # check if the required keys are available
+        return all(key in annotations for key in self.cropped_keys)
+
+    def __call__(self, image, annotations=None):
+        """Aligns the given image according to the given annotations.
 
     First, the desired color channel is extracted from the given image.
     Afterward, the face is cropped, according to the given ``annotations`` (or
@@ -269,30 +302,48 @@ class FaceCrop (Base):
     face : 2D :py:class:`numpy.ndarray`
         The cropped face.
     """
-    # if annotations are missing and cannot do anything else return None.
-    if not self.is_annotations_valid(annotations) and \
-       not self.fixed_positions and \
-       self.annotator is None:
-      logger.warn("Cannot crop face without valid annotations or "
-                  "fixed_positions or an annotator. Returning None. "
-                  "The annotations were: {}".format(annotations))
-      return None
-
-    # convert to the desired color channel
-    image = self.color_channel(image)
-
-    # annotate the image if annotations are missing
-    if not self.is_annotations_valid(annotations) and \
-       not self.fixed_positions and \
-       self.annotator is not None:
-      annotations = self.annotator(image, annotations=annotations)
-      if not self.is_annotations_valid(annotations):
-        logger.warn("The annotator failed and the annotations are missing too"
-                    ". Returning None.")
-        return None
-
-    # crop face
-    image = self.crop_face(image, annotations)
-
-    # convert data type
-    return self.data_type(image)
+        # if annotations are missing and cannot do anything else return None.
+        if (
+            not self.is_annotations_valid(annotations)
+            and not self.fixed_positions
+            and self.annotator is None
+        ):
+            logger.warning(
+                "Cannot crop face without valid annotations or "
+                "fixed_positions or an annotator. Returning None. "
+                "The annotations were: {}".format(annotations)
+            )
+            return None
+
+        # convert to the desired color channel
+        image = self.color_channel(image)
+
+        # annotate the image if annotations are missing
+        if (
+            not self.is_annotations_valid(annotations)
+            and not self.fixed_positions
+            and self.annotator is not None
+        ):
+            annotations = self.annotator(image, annotations=annotations)
+            if not self.is_annotations_valid(annotations):
+                logger.warning(
+                    "The annotator failed and the annotations are missing too"
+                    ". Returning None."
+                )
+                return None
+
+        # crop face
+        image = self.crop_face(image, annotations)
+
+        # convert data type
+        return self.data_type(image)
+
+    def __getstate__(self):
+        d = dict(self.__dict__)
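+        # drop the unpicklable C++ objects; __setstate__ rebuilds them
+        # through _init_non_pickables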
+        d.pop("mask_rng")
+        d.pop("cropper")
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._init_non_pickables()
diff --git a/bob/bio/face/preprocessor/FaceDetect.py b/bob/bio/face/preprocessor/FaceDetect.py
index a97cd3ec43ce106adda672df97669a137b409216..80d459850c1dc8a4f129e2f1f7d7710c3466d754 100644
--- a/bob/bio/face/preprocessor/FaceDetect.py
+++ b/bob/bio/face/preprocessor/FaceDetect.py
@@ -12,10 +12,12 @@ from .utils import load_cropper_only
 from bob.bio.base.preprocessor import Preprocessor
 
 import logging
+
 logger = logging.getLogger("bob.bio.face")
 
-class FaceDetect (Base):
-  """Performs a face detection (and facial landmark localization) in the given image and crops the face.
+
+class FaceDetect(Base):
+    """Performs a face detection (and facial landmark localization) in the given image and crops the face.
 
   This class is designed to perform a geometric normalization of the face based on the detected face.
   Face detection is performed using :ref:`bob.ip.facedetect <bob.ip.facedetect>`.
@@ -63,74 +65,104 @@ class FaceDetect (Base):
     Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
   """
 
-  def __init__(
-      self,
-      face_cropper,
-      cascade = None,
-      use_flandmark = False,
-      detection_overlap = 0.2,
-      distance = 2,
-      scale_base = math.pow(2., -1./16.),
-      lowest_scale = 0.125,
-      **kwargs
-  ):
-    # call base class constructors
-    Base.__init__(self, **kwargs)
-
-    Preprocessor.__init__(
-      self,
-      face_cropper = face_cropper,
-      cascade = cascade,
-      use_flandmark = use_flandmark,
-      detection_overlap = detection_overlap,
-      distance = distance,
-      scale_base = scale_base,
-      lowest_scale = lowest_scale
-    )
-
-    assert face_cropper is not None
-
-    self.sampler = bob.ip.facedetect.Sampler(scale_factor=scale_base, lowest_scale=lowest_scale, distance=distance)
-    if cascade is None:
-      self.cascade = bob.ip.facedetect.default_cascade()
-    else:
-      self.cascade = bob.ip.facedetect.Cascade(bob.io.base.HDF5File(cascade))
-    self.detection_overlap = detection_overlap
-    self.flandmark = bob.ip.flandmark.Flandmark() if use_flandmark else None
-    self.quality = None
-
-    self.cropper = load_cropper_only(face_cropper)
-
-
-  def _landmarks(self, image, bounding_box):
-    """Try to detect the landmarks in the given bounding box, and return the eye locations."""
-    # get the landmarks in the face
-    if self.flandmark is not None:
-      # use the flandmark detector
-
-      # make the bounding box square shape by extending the horizontal position by 2 pixels times width/20
-      bb = bob.ip.facedetect.BoundingBox(topleft = (bounding_box.top_f, bounding_box.left_f - bounding_box.size[1] / 10.), size = bounding_box.size)
-
-      top = max(bb.top, 0)
-      left = max(bb.left, 0)
-      bottom = min(bb.bottom, image.shape[0])
-      right = min(bb.right, image.shape[1])
-      landmarks = self.flandmark.locate(image, top, left, bottom-top, right-left)
-
-      if landmarks is not None and len(landmarks):
-        return {
-          'reye' : ((landmarks[1][0] + landmarks[5][0])/2., (landmarks[1][1] + landmarks[5][1])/2.),
-          'leye' : ((landmarks[2][0] + landmarks[6][0])/2., (landmarks[2][1] + landmarks[6][1])/2.)
-        }
-      else:
-        logger.warn("Could not detect landmarks -- using estimated landmarks")
-
-    # estimate from default locations
-    return bob.ip.facedetect.expected_eye_positions(bounding_box)
-
-
-  def crop_face(self, image, annotations=None):
-    """crop_face(image, annotations = None) -> face
+    def __init__(
+        self,
+        face_cropper,
+        cascade=None,
+        use_flandmark=False,
+        detection_overlap=0.2,
+        distance=2,
+        scale_base=math.pow(2.0, -1.0 / 16.0),
+        lowest_scale=0.125,
+        **kwargs
+    ):
+        # call base class constructors
+        Base.__init__(self, **kwargs)
+
+        Preprocessor.__init__(
+            self,
+            face_cropper=face_cropper,
+            cascade=cascade,
+            use_flandmark=use_flandmark,
+            detection_overlap=detection_overlap,
+            distance=distance,
+            scale_base=scale_base,
+            lowest_scale=lowest_scale,
+        )
+
+        assert face_cropper is not None
+
+        self.scale_base = scale_base
+        self.lowest_scale = lowest_scale
+        self.distance = distance
+        self.cascade = cascade
+        self.use_flandmark = use_flandmark
+
+        self.detection_overlap = detection_overlap
+        self.quality = None
+
+        self.cropper = load_cropper_only(face_cropper)
+
+        self._init_non_pickables()
+
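+    # sampler, cascade_classifier and flandmark wrap C++ objects that cannot
+    # be pickled and are therefore rebuilt here and in __setstate__; note that
+    # self.cascade keeps the cascade filename (or None), while
+    # cascade_classifier holds the constructed object.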
+    def _init_non_pickables(self):
+        self.sampler = bob.ip.facedetect.Sampler(
+            scale_factor=self.scale_base,
+            lowest_scale=self.lowest_scale,
+            distance=self.distance,
+        )
+
+        if self.cascade is None:
+            self.cascade_classifier = bob.ip.facedetect.default_cascade()
+        else:
+            self.cascade_classifier = bob.ip.facedetect.Cascade(
+                bob.io.base.HDF5File(self.cascade)
+            )
+
+        self.flandmark = bob.ip.flandmark.Flandmark() if self.use_flandmark else None
+
+    def _landmarks(self, image, bounding_box):
+        """Try to detect the landmarks in the given bounding box, and return the eye locations."""
+        # get the landmarks in the face
+        if self.flandmark is not None:
+            # use the flandmark detector
+
+            # shift the bounding box to the left by one tenth of its width
+            # (the size is kept) to give flandmark some horizontal margin
+            bb = bob.ip.facedetect.BoundingBox(
+                topleft=(
+                    bounding_box.top_f,
+                    bounding_box.left_f - bounding_box.size[1] / 10.0,
+                ),
+                size=bounding_box.size,
+            )
+
+            top = max(bb.top, 0)
+            left = max(bb.left, 0)
+            bottom = min(bb.bottom, image.shape[0])
+            right = min(bb.right, image.shape[1])
+            landmarks = self.flandmark.locate(
+                image, top, left, bottom - top, right - left
+            )
+
+            if landmarks is not None and len(landmarks):
+                return {
+                    "reye": (
+                        (landmarks[1][0] + landmarks[5][0]) / 2.0,
+                        (landmarks[1][1] + landmarks[5][1]) / 2.0,
+                    ),
+                    "leye": (
+                        (landmarks[2][0] + landmarks[6][0]) / 2.0,
+                        (landmarks[2][1] + landmarks[6][1]) / 2.0,
+                    ),
+                }
+            else:
+                logger.warn("Could not detect landmarks -- using estimated landmarks")
+
+        # estimate from default locations
+        return bob.ip.facedetect.expected_eye_positions(bounding_box)
+
+    def crop_face(self, image, annotations=None):
+        """crop_face(image, annotations = None) -> face
 
     Detects the face (and facial landmarks), and uses the ``face_cropper`` given in the constructor to crop the face.
 
@@ -147,22 +179,23 @@ class FaceDetect (Base):
     face : 2D or 3D :py:class:`numpy.ndarray` (float)
       The detected and cropped face.
     """
-    uint8_image = image.astype(numpy.uint8)
-    if uint8_image.ndim == 3:
-      uint8_image = bob.ip.color.rgb_to_gray(uint8_image)
-
-    # detect the face
-    bounding_box, self.quality = bob.ip.facedetect.detect_single_face(uint8_image, self.cascade, self.sampler, self.detection_overlap)
+        uint8_image = image.astype(numpy.uint8)
+        if uint8_image.ndim == 3:
+            uint8_image = bob.ip.color.rgb_to_gray(uint8_image)
 
-    # get the eye landmarks
-    annotations = self._landmarks(uint8_image, bounding_box)
+        # detect the face
+        bounding_box, self.quality = bob.ip.facedetect.detect_single_face(
+            uint8_image, self.cascade_classifier, self.sampler, self.detection_overlap
+        )
 
-    # apply face cropping
-    return self.cropper.crop_face(image, annotations)
+        # get the eye landmarks
+        annotations = self._landmarks(uint8_image, bounding_box)
 
+        # apply face cropping
+        return self.cropper.crop_face(image, annotations)
 
-  def __call__(self, image, annotations=None):
-    """__call__(image, annotations = None) -> face
+    def __call__(self, image, annotations=None):
+        """__call__(image, annotations = None) -> face
 
     Aligns the given image according to the detected face bounding box or the detected facial features.
 
@@ -183,11 +216,22 @@ class FaceDetect (Base):
     face : 2D :py:class:`numpy.ndarray`
       The cropped face.
     """
-    # convert to the desired color channel
-    image = self.color_channel(image)
+        # convert to the desired color channel
+        image = self.color_channel(image)
+
+        # detect face and crop it
+        image = self.crop_face(image)
+
+        # convert data type
+        return self.data_type(image)
 
-    # detect face and crop it
-    image = self.crop_face(image)
+    def __getstate__(self):
+        d = dict(self.__dict__)
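+        # drop the detector's unpicklable members; rebuilt in __setstate__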
+        d.pop("sampler")
+        d.pop("cascade_classifier")
+        d.pop("flandmark")
+        return d
 
-    # convert data type
-    return self.data_type(image)
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._init_non_pickables()
diff --git a/bob/bio/face/preprocessor/INormLBP.py b/bob/bio/face/preprocessor/INormLBP.py
index d5827de1156a9331f38966f886f57c88dff56dbc..ed7cae2d07e01686478a25f7306a3ea14aacfd62 100644
--- a/bob/bio/face/preprocessor/INormLBP.py
+++ b/bob/bio/face/preprocessor/INormLBP.py
@@ -24,20 +24,21 @@ from .Base import Base
 from .utils import load_cropper
 from bob.bio.base.preprocessor import Preprocessor
 
-class INormLBP (Base):
-  """Performs I-Norm LBP on the given image"""
 
-  def __init__(
-      self,
-      face_cropper,
-      radius = 2,  # Radius of the LBP
-      is_circular = True, # use circular LBP?
-      compare_to_average = False,
-      elbp_type = 'regular',
-      **kwargs
-  ):
+class INormLBP(Base):
+    """Performs I-Norm LBP on the given image"""
 
-    """Parameters of the constructor of this preprocessor:
+    def __init__(
+        self,
+        face_cropper,
+        radius=2,  # Radius of the LBP
+        is_circular=True,  # use circular LBP?
+        compare_to_average=False,
+        elbp_type="regular",
+        **kwargs
+    ):
+
+        """Parameters of the constructor of this preprocessor:
 
     face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
       The face image cropper that should be applied to the image.
@@ -61,35 +62,41 @@ class INormLBP (Base):
       Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
     """
 
-    # call base class constructors
-    Base.__init__(self, **kwargs)
-
-    Preprocessor.__init__(
-        self,
-        face_cropper = face_cropper,
-        radius = radius,
-        is_circular = is_circular,
-        compare_to_average = compare_to_average,
-        elbp_type = elbp_type
-    )
-
-    # lbp extraction
-    self.lbp_extractor = bob.ip.base.LBP(
-        neighbors = 8,
-        radius = radius,
-        circular = is_circular,
-        to_average = compare_to_average,
-        add_average_bit = False,
-        uniform = False,
-        elbp_type = elbp_type,
-        border_handling = 'wrap'
-    )
-
-    self.cropper = load_cropper(face_cropper)
-
-
-  def __call__(self, image, annotations = None):
-    """__call__(image, annotations = None) -> face
+        # call base class constructors
+        Base.__init__(self, **kwargs)
+
+        Preprocessor.__init__(
+            self,
+            face_cropper=face_cropper,
+            radius=radius,
+            is_circular=is_circular,
+            compare_to_average=compare_to_average,
+            elbp_type=elbp_type,
+        )
+
+        self.radius = radius
+        self.is_circular = is_circular
+        self.compare_to_average = compare_to_average
+        self.elbp_type = elbp_type
+        self.cropper = load_cropper(face_cropper)
+
+        self._init_non_pickables()
+
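+    # the bob.ip.base.LBP extractor is a C++ object that cannot be pickled;
+    # it is rebuilt here and again in __setstate__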
+    def _init_non_pickables(self):
+        # lbp extraction
+        self.lbp_extractor = bob.ip.base.LBP(
+            neighbors=8,
+            radius=self.radius,
+            circular=self.is_circular,
+            to_average=self.compare_to_average,
+            add_average_bit=False,
+            uniform=False,
+            elbp_type=self.elbp_type,
+            border_handling="wrap",
+        )
+
+    def __call__(self, image, annotations=None):
+        """__call__(image, annotations = None) -> face
 
     Aligns the given image according to the given annotations.
 
@@ -113,15 +120,24 @@ class INormLBP (Base):
       The cropped and photometrically enhanced face.
     """
 
-    image = self.color_channel(image)
-    if self.cropper is not None:
+        image = self.color_channel(image)
+        if self.cropper is not None:
+
+            # TODO: under Dask, self.cropper may arrive as a functools.partial;
+            # we need to decide how to handle this properly
+
+            if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
+                self.cropper = self.cropper()
+
+            image = self.cropper.crop_face(image, annotations)
+        image = self.lbp_extractor(image)
+        return self.data_type(image)
 
-      # TODO: IN DASK, SELF.CROPPER IS A FUNCTOOLS
-      # WE NEED TO THINK HOW TO PROPERLY APPROACH THIS ISSUE
-      
-      if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
-          self.cropper = self.cropper()
+    def __getstate__(self):
+        d = dict(self.__dict__)
+        d.pop("lbp_extractor")
+        return d
 
-      image = self.cropper.crop_face(image, annotations)
-    image = self.lbp_extractor(image)
-    return self.data_type(image)
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._init_non_pickables()
diff --git a/bob/bio/face/preprocessor/SelfQuotientImage.py b/bob/bio/face/preprocessor/SelfQuotientImage.py
index 5fc84359ed93f5c0487350368392d86b13d5e22c..f1612fa67bfe390b5b4ee39216e0a8128423a0ad 100644
--- a/bob/bio/face/preprocessor/SelfQuotientImage.py
+++ b/bob/bio/face/preprocessor/SelfQuotientImage.py
@@ -25,8 +25,9 @@ from .Base import Base
 from .utils import load_cropper
 from bob.bio.base.preprocessor import Preprocessor
 
-class SelfQuotientImage (Base):
-  """Crops the face (if desired) and applies self quotient image algorithm [WLW04]_ to photometrically enhance the image.
+
+class SelfQuotientImage(Base):
+    """Crops the face (if desired) and applies self quotient image algorithm [WLW04]_ to photometrically enhance the image.
 
   **Parameters:**
 
@@ -44,30 +45,28 @@ class SelfQuotientImage (Base):
     Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
   """
 
-  def __init__(
-      self,
-      face_cropper,
-      sigma = math.sqrt(2.),
-      **kwargs
-  ):
+    def __init__(self, face_cropper, sigma=math.sqrt(2.0), **kwargs):
+
+        Base.__init__(self, **kwargs)
 
-    Base.__init__(self, **kwargs)
+        # call base class constructor with its set of parameters
+        Preprocessor.__init__(self, face_cropper=face_cropper, sigma=sigma)
 
-    # call base class constructor with its set of parameters
-    Preprocessor.__init__(
-        self,
-        face_cropper = face_cropper,
-        sigma = sigma
-    )
+        self.cropper = load_cropper(face_cropper)
 
-    self.cropper = load_cropper(face_cropper)
+        size = max(1, int(3.0 * sigma))
 
-    size = max(1, int(3. * sigma))
-    self.self_quotient = bob.ip.base.SelfQuotientImage(size_min = size, sigma = sigma)
+        self.size = size
+        self.sigma = sigma
+        self._init_non_pickables()
 
+    def _init_non_pickables(self):
+        self.self_quotient = bob.ip.base.SelfQuotientImage(
+            size_min=self.size, sigma=self.sigma
+        )
 
-  def __call__(self, image, annotations = None):
-    """__call__(image, annotations = None) -> face
+    def __call__(self, image, annotations=None):
+        """__call__(image, annotations = None) -> face
 
     Aligns the given image according to the given annotations.
 
@@ -90,8 +89,18 @@ class SelfQuotientImage (Base):
     face : 2D :py:class:`numpy.ndarray`
       The cropped and photometrically enhanced face.
     """
-    image = self.color_channel(image)
-    if self.cropper is not None:
-      image = self.cropper.crop_face(image, annotations)
-    image = self.self_quotient(image)
-    return self.data_type(image)
+        image = self.color_channel(image)
+        if self.cropper is not None:
+            image = self.cropper.crop_face(image, annotations)
+        image = self.self_quotient(image)
+        return self.data_type(image)
+
+    def __getstate__(self):
+        d = dict(self.__dict__)
+        d.pop("self_quotient")
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._init_non_pickables()
diff --git a/bob/bio/face/preprocessor/TanTriggs.py b/bob/bio/face/preprocessor/TanTriggs.py
index 9ef08b8c91dcfdc41bd84dd4e3acf4671ee711da..23976e205353d661da753c29583b81da66c80127 100644
--- a/bob/bio/face/preprocessor/TanTriggs.py
+++ b/bob/bio/face/preprocessor/TanTriggs.py
@@ -24,8 +24,9 @@ from .Base import Base
 from .utils import load_cropper
 from bob.bio.base.preprocessor import Preprocessor
 
-class TanTriggs (Base):
-  """Crops the face (if desired) and applies Tan&Triggs algorithm [TT10]_ to photometrically enhance the image.
+
+class TanTriggs(Base):
+    """Crops the face (if desired) and applies Tan&Triggs algorithm [TT10]_ to photometrically enhance the image.
 
   **Parameters:**
 
@@ -43,38 +44,49 @@ class TanTriggs (Base):
     Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
   """
 
-  def __init__(
-      self,
-      face_cropper,
-      gamma = 0.2,
-      sigma0 = 1,
-      sigma1 = 2,
-      size = 5,
-      threshold = 10.,
-      alpha = 0.1,
-      **kwargs
-  ):
-
-    Base.__init__(self, **kwargs)
-
-    # call base class constructor with its set of parameters
-    Preprocessor.__init__(
+    def __init__(
         self,
-        face_cropper = face_cropper,
-        gamma = gamma,
-        sigma0 = sigma0,
-        sigma1 = sigma1,
-        size = size,
-        threshold = threshold,
-        alpha = alpha
-    )
-
-    self.cropper = load_cropper(face_cropper)
-    self.tan_triggs = bob.ip.base.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
-
-
-  def __call__(self, image, annotations = None):
-    """__call__(image, annotations = None) -> face
+        face_cropper,
+        gamma=0.2,
+        sigma0=1,
+        sigma1=2,
+        size=5,
+        threshold=10.0,
+        alpha=0.1,
+        **kwargs
+    ):
+
+        Base.__init__(self, **kwargs)
+
+        # call base class constructor with its set of parameters
+        Preprocessor.__init__(
+            self,
+            face_cropper=face_cropper,
+            gamma=gamma,
+            sigma0=sigma0,
+            sigma1=sigma1,
+            size=size,
+            threshold=threshold,
+            alpha=alpha,
+        )
+
+        self.gamma = gamma
+        self.sigma0 = sigma0
+        self.sigma1 = sigma1
+        self.size = size
+        self.threshold = threshold
+        self.alpha = alpha
+
+        self.cropper = load_cropper(face_cropper)
+        self._init_non_pickables()
+
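+    # the bob.ip.base.TanTriggs operator is C++-backed; it is excluded from
+    # the pickled state and recreated here and in __setstate__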
+    def _init_non_pickables(self):
+        self.tan_triggs = bob.ip.base.TanTriggs(
+            self.gamma, self.sigma0, self.sigma1, self.size, self.threshold, self.alpha
+        )
+
+    def __call__(self, image, annotations=None):
+        """__call__(image, annotations = None) -> face
 
     Aligns the given image according to the given annotations.
 
@@ -97,12 +109,19 @@ class TanTriggs (Base):
     face : 2D :py:class:`numpy.ndarray`
       The cropped and photometrically enhanced face.
     """
-    if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
-        self.cropper = self.cropper()
-    
-    image = self.color_channel(image)
-    if self.cropper is not None:
-      image = self.cropper.crop_face(image, annotations)
-    image = self.tan_triggs(image)
-
-    return self.data_type(image)
+
+        image = self.color_channel(image)
+        if self.cropper is not None:
+            image = self.cropper.crop_face(image, annotations)
+        image = self.tan_triggs(image)
+
+        return self.data_type(image)
+
+    def __getstate__(self):
+        d = dict(self.__dict__)
+        d.pop("tan_triggs")
+        return d
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self._init_non_pickables()
diff --git a/bob/bio/face/test/test_picklability.py b/bob/bio/face/test/test_picklability.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7461a4fdbb33e89658b50c966fd58115721199f
--- /dev/null
+++ b/bob/bio/face/test/test_picklability.py
@@ -0,0 +1,46 @@
+import bob.bio.face
+from bob.pipelines.utils import assert_picklable
+
+
+def test_face_crop():
+    CROPPED_IMAGE_HEIGHT = 64
+    CROPPED_IMAGE_WIDTH = 64
+    RIGHT_EYE_POS = (16.0, 15.5)
+    LEFT_EYE_POS = (16.0, 48.0)
+    cropper = bob.bio.face.preprocessor.FaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+        color_channel="rgb",
+        dtype="uint8",
+    )
+    assert_picklable(cropper)
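+
+    # a minimal manual round-trip sketch on top of assert_picklable:
+    # __getstate__ drops the C++-backed members and __setstate__ is expected
+    # to rebuild them through _init_non_pickables
+    import pickle
+
+    restored = pickle.loads(pickle.dumps(cropper))
+    assert hasattr(restored, "cropper") and hasattr(restored, "mask_rng")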
+
+
+def test_face_detect():
+    face_detect = bob.bio.face.preprocessor.FaceDetect(face_cropper="face-crop-eyes")
+    assert_picklable(face_detect)
+
+    face_detect = bob.bio.face.preprocessor.FaceDetect(
+        face_cropper="face-crop-eyes", use_flandmark=True
+    )
+    assert_picklable(face_detect)
+
+
+def test_INormLBP():
+    face_crop = bob.bio.face.preprocessor.INormLBP(face_cropper="face-crop-eyes")
+    assert_picklable(face_crop)
+
+
+def test_TanTriggs():
+    face_crop = bob.bio.face.preprocessor.TanTriggs(face_cropper="face-crop-eyes")
+    assert_picklable(face_crop)
+
+
+def test_SQI():
+    face_crop = bob.bio.face.preprocessor.SelfQuotientImage(
+        face_cropper="face-crop-eyes"
+    )
+    assert_picklable(face_crop)
+
+
+def test_HistogramEqualization():
+    face_crop = bob.bio.face.preprocessor.HistogramEqualization(
+        face_cropper="face-crop-eyes"
+    )
+    assert_picklable(face_crop)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index ae19d8825daf5662427122911ae6430190fa6f40..52f440f6f8e107733c808f4283e707157c495652 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -43,6 +43,7 @@ requirements:
     - bob.learn.boosting
     - bob.ip.facedetect
     - bob.ip.flandmark
+    - bob.pipelines
     - matplotlib {{ matplotlib }}
     - six {{ six }}
   run:
diff --git a/requirements.txt b/requirements.txt
index 4dc31984ef4954f5d97cf1a33972148d2169222c..c6320c065a300b4f82811a1aeb4a0639d5adc750 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,5 +20,6 @@ bob.bio.base
 bob.learn.boosting
 bob.ip.facedetect
 bob.ip.flandmark
+bob.pipelines
 matplotlib   # for plotting
 six