diff --git a/bob/pad/face/config/lbp_64.py b/bob/pad/face/config/lbp_64.py
deleted file mode 100644
index e887e4c3c62f9cc17166818e92e444dfed2005b8..0000000000000000000000000000000000000000
--- a/bob/pad/face/config/lbp_64.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import bob.pipelines as mario
-from bob.bio.face.utils import face_crop_solver, get_default_cropped_positions
-from bob.bio.video.transformer import VideoWrapper
-from bob.pad.face.extractor import LBPHistogram
-
-database = globals().get("database")
-if database is not None:
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-else:
-    annotation_type = None
-    fixed_positions = None
-
-# Preprocessor #
-cropped_image_size = (64, 64)
-cropped_positions = get_default_cropped_positions(
-    "pad", cropped_image_size, annotation_type
-)
-cropper = face_crop_solver(
-    cropped_image_size=cropped_image_size,
-    cropped_positions=cropped_positions,
-    color_channel="gray",
-    fixed_positions=fixed_positions,
-)
-preprocessor = VideoWrapper(cropper)
-preprocessor = mario.wrap(
-    ["sample"],
-    preprocessor,
-    transform_extra_arguments=(("annotations", "annotations"),),
-)
-
-# Extractor #
-extractor = VideoWrapper(
-    LBPHistogram(
-        lbp_type="uniform",
-        elbp_type="regular",
-        radius=1,
-        neighbors=8,
-        circular=False,
-        dtype=None,
-    )
-)
-extractor = mario.wrap(["sample"], extractor)
diff --git a/bob/pad/face/database/database.py b/bob/pad/face/database/database.py
index 003450a03ffdbe2fa2982b25e0d999ab15ef685f..b32aa67b1d78af4c97edb60eace8ce1060ebb50c 100644
--- a/bob/pad/face/database/database.py
+++ b/bob/pad/face/database/database.py
@@ -1,7 +1,7 @@
 from functools import partial
 import os
 import bob.bio.video
-from bob.db.base.annotations import read_annotation_file
+from bob.bio.base.utils.annotations import read_annotation_file
 from sklearn.preprocessing import FunctionTransformer
 from bob.bio.video import VideoAsArray
 from bob.pipelines import DelayedSample
diff --git a/bob/pad/face/extractor/ImageQualityMeasure.py b/bob/pad/face/extractor/ImageQualityMeasure.py
index f7c22bf112c7a25266185b7db864b2242fe7b039..a494f70116ffb0bc69093237cd737cffa4e0fc66 100644
--- a/bob/pad/face/extractor/ImageQualityMeasure.py
+++ b/bob/pad/face/extractor/ImageQualityMeasure.py
@@ -1,8 +1,6 @@
 import logging
 
 import numpy as np
-from bob.ip.qualitymeasure import galbally_iqm_features as iqm
-from bob.ip.qualitymeasure import msu_iqa_features as iqa
 from sklearn.preprocessing import FunctionTransformer
 
 logger = logging.getLogger(__name__)
diff --git a/bob/pad/face/extractor/LBPHistogram.py b/bob/pad/face/extractor/LBPHistogram.py
deleted file mode 100644
index 75d9b9719ed78ce6791110ac438bf970ec3216e1..0000000000000000000000000000000000000000
--- a/bob/pad/face/extractor/LBPHistogram.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from bob.ip.base import LBP, histogram
-import numpy as np
-from sklearn.base import BaseEstimator
-from sklearn.base import TransformerMixin
-
-
-class LBPHistogram(TransformerMixin, BaseEstimator):
-    """Calculates a normalized LBP histogram over an image.
-    These features are implemented based on [CAM12]_.
-
-    Parameters
-    ----------
-    lbp_type : str
-        The type of the LBP operator (regular, uniform or riu2)
-    elbp_type : str
-        Which type of LBP codes should be computed; possible values: ('regular',
-        'transitional', 'direction-coded'). For the old 'modified' method,
-        specify `elbp_type` as 'regular` and `to_average` as True.
-    to_average : bool
-        Compare the neighbors to the average of the pixels instead of the central pixel?
-    radius : float
-        The radius of the circle on which the points are taken (for circular LBP)
-    neighbors : int
-        The number of points around the central point on which LBP is
-        computed (4, 8, 16)
-    circular : bool
-        True if circular LBP is needed, False otherwise
-    n_hor : int
-        Number of blocks horizontally for spatially-enhanced LBP/MCT
-        histograms. Default: 1
-    n_vert
-        Number of blocks vertically for spatially-enhanced LBP/MCT
-        histograms. Default: 1
-
-    Attributes
-    ----------
-    dtype : numpy.dtype
-        If a ``dtype`` is specified in the constructor, it is assured that the
-        resulting features have that dtype.
-    lbp : bob.ip.base.LBP
-        The LPB extractor object.
-    """
-
-    def __init__(
-        self,
-        lbp_type="uniform",
-        elbp_type="regular",
-        to_average=False,
-        radius=1,
-        neighbors=8,
-        circular=False,
-        dtype=None,
-        n_hor=1,
-        n_vert=1,
-        **kwargs,
-    ):
-
-        super().__init__(**kwargs)
-        self.lbp_type = lbp_type
-        self.elbp_type = elbp_type
-        self.to_average = to_average
-        self.radius = radius
-        self.neighbors = neighbors
-        self.circular = circular
-        self.dtype = dtype
-        self.n_hor = n_hor
-        self.n_vert = n_vert
-
-        self.fit()
-
-    def fit(self, X=None, y=None):
-
-        self.lbp_ = LBP(
-            neighbors=self.neighbors,
-            radius=self.radius,
-            circular=self.circular,
-            to_average=self.to_average,
-            uniform=self.lbp_type in ("uniform", "riu2"),
-            rotation_invariant=self.lbp_type == "riu2",
-            elbp_type=self.elbp_type,
-        )
-        return self
-
-    def __getstate__(self):
-        d = self.__dict__.copy()
-        d.pop("lbp_")
-        return d
-
-    def __setstate__(self, state):
-        self.__dict__.update(state)
-        self.fit()
-
-    def comp_block_histogram(self, data):
-        """
-        Extracts LBP/MCT histograms from a gray-scale image/block.
-
-        Takes data of arbitrary dimensions and linearizes it into a 1D vector;
-        Then, calculates the histogram.
-        enforcing the data type, if desired.
-
-        Parameters
-        ----------
-        data : numpy.ndarray
-            The preprocessed data to be transformed into one vector.
-
-        Returns
-        -------
-        1D :py:class:`numpy.ndarray`
-            The extracted feature vector, of the desired ``dtype`` (if
-            specified)
-        """
-        assert isinstance(data, np.ndarray)
-
-        # allocating the image with lbp codes
-        lbpimage = np.ndarray(self.lbp_.lbp_shape(data), "uint16")
-        self.lbp_(data, lbpimage)  # calculating the lbp image
-        hist = histogram(lbpimage, (0, self.lbp_.max_label - 1), self.lbp_.max_label)
-        hist = hist / np.sum(hist)  # histogram normalization
-        if self.dtype is not None:
-            hist = hist.astype(self.dtype)
-        return hist
-
-    def transform_one_image(self, data):
-        """
-        Extracts spatially-enhanced LBP/MCT histograms from a gray-scale image.
-
-        Parameters
-        ----------
-        data : numpy.ndarray
-            The preprocessed data to be transformed into one vector.
-
-        Returns
-        -------
-        1D :py:class:`numpy.ndarray`
-            The extracted feature vector, of the desired ``dtype`` (if
-            specified)
-
-        """
-
-        # Make sure the data can be split into equal blocks:
-        row_max = int(data.shape[0] / self.n_vert) * self.n_vert
-        col_max = int(data.shape[1] / self.n_hor) * self.n_hor
-        data = data[:row_max, :col_max]
-
-        blocks = [
-            sub_block
-            for block in np.hsplit(data, self.n_hor)
-            for sub_block in np.vsplit(block, self.n_vert)
-        ]
-
-        hists = [self.comp_block_histogram(block) for block in blocks]
-
-        hist = np.hstack(hists)
-
-        hist = hist / len(blocks)  # histogram normalization
-
-        return hist
-
-    def transform(self, images):
-        return [self.transform_one_image(img) for img in images]
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
diff --git a/bob/pad/face/extractor/__init__.py b/bob/pad/face/extractor/__init__.py
index 2878cf784b6bcd3e5996f11a9fa580a452674daa..3f778fb4306e949a32b0131ce3d35a3a3e9aec69 100644
--- a/bob/pad/face/extractor/__init__.py
+++ b/bob/pad/face/extractor/__init__.py
@@ -1,4 +1,3 @@
-from .LBPHistogram import LBPHistogram
 from .ImageQualityMeasure import ImageQualityMeasure
 
 
@@ -21,7 +20,6 @@ def __appropriate__(*args):
 
 
 __appropriate__(
-    LBPHistogram,
     ImageQualityMeasure,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/qualitymeasure/__init__.py b/bob/pad/face/qualitymeasure/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/qualitymeasure/filters.py b/bob/pad/face/qualitymeasure/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a0d698812ffc6f80d3fd0ca47360b34c8c3a634
--- /dev/null
+++ b/bob/pad/face/qualitymeasure/filters.py
@@ -0,0 +1,29 @@
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+import scipy.ndimage
+import numpy as np
+
+
+def sobel(image):
+    """
+    Implements the Sobel filter like ``bob.ip.base.sobel``.
+
+    Performs a Sobel filtering of the input image with both the vertical and
+    the horizontal filter. A Sobel filter is an edge detector, which detects
+    either horizontal or vertical edges. The two filters are given as:
+
+    .. math:: S_y =  \\left\\lgroup\\begin{array}{ccc} -1 & -2 & -1 \\\\ 0 & 0 & 0 \\\\ 1 & 2 & 1 \\end{array}\\right\\rgroup \\qquad S_x = \\left\\lgroup\\begin{array}{ccc} -1 & 0 & 1 \\\\ -2 & 0 & 2 \\\\ -1 & 0 & 1 \\end{array}\\right\\rgroup
+
+    The result is a ``numpy.float64`` array with two layers of the same size
+    as the input image: the response of the vertical filter is in the first
+    layer (``result[0]``), while the response of the horizontal filter is in
+    the second layer (``result[1]``).
+    """
+
+    mask_v = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
+    mask_h = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
+
+    return np.array(
+        [
+            scipy.ndimage.convolve(image, weights=mask_v),
+            scipy.ndimage.convolve(image, weights=mask_h),
+        ]
+    )
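+
+
+"""
+A minimal usage sketch, assuming a 2D gray-level array as input::
+
+    import numpy as np
+    from bob.pad.face.qualitymeasure.filters import sobel
+
+    img = np.arange(25, dtype=float).reshape(5, 5)
+    edges = sobel(img)  # shape (2, 5, 5)
+    vertical, horizontal = edges[0], edges[1]
+"""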
diff --git a/bob/pad/face/qualitymeasure/galbally_iqm_features.py b/bob/pad/face/qualitymeasure/galbally_iqm_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb3f62635794281d5f7bdb07b360e0edf0f98227
--- /dev/null
+++ b/bob/pad/face/qualitymeasure/galbally_iqm_features.py
@@ -0,0 +1,842 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+"""
+Created on 25 Sep 2015
+
+@author: sbhatta
+"""
+
+
+import numpy as np
+import scipy.signal as ssg
+import scipy.ndimage as snf
+
+from .filters import sobel
+
+
+"""
+compute_quality_features is the main function to be called, to extract a set of
+image quality-features computed for the input image
+:param image: 2d numpy array. Should contain input image of size [M,N] (i.e. M
+    rows x N cols).
+:return featSet: a tuple of float-scalars, each representing one image-quality
+    measure.
+"""
+
+
+def compute_quality_features(image):
+    """Extract a set of image quality-features computed for the input image.
+
+    Parameters:
+
+    image (:py:class:`numpy.ndarray`): A ``uint8`` array with 2 or 3
+        dimensions, representing the input image of shape [M,N] (M rows x N
+        cols). If 2D, image should contain a gray-image of shape [M,N]. If 3D,
+        image should have a shape [3,M,N], and should contain an RGB-image.
+
+
+    Returns:
+
+    featSet (:py:class:`numpy.ndarray`): a 1D numpy array of 18 float32
+        scalars, each representing one image-quality measure. This function
+        returns a subset of the image-quality features (for face anti-spoofing)
+        that have been described by Galbally et al. in their paper:
+        "Image Quality Assessment for Fake Biometric Detection: Application to
+        Iris, Fingerprint, and Face Recognition", IEEE Trans. on Image
+        Processing Vol 23(2), 2014.
+    """
+    gray_image = None
+    # print("shape of input image:")
+    # print(image.shape)
+    if len(image.shape) == 3:
+        if image.shape[0] == 3:
+            # compute gray-level image for input color-frame
+            gray_image = matlab_rgb2gray(image)
+        #             print(gray_image.shape)
+        else:
+            print("error. Wrong kind of input image")
+    else:
+        if len(image.shape) == 2:
+            gray_image = image
+        #             print(gray_image.shape)
+        else:
+            print("error -- wrong kind of input")
+
+    if gray_image is not None:
+
+        gwin = gauss_2d((3, 3), 0.5)  # set up the smoothing-filter
+        #         print("computing degraded version of image")
+        smoothed = ssg.convolve2d(gray_image, gwin, boundary="symm", mode="same")
+
+        """
+        Some of the image-quality measures computed here require a reference
+        image. For these measures, we use the input-image itself as a
+        reference-image, and we compute the quality-measure of a smoothed
+        version of the input-image. The assumption in this approach is that
+        smoothing degrades a spoof-image more than it does a genuine image (see
+        Galbally's paper referenced above).
+        """
+        #         print("computing galbally quality features")
+        featSet = image_quality_measures(gray_image, smoothed)
+
+        return featSet
+
+    else:
+        return None
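+
+
+"""
+A minimal usage sketch, assuming a channel-first uint8 RGB frame (the random
+frame below is purely illustrative)::
+
+    import numpy as np
+
+    frame = np.random.randint(0, 256, (3, 64, 64), dtype=np.uint8)
+    feats = compute_quality_features(frame)  # 1D float32 array, 18 entries
+"""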
+
+
+"""
+actually computes various measures of similarity between the two input images,
+but also returns some descriptors of the reference-image that are independent
+of any other image. Returns a tuple of 18 values, each of which is a float-
+scalar. The quality measures computed in this function correspond to the Image-
+quality features discussed in Galbally et al., 2014.
+"""
+
+
+def image_quality_measures(refImage, testImage):
+    """Compute image-quality measures for testImage and return a tuple of
+    quality-measures. Some of the quality-measures require a reference-
+    image, but others are 'no-reference' measures.
+    :input refImage: 2d numpy array. Should represent input 8-bit gray-level
+        image of size [M,N].
+    :input testImage: 2d numpy array. Should represent input 8-bit gray-
+        level image of size [M,N]..
+    :return a tuple of 18 values, each of which is a float-scalar. The
+         quality measures computed in this function correspond to the Image-
+         quality features discussed in Galbally et al., 2014.
+    """
+    assert len(refImage.shape) == 2, "refImage should be a 2D array"
+    assert len(testImage.shape) == 2, "testImage should be a 2D array"
+    assert (
+        refImage.shape[0] == testImage.shape[0]
+    ), "The two images should have the same height"
+    assert (
+        refImage.shape[1] == testImage.shape[1]
+    ), "The two images should have the same width"
+
+    diffImg = refImage.astype(np.float64) - testImage.astype(np.float64)
+    diffSq = np.square(diffImg)
+    sumDiffSq = np.sum(diffSq)
+    absDiffImg = np.absolute(diffImg)
+
+    refSq = np.square(refImage.astype(np.float64))
+    sumRefSq = np.sum(refSq)
+
+    # number of pixels in each image
+    numPx = refImage.shape[0] * refImage.shape[1]
+    maxPxVal = 255.0
+
+    # 1 MSE
+    mse00 = float(sumDiffSq) / float(numPx)
+
+    # 2 PSNR
+    psnr01 = np.inf
+    if mse00 > 0:
+        psnr01 = 10.0 * np.log10(maxPxVal * maxPxVal / mse00)
+
+    # 3 AD: Average difference
+    ad02 = float(np.sum(diffImg)) / float(numPx)
+
+    # 4 SC: structural content
+    testSq = np.square(testImage.astype(np.float64))
+    sumTestSq = np.sum(testSq)
+    sc03 = np.inf
+    if sumTestSq > 0:
+        sc03 = float(sumRefSq) / float(sumTestSq)
+
+    # 5 NK: normalized cross-correlation
+    imgProd = refImage * testImage  # element-wise product
+    nk04 = float(np.sum(imgProd)) / float(sumRefSq)
+
+    # 6 MD: Maximum difference
+    md05 = float(np.amax(absDiffImg))
+
+    # 7 LMSE: Laplacian MSE. scipy's implementation of the laplacian differs
+    # from Matlab's version, especially at the image-borders. Two significant
+    # differences between scipy.ndimage.laplace and Matlab's del2() are:
+    #    a. Matlab del2() divides the convolution result by 4, so the ratio
+    #       (scipy.laplace() result)/(del2()-result) is 4
+    #    b. Matlab does a different kind of processing at the boundaries, so
+    #       the results at the boundaries are different in the 2 calls.
+    # In Galbally's Matlab code, there is a factor of 4, which I have dropped
+    # (no difference in result), because it is implicit in
+    # scipy.ndimage.laplace()
+    # mode can be 'wrap', 'reflect', 'nearest', 'mirror', or ['constant' with
+    # a specified value]
+    op = snf.laplace(refImage, mode="reflect")
+    opSq = np.square(op)
+    sum_opSq = np.sum(opSq)
+    tmp1 = op - (snf.laplace(testImage, mode="reflect"))
+    num_op = np.square(tmp1)
+    lmse06 = float(np.sum(num_op)) / float(sum_opSq)
+
+    # 8 NAE: normalized abs. error
+    sumRef = np.sum(np.absolute(refImage))
+    nae07 = float(np.sum(absDiffImg)) / float(sumRef)
+
+    # 9 SNRv: SNR in db
+    snrv08 = 10.0 * np.log10(float(sumRefSq) / float(sumDiffSq))
+
+    # 10 RAMDv: R-averaged max diff (r=10)
+    # implementation below is same as what Galbally does in Matlab
+    r = 10
+    # the [::-1] flips the sorted vector, so that it is in descending order
+    sorted_diff = np.sort(diffImg.flatten())[::-1]
+    topsum = np.sum(sorted_diff[0:r])
+    ramdv09 = np.sqrt(float(topsum) / float(r))
+
+    # 11,12: MAS: Mean Angle Similarity, MAMS: Mean Angle-Magnitude Similarity
+    mas10, mams11 = angle_similarity(refImage, testImage, diffImg)
+
+    fftRef = np.fft.fft2(refImage)
+    # fftTest = np.fft.fft2(testImage)
+
+    # 13, 14: SME: spectral magnitude error; SPE: spectral phase error
+    # spectralSimilarity(fftRef, fftTest, numPx)
+    sme12, spe13 = spectral_similarity(refImage, testImage)
+
+    # 15 TED: Total edge difference
+    # ted14 = edge_similarity(refImage, testImage)
+
+    # 16 TCD: Total corner difference
+    # tcd15 = corner_similarity(refImage, testImage)
+
+    # 17, 18: GME: gradient-magnitude error; GPE: gradient phase error
+    gme16, gpe17 = gradient_similarity(refImage, testImage)
+
+    # 19 SSIM
+    ssim18, _ = ssim(refImage, testImage)
+
+    # 20 VIF
+    vif19 = vif(refImage, testImage)
+
+    # 21,22,23,24,25: RRED, BIQI, JQI, NIQE: these parameters are not computed
+    # here.
+
+    # 26 HLFI: high-low frequency index (implemented as done by Galbally in
+    # Matlab).
+    hlfi25 = high_low_freq_index(fftRef, refImage.shape[1])
+
+    return np.asarray(
+        (
+            mse00,
+            psnr01,
+            ad02,
+            sc03,
+            nk04,
+            md05,
+            lmse06,
+            nae07,
+            snrv08,
+            ramdv09,
+            mas10,
+            mams11,
+            sme12,
+            gme16,
+            gpe17,
+            ssim18,
+            vif19,
+            hlfi25,
+        ),
+        dtype=np.float32,
+    )
+
+
+"""
+Matlab-like RGB to gray...
+"""
+
+
+def matlab_rgb2gray(rgbImage):
+    """converts color rgbImage to gray to produce exactly the same result as
+    Matlab would.
+    Inputs:
+    rgbImage: numpy array of shape [3, height, width]
+    Return:
+    numpy array of shape [height, width] containing a gray-image with
+    floating-point pixel values, in the range [(16.0/255) .. (235.0/255)]
+    """
+    # g1 = 0.299*rgbFrame[0,:,:] + 0.587*rgbFrame[1,:,:] +
+    # 0.114*rgbFrame[2,:,:] #standard coeffs CCIR601
+    # this is how it's done in matlab...
+    rgbImage = rgbImage / 255.0
+    C0 = 65.481 / 255.0
+    C1 = 128.553 / 255.0
+    C2 = 24.966 / 255.0
+    scaleMin = 16.0 / 255.0
+    # scaleMax = 235.0/255.0
+    gray = scaleMin + (
+        C0 * rgbImage[0, :, :] + C1 * rgbImage[1, :, :] + C2 * rgbImage[2, :, :]
+    )
+    return gray
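+
+
+"""
+A quick sanity check for matlab_rgb2gray() (the coefficients match the luma
+row of Matlab's rgb2ycbcr): a pure-white input maps to the top of the
+[16/255 .. 235/255] output range::
+
+    import numpy as np
+
+    white = np.full((3, 2, 2), 255.0)
+    matlab_rgb2gray(white)  # every pixel is ~235.0/255.0
+"""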
+
+
+"""
+SSIM: Structural Similarity index between two gray-level images. The dynamic
+range is assumed to be 0..255.
+Ref:Z. Wang, A.C. Bovik, H.R. Sheikh and E.P. Simoncelli:
+    "Image Quality Assessment: From error measurement to Structural Similarity"
+    IEEE Trans. on Image Processing, 13(1), 01/2004
+    @param refImage: 2D numpy array (reference image)
+    @param testImage: 2D numpy array (test image)
+    Both input images should have the same dimensions. This is assumed, and not
+    verified in this function
+    @return ssim: float-scalar. The mean structural similarity between the 2
+        input images.
+    @return ssim_map: the SSIM index map of the test image (this map is smaller
+        than the test image).
+"""
+
+
+def ssim(refImage, testImage):
+    """Compute and return SSIM between two images.
+
+    Parameters:
+
+    refImage: 2D numpy array (reference image)
+    testImage: 2D numpy array (test image)
+
+    Returns:
+
+    Returns ssim and ssim_map
+    ssim: float-scalar. The mean structural similarity between the 2 input
+        images.
+    ssim_map: the SSIM index map of the test image (this map is smaller than
+        the test image).
+    """
+    M = refImage.shape[0]
+    N = refImage.shape[1]
+
+    winSz = 11  # window size for gaussian filter
+    winSgm = 1.5  # sigma for gaussian filter
+
+    # input image should be at least 11x11 in size.
+    if (M < winSz) or (N < winSz):
+        ssim_index = -np.inf
+        ssim_map = -np.inf
+
+        return ssim_index, ssim_map
+
+    # construct the gaussian filter
+    gwin = gauss_2d((winSz, winSz), winSgm)
+
+    # constants taken from the initial matlab implementation provided by
+    # Bovik's lab.
+    K1 = 0.01
+    K2 = 0.03
+    L = 255  # dynamic range.
+
+    C1 = (K1 * L) * (K1 * L)
+    C2 = (K2 * L) * (K2 * L)
+    # refImage=refImage.astype(np.float)
+    # testImage=testImage.astype(np.float)
+
+    # ssg is scipy.signal
+    mu1 = ssg.convolve2d(refImage, gwin, mode="valid")
+    mu2 = ssg.convolve2d(testImage, gwin, mode="valid")
+
+    mu1Sq = mu1 * mu1
+    mu2Sq = mu2 * mu2
+    mu1_mu2 = mu1 * mu2
+
+    sigma1_sq = ssg.convolve2d((refImage * refImage), gwin, mode="valid") - mu1Sq
+    sigma2_sq = ssg.convolve2d((testImage * testImage), gwin, mode="valid") - mu2Sq
+    sigma12 = ssg.convolve2d((refImage * testImage), gwin, mode="valid") - mu1_mu2
+
+    assert C1 > 0 and C2 > 0, (
+        "Conditions for computing ssim with this code are not met. "
+        "Set the Ks and L to values > 0."
+    )
+    num1 = (2.0 * mu1_mu2 + C1) * (2.0 * sigma12 + C2)
+    den1 = (mu1Sq + mu2Sq + C1) * (sigma1_sq + sigma2_sq + C2)
+    ssim_map = num1 / den1
+
+    ssim = np.average(ssim_map)
+
+    return ssim, ssim_map
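+
+
+"""
+A minimal sanity check for ssim(): an image compared with itself (at least
+11x11 pixels, as required by the filter window) yields a mean SSIM of 1.0::
+
+    import numpy as np
+
+    img = np.tile(np.arange(32, dtype=float), (32, 1))
+    s, s_map = ssim(img, img)  # s == 1.0
+"""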
+
+
+def vif(refImage, testImage):
+    """
+    VIF: Visual Information Fidelity measure.
+    Ref: H.R. Sheikh and A.C. Bovik: "Image Information and Visual Quality",
+    IEEE Trans. Image Processing. Adapted from Galbally's matlab code, which
+    was provided by Bovik et al's LIVE lab.
+        @param refImage: 2D numpy array (reference image)
+        @param testImage: 2D numpy array (test image)
+        Both input images should have the same dimensions. This is assumed, and
+        not verified in this function
+        @return vifp: float-scalar. Measure of visual information fidelity
+            between the 2 input images
+    """
+    sigma_nsq = 2.0
+    num = 0
+    den = 0
+
+    # sc is scale, taking values (1,2,3,4)
+    for sc in range(1, 5):
+        N = (2 ** (4 - sc + 1)) + 1
+        win = gauss_2d((N, N), (float(N) / 5.0))
+
+        if sc > 1:
+            refImage = ssg.convolve2d(refImage, win, mode="valid")
+            testImage = ssg.convolve2d(testImage, win, mode="valid")
+            # downsample by factor 2 in each direction
+            refImage = refImage[::2, ::2]
+            testImage = testImage[::2, ::2]
+
+        mu1 = ssg.convolve2d(refImage, win, mode="valid")
+        mu2 = ssg.convolve2d(testImage, win, mode="valid")
+        mu1Sq = mu1 * mu1
+        mu2Sq = mu2 * mu2
+        mu1_mu2 = mu1 * mu2
+
+        sigma1_sq = ssg.convolve2d((refImage * refImage), win, mode="valid") - mu1Sq
+        sigma2_sq = ssg.convolve2d((testImage * testImage), win, mode="valid") - mu2Sq
+        sigma12 = ssg.convolve2d((refImage * testImage), win, mode="valid") - mu1_mu2
+
+        sigma1_sq[sigma1_sq < 0] = 0  # set negative filter responses to 0.
+        sigma2_sq[sigma2_sq < 0] = 0
+
+        g = sigma12 / (sigma1_sq + 1e-10)
+        sv_sq = sigma2_sq - g * sigma12
+
+        g[(sigma1_sq < 1e-10)] = 0
+        sv_sq[sigma1_sq < 1e-10] = sigma2_sq[sigma1_sq < 1e-10]
+        sigma1_sq[sigma1_sq < 1e-10] = 0
+
+        g[(sigma2_sq < 1e-10)] = 0
+        sv_sq[sigma2_sq < 1e-10] = 0
+
+        sv_sq[g < 0] = sigma2_sq[g < 0]
+        g[g < 0] = 0
+        # sic. As implemented in the original matlab version...
+        sv_sq[sv_sq <= 1e-10] = 1e-10
+
+        m1 = g * g * sigma1_sq
+        m2 = sv_sq + sigma_nsq
+        m3 = np.log10(1 + m1 / m2)
+
+        m4 = np.log10(1 + (sigma1_sq / sigma_nsq))
+
+        num += np.sum(m3)
+        den += np.sum(m4)
+
+    vifp = num / den
+    return vifp
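+
+
+"""
+A minimal sanity check for vif(): with the test image equal to the reference
+the fidelity is numerically ~1.0. The input must be large enough to survive
+the four filter-and-downsample scales; 128x128 works::
+
+    import numpy as np
+
+    img = np.random.default_rng(0).uniform(0.0, 255.0, (128, 128))
+    vif(img, img)  # ~= 1.0
+"""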
+
+
+def high_low_freq_index(imgFFT, ncols):
+    """
+    HLFI: relative difference between high- and low-frequency energy in image.
+    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
+    metrics", IEEE Trans. Image Processing, 12, 2003.
+    @param imgFFT: 2D numpy array of complex numbers, representing Fourier
+        transform of test image.
+    @param ncols: int. Number of columns in image.
+    @return float-scalar.
+    """
+
+    N = ncols
+    colHalf = int(round(N / 2))  # (N/2) + (N % 2) #round it up
+    freqSel = 0.15
+
+    freqCol = round(freqSel * N)
+    lowFreqColHalf = int(round(freqCol / 2.0))
+
+    fftRes = imgFFT  # np.fft.fft2(image)
+    fftMag = np.abs(fftRes)
+    totalEnergy = np.sum(fftMag)
+    # print(totalEnergy)
+
+    lowIdx = colHalf - lowFreqColHalf
+    hiIdx = colHalf + lowFreqColHalf
+    LowFreqMag = fftMag[:, lowIdx:hiIdx]
+    lowFreqMagTotal = np.sum(LowFreqMag)
+
+    fftMag[:, lowIdx:hiIdx] = 0
+    highFreqMagTotal = np.sum(fftMag)
+
+    highLowFreqIQ = np.abs(lowFreqMagTotal - highFreqMagTotal) / float(totalEnergy)
+
+    return highLowFreqIQ
+
+
+def gradient_similarity(refImage, testImage):
+    """
+    Image similarity based on gradient. Computes the mean phase and magnitude
+    difference of gradient between input reference and test images.
+    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
+    metrics", IEEE Trans. Image Processing, 12, 2003.
+        @param refImage: 2D numpy array (reference image)
+        @param testImage: 2D numpy array (test image)
+        Both input images should have the same dimensions. This is assumed, and
+        not verified in this function.
+        @return difGradMag: float-scalar. Mean difference in gradient-
+            magnitude.
+        @return difGradPhase: float-scalar. Mean difference in gradient-phase.
+    """
+
+    # we assume that testImage is of the same shape as refImage
+    numPx = refImage.shape[0] * refImage.shape[1]
+
+    # compute gradient (a la matlab) for reference image
+    # 5: spacing of 5 pixels between 2 sites of grad. evaluation.
+    refGrad = np.gradient(refImage, 5, 5)
+
+    refReal = refGrad[0]
+    refImag = refGrad[1]
+    refGradComplex = refReal + 1j * refImag
+
+    refMag = np.abs(refGradComplex)
+    refPhase = np.arctan2(refImag, refReal)
+
+    # compute gradient for test image
+    # 5: spacing of 5 pixels between 2 sites of grad. evaluation (applies to
+    # both dims).
+    testGrad = np.gradient(testImage, 5, 5)
+    testReal = testGrad[0]
+    testImag = testGrad[1]
+    testGradComplex = testReal + 1j * testImag
+
+    testMag = np.abs(testGradComplex)
+    testPhase = np.arctan2(testImag, testReal)
+
+    absPhaseDiff = np.abs(refPhase - testPhase)
+    difGradPhase = (np.sum(absPhaseDiff)) / float(numPx)
+
+    absMagDiff = np.abs(refMag - testMag)
+    difGradMag = float(np.sum(absMagDiff)) / float(numPx)
+
+    return difGradMag, difGradPhase
+
+
+def testRegionalMax():
+    A = 10 * np.ones([10, 10])
+    A[1:4, 1:4] = 22
+    A[5:8, 5:8] = 33
+    A[1, 6] = 44
+    A[2, 7] = 45
+    A[3, 8] = 44
+    rm = regionalmax(A)
+    print(A)
+    print(rm)
+
+
+def regionalmax(img):
+    """
+    find local maxima using 3x3 mask.
+    Used in corner_similarity()
+    Should produce results very similar to matlabs imregionalmax()
+    @param img: 2d numpy array. Image-like, containing a 'cornerness'-index for
+        every pixel.
+    @return regmax: 2d numpy array. Binary image showing corners (which are
+        regions of local maxima in input image).
+    """
+    h = img.shape[0]
+    w = img.shape[1]
+
+    # extend input image borders by repeating border-values
+    b = np.zeros((img.shape[0] + 2, img.shape[1] + 2))
+    b[1:-1, 1:-1] = img
+    b[0, :] = b[1, :]
+    b[:, 0] = b[:, 1]
+    b[-1, :] = b[-2, :]
+    b[:, -1] = b[:, -2]
+
+    # will contain the output bitmap showing local maxima.
+    regmax = np.zeros((h, w), dtype="uint8")
+
+    for i in range(1, h + 1):
+        for j in range(1, w + 1):
+            subim = b[i - 1 : i + 2, j - 1 : j + 2]
+            lmax = np.amax(subim)
+            lmin = np.amin(subim)
+            if b[i, j] == lmax and b[i, j] > lmin:
+                regmax[i - 1, j - 1] = 1
+
+    for i in range(1, h):
+        for j in range(w):
+            if regmax[i, j] == 1:
+                imin = i - 1
+                if imin < 0:
+                    imin = 0
+                imax = i + 2
+                if imax > h:
+                    imax = h
+                for k in range(imin, imax):
+                    jmin = j - 1
+                    if jmin < 0:
+                        jmin = 0
+                    jmax = j + 2
+                    if jmax > w:
+                        jmax = w
+                    for l in range(jmin, jmax):
+                        if img[k, l] == img[i, j]:
+                            regmax[k, l] = 1
+
+    return regmax
+
+
+def cornerMetric(image):
+    """
+    returns a 'cornerness' image, where each pixel-value specifies the 'degree
+    of cornerness' of the corresponding pixel in input image The 'cornerness'
+    image is of size (N-2, M-2) for an input image of size (N,M) (no cornerness
+    computed for the border pixel of input.)
+    @param image: 2d numpy array. Input image for which cornerness needs to be
+        computed.
+    @return cornerness: 2d numpy array giving a 'cornerness'-value for the
+        input image.
+    """
+    image = image.astype(np.float64)
+
+    sensitivity_factor = 0.4
+    gwin = gauss_2d((5, 5), 1.5)
+
+    vfilt = np.array([-1, 0, 1], ndmin=2)
+    hfilt = vfilt.T
+    A = ssg.convolve2d(image, vfilt, boundary="symm", mode="same")
+    B = ssg.convolve2d(image, hfilt, boundary="symm", mode="same")
+    # crop out the valid portions of the filter-response (same size for both A
+    # and B)
+    A = A[1:-2, 1:-2]
+    B = B[1:-2, 1:-2]
+
+    # compute products of A, B, C
+    C = A * B
+    A = A * A
+    B = B * B
+
+    # filter A, B, and C
+    A = ssg.convolve2d(A, gwin, boundary="symm", mode="valid")
+    B = ssg.convolve2d(B, gwin, boundary="symm", mode="valid")
+    C = ssg.convolve2d(C, gwin, boundary="symm", mode="valid")
+
+    ABsum = A + B
+    cornerness = (A * B) - (C * C) - sensitivity_factor * (ABsum * ABsum)
+
+    return cornerness
+
+
+def corner_similarity(refImage, testImage):
+    """
+    compute the corner-based similarity between 2 images (how close are the
+    numbers of corners found in the two images?).
+    returns an index between 0 and 1. The smaller the better.
+    @param refImage: 2D numpy array (reference image)
+    @param testImage: 2D numpy array (test image)
+    @return float-scalar.
+    """
+
+    C = cornerMetric(refImage)
+    C_peaks = regionalmax(C)
+
+    # imshow(C_peaks)
+
+    CG = cornerMetric(testImage)
+    CG_peaks = regionalmax(CG)
+
+    nCornersRef = np.sum(C_peaks)
+    nCornersTest = np.sum(CG_peaks)
+    # print('CornerSim:: %f %f', %(nCornersRef, nCornersTest) )
+
+    maxCorners = max(nCornersRef, nCornersTest)
+
+    qCornersDiff = np.fabs(nCornersRef - nCornersTest) / float(maxCorners)
+
+    return qCornersDiff
+
+
+def edge_similarity(refImage, testImage):
+    """
+    Similarity between the edge-maps of the two input images.
+    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
+    metrics", IEEE Trans. Image Processing, 12, 2003.
+    @param refImage: 2D numpy array (reference image)
+    @param testImage: 2D numpy array (test image)
+    @return float-scalar
+    """
+
+    # bob..sobel returns filter-responses which need to be thresholded to get
+    # the edge-map
+    thinning = 1
+    refImage = refImage.astype(np.float64)
+
+    # compute edge map for reference image
+    # returns 3D image. 1st dim is the edge-direction. 1st component is
+    # vertical; 2nd component is hor. responses
+    refSobel_sep = sobel(refImage)
+    refSobelX = refSobel_sep[0, :, :]
+    refSobelY = refSobel_sep[1, :, :]
+    refEdge = edge_thinning(refSobelX[:, :], refSobelY[:, :], thinning)
+
+    # compute edge map for test image
+    testSobel_sep = sobel(testImage)
+    testSobelX = testSobel_sep[0, :, :]
+    testSobelY = testSobel_sep[1, :, :]
+    testEdge = edge_thinning(testSobelX[:, :], testSobelY[:, :], thinning)
+
+    numPx = refImage.shape[0] * refImage.shape[1]
+    numRefEdgePx = np.sum(refEdge)
+    numTestEdgePx = np.sum(testEdge)
+    qEdgeD = np.abs(numRefEdgePx - numTestEdgePx) / float(numPx)
+
+    return qEdgeD
+
+
+def edge_thinning(bx, by, thinning=1):
+    """
+    function to perform edge-thining in the same way as done in Matlab. Called
+    in edge_similarity()
+    Returns a binary edge-map (uint8 image).
+    @param  bx: vertical edge-filter responses (for example, response of 1 of
+        the two Sobel filters)
+    @param  by: horizontal edge-filter responses
+    @param  thinning: [0,1]. Default:1, implies 'do edge-thinning'. If set to
+        0, no edge-thinning is done. bx and by should be of the same shape
+    """
+    assert (len(bx.shape) == 2) and (
+        len(by.shape) == 2
+    ), "bx and by should be 2D arrays."
+    assert (bx.shape[0] == by.shape[0]) and (
+        bx.shape[1] == by.shape[1]
+    ), "bx and by should have the same shape."
+    m = bx.shape[0]
+    n = by.shape[1]
+    # will contain the resulting edge-map.
+    e = np.zeros([m, n], dtype=np.uint8)
+
+    # compute the edge-strength from the 2 directional filter-responses
+    b = np.sqrt(bx * bx + by * by)
+
+    # compute the threshold a la Matlab (as described in "Digital Image
+    # Processing" book by W.K. Pratt.
+    scale = 4
+    cutoff = scale * np.mean(b)
+
+    # np.spacing(1) is the same as eps in matlab.
+    myEps = np.spacing(1) * 100.0
+    # compute the edge-map a la Matlab
+
+    if not thinning:
+        e = b > cutoff
+    else:
+        b1 = np.ones_like(b, dtype=bool)
+        b2 = np.ones_like(b, dtype=bool)
+        b3 = np.ones_like(b, dtype=bool)
+        b4 = np.ones_like(b, dtype=bool)
+
+        c1 = b > cutoff
+
+        b1[:, 1:] = (np.roll(b, 1, axis=1) < b)[:, 1:]
+        b2[:, :-1] = (np.roll(b, -1, axis=1) < b)[:, :-1]
+        c2 = (bx >= (by - myEps)) & b1 & b2
+
+        b3[1:, :] = (np.roll(b, 1, axis=0) < b)[1:, :]
+        b4[:-1, :] = (np.roll(b, -1, axis=0) < b)[:-1, :]
+        c3 = (by >= (bx - myEps)) & b3 & b4
+
+        e = c1 & (c2 | c3)
+
+    return e
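+
+
+"""
+A minimal usage sketch, pairing edge_thinning() with the sobel() helper
+imported from .filters (resp[0] holds the vertical filter responses,
+resp[1] the horizontal ones)::
+
+    import numpy as np
+
+    img = np.random.default_rng(0).uniform(0.0, 255.0, (32, 32))
+    resp = sobel(img)
+    emap = edge_thinning(resp[0], resp[1], thinning=1)  # binary edge-map
+"""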
+
+
+def spectral_similarity(refImage, testImage):
+    """
+    @param refImage: 2D numpy array (reference image)
+    @param testImage: 2D numpy array (test image)
+    @return sme: float-scalar. Mean difference in magnitudes of spectra of the
+        two images.
+    @return spe: float-scalar. Mean difference in phases of spectra of the two
+        images.
+    """
+
+    # assume that ref and test images have the same shape
+    rows = refImage.shape[0]
+    cols = refImage.shape[1]
+    numPx = rows * cols
+    fftRef = np.fft.rfft2(refImage)
+    fftTest = np.fft.rfft2(testImage)
+
+    refMag = np.abs(fftRef)
+    testMag = np.abs(fftTest)
+    absMagDiff = np.abs(refMag - testMag)
+    # SME: spectral magnitude error
+    sme = np.sum(absMagDiff * absMagDiff) / float(numPx)
+
+    # SPE: spectral phase error
+    refPhase = np.angle(fftRef)
+    testPhase = np.angle(fftTest)
+    absPhaseDiff = np.abs(refPhase - testPhase)
+    spe = np.sum(absPhaseDiff * absPhaseDiff) / float(numPx)
+
+    return sme, spe
+
+
+def angle_similarity(refImage, testImage, diffImage):
+    """
+    Cosine-Similarity between the rows of the two input images.
+    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
+    metrics", IEEE Trans. Image Processing, 12, 2003.
+    @param refImage: 2D numpy array (reference image)
+    @param testImage: 2D numpy array (test image)
+    @param diffImage: 2D numpy array. Difference between refImage and
+        testImage. Not strictly necessary as input but precomputed here, to
+        save computation time.
+    @return mas: float-scalar. Mean angle-similarity.
+    @return mams: float-scalar. Mean angle-magnitude similarity.
+    """
+    mas = None
+    mams = None
+
+    numPx = refImage.shape[0] * refImage.shape[1]
+
+    refNorm = np.linalg.norm(refImage, axis=1)
+    testNorm = np.linalg.norm(testImage, axis=1)
+    diffNorm = np.linalg.norm(diffImage, axis=1)
+    magnitVec = diffNorm / 255.0  # Galbally divides by sqrt(255**2)
+    magnitVec = np.reshape(magnitVec, (refImage.shape[0], 1))
+
+    # np.einsum('ij,ij->i',a,b) is equivalent to np.diag(np.dot(a,b.T))
+    cosTheta = np.einsum("ij,ij->i", refImage, testImage) / (refNorm * testNorm)
+    cosTheta[cosTheta < -1.0] = -1.0
+    cosTheta[cosTheta > 1.0] = 1.0
+    cosTheta = np.nan_to_num(cosTheta)
+    thetaVec = np.arccos(cosTheta).reshape((refImage.shape[0], 1))
+
+    tmp2 = thetaVec * 2.0 / np.pi
+
+    # MAS: mean angle similarity
+    mas = 1.0 - (sum(tmp2) / float(numPx))
+
+    tmp3 = 1.0 - tmp2
+    tmp4 = 1.0 - magnitVec
+    chi = 1.0 - (tmp3 * tmp4)
+    # MAMS: mean angle-magnitude similarity
+    mams = sum(chi) / float(numPx)
+
+    return (float(mas), float(mams))
+
+
+def gauss_2d(shape=(3, 3), sigma=0.5):
+    """
+    Returns a 2D gaussian-filter matrix equivalent of matlab
+    fspecial('gaussian'...)
+
+    Works correctly.
+    @param shape: tuple defining the size of the desired filter in each
+        dimension. Elements of tuple should be 2 positive odd-integers.
+    @param sigma: float-scalar
+    @return h: 2D numpy array. Contains weights for 2D gaussian filter.
+    2D gaussian mask - should give the same result as MATLAB's
+    fspecial('gaussian',[shape],[sigma])
+    """
+    m, n = [(ss - 1.0) / 2.0 for ss in shape]
+    y, x = np.ogrid[-m : m + 1, -n : n + 1]
+    h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
+    h[h < np.finfo(h.dtype).eps * h.max()] = 0
+    sumh = h.sum()
+    if sumh != 0:
+        h /= sumh
+    return h
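+
+
+"""
+A minimal sanity check for gauss_2d(): the returned kernel has the requested
+shape and unit sum, like Matlab's fspecial('gaussian')::
+
+    g = gauss_2d((3, 3), 0.5)
+    g.shape  # (3, 3)
+    g.sum()  # ~= 1.0
+"""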
diff --git a/bob/pad/face/qualitymeasure/msu_iqa_features.py b/bob/pad/face/qualitymeasure/msu_iqa_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9e86be3d4642a4723a3653fbfe7bda6add68b21
--- /dev/null
+++ b/bob/pad/face/qualitymeasure/msu_iqa_features.py
@@ -0,0 +1,469 @@
+"""
+Created on 9 Feb 2016
+
+@author: sbhatta
+"""
+
+import numpy as np
+import scipy.signal as ssg
+
+from bob.pad.face.qualitymeasure import galbally_iqm_features as iqm
+from bob.pad.face.qualitymeasure import tan_specular_highlights as tsh
+from bob.bio.face.color import rgb_to_hsv, rgb_to_gray
+from .filters import sobel
+
+""" Utility functions """
+
+
+def matlab_rgb2hsv(rgbImage):
+    # first normalize the range of values to 0-1
+
+    # rgbImage = rgbImage.astype(np.float64) / 255.0
+
+    # hsv = np.zeros_like(rgbImage)
+    hsv = rgb_to_hsv(rgbImage)
+
+    h = hsv[0, :, :]
+    s = hsv[1, :, :]
+    v = hsv[2, :, :]
+    return (h, s, v)
+
+
+def imshow(image):
+    import matplotlib
+    from matplotlib import pyplot as plt
+
+    if len(image.shape) == 3:
+        # imshow() expects color image in a slightly different format, so first
+        # rearrange the 3d data for imshow...
+        outImg = image.tolist()
+        print(len(outImg))
+        result = np.dstack((outImg[0], outImg[1]))
+        outImg = np.dstack((result, outImg[2]))
+        # [:,:,1], cmap=mpl.cm.gray)
+        plt.imshow((outImg * 255.0).astype(np.uint8))
+
+    else:
+        if len(image.shape) == 2:
+            # display gray image.
+            plt.imshow(image.astype(np.uint8), cmap=matplotlib.cm.gray)
+
+    plt.show()
+
+
+"""Auxilliary functions"""
+
+
+def sobelEdgeMap(image, orientation="both"):
+
+    # bob..sobel returns filter-responses which need to be thresholded to get
+    # the edge-map
+    thinning = 1
+    refImage = image.astype(np.float64)
+
+    # compute edge map for reference image
+    # returns 3D image. 1st dim is the edge-direction. 1st component is
+    # vertical; 2nd component is hor. responses
+    refSobel_sep = sobel(refImage)
+
+    refSobelX = refSobel_sep[0, :, :]
+    refSobelY = refSobel_sep[1, :, :]
+    if orientation == "horizontal":
+        refEdge = iqm.edge_thinning(refSobelX[:, :], refSobelX[:, :], thinning)
+    elif orientation == "vertical":
+        refEdge = iqm.edge_thinning(refSobelY[:, :], refSobelY[:, :], thinning)
+    else:
+        refEdge = iqm.edge_thinning(refSobelX[:, :], refSobelY[:, :], thinning)
+
+    return refEdge
+
+
+def compute_msu_iqa_features(rgbImage):
+    """Computes image-quality features for the given input color (RGB) image.
+    This is the main function to call.
+
+    Parameters:
+
+    rgbImage (:py:class:`numpy.ndarray`): A ``uint8`` array with 3 dimensions,
+        representing the RGB input image of shape [3,M,N] (M rows x N cols).
+
+    Returns:
+
+    featSet (:py:class:`numpy.ndarray`): a 1D numpy array of 121 float32
+        scalars. This function returns the image-quality features (for face
+        anti-spoofing) that have been described by Wen et al. in their paper:
+        "Face spoof detection with image distortion analysis", IEEE Trans.
+        on Information Forensics and Security, vol. 10(4), pp. 746-761, April
+        2015.
+    """
+    rgbImage = rgbImage.copy()
+    assert len(rgbImage.shape) == 3, (
+        "compute_msu_iqa_features():: image should be "
+        "a 3D array (containing a rgb image)"
+    )
+    # defined above; wraps Bob's rgb_to_hsv().
+    h, s, v = matlab_rgb2hsv(rgbImage)
+
+    grayImage = rgb_to_gray(rgbImage)
+
+    # compute blur-features
+    blurFeat = blurriness(grayImage)
+
+    pinaBlur = marzilianoBlur(grayImage)
+    pinaBlur /= 30.0
+
+    # compute color-diversity features
+    colorHist, totNumColors = calColorHist(rgbImage)
+    totNumColors /= 2000.0  # as done in Matlab code provided by MSU.
+
+    # calculate mean, deviation and skewness of each channel
+    # use histogram shifting for the hue channel
+    momentFeatsH = calmoment_shift(h)
+
+    momentFeats = momentFeatsH.copy()
+    momentFeatsS = calmoment(s)
+    momentFeats = np.hstack((momentFeats, momentFeatsS))
+    momentFeatsV = calmoment(v)
+    momentFeats = np.hstack((momentFeats, momentFeatsV))
+
+    # compute the image-specularity features
+    speckleFeats = compute_iqa_specularity_features(rgbImage, startEps=0.06)
+
+    # stack the various feature-values in the same order as in MSU's matlab
+    # code.
+    fv = speckleFeats.copy()
+
+    fv = np.hstack((fv, momentFeats))
+    fv = np.hstack((fv, colorHist))
+    fv = np.hstack((fv, totNumColors))
+    fv = np.hstack((fv, blurFeat))
+    fv = np.hstack((fv, pinaBlur))
+
+    return fv
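+
+
+"""
+A minimal usage sketch, assuming a channel-first uint8 RGB frame; the random
+frame is purely illustrative, and the specularity step makes the call take a
+few seconds::
+
+    import numpy as np
+
+    frame = np.random.randint(0, 256, (3, 64, 64), dtype=np.uint8)
+    feats = compute_msu_iqa_features(frame)  # 1D array of 121 features
+"""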
+
+
+def compute_iqa_specularity_features(rgbImage, startEps=0.05):
+    """Returns three features characterizing the specularity present in input
+    color image. First the specular and diffuse components of the input image
+    are separated using the highlight-removal method of Tan and Ikeuchi
+    (implemented in tan_specular_highlights.py); the features summarize the
+    specular residue.
+    """
+
+    # separate the specular and diffuse components of input color image.
+    speckleFreeImg, diffuseImg, speckleImg = tsh.remove_highlights(
+        rgbImage.astype(float), startEps, verboseFlag=False
+    )
+    # speckleImg contains the specular-component
+
+    if len(speckleImg.shape) == 3:
+        speckleImg = speckleImg[0]
+    speckleImg = speckleImg.clip(min=0)
+
+    speckleMean = np.mean(speckleImg)
+    # factors 1.5 and 4.0 are proposed by Wen et al. in their paper and matlab
+    # code.
+    lowSpeckleThresh = speckleMean * 1.5
+    hiSpeckleThresh = speckleMean * 4.0
+    specklePixels = speckleImg[
+        np.where(
+            np.logical_and(speckleImg >= lowSpeckleThresh, speckleImg < hiSpeckleThresh)
+        )
+    ]
+
+    # percentage of specular pixels in image
+    r = float(specklePixels.flatten().shape[0]) / (
+        speckleImg.shape[0] * speckleImg.shape[1]
+    )
+    m = np.mean(specklePixels)  # mean-specularity (of specular-pixels)
+    s = np.std(specklePixels)  # std. of specularity (of specular-pixels)
+
+    # scaling by factor of 150 is as done by Wen et al. in their matlab code.
+    return np.asarray((r, m / 150.0, s / 150.0), dtype=np.float32)
+
+
+def marzilianoBlur(image):
+    """Method proposed by Marziliano et al. for determining the average width
+    of vertical edges, as a measure of blurredness in an image. (Reimplemented
+    from the Matlab code provided by MSU.)
+
+    :param image: 2D gray-level (face) image
+    """
+
+    assert len(image.shape) == 2, (
+        "marzilianoBlur():: input image should be " "a 2D array (gray level image)"
+    )
+
+    # compute vertical edge-map of image using sobel
+    edgeMap = sobelEdgeMap(image, "vertical")
+
+    # There will be some difference between the result of this function and
+    # the Matlab version, because the edgeMap produced by sobelEdgeMap() is
+    # not exactly the same as that produced by Matlab's edge() function.
+    # Given an edge-map generated in Matlab, this function produces the same
+    # result as the Matlab version of MarzilianoBlur().
+
+    blurImg = image
+    C = blurImg.shape[1]  # number of cols in image
+    # row, col contain the indices of the pixels that comprise edge-map.
+    (row, col) = edgeMap.nonzero()
+
+    blurMetric = 0
+    if len(row) > 0:
+
+        # to make the following code work in a similar fashion to the original
+        # matlab code, sort the cols in ascending order, and sort the rows
+        # according to the cols.
+        #     ind = np.lexsort((row,col))
+        #     row = row[ind]
+        #     col = col[ind]
+        # print('lexsort_col: %d' % (1+col))
+        # print('lexsort_row: %d' % (1+row))
+        # This was only used for debugging (to compare with Matlab code). In
+        # fact it is not necessary, so it is commented out.
+
+        edgeWidths = np.zeros_like(row, dtype=int)
+
+        for i in range(len(row)):
+            rEdge = row[i]
+            cEdge = col[i]
+            # instead of setting them to 'inf' as in MSU's Matlab version
+            cStart = 0
+            cEnd = 0
+
+            # we want to estimate the edge-width, which will be cEnd - cStart.
+
+            # search for start of edge in horizontal direction
+            if cEdge > 0:  # i.e., edge is not on the left-border
+                # 2.1: search left of current pixel (backwards)
+                if (
+                    blurImg[rEdge, cEdge] > blurImg[rEdge, cEdge - 1]
+                ):  # edge corresponds to a local peak; estimate left-
+                    # extent of peak
+                    j = cEdge - 1
+                    while j > 0 and blurImg[rEdge, j] >= blurImg[rEdge, j - 1]:
+                        j -= 1
+                    cStart = j
+                else:  # edge corresponds to a local valley; determine left-
+                    # extent of valley
+                    j = cEdge - 1
+                    while j > 0 and blurImg[rEdge, j] <= blurImg[rEdge, j - 1]:
+                        j -= 1
+                    cStart = j
+
+            # search for end of edge in horizontal direction
+            cEnd = C - 1  # initialize to right-border of image -- the max.
+            # possible position for cEnd
+            if cEdge < C - 1:
+                if blurImg[rEdge, cEdge] > blurImg[rEdge, cEdge + 1]:
+                    # edge corresponds to a local peak; estimate right-extent
+                    # of peak
+                    j = cEdge + 1
+                    while j < C - 1 and blurImg[rEdge, j] >= blurImg[rEdge, j + 1]:
+                        j += 1
+                    cEnd = j
+                else:
+                    # edge corresponds to a local valley; determine right-
+                    # extent of valley
+                    j = cEdge + 1
+                    while j < C - 1 and blurImg[rEdge, j] <= blurImg[rEdge, j + 1]:
+                        j += 1
+                    cEnd = j
+
+            edgeWidths[i] = cEnd - cStart
+
+            # sanity-check (edgeWidths should not have negative values)
+            negSum = np.sum(edgeWidths[np.where(edgeWidths < 0)])
+            assert negSum == 0, (
+                "marzilianoBlur():: edgeWidths[] contains "
+                "negative values. YOU CANNOT BE SERIOUS!"
+            )
+
+        # Final metric computation
+        blurMetric = np.mean(edgeWidths)
+
+    # compute histogram of edgeWidths ...(later)
+    # binnum = 100;
+    # t = ((1:binnum) - .5) .* C ./ binnum;
+    # whist = hist(width_array, t) ./ length(width_array);
+
+    return blurMetric
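+
+
+"""
+A minimal usage sketch for marzilianoBlur(); the random gray image below is
+purely illustrative (real inputs are gray-level face crops)::
+
+    import numpy as np
+
+    gray = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
+    marzilianoBlur(gray)  # mean width (in pixels) of the vertical edges
+"""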
+
+
+def calmoment(channel, regionMask=None):
+    """returns the first 3 statistical moments (mean, standard-dev., skewness)
+    and 2 other first-order statistical measures of input image
+    :param channel: 2D array containing gray-image-like data
+    """
+
+    assert len(channel.shape) == 2, (
+        "calmoment():: channel should be " "a 2D array (a single color-channel)"
+    )
+
+    t = np.arange(0.05, 1.05, 0.05) + 0.025  # t = 0.05:0.05:1;
+
+    # pixnum = length(channel(:));
+    nPix = np.prod(channel.shape)
+    m = np.mean(channel)  # m = mean(channel(:));
+    # d = sqrt(sum((channel(:) - m) .^ 2) / pixnum);
+    d = np.std(channel)
+    # s = sum(((channel(:) - m) ./ d) .^ 3) / pixnum;
+    s = np.sum(np.power(((channel - m) / d), 3)) / nPix
+    # print(t)
+    myHH = np.histogram(channel, t)[0]
+    # print(myHH)
+    # hh = hist(channel(:),t) / pixnum;
+    hh = myHH.astype(float) / nPix
+
+    # H = np.array([m,d,s, np.sum(hh[0:1]), np.sum(hh[-2:-1])])  # H = [m d s
+    # sum(hh(1:2)) sum(hh(end-1:end))];
+    H = np.array([m, d, s])
+    s0 = np.sum(hh[0:2])
+    # print(s0)
+    H = np.hstack((H, s0))
+    s1 = np.sum(hh[-2:])
+    # print(s1)
+    H = np.hstack((H, s1))
+
+    return H
+
+
+def calmoment_shift(channel):
+    assert len(channel.shape) == 2, (
+        "calmoment_shift():: channel should be a " "2D array (a single color-channel)"
+    )
+
+    channel = channel + 0.5
+    channel[np.where(channel > 1.0)] -= 1.0
+
+    H = calmoment(channel)
+
+    return H
+
+
+def calColorHist(image, m=100):
+    """
+    function returns the top 'm' most popular colors in the input image
+        :param image: RGB color-image represented in a 3D array
+        :param m: integer specifying how many 'top' colors to be counted (e.g.
+            for m=10 the function will return the pixel-counts for the top 10
+            most popular colors in image)
+        :return cHist: counts of the top 'm' most popular colors in image
+        :return numClrs: number of distinct colors that account for 99% of
+            the image pixels
+    """
+    # 1. compute color histogram of image (normalized, if specified)
+    numBins = 32
+    maxval = 255
+    cHist = rgbhist(image, maxval, numBins, 1)
+
+    # 2. determine top 100 colors of image from histogram
+    y = sorted(cHist, reverse=True)  # [Y, I] = sort(H,'descend');
+    cHist = y[0:m]  # H = Y(1:m)';
+
+    c = np.cumsum(y)  # C = cumsum(Y);
+    numClrs = np.where(c > 0.99)[0][0]  # clrnum = find(C>.99,1,'first') - 1;
+
+    cHist = np.array(cHist)
+    return cHist, numClrs
+
+
+"""
+computes 3d color histogram of image
+"""
+
+
+def rgbhist(image, maxval, nBins, normType=0):
+    assert len(image.shape) == 3, (
+        "image should be a 3D (rgb) array of shape"
+        " (3, m,n) where m is no. of rows, and n is no. if cols in image.c$"
+    )
+    assert normType > -1 and normType < 2, (
+        "rgbhist():: normType should " " be only 0 or 1"
+    )
+    # zeros([nBins nBins nBins]);
+    H = np.zeros((nBins, nBins, nBins), dtype=np.uint32)
+
+    decimator = (maxval + 1) / nBins
+    numPix = image.shape[1] * image.shape[2]
+    # im = reshape(I,[size(I,1)*size(I,2) 3]);
+    im = image.reshape(3, numPix).copy()
+    im = im.T
+
+    p = np.floor(im.astype(float) / decimator).astype(np.uint32)
+    # np.unique with axis=0 (numpy >= 1.13) gives the distinct colors and
+    # their pixel-counts directly.
+    unique_p, count = np.unique(p, return_counts=True, axis=0)
+    H[unique_p[:, 0], unique_p[:, 1], unique_p[:, 2]] = count
+
+    H = H.ravel()  # H = H(:);
+    # Un-Normalized histogram
+
+    if normType == 1:
+        H = H.astype(np.float32) / np.sum(H)  # l1 normalization
+    return H
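+
+
+"""
+A minimal sanity check for rgbhist(): with 32 bins per channel the histogram
+has 32**3 entries, and normType=1 L1-normalizes it::
+
+    import numpy as np
+
+    img = np.random.randint(0, 256, (3, 8, 8), dtype=np.uint8)
+    h = rgbhist(img, 255, 32, 1)
+    h.shape, h.sum()  # ((32768,), ~1.0)
+"""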
+
+
+def blurriness(image):
+    """
+    function to estimate blurriness of an image, as computed by Di Wen et al.
+    in their IEEE-TIFS-2015 paper.
+        :param image: a gray-level image
+    """
+
+    assert len(image.shape) == 2, (
+        "Input to blurriness() function should " "be a 2D (gray) image"
+    )
+
+    d = 4
+    fsize = 2 * d + 1
+    kver = np.ones((1, fsize)) / fsize
+    khor = kver.T
+
+    Bver = ssg.convolve2d(
+        image.astype(np.float32), kver.astype(np.float32), mode="same"
+    )
+    Bhor = ssg.convolve2d(
+        image.astype(np.float32), khor.astype(np.float32), mode="same"
+    )
+
+    # The implementations of DFver and DFhor below don't look the same as in
+    # the Matlab code, but they produce equivalent results (there might be a
+    # bug in the Matlab code).
+    DFver = np.diff(image.astype("int16"), axis=0)
+    DFver[np.where(DFver < 0)] = 0
+
+    DFhor = np.diff(image.astype("int16"), axis=1)
+    DFhor[np.where(DFhor < 0)] = 0
+
+    DBver = np.abs(np.diff(Bver, axis=0))
+    DBhor = np.abs(np.diff(Bhor, axis=1))
+
+    Vver = DFver.astype(float) - DBver.astype(float)
+    Vhor = DFhor.astype(float) - DBhor.astype(float)
+    Vver[Vver < 0] = 0  # Vver(find(Vver<0)) = 0;
+    Vhor[Vhor < 0] = 0  # Vhor(find(Vhor<0)) = 0;
+
+    SFver = np.sum(DFver)
+    SFhor = np.sum(DFhor)  # sum(DFhor(:));
+
+    SVver = np.sum(Vver)  # sum(Vver(:));
+    SVhor = np.sum(Vhor)  # sum(Vhor(:));
+
+    BFver = (SFver - SVver) / SFver
+    BFhor = (SFhor - SVhor) / SFhor
+
+    blurF = max(BFver, BFhor)  # max([BFver BFhor]);
+
+    return blurF
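+
+
+"""
+A minimal sketch of blurriness(): the score lies in [0, 1] and grows when the
+image is smoothed (the 9x9 box blur below is purely illustrative)::
+
+    import numpy as np
+    import scipy.signal as ssg
+
+    gray = np.random.default_rng(0).integers(0, 256, (64, 64)).astype(np.uint8)
+    blurred = ssg.convolve2d(gray, np.ones((9, 9)) / 81.0, mode="same")
+    blurriness(gray) < blurriness(blurred)  # typically True
+"""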
diff --git a/bob/pad/face/qualitymeasure/tan_specular_highlights.py b/bob/pad/face/qualitymeasure/tan_specular_highlights.py
new file mode 100644
index 0000000000000000000000000000000000000000..b817c235dbc93ceb6783dbd51e779d874161af51
--- /dev/null
+++ b/bob/pad/face/qualitymeasure/tan_specular_highlights.py
@@ -0,0 +1,799 @@
+"""
+Created on May 9, 2016
+
+@author: sbhatta
+
+#------------------------------#
+   reference:
+   "separating reflection components of textured surfaces using a single image"
+   by Robby T. Tan, Katsushi Ikeuchi,
+   IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI),
+   27(2), pp.179-193, February, 2005
+
+   This Python implementation is inspired by the C++ code provided by Prof.
+   Robby Tan:
+       http://tanrobby.github.io/code.html#
+       http://tanrobby.github.io/code/highlight.zip
+
+   The main difference between this implementation and the original C++
+   implementation is that here the init_labels() function also ignores pixels
+   marked G_DIFFUSE. This leads to a smaller number of iterations per epsilon
+   value, while producing the same result as the C++ code.
+#------------------------------#
+"""
+
+import numpy as np
+
+# np.seterr(divide='ignore', invalid='ignore')
+
+import bob.io.image
+import bob.io.base
+
+# special-pixel-markers. The values don't matter, but they should all
+# remain different from each other.
+G_SPECULARX = 10
+G_SPECULARY = 11
+G_DIFFUSE = 12
+G_BOUNDARY = 13
+G_NOISE = 14
+G_CAMERA_DARK = 15
+
+
+"""
+
+"""
+
+
+def remove_highlights(srcImage, startEps=0.5, verboseFlag=True):
+    """Returns the separate reflection components (highlights and diffuse
+    components) comprising the input color RGB image. This is the main function
+    that processes the input srcImage and returns the separate components.
+
+    Args:
+        srcImage: numpy array of shape (3, maxY, maxX), containing a RGB image.
+            (maxY, maxX) give the image-size (num. rows, num. cols).
+        startEps: floating point (small value), determines the number of
+                  iterations of the algorithm. epsilon is initialized to this
+                  value. At each iteration epsilon is reduced by 0.01, and if
+                  it is not less than 0.0, another iteration is performed.
+                  Thus, a value of 0.5 leads to 50 iterations. Specify a
+                  smaller value if fewer iterations are desired.
+        verboseFlag: flag to indicate whether to print some intermediate
+            values or not. Specify 'False' if you want no messages printed.
+
+    Outputs:
+        sfi: numpy array of shape (3, maxY, maxX), containing the
+            specular-free image
+        diffuse: numpy array of shape (3, maxY, maxX), containing the diffuse
+            component
+        speckleResidue: numpy array of shape (3, maxY, maxX), containing the
+            specular component.
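+
+    Example (illustrative sketch; assumes ``img`` is an RGB image already
+    loaded as a float numpy array of shape (3, maxY, maxX))::
+
+        sfi, diffuse, specular = remove_highlights(img, startEps=0.5)
+        # the decomposition is exact: img == diffuse + specular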
+    """
+    # initialize resulting diffuse-image
+    diffuse = np.copy(srcImage)  # this copy will be updated in zIteration()
+
+    srcPixelStatus, sfi, srcMaxChroma, srcMax, srcTotal = specular_free_image(diffuse)
+    # sfi is the specular-free image.
+
+    epsilon = startEps
+    step = 0.01
+
+    while epsilon >= 0.0:
+        # if verboseFlag:
+        #     print('*')
+        diffuse, srcPixelStatus = iteration(
+            diffuse, srcPixelStatus, sfi, epsilon, verboseFlag
+        )
+        epsilon -= step
+        # if verboseFlag:
+        #     print('ep: %f' % epsilon)
+
+    speckleResidue = srcImage - diffuse
+
+    # speckleResidue is 3D but for every pixel all channels have the same
+    # value.
+    return sfi, diffuse, speckleResidue
+
+
+def specular_free_image(src, srcPixelStatus=None):
+    """Generates specular-free version of input RGB color image src.
+
+    Args:
+        src: numpy array of shape (3, maxY, maxX) containing a RGB image.
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker
+            per pixel, indicating the type of pixel. This array is updated
+            inside this function, so if the input param. is None, it is first
+            allocated and initialized to 0-values.
+
+    Outputs:
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker
+            per pixel, indicating the type of pixel.
+        sfi: numpy array of shape (3, maxY, maxX) containing the specular-free
+            image corresponding to src.
+        srcMaxChroma: numpy array of shape (maxY, maxX) containing the max.
+            chroma for each pixel.
+        srcMax: numpy array of shape (maxY, maxX) containing the max. among the
+            3 color-intensities, for each pixel in src.
+        srcTotal: numpy array of shape (maxY, maxX) containing pixel-wise sum
+            of color-intensities in src.
+    """
+
+    # allocate output image
+    cLambda = 0.6  # arbitrary constant. See paper referenced above.
+    camDark = 10.0  # threshold to determine if a pixel is too dark
+    lambdaConst = 3.0 * cLambda - 1.0
+
+    if srcPixelStatus is None:
+        srcPixelStatus = np.zeros((src.shape[1], src.shape[2]))
+    else:
+        assert src.shape[1:] == srcPixelStatus.shape, (
+            "specular_free_image()"
+            ":: srcPixelStatus should be None, or should match the "
+            "pixel-layout of src."
+        )
+
+    srcPixelStatus[np.logical_and.reduce(src < camDark, 0)] = G_CAMERA_DARK
+
+    srcMaxChroma, srcMax, srcTotal = max_chroma(src)
+
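+    # dI estimates each pixel's diffuse-only maximum intensity under the
+    # assumed uniform chroma cLambda; sI below is the per-pixel specular
+    # offset, which gets subtracted equally from all three channels.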
+    numer = srcMax * (3.0 * srcMaxChroma - 1.0)
+    denom = srcMaxChroma * lambdaConst
+    old_settings = np.seterr(divide="ignore", invalid="ignore")
+    dI = np.divide(numer, denom)
+    np.seterr(**old_settings)
+    sI = (srcTotal - dI) / 3.0
+
+    # src: 3d array (rgb image); sI: 2D array matching pixel-layout of src
+    drgb = src.astype(float) - sI
+
+    drgb[np.where(np.isinf(drgb))] = 0
+    drgb[np.where(np.isnan(drgb))] = 0
+    drgb[np.where(drgb < 0)] = 0
+    drgb[np.where(drgb > 255)] = 255
+    sfi = drgb
+
+    return srcPixelStatus, sfi, srcMaxChroma, srcMax, srcTotal
+
+
+def iteration(src, srcPixelStatus, sfi, epsilon, verboseFlag=True):
+    """Iteratively converts each specular-pixel to diffuse.
+
+    Args:
+        src: numpy array of shape (3, maxY, maxX) containing a RGB image.
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker
+            per pixel, indicating the type of pixel.
+        sfi: numpy array of shape (3, maxY, maxX) containing the specular-free
+            image corresponding to src
+        epsilon: floating-point (small) value.
+        verboseFlag: flag to enable printing of progress information (the
+            print statements are currently commented out).
+
+    Outputs:
+        src: numpy array of shape (3, maxY, maxX) containing updated input
+            image
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing updated
+            pixel-markers
+    """
+
+    thR = 0.1
+    thG = 0.1
+    pcount = 0
+    count, srcPixelStatus = init_labels(src, sfi, epsilon, srcPixelStatus)
+
+    while True:
+        maxY, maxX = src.shape[-2:]
+        srcMaxChroma, srcMax, srcTotal = max_chroma(src)
+        srcChroma, srcTotal = rgb_chroma(src, srcTotal)
+
+        red_chroma_diff_x = np.diff(srcChroma[0, :, :], axis=1)
+        red_chroma_diff_y = np.diff(srcChroma[0, :, :], axis=0)
+
+        grn_chroma_diff_x = np.diff(srcChroma[1, :, :], axis=1)
+        grn_chroma_diff_y = np.diff(srcChroma[1, :, :], axis=0)
+
+        # a non-complete attempt to remove the for loop below
+        # # find non G_CAMERA_DARK pixels
+        # non_g_camera_dark = (srcPixelStatus != G_CAMERA_DARK)
+        # non_g_camera_dark[-1, :] = False
+        # non_g_camera_dark[:, -1] = False
+        # g_specularx = (non_g_camera_dark) & \
+        #     (srcPixelStatus == G_SPECULARX)
+        # g_speculary = (non_g_camera_dark) & \
+        #     (srcPixelStatus == G_SPECULARY)
+
+        # '''for specular y'''
+
+        # # if it is a boundary in x
+        # boundary_x = np.zeros_like(g_specularx)
+        # boundary_x[:-1, :-1] = g_specularx[:-1, :-1] & \
+        #     (np.fabs(red_chroma_diff_x[:-1, :]) > thR) & \
+        #     (np.fabs(grn_chroma_diff_x[:-1, :]) > thG)
+        # srcPixelStatus[boundary_x] = G_BOUNDARY
+
+        # # if it is noise in x
+        # srcMaxChromaDiffX = np.diff(srcMaxChroma, axis=1)
+        # srcMaxChromaAbsDiffX = np.fabs(srcMaxChromaDiffX)
+        # noise_mask_x = g_specularx & (~boundary_x)
+        # noise_mask_x[:-1, :-1] = noise_mask_x[:-1, :-1] & \
+        #     (srcMaxChromaAbsDiffX[:-1, :] < 0.01)
+        # srcPixelStatus[noise_mask_x] = G_NOISE
+
+        # # if it is a boundary in y
+        # boundary_y = np.zeros_like(g_speculary)
+        # boundary_y[:-1, :-1] = g_speculary[:-1, :-1] & \
+        #     (np.fabs(red_chroma_diff_y[:, :-1]) > thR) & \
+        #     (np.fabs(grn_chroma_diff_y[:, :-1]) > thG)
+        # srcPixelStatus[boundary_y] = G_BOUNDARY
+
+        # # if it is noise in y
+        # srcMaxChromaDiffY = np.diff(srcMaxChroma, axis=0)
+        # srcMaxChromaAbsDiffY = np.fabs(srcMaxChromaDiffY)
+        # noise_mask_y = g_speculary & (~boundary_y)
+        # noise_mask_y[:-1, :-1] = noise_mask_y[:-1, :-1] & \
+        #     (srcMaxChromaAbsDiffY[:, :-1] < 0.01)
+        # srcPixelStatus[noise_mask_y] = G_NOISE
+
+        # # if it is not noise or boundary
+        # boundary_x = np.zeros_like(g_specularx)
+        # boundary_x[:-1, :-1] = g_specularx[:-1, :-1] & \
+        #     (np.fabs(red_chroma_diff_x[:-1, :]) > thR) & \
+        #     (np.fabs(grn_chroma_diff_x[:-1, :]) > thG)
+        # srcMaxChromaDiffX = np.diff(srcMaxChroma, axis=1)
+        # srcMaxChromaAbsDiffX = np.fabs(srcMaxChromaDiffX)
+        # noise_mask_x = g_specularx & (~boundary_x)
+        # noise_mask_x[:-1, :-1] = noise_mask_x[:-1, :-1] & \
+        #     (srcMaxChromaAbsDiffX[:-1, :] < 0.01)
+        # non_noise_mask_x = g_specularx & (~boundary_x) & (~noise_mask_x)
+        # srcPixelStatus[non_noise_mask_x] = G_DIFFUSE
+        # non_noise_mask_next_x = np.roll(non_noise_mask_x, 1, axis=1)
+        # srcPixelStatus[non_noise_mask_next_x] = G_DIFFUSE
+
+        # boundary_y = np.zeros_like(g_speculary)
+        # boundary_y[:-1, :-1] = g_speculary[:-1, :-1] & \
+        #     (np.fabs(red_chroma_diff_y[:, :-1]) > thR) & \
+        #     (np.fabs(grn_chroma_diff_y[:, :-1]) > thG)
+        # srcMaxChromaDiffY = np.diff(srcMaxChroma, axis=0)
+        # srcMaxChromaAbsDiffY = np.fabs(srcMaxChromaDiffY)
+        # noise_mask_y = g_speculary & (~boundary_y)
+        # noise_mask_y[:-1, :-1] = noise_mask_y[:-1, :-1] & \
+        #     (srcMaxChromaAbsDiffY[:, :-1] < 0.01)
+        # non_noise_mask_y = g_speculary & (~boundary_y) & (~noise_mask_y)
+        # srcPixelStatus[non_noise_mask_y] = G_DIFFUSE
+        # non_noise_mask_next_y = np.roll(non_noise_mask_y, 1, axis=0)
+        # srcPixelStatus[non_noise_mask_next_y] = G_DIFFUSE
+
+        # # if it is not boundary or noise
+        # pos_chroma_maskx = (srcMaxChromaDiffX[:-1, :] > 0)
+        # reduce_mask_pos_x = non_noise_mask_x.copy()
+        # reduce_mask_pos_x[:-1, :-1] = reduce_mask_pos_x[:-1, :-1] & \
+        #     pos_chroma_maskx
+        # reduce_mask_pos_next_x = np.roll(reduce_mask_pos_x, 1, axis=1)
+        # iro = np.moveaxis(np.moveaxis(src, 0, -1)[reduce_mask_pos_x], -1, 0)
+        # refMaxChroma = srcMaxChroma[reduce_mask_pos_next_x]
+        # iroTotal = srcTotal[reduce_mask_pos_x]
+        # iroMax = srcMax[reduce_mask_pos_x]
+        # iroMaxChroma = srcMaxChroma[reduce_mask_pos_x]
+        # pStat, iroRes = specular_to_diffuse_vectorized(
+        #     iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
+        # iro[:, pStat != G_NOISE] = iroRes[:, pStat != G_NOISE]
+
+        # pos_chroma_maskx = ~pos_chroma_maskx
+        # reduce_mask_pos_x = non_noise_mask_x.copy()
+        # reduce_mask_pos_x[:-1, :-1] = reduce_mask_pos_x[:-1, :-1] & \
+        #     pos_chroma_maskx
+        # reduce_mask_pos_next_x = np.roll(reduce_mask_pos_x, 1, axis=1)
+        # iro = np.moveaxis(np.moveaxis(src, 0, -1)[reduce_mask_pos_next_x],
+        #                   -1, 0)
+        # refMaxChroma = srcMaxChroma[reduce_mask_pos_x]
+        # iroTotal = srcTotal[reduce_mask_pos_next_x]
+        # iroMax = srcMax[reduce_mask_pos_next_x]
+        # iroMaxChroma = srcMaxChroma[reduce_mask_pos_next_x]
+        # pStat, iroRes = specular_to_diffuse_vectorized(
+        #     iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
+        # iro[:, pStat != G_NOISE] = iroRes[:, pStat != G_NOISE]
+
+        # # if it is not boundary or noise
+        # pos_chroma_masky = (srcMaxChromaDiffY[:, :-1] > 0)
+        # reduce_mask_pos_y = non_noise_mask_y.copy()
+        # reduce_mask_pos_y[:-1, :-1] = reduce_mask_pos_y[:-1, :-1] & \
+        #     pos_chroma_masky
+        # reduce_mask_pos_next_y = np.roll(reduce_mask_pos_y, 1, axis=1)
+        # iro = np.moveaxis(np.moveaxis(src, 0, -1)[reduce_mask_pos_y], -1, 0)
+        # refMaxChroma = srcMaxChroma[reduce_mask_pos_next_y]
+        # iroTotal = srcTotal[reduce_mask_pos_y]
+        # iroMax = srcMax[reduce_mask_pos_y]
+        # iroMaxChroma = srcMaxChroma[reduce_mask_pos_y]
+        # pStat, iroRes = specular_to_diffuse_vectorized(
+        #     iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
+        # iro[:, pStat != G_NOISE] = iroRes[:, pStat != G_NOISE]
+
+        # pos_chroma_masky = ~pos_chroma_masky
+        # reduce_mask_pos_y = non_noise_mask_y.copy()
+        # reduce_mask_pos_y[:-1, :-1] = reduce_mask_pos_y[:-1, :-1] & \
+        #     pos_chroma_masky
+        # reduce_mask_pos_next_y = np.roll(reduce_mask_pos_y, 1, axis=1)
+        # iro = np.moveaxis(np.moveaxis(src, 0, -1)[reduce_mask_pos_next_y],
+        #                   -1, 0)
+        # refMaxChroma = srcMaxChroma[reduce_mask_pos_y]
+        # iroTotal = srcTotal[reduce_mask_pos_next_y]
+        # iroMax = srcMax[reduce_mask_pos_next_y]
+        # iroMaxChroma = srcMaxChroma[reduce_mask_pos_next_y]
+        # pStat, iroRes = specular_to_diffuse_vectorized(
+        #     iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
+        # iro[:, pStat != G_NOISE] = iroRes[:, pStat != G_NOISE]
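+        # Walk the image; for each pixel still marked specular in x (or y),
+        # compare its max-chroma with the next pixel in that direction and
+        # make the lower-chroma pixel diffuse, using the neighbor's chroma
+        # as the reference.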
+        for y in range(maxY - 1):
+            for x in range(maxX - 1):
+                current_pixel = srcPixelStatus[y, x]
+                if current_pixel == G_CAMERA_DARK:
+                    continue
+
+                drx = red_chroma_diff_x[y, x]
+                dgx = grn_chroma_diff_x[y, x]
+                dry = red_chroma_diff_y[y, x]
+                dgy = grn_chroma_diff_y[y, x]
+
+                if current_pixel == G_SPECULARX:
+                    # if it is a boundary in the x direction
+                    if (abs(drx) > thR) and (abs(dgx) > thG):
+                        # boundary between this pixel and the one to its right
+                        srcPixelStatus[y, x] = G_BOUNDARY
+                        continue
+                    # if it is noise
+                    if abs(srcMaxChroma[y, x] - srcMaxChroma[y, x + 1]) < 0.01:
+                        srcPixelStatus[y, x] = G_NOISE
+                        continue
+                    # reduce the specularity in the x direction
+                    if srcMaxChroma[y, x] < srcMaxChroma[y, x + 1]:
+                        iro = src[:, y, x]
+                        # pStat = current_pixel
+                        refMaxChroma = srcMaxChroma[y, x + 1]
+                        iroTotal = srcTotal[y, x]
+                        iroMax = srcMax[y, x]
+                        iroMaxChroma = srcMaxChroma[y, x]
+                        pStat, iroRes = specular_to_diffuse(
+                            iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma
+                        )
+
+                        # update the pixel color only if
+                        # specular_to_diffuse() did not flag the
+                        # pixel as noise.
+                        if pStat != G_NOISE:
+                            src[:, y, x] = iroRes
+                        # else:
+                        #     srcPixelStatus[y, x] = pStat
+
+                    else:
+                        iro = src[:, y, x + 1]
+                        # pStat = srcPixelStatus[y, x + 1]
+                        refMaxChroma = srcMaxChroma[y, x]
+                        iroTotal = srcTotal[y, x + 1]
+                        iroMax = srcMax[y, x + 1]
+                        iroMaxChroma = srcMaxChroma[y, x + 1]
+                        pStat, iroRes = specular_to_diffuse(
+                            iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma
+                        )
+
+                        # update the pixel color only if
+                        # specular_to_diffuse() did not flag the
+                        # pixel as noise.
+                        if pStat != G_NOISE:
+                            src[:, y, x + 1] = iroRes
+                        # else:
+                        #     srcPixelStatus[y, x + 1] = pStat
+
+                    srcPixelStatus[y, x] = G_DIFFUSE
+                    srcPixelStatus[y, x + 1] = G_DIFFUSE
+
+                if current_pixel == G_SPECULARY:
+                    # if it is a boundary in the y direction
+                    if (abs(dry) > thR) and (abs(dgy) > thG):
+                        # boundary between this pixel and the one below it
+                        srcPixelStatus[y, x] = G_BOUNDARY
+                        continue
+                    # if it is noise
+                    if abs(srcMaxChroma[y, x] - srcMaxChroma[y + 1, x]) < 0.01:
+                        srcPixelStatus[y, x] = G_NOISE
+                        continue
+                    # reduce the specularity in the y direction
+                    if srcMaxChroma[y, x] < srcMaxChroma[y + 1, x]:
+                        iro = src[:, y, x]
+                        pStat = current_pixel
+                        refMaxChroma = srcMaxChroma[y + 1, x]
+                        iroTotal = srcTotal[y, x]
+                        iroMax = srcMax[y, x]
+                        iroMaxChroma = srcMaxChroma[y, x]
+                        pStat, iroRes = specular_to_diffuse(
+                            iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma
+                        )
+                        # C call:
+                        # zSpecular2Diffuse(src(y,x),src(y+1,x).zMaxChroma())
+
+                        # update pixelStatus only if
+                        # specular_to_diffuse() returned pStat as
+                        # G_NOISE; otherwise update the pixel color.
+                        if pStat == G_NOISE:
+                            srcPixelStatus[y, x] = pStat
+                        else:
+                            src[:, y, x] = iroRes
+
+                    else:
+                        iro = src[:, y + 1, x]
+                        pStat = srcPixelStatus[y + 1, x]
+                        pMaxChroma = srcMaxChroma[y + 1, x]
+                        iroTotal = srcTotal[y + 1, x]
+                        iroMax = srcMax[y + 1, x]
+                        iroMaxChroma = srcMaxChroma[y + 1, x]
+                        pStat, iroRes = specular_to_diffuse(
+                            iro, iroMax, iroMaxChroma, iroTotal, pMaxChroma
+                        )
+
+                        # update pixelStatus only if
+                        # specular_to_diffuse() returned pStat as
+                        # G_NOISE; otherwise update the pixel color.
+                        if pStat == G_NOISE:
+                            srcPixelStatus[y + 1, x] = pStat
+                        else:
+                            src[:, y + 1, x] = iroRes
+
+                    srcPixelStatus[y, x] = G_DIFFUSE
+                    srcPixelStatus[y + 1, x] = G_DIFFUSE
+
+        pcount = count
+        count, srcPixelStatus = init_labels(src, sfi, epsilon, srcPixelStatus)
+
+        # Robby Tan's original C++ code checks if count<0, but that doesn't
+        # make sense as count cannot be negative.
+        if count == 0 or pcount <= count:
+            break  # break out of the while-loop
+
+    srcPixelStatus = reset_labels(srcPixelStatus)
+
+    return src, srcPixelStatus
+
+
+def init_labels(src, sfi, epsilon, srcPixelStatus):
+    """Generates initial labels for all pixels in src (input RGB image).
+
+    Args:
+        src: numpy array of shape (3, maxY, maxX) containing a RGB image.
+        sfi: numpy array of shape (3, maxY, maxX) containing a specular-free
+            image corresponding to src.
+        epsilon: positive floating point (small) value
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing pixel-
+            markers corresponding to src.
+
+    Returns:
+        count: number of candidate pixels relabelled in this call (the caller
+            uses it to detect convergence).
+        srcPixelStatus: numpy array of shape (maxY, maxX) containing updated
+            pixel-markers corresponding to src.
+    """
+
+    old_settings = np.seterr(divide="ignore", invalid="ignore")
+    # to have initial labels
+    zTotalSrc = np.sum(src, axis=0)
+    diff_x_src = np.diff(zTotalSrc, axis=1)
+    diff_y_src = np.diff(zTotalSrc, axis=0)
+
+    zTotalSfi = np.sum(sfi, axis=0)
+    diff_x_sfi = np.diff(zTotalSfi, axis=1)
+    diff_y_sfi = np.diff(zTotalSfi, axis=0)
+
+    dlog_src_x = np.log(np.fabs(diff_x_src))
+    dlog_src_y = np.log(np.fabs(diff_y_src))
+
+    dlog_sfi_x = np.log(np.fabs(diff_x_sfi))
+    dlog_sfi_y = np.log(np.fabs(diff_y_sfi))
+
+    dlogx = np.fabs(dlog_src_x - dlog_sfi_x)
+    dlogy = np.fabs(dlog_src_y - dlog_sfi_y)
+
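+    # A pixel is labelled specular-in-x when the log-derivative of its total
+    # intensity differs from that of the specular-free image by more than
+    # epsilon; failing that, the same test is applied in y; all remaining
+    # unlabelled candidate pixels become diffuse.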
+    valid_values = (G_BOUNDARY, G_NOISE, G_CAMERA_DARK, G_DIFFUSE)
+    notvalid_mask = np.in1d(
+        srcPixelStatus[1:-1, 1:-1], valid_values, invert=True
+    ).reshape(srcPixelStatus.shape[0] - 2, -1)
+    dlogx_mask = (notvalid_mask) & (dlogx[1:-1, 1:] > epsilon)
+    dlogy_mask = (notvalid_mask) & (dlogy[1:, 1:-1] > epsilon) & (~(dlogx_mask))
+    diffuse_mask = (notvalid_mask) & (~dlogx_mask) & (~dlogy_mask)
+    count = notvalid_mask.sum()
+    srcPixelStatus[1:-1, 1:-1][dlogx_mask] = G_SPECULARX
+    srcPixelStatus[1:-1, 1:-1][dlogy_mask] = G_SPECULARY
+    srcPixelStatus[1:-1, 1:-1][diffuse_mask] = G_DIFFUSE
+
+    np.seterr(**old_settings)
+    return count, srcPixelStatus  # count is the number of relabelled pixels
+
+
+def specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma):
+    """Converts a color-pixel from speckled to diffuse, by subtracting a
+    certain amount from its intensity value in each color channel.
+
+    Args:
+        iro: 3-element column vector containing the rgb-color values of the
+            pixel to be processed.
+        iroMax: max value among the elements of iro
+        iroMaxChroma: max-chroma for this pixel
+        iroTotal: sum of the 3 elements of iro.
+        refMaxChroma: chroma-value of a neighboring pixel for comparison
+
+    Returns:
+        pixelStatus: a value (marker) indicating whether the pixel is
+            considered as noise or diffuse, after processing.
+        iro: updated pixel-color-values.
+    """
+    c = iroMaxChroma
+    pixelStatus = 0  # arbitrary initialization
+    numr = iroMax * (3.0 * c - 1.0)
+    denm = c * (3.0 * refMaxChroma - 1.0)
+    if abs(denm) > 1e-9:  # to avoid division by zero.
+        dI = numr / denm
+        sI = (iroTotal - dI) / 3.0
+        nrgb = iro - sI
+
+        if np.any(nrgb < 0):
+            pixelStatus = G_NOISE
+        else:
+            iro = nrgb
+    else:
+        pixelStatus = G_NOISE
+
+    return pixelStatus, iro
+
+
+# def specular_to_diffuse_vectorized(iro, iroMax, iroMaxChroma, iroTotal,
+#                                    refMaxChroma):
+#     """Converts a color-pixel from speckled to diffuse, by subtracting a
+#     certain amount from its intensity value in each color channel.
+#
+#     Args:
+#         iro: 3-element column vector containing the rgb-color values of the
+#             pixel to be processed.
+#         iroMax: max value among the elements of iro
+#         iroMaxChroma: max-chroma for this pixel
+#         iroTotal: sum of the 3 elements of iro.
+#         refMaxChroma: chroma-value of a neighboring pixel for comparison
+#
+#     Returns:
+#         pixelStatus: a value (marker) indicating whether the pixel is
+#             considered as noise or diffuse, after processing.
+#         iro: updated pixel-color-values.
+#     """
+#     c = iroMaxChroma
+#     # arbitrary initialization
+#     pixelStatus = np.empty((*iro.shape[1:]), dtype=iro.dtype)
+#     numr = (iroMax * (3.0 * c - 1.0))
+#     denm = (c * (3.0 * refMaxChroma - 1.0))
+#
+#     non_zero = np.fabs(denm) > 0.000000001
+#     dI = numr[non_zero] / denm[non_zero]
+#     sI = (iroTotal[non_zero] - dI) / 3.0
+#     nrgb = iro[:, non_zero] - sI
+#     negnrgb = np.logical_or.reduce(nrgb < 0, axis=0)
+#
+#     pixelStatus[non_zero][negnrgb] = G_NOISE
+#     iro[:, non_zero][:, ~negnrgb] = nrgb[:, ~negnrgb]
+#     pixelStatus[~non_zero] = G_NOISE
+#
+#     return pixelStatus, iro
+
+
+def reset_labels(pixelLabels):
+    """Resets all pixel-markers (except those indicating "dark" pixels, to 0.
+    Called in zIteration().
+
+    Args:
+        pixelLabels: numpy array of shape (maxX, maxY) containing pixel-
+            markers.
+
+    Returns:
+        pixelLabels: numpy array of shape (maxX, maxY) containing updated
+            pixel-markers..
+    """
+    # to reset the label of the pixels
+    pixelLabels[np.where(pixelLabels != G_CAMERA_DARK)] = 0
+
+    return pixelLabels
+
+
+#####
+# functions from globals
+#####
+
+
+def max_color(rgbImg):
+    """For input RGB image, this function computes max intensity among all
+    color-channels, for each pixel position.
+
+    Args:
+        rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
+
+    Returns:
+        rgbMax: (2D numpy array of shape (maxY, maxX)) contains pixel-wise max.
+            among the three color values.
+    """
+
+    return np.amax(rgbImg, axis=0)
+
+
+def total_color(rgbImg):
+    """For input RGB image, this function computes sum of intensities in each
+    color-channel for each pixel position.
+
+    Args:
+        rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
+
+    Returns:
+        rgbTotal: (2D numpy array of shape (maxY, maxY)) contains pixel-wise
+            sum of the 3 color-values.
+    """
+
+    return np.sum(rgbImg.astype(float), axis=0)
+
+
+def max_chroma(rgbImg, rgbMax=None, rgbTotal=None):
+    """Given an input RGB image (rgbImg, with shape (3, maxY, maxX)), this
+    function computes the maximum chroma-value for each pixel position.
+
+    Args:
+        rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
+        rgbMax: numpy array of shape (maxY, maxX) containing pixel-wise max.
+            color intensity. If None, it is computed.
+        rgbTotal: numpy array of shape (maxY, maxX) containing pixel-wise sum
+            of color-intensities. If None, it is computed.
+
+    Returns:
+        maxChroma: (2D numpy array of shape (maxY, maxX)) contains the
+            pixel-wise maximum chroma-value,
+        rgbMax: (2D numpy array of shape (maxY, maxX)) contains pixel-wise max.
+            among the three color values.
+        rgbTotal: (2D numpy array of shape (maxY, maxX)) contains pixel-wise
+            sum of the 3 color-values.
+    """
+
+    if rgbMax is None:
+        rgbMax = max_color(rgbImg)
+    if rgbTotal is None:
+        rgbTotal = total_color(rgbImg)
+
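+    # chroma is defined per-pixel as max(R, G, B) / (R + G + B); the division
+    # is guarded so that all-black pixels (total == 0) get chroma 0.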
+    old_settings = np.seterr(divide="ignore", invalid="ignore")
+    maxChroma = np.divide(rgbMax.astype(float), rgbTotal.astype(float))
+    np.seterr(**old_settings)
+    maxChroma[np.where(rgbTotal == 0)] = 0
+
+    return maxChroma, rgbMax, rgbTotal
+
+
+def rgb_chroma(rgbImg, rgbTotal=None):
+    """Given an input RGB color image, compute the chroma-values in the 3
+    channels.
+
+    Args:
+        rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
+        rgbTotal: numpy array of shape (maxY, maxX) containing pixel-wise sum
+            of color-intensities. If None, it is computed.
+
+    Returns:
+        rgbChroma: (3D numpy array of same shape as rgbImg) contains the
+            chroma-values of the individual channels, and
+        rgbTotal: (2D numpy array of shape (maxY, maxX)) contains pixel-wise
+            sum of the 3 color-values.
+    """
+
+    if rgbTotal is None:
+        rgbTotal = total_color(rgbImg)
+
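+    # per-channel chroma: I_c / (R + G + B); pixels with zero total get 0.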
+    old_settings = np.seterr(divide="ignore", invalid="ignore")
+    rgbChroma = np.divide(rgbImg.astype(float), rgbTotal.astype(float))
+    np.seterr(**old_settings)
+    rgbChroma[:, rgbTotal == 0] = 0
+
+    return rgbChroma, rgbTotal
+
+
+"""
+utility function to display image on screen (for debugging).
+"""
+
+
+def imshow(image):
+    """please use bob.io.image.imshow instead"""
+    import matplotlib
+    from matplotlib import pyplot as plt
+
+    if len(image.shape) == 3:
+        # matplotlib's imshow() expects a color image as (maxY, maxX, 3), so
+        # move the channel axis to the end before displaying.
+        outImg = np.dstack((image[0], image[1], image[2]))
+        plt.imshow((outImg * 255.0).astype(np.uint8))
+
+    elif len(image.shape) == 2:
+        # display gray image.
+        plt.imshow(image.astype(np.uint8), cmap=matplotlib.cm.gray)
+    else:
+        print("imshow():: image should be either 2d or 3d.")
+
+    plt.show()
+
+
+def computeIQSpecularityFeatures(rgbImage, startEps=0.05):
+
+    speckleFreeImg, diffuseImg, speckleImg = remove_highlights(
+        rgbImage.astype(float), startEps=startEps
+    )
+
+    if len(speckleImg.shape) == 3:
+        speckleImg = speckleImg[0]
+
+    speckleImg = speckleImg.clip(min=0)
+
+    speckleMean = np.mean(speckleImg)
+    lowSpeckleThresh = speckleMean * 1.5
+    hiSpeckleThresh = speckleMean * 4.0
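+    # keep only specular-residue values inside an empirically chosen band
+    # (1.5x to 4x the mean residue); the features are the fraction of such
+    # pixels plus their (scaled) mean and standard deviation.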
+    specklePixels = speckleImg[
+        np.where(
+            np.logical_and(speckleImg > lowSpeckleThresh, speckleImg < hiSpeckleThresh)
+        )
+    ]
+
+    r = float(specklePixels.shape[0]) / (speckleImg.shape[0] * speckleImg.shape[1])
+    m = np.mean(specklePixels)
+    s = np.std(specklePixels)
+
+    return (r, m / 150.0, s / 150.0)
+
+
+"""
+utility function to test the implementation. Relies on bob to load and save
+images.
+"""
+
+
+def test_highlight_detection():
+    inputImageFile = "/idiap/home/sbhatta/work/Antispoofing/ImageQualityMeasures/specular_highlights/highlight_C_Code/images/head.ppm"
+    outRoot = "/idiap/home/sbhatta/work/Antispoofing/ImageQualityMeasures/specular_highlights/"
+    # inputImageFile =
+    # 'C:/IDIAP/AntiSpoofing/ImageQualityMeasures/highlight/images/head.ppm'
+    # outRoot = 'C:/IDIAP/AntiSpoofing/ImageQualityMeasures/'
+
+    # load color image
+    inpImage = bob.io.base.load(inputImageFile)
+    # inpImage = misc.imread(inputImageFile)
+    # inpImage =  np.rollaxis(inpImage,2)
+
+    speckleFreeImg, diffuseImg, speckleImg = remove_highlights(
+        inpImage.astype(float), startEps=0.05
+    )
+
+    print("saving output images")
+    bob.io.base.save(
+        speckleFreeImg.astype("uint8"), outRoot + "speckleFreeHeadImage.ppm"
+    )
+    # speckleFreeImg = np.rollaxis(np.rollaxis(speckleFreeImg, 0,-1),2,1)
+    # misc.imsave(outRoot+'speckleFreeHeadImage.ppm', speckleFreeImg.astype('uint8'))
+
+    bob.io.base.save(diffuseImg.astype("uint8"), outRoot + "diffuseImgHeadImage.ppm")
+    #     diffuseImg = np.rollaxis(np.rollaxis(diffuseImg, 0,-1),2,1)
+    #     misc.imsave(outRoot+'diffuseImgHeadImage.ppm', diffuseImg.astype('uint8'))
+
+    bob.io.base.save(speckleImg.astype("uint8"), outRoot + "speckleImgHeadImage.ppm")
+    #     speckleImg = np.rollaxis(np.rollaxis(speckleImg, 0,-1),2,1)
+    #     misc.imsave(outRoot+'speckleImgHeadImage.ppm', speckleImg.astype('uint8'))
+    #
+
+    #     r,m,s=computeIQSpecularityFeatures(inpImage, startEps=0.05)
+    #     print(r, m, s)
+
+    #
+    #    imshow(inpImage)
+    #    imshow(speckleFreeImg)
+    #    imshow(diffuseImg)
+    imshow(speckleImg)
+
+
+if __name__ == "__main__":
+    test_highlight_detection()
diff --git a/bob/pad/face/test/data/real_client001_android_SD_scene01.face b/bob/pad/face/test/data/real_client001_android_SD_scene01.face
new file mode 100644
index 0000000000000000000000000000000000000000..bce523310f9fc6f467173d0f3880e0aa11f14455
--- /dev/null
+++ b/bob/pad/face/test/data/real_client001_android_SD_scene01.face
@@ -0,0 +1,301 @@
+0, 283, 107, 500, 375, 330.62, 235.40, 437.58, 232.54
+1, 292, 109, 520, 367, 334.61, 233.83, 436.75, 232.13
+2, 283, 104, 512, 376, 329.54, 235.68, 436.84, 232.43
+3, 284, 109, 499, 374, 330.47, 235.79, 436.46, 232.24
+4, 281, 108, 497, 373, 330.09, 235.71, 436.66, 232.40
+5, 283, 100, 510, 371, 329.64, 235.59, 436.94, 231.64
+6, 287, 102, 519, 378, 330.88, 234.92, 437.30, 232.08
+7, 279, 105, 500, 376, 330.70, 235.84, 437.60, 232.44
+8, 285, 107, 508, 374, 329.97, 235.24, 437.23, 232.31
+9, 282, 105, 501, 375, 329.31, 235.91, 436.97, 232.52
+10, 290, 108, 512, 373, 330.55, 234.90, 436.83, 232.57
+11, 281, 102, 504, 379, 330.83, 235.72, 438.06, 232.53
+12, 278, 106, 498, 376, 331.35, 235.27, 437.81, 231.96
+13, 278, 104, 501, 379, 330.75, 235.30, 438.48, 231.51
+14, 284, 106, 504, 376, 330.70, 234.71, 437.98, 232.21
+15, 283, 105, 502, 375, 330.68, 235.36, 437.02, 232.38
+16, 288, 102, 517, 378, 330.70, 235.23, 437.32, 232.34
+17, 281, 105, 499, 375, 330.97, 235.02, 438.15, 231.67
+18, 283, 104, 505, 380, 331.14, 235.15, 437.72, 231.99
+19, 283, 105, 505, 376, 332.25, 236.46, 437.40, 232.29
+20, 283, 105, 500, 373, 332.27, 236.47, 437.33, 232.70
+21, 292, 104, 515, 373, 332.19, 235.57, 437.55, 232.82
+22, 281, 105, 499, 375, 331.53, 235.84, 438.90, 232.99
+23, 286, 109, 502, 374, 331.09, 236.09, 438.55, 232.85
+24, 281, 102, 504, 376, 330.62, 235.67, 438.13, 231.49
+25, 278, 104, 501, 377, 330.74, 235.11, 437.57, 231.56
+26, 283, 106, 501, 376, 330.56, 234.71, 438.14, 232.10
+27, 286, 102, 508, 377, 330.56, 234.68, 437.65, 231.32
+28, 286, 104, 508, 378, 331.22, 234.85, 438.08, 231.08
+29, 281, 102, 507, 379, 331.45, 234.83, 438.94, 230.85
+30, 281, 98, 510, 380, 331.54, 234.70, 439.48, 230.66
+31, 285, 103, 503, 374, 331.22, 234.81, 441.71, 232.00
+32, 283, 104, 503, 374, 331.39, 234.67, 439.82, 230.47
+33, 282, 105, 505, 378, 332.56, 235.14, 440.82, 231.67
+34, 282, 103, 507, 380, 332.86, 234.75, 440.81, 231.19
+35, 286, 103, 508, 377, 332.49, 234.76, 439.94, 231.12
+36, 283, 101, 511, 380, 332.06, 234.45, 439.63, 230.75
+37, 286, 103, 508, 376, 332.15, 234.62, 439.91, 231.76
+38, 289, 105, 505, 373, 332.26, 234.77, 439.79, 231.98
+39, 284, 100, 512, 381, 332.67, 234.76, 439.93, 230.63
+40, 290, 102, 518, 378, 332.92, 234.20, 440.21, 230.33
+41, 284, 101, 510, 379, 332.33, 234.40, 440.49, 230.80
+42, 286, 104, 507, 374, 331.13, 234.26, 438.62, 230.15
+43, 280, 105, 501, 376, 330.96, 234.01, 438.67, 230.06
+44, 282, 102, 504, 376, 331.59, 234.18, 438.91, 230.06
+45, 281, 102, 507, 378, 331.89, 234.53, 439.76, 230.65
+46, 282, 102, 504, 375, 331.75, 234.60, 438.71, 230.37
+47, 277, 102, 499, 374, 330.81, 233.35, 438.17, 229.72
+48, 281, 103, 504, 377, 331.47, 233.32, 438.86, 229.33
+49, 284, 102, 506, 376, 331.35, 233.47, 439.52, 229.56
+50, 282, 100, 505, 376, 331.34, 232.89, 439.23, 228.81
+51, 280, 100, 506, 377, 331.80, 233.33, 439.18, 228.95
+52, 284, 101, 506, 375, 331.31, 233.35, 439.00, 229.05
+53, 280, 100, 504, 377, 331.44, 233.62, 439.12, 229.45
+54, 285, 103, 506, 374, 332.13, 233.30, 440.20, 229.38
+55, 282, 101, 508, 378, 332.65, 232.48, 440.17, 228.42
+56, 284, 104, 505, 375, 332.34, 233.33, 439.93, 229.58
+57, 284, 103, 506, 375, 332.16, 233.61, 439.57, 229.27
+58, 283, 103, 505, 376, 332.32, 234.01, 440.63, 229.85
+59, 281, 103, 507, 380, 332.60, 234.32, 440.83, 230.66
+60, 280, 100, 509, 383, 333.15, 234.40, 441.21, 230.32
+61, 288, 103, 511, 378, 332.18, 234.02, 439.92, 230.38
+62, 288, 103, 511, 379, 332.26, 234.26, 440.05, 230.14
+63, 284, 104, 507, 379, 332.73, 234.55, 440.96, 230.21
+64, 284, 104, 507, 379, 332.73, 234.55, 440.96, 230.21
+65, 287, 105, 509, 378, 331.90, 234.24, 440.04, 230.74
+66, 291, 105, 516, 377, 331.55, 233.88, 438.97, 230.87
+67, 286, 105, 509, 378, 331.80, 234.27, 439.18, 231.02
+68, 291, 106, 516, 378, 331.64, 233.98, 438.98, 230.53
+69, 294, 109, 511, 373, 331.22, 234.12, 438.57, 230.43
+70, 283, 105, 502, 374, 331.46, 234.48, 439.19, 229.93
+71, 284, 105, 506, 379, 332.08, 234.02, 440.07, 230.33
+72, 280, 98, 514, 385, 332.43, 233.82, 439.56, 229.75
+73, 278, 101, 508, 383, 332.16, 234.61, 440.67, 230.40
+74, 279, 104, 507, 381, 332.41, 234.82, 440.26, 230.06
+75, 278, 101, 509, 384, 332.78, 234.45, 440.90, 230.22
+76, 281, 102, 509, 381, 333.03, 234.23, 441.39, 230.33
+77, 283, 103, 507, 380, 332.42, 234.14, 440.92, 230.16
+78, 283, 102, 511, 384, 333.30, 233.75, 440.71, 230.07
+79, 286, 104, 509, 379, 332.65, 234.48, 441.37, 230.48
+80, 284, 105, 506, 379, 333.44, 234.62, 442.29, 230.70
+81, 281, 105, 504, 380, 333.25, 234.87, 442.09, 230.66
+82, 287, 106, 509, 378, 332.44, 234.81, 441.20, 231.14
+83, 291, 102, 519, 379, 332.68, 235.43, 440.00, 231.27
+84, 288, 106, 509, 376, 333.92, 235.13, 441.70, 231.23
+85, 288, 102, 520, 381, 333.74, 234.46, 442.11, 230.91
+86, 277, 102, 506, 385, 333.62, 235.92, 441.69, 231.83
+87, 279, 107, 501, 381, 333.01, 235.89, 440.19, 231.39
+88, 279, 105, 505, 382, 332.29, 236.11, 440.12, 232.22
+89, 285, 108, 501, 375, 333.43, 235.72, 441.83, 231.86
+90, 287, 101, 517, 381, 334.12, 235.85, 441.43, 231.70
+91, 277, 103, 506, 382, 333.67, 234.88, 441.51, 230.92
+92, 282, 104, 508, 381, 333.10, 234.71, 441.14, 230.92
+93, 282, 104, 509, 382, 333.47, 234.68, 441.90, 230.69
+94, 287, 103, 516, 382, 334.72, 234.63, 443.94, 230.67
+95, 285, 105, 508, 379, 334.65, 234.46, 442.31, 231.09
+96, 287, 104, 512, 380, 333.70, 233.68, 441.33, 230.45
+97, 289, 106, 510, 377, 334.18, 234.70, 442.05, 231.06
+98, 289, 103, 515, 380, 334.30, 235.18, 443.32, 231.51
+99, 278, 89, 531, 392, 333.53, 235.30, 441.89, 231.39
+100, 283, 102, 511, 381, 332.87, 235.61, 441.77, 231.61
+101, 274, 98, 508, 387, 334.27, 235.26, 442.38, 231.60
+102, 291, 100, 509, 369, 334.16, 235.41, 442.79, 231.69
+103, 285, 106, 508, 379, 334.40, 234.95, 442.68, 231.08
+104, 292, 108, 513, 374, 333.61, 235.19, 442.32, 231.37
+105, 292, 109, 513, 375, 334.61, 234.97, 443.08, 231.26
+106, 280, 101, 508, 382, 334.25, 235.89, 441.91, 231.48
+107, 293, 109, 512, 373, 334.10, 235.13, 442.97, 231.06
+108, 298, 109, 510, 373, 333.81, 235.40, 442.41, 231.09
+109, 285, 98, 519, 385, 333.67, 235.40, 442.04, 231.70
+110, 286, 96, 538, 384, 335.81, 234.28, 441.38, 230.95
+111, 288, 99, 537, 384, 335.14, 235.01, 441.20, 231.76
+112, 278, 97, 513, 386, 333.38, 235.37, 443.04, 231.42
+113, 291, 109, 508, 375, 334.45, 235.19, 443.25, 231.37
+114, 287, 107, 504, 375, 334.42, 235.37, 444.04, 231.57
+115, 294, 104, 522, 372, 334.23, 235.05, 443.11, 232.14
+116, 281, 88, 546, 391, 335.70, 236.18, 441.54, 232.00
+117, 285, 95, 522, 388, 333.39, 235.46, 442.39, 231.67
+118, 291, 109, 507, 374, 334.52, 235.42, 443.73, 231.72
+119, 292, 107, 507, 371, 334.86, 235.25, 443.58, 231.03
+120, 294, 108, 511, 373, 334.51, 235.03, 443.81, 231.13
+121, 268, 104, 494, 381, 334.16, 234.02, 441.63, 230.45
+122, 284, 104, 505, 375, 333.46, 234.41, 441.81, 230.12
+123, 281, 105, 503, 376, 332.77, 234.01, 441.94, 230.12
+124, 283, 103, 506, 378, 332.67, 233.81, 442.29, 229.96
+125, 285, 104, 507, 375, 333.63, 233.40, 442.29, 229.13
+126, 284, 103, 506, 375, 333.88, 233.04, 442.28, 228.78
+127, 284, 100, 508, 376, 333.30, 232.74, 442.43, 228.72
+128, 283, 101, 507, 377, 332.88, 233.31, 442.05, 228.93
+129, 287, 105, 506, 374, 333.81, 233.56, 442.43, 229.06
+130, 286, 103, 509, 375, 333.88, 233.46, 442.00, 228.71
+131, 280, 102, 506, 379, 333.14, 233.15, 442.20, 229.13
+132, 285, 104, 508, 375, 334.32, 233.86, 442.59, 229.20
+133, 290, 98, 513, 371, 334.23, 232.96, 442.09, 229.28
+134, 281, 101, 506, 378, 334.47, 233.15, 442.27, 228.66
+135, 281, 101, 505, 377, 334.79, 233.65, 443.28, 228.98
+136, 278, 101, 507, 381, 334.93, 233.75, 443.71, 229.44
+137, 283, 103, 506, 376, 334.32, 234.20, 442.42, 229.36
+138, 282, 102, 506, 378, 334.06, 233.55, 442.18, 229.35
+139, 280, 100, 508, 379, 334.56, 233.81, 442.62, 229.42
+140, 282, 100, 510, 381, 335.76, 233.69, 444.04, 229.42
+141, 283, 102, 510, 379, 335.25, 233.47, 443.86, 229.04
+142, 286, 102, 509, 376, 334.03, 233.31, 442.87, 228.96
+143, 281, 102, 505, 377, 334.32, 233.81, 442.56, 228.89
+144, 283, 99, 511, 380, 335.13, 234.01, 443.02, 229.51
+145, 284, 100, 512, 379, 335.00, 234.02, 443.37, 229.09
+146, 283, 101, 508, 378, 334.58, 233.76, 443.21, 229.30
+147, 281, 99, 509, 378, 334.54, 234.09, 442.80, 228.46
+148, 281, 99, 511, 382, 335.59, 233.89, 444.02, 229.32
+149, 281, 100, 509, 380, 334.51, 233.69, 443.07, 229.14
+150, 291, 101, 515, 378, 334.20, 233.93, 443.49, 228.88
+151, 282, 99, 510, 381, 335.40, 233.59, 444.68, 229.12
+152, 281, 102, 505, 378, 334.48, 234.26, 442.63, 229.33
+153, 281, 96, 514, 379, 334.40, 234.33, 443.27, 228.61
+154, 279, 98, 512, 382, 335.58, 233.98, 443.99, 229.10
+155, 284, 99, 512, 380, 336.00, 234.30, 444.30, 229.81
+156, 282, 101, 510, 382, 335.31, 234.60, 443.87, 230.04
+157, 283, 102, 508, 379, 335.52, 234.57, 444.02, 229.81
+158, 286, 103, 509, 378, 336.44, 234.66, 445.23, 229.80
+159, 285, 101, 512, 377, 336.25, 235.24, 444.43, 230.10
+160, 282, 102, 509, 378, 335.68, 234.43, 445.17, 229.99
+161, 282, 101, 508, 378, 336.25, 234.19, 444.53, 229.74
+162, 283, 101, 511, 379, 335.97, 234.04, 445.75, 229.73
+163, 278, 99, 510, 383, 335.77, 234.16, 444.67, 229.24
+164, 281, 98, 511, 381, 335.70, 233.81, 444.28, 229.15
+165, 291, 100, 520, 378, 335.91, 233.62, 445.09, 229.34
+166, 288, 93, 530, 382, 336.46, 233.50, 445.10, 228.53
+167, 295, 97, 524, 380, 336.68, 233.33, 445.43, 229.04
+168, 284, 98, 513, 380, 336.05, 233.39, 445.72, 228.77
+169, 288, 94, 528, 383, 336.27, 232.93, 444.79, 228.72
+170, 286, 98, 517, 381, 336.97, 233.66, 445.57, 229.32
+171, 289, 93, 529, 384, 336.71, 233.29, 445.48, 228.87
+172, 287, 91, 528, 384, 336.12, 233.34, 445.04, 228.51
+173, 285, 96, 519, 383, 336.56, 233.40, 445.40, 229.22
+174, 291, 96, 525, 382, 337.03, 233.34, 445.93, 228.84
+175, 287, 97, 519, 382, 337.54, 233.58, 446.36, 229.27
+176, 280, 92, 520, 386, 336.63, 233.24, 445.94, 228.54
+177, 284, 98, 515, 381, 336.80, 233.32, 445.88, 228.57
+178, 284, 97, 514, 380, 337.09, 233.60, 446.28, 228.94
+179, 283, 96, 517, 385, 337.74, 233.40, 447.29, 229.06
+180, 277, 92, 517, 387, 337.07, 233.56, 446.05, 228.31
+181, 283, 96, 517, 383, 337.25, 233.54, 446.38, 228.84
+182, 284, 94, 519, 383, 337.49, 233.57, 446.59, 228.97
+183, 280, 98, 514, 385, 337.74, 233.76, 447.16, 229.24
+184, 284, 95, 518, 384, 337.91, 233.56, 446.81, 229.14
+185, 285, 95, 519, 383, 337.96, 233.11, 446.81, 228.88
+186, 285, 98, 515, 381, 337.30, 233.79, 446.75, 229.05
+187, 287, 96, 520, 381, 337.64, 233.33, 446.95, 228.90
+188, 285, 94, 519, 383, 337.90, 233.51, 446.99, 228.83
+189, 285, 95, 519, 383, 337.39, 233.66, 446.89, 228.85
+190, 287, 96, 519, 381, 337.38, 233.60, 446.88, 229.32
+191, 279, 93, 519, 386, 337.79, 233.96, 446.68, 228.78
+192, 289, 94, 530, 382, 336.77, 233.56, 445.88, 228.85
+193, 285, 94, 519, 383, 337.74, 233.23, 446.80, 228.64
+194, 282, 100, 513, 383, 339.24, 233.62, 448.73, 229.16
+195, 294, 97, 526, 380, 339.03, 234.25, 448.39, 229.68
+196, 284, 97, 517, 382, 338.77, 233.92, 448.02, 229.07
+197, 287, 99, 516, 382, 338.55, 233.81, 447.62, 229.49
+198, 287, 101, 515, 379, 337.59, 233.41, 447.21, 228.96
+199, 286, 102, 512, 379, 337.57, 234.47, 447.02, 229.76
+200, 288, 101, 514, 377, 338.35, 234.15, 448.00, 229.13
+201, 302, 100, 526, 374, 338.20, 233.10, 448.23, 227.88
+202, 291, 92, 532, 379, 336.52, 232.06, 446.78, 227.12
+203, 292, 92, 534, 380, 336.58, 232.53, 446.19, 227.48
+204, 297, 99, 522, 375, 336.96, 233.18, 446.60, 228.36
+205, 287, 100, 514, 377, 337.51, 233.81, 446.55, 228.40
+206, 287, 98, 516, 379, 336.71, 233.54, 445.86, 228.17
+207, 295, 96, 526, 379, 336.55, 233.16, 446.12, 227.71
+208, 287, 99, 515, 378, 336.68, 233.26, 445.91, 227.93
+209, 287, 98, 517, 380, 335.93, 233.45, 444.88, 228.18
+210, 284, 98, 514, 380, 335.22, 233.62, 444.24, 228.41
+211, 293, 98, 524, 380, 335.93, 233.72, 445.22, 228.62
+212, 284, 98, 514, 381, 336.22, 233.56, 445.02, 228.32
+213, 284, 97, 515, 380, 335.45, 233.40, 444.38, 227.81
+214, 288, 99, 516, 379, 335.42, 233.52, 444.63, 228.32
+215, 283, 97, 517, 382, 335.61, 233.83, 445.47, 228.10
+216, 291, 96, 526, 385, 336.21, 234.13, 446.08, 228.86
+217, 294, 98, 523, 381, 335.75, 234.73, 445.18, 229.24
+218, 282, 100, 515, 383, 335.26, 234.45, 445.12, 229.13
+219, 293, 98, 523, 381, 334.63, 234.75, 444.11, 229.31
+220, 287, 100, 516, 381, 335.19, 234.90, 444.76, 229.61
+221, 284, 96, 520, 384, 336.24, 234.26, 445.58, 229.08
+222, 284, 96, 519, 384, 336.14, 234.45, 445.27, 229.12
+223, 285, 97, 520, 384, 335.72, 234.28, 444.58, 228.98
+224, 286, 91, 527, 384, 335.23, 234.52, 444.81, 228.35
+225, 282, 96, 519, 385, 335.34, 234.91, 444.89, 229.27
+226, 290, 97, 525, 381, 335.25, 234.77, 444.68, 229.22
+227, 285, 101, 513, 380, 335.49, 235.24, 444.87, 229.47
+228, 278, 99, 513, 386, 336.16, 235.14, 445.82, 229.04
+229, 277, 100, 512, 386, 336.98, 235.19, 446.54, 229.40
+230, 281, 100, 515, 386, 336.94, 235.16, 446.24, 229.04
+231, 281, 98, 516, 387, 336.80, 235.55, 445.96, 229.59
+232, 288, 101, 516, 380, 336.23, 235.81, 445.85, 230.04
+233, 286, 102, 514, 381, 336.12, 235.99, 445.60, 230.35
+234, 290, 100, 522, 382, 335.83, 235.34, 445.02, 229.92
+235, 283, 97, 518, 384, 336.15, 235.13, 445.43, 229.28
+236, 284, 97, 518, 385, 336.43, 235.00, 445.80, 229.26
+237, 288, 101, 516, 381, 337.01, 234.73, 446.14, 229.10
+238, 284, 100, 515, 382, 336.29, 235.33, 445.24, 230.08
+239, 284, 100, 516, 382, 335.95, 235.45, 444.44, 229.58
+240, 288, 99, 519, 381, 337.80, 236.53, 445.87, 229.71
+241, 289, 100, 519, 382, 338.46, 235.21, 444.86, 230.69
+242, 287, 99, 518, 381, 337.96, 234.76, 447.51, 229.84
+243, 286, 98, 521, 386, 337.58, 234.26, 447.05, 229.04
+244, 283, 99, 514, 382, 337.59, 234.63, 446.79, 228.77
+245, 297, 101, 522, 376, 337.30, 233.81, 447.51, 228.11
+246, 295, 96, 528, 378, 337.11, 233.16, 446.71, 227.75
+247, 292, 95, 521, 376, 336.54, 233.58, 445.85, 227.84
+248, 296, 93, 534, 376, 337.19, 233.52, 446.36, 227.02
+249, 295, 98, 524, 378, 337.02, 233.95, 446.39, 227.91
+250, 286, 97, 519, 381, 337.00, 233.31, 446.20, 228.07
+251, 284, 95, 521, 384, 336.50, 233.66, 445.59, 227.87
+252, 298, 98, 526, 380, 337.12, 233.51, 446.90, 228.01
+253, 299, 97, 529, 379, 337.36, 233.85, 447.40, 228.10
+254, 295, 96, 530, 381, 337.35, 233.66, 447.18, 228.44
+255, 287, 97, 521, 382, 337.38, 233.77, 446.19, 228.07
+256, 288, 96, 522, 382, 336.90, 233.37, 446.18, 227.72
+257, 299, 97, 528, 378, 336.65, 233.52, 446.74, 227.58
+258, 291, 97, 523, 380, 337.71, 233.64, 447.50, 228.00
+259, 292, 93, 529, 383, 337.59, 233.53, 447.63, 228.33
+260, 285, 96, 521, 385, 336.88, 234.35, 446.32, 228.44
+261, 284, 95, 520, 383, 336.20, 233.80, 445.83, 228.11
+262, 288, 99, 516, 377, 336.89, 233.44, 446.96, 228.20
+263, 299, 95, 536, 377, 338.63, 233.45, 447.55, 228.07
+264, 287, 100, 516, 378, 337.86, 233.67, 447.26, 228.29
+265, 287, 96, 522, 382, 337.24, 233.73, 446.84, 228.16
+266, 285, 96, 519, 383, 337.47, 233.98, 446.70, 228.60
+267, 297, 95, 533, 381, 338.52, 234.37, 447.65, 228.63
+268, 297, 97, 527, 379, 338.31, 233.88, 448.10, 228.23
+269, 288, 97, 519, 380, 337.31, 233.61, 446.64, 227.89
+270, 290, 96, 522, 380, 336.97, 233.14, 446.85, 227.62
+271, 290, 91, 530, 384, 336.93, 233.29, 446.37, 228.02
+272, 288, 98, 518, 380, 337.09, 233.50, 446.54, 227.78
+273, 288, 98, 520, 380, 337.49, 233.52, 447.02, 228.08
+274, 295, 94, 533, 379, 337.55, 233.62, 447.57, 228.06
+275, 297, 94, 531, 382, 337.18, 233.49, 446.70, 227.45
+276, 287, 98, 516, 379, 336.13, 233.21, 445.36, 227.70
+277, 289, 97, 519, 379, 336.29, 232.93, 445.55, 227.58
+278, 296, 98, 526, 378, 336.87, 232.99, 446.29, 227.78
+279, 300, 98, 529, 377, 337.72, 233.34, 447.67, 227.80
+280, 300, 97, 528, 378, 338.07, 233.67, 447.46, 228.43
+281, 285, 98, 519, 383, 336.79, 233.70, 446.93, 228.48
+282, 284, 96, 519, 384, 336.15, 233.59, 445.54, 228.23
+283, 287, 96, 521, 383, 337.14, 233.61, 446.82, 228.44
+284, 297, 93, 535, 381, 338.47, 234.11, 448.19, 227.79
+285, 298, 92, 538, 380, 338.70, 233.97, 447.77, 227.98
+286, 296, 93, 539, 381, 338.27, 234.25, 447.64, 228.10
+287, 292, 92, 530, 383, 338.36, 233.50, 446.95, 228.76
+288, 290, 98, 521, 380, 337.69, 233.44, 447.08, 228.10
+289, 288, 99, 516, 379, 337.42, 233.59, 446.47, 228.16
+290, 289, 98, 521, 380, 337.55, 233.71, 447.15, 228.27
+291, 299, 98, 528, 377, 337.29, 233.82, 447.31, 228.23
+292, 299, 97, 528, 377, 337.39, 233.65, 447.58, 228.37
+293, 285, 96, 520, 381, 337.76, 233.45, 446.66, 228.27
+294, 296, 96, 530, 379, 337.80, 233.62, 447.33, 228.36
+295, 291, 97, 524, 380, 337.97, 233.50, 447.42, 228.16
+296, 296, 96, 533, 378, 338.40, 233.49, 447.96, 228.05
+297, 293, 94, 530, 382, 337.26, 233.45, 446.51, 228.12
+298, 291, 96, 524, 380, 337.16, 233.41, 446.37, 227.91
+299, 290, 96, 523, 380, 337.66, 233.26, 447.44, 228.01
+300, 301, 97, 529, 376, 337.80, 233.55, 447.97, 227.77
diff --git a/bob/pad/face/test/data/real_client001_android_SD_scene01.mp4 b/bob/pad/face/test/data/real_client001_android_SD_scene01.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..26f44879c3eea7638dd0d7d90189130c9f643cda
Binary files /dev/null and b/bob/pad/face/test/data/real_client001_android_SD_scene01.mp4 differ
diff --git a/bob/pad/face/test/data/real_client001_android_SD_scene01_ref_feats.h5 b/bob/pad/face/test/data/real_client001_android_SD_scene01_ref_feats.h5
new file mode 100644
index 0000000000000000000000000000000000000000..ad81355a14472636154f4e40ab77731529977126
Binary files /dev/null and b/bob/pad/face/test/data/real_client001_android_SD_scene01_ref_feats.h5 differ
diff --git a/bob/pad/face/test/dummy/database.py b/bob/pad/face/test/dummy/database.py
index e8ef084437e3a79de1bda01fd7378bca7e2a0ba6..3928d5e797a8f1f36555794396ef79264248aaac 100644
--- a/bob/pad/face/test/dummy/database.py
+++ b/bob/pad/face/test/dummy/database.py
@@ -1,10 +1,10 @@
-from bob.bio.base.test.utils import atnt_database_directory
 import bob.io.base
 import os
 from bob.pipelines import DelayedSample
 from bob.pad.base.pipelines.vanilla_pad.abstract_classes import Database
-from bob.db.base.utils import check_parameters_for_validity, convert_names_to_lowlevel
+from bob.bio.base.database.legacy import check_parameters_for_validity, convert_names_to_lowlevel
 from bob.bio.video import VideoLikeContainer
+from bob.bio.base.database import AtntBioDatabase
 
 
 def DummyPadSample(
@@ -35,13 +35,11 @@ class DummyDatabase(Database):
     def __init__(self, none_annotations=False):
         # call base class constructor with useful parameters
         super(DummyDatabase, self).__init__()
-        self.original_directory = atnt_database_directory()
-        import bob.db.atnt
-
-        self._db = bob.db.atnt.Database()
-        self.low_level_names = ("world", "dev")
-        self.high_level_names = ("train", "dev")
+        self._db = AtntBioDatabase()
+        self.original_directory = self._db.original_directory
         self.none_annotations = none_annotations
+        self.high_level_names = ["train", "dev", "eval"]
+        self.low_level_names = ["world", "dev", "eval"]
 
     def _make_bio(self, files):
         files = [
diff --git a/bob/pad/face/test/test_block.py b/bob/pad/face/test/test_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c23374e5e532230e497e3b25a1040a643229ac3
--- /dev/null
+++ b/bob/pad/face/test/test_block.py
@@ -0,0 +1,22 @@
+import numpy
+
+from bob.pad.face.utils.load_utils import block
+
+A_org = numpy.array(range(1, 17), "float64").reshape((4, 4))
+A_ans_0_3D = numpy.array(
+    [[[1, 2], [5, 6]], [[3, 4], [7, 8]], [[9, 10], [13, 14]], [[11, 12], [15, 16]]],
+    "float64",
+)
+A_ans_0_4D = numpy.array(
+    [[[[1, 2], [5, 6]], [[3, 4], [7, 8]]], [[[9, 10], [13, 14]], [[11, 12], [15, 16]]]],
+    "float64",
+)
+
+
+def test_block():
+    B = block(A_org, (2, 2), (0, 0))
+
+    assert (B == A_ans_0_4D).all()
+
+    B = block(A_org, (2, 2), (0, 0), flat=True)
+    assert (B == A_ans_0_3D).all()
diff --git a/bob/pad/face/test/test_qualitymeasure.py b/bob/pad/face/test/test_qualitymeasure.py
new file mode 100644
index 0000000000000000000000000000000000000000..b67b72e2837a3a872b7add7800575f79f2c0e994
--- /dev/null
+++ b/bob/pad/face/test/test_qualitymeasure.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# sushil bhattacharjee <sushil.bhattacharjee@idiap.ch>
+# Fri. 10 March 2017
+
+"""Unit-tests for bob.ip.qualitymeasure"""
+
+import os
+import numpy as np
+import nose.tools
+import pkg_resources
+import h5py
+
+import bob.io.base
+import bob.io.base.test_utils
+
+from bob.io.image import to_bob
+import imageio
+
+from bob.pad.face.qualitymeasure import galbally_iqm_features as iqm
+from bob.pad.face.qualitymeasure import msu_iqa_features as iqa
+
+
+REF_VIDEO_FILE = "real_client001_android_SD_scene01.mp4"
+REF_FEATURE_FILE = "real_client001_android_SD_scene01_ref_feats.h5"
+
+
+def F(n):
+    return pkg_resources.resource_filename(__name__, os.path.join("data", n))
+
+
+input_video_file = F(REF_VIDEO_FILE)
+assert os.path.isfile(input_video_file), "File not found: %s" % input_video_file
+video_data = to_bob(np.array(list(imageio.get_reader(input_video_file).iter_data())))
+
+numframes = 3
+
+
+def load_reference_features():
+    ref_feat_file = F(REF_FEATURE_FILE)
+    assert os.path.isfile(ref_feat_file), "File not found: %s" % ref_feat_file
+
+    rf = h5py.File(ref_feat_file, "r")
+    assert "bobiqm" in rf, "Key: /bobiqm not found in file %s" % ref_feat_file
+    assert "msuiqa" in rf, "Key: /msuiqa not found in file %s" % ref_feat_file
+    galbally_ref_features = np.array(rf["bobiqm"])
+    msu_ref_features = np.array(rf["msuiqa"])
+    del rf
+    return (galbally_ref_features, msu_ref_features)
+
+
+# load reference-features into global vars.
+galbally_ref_features, msu_ref_features = load_reference_features()
+
+
+def test_galbally_feat_extr():
+
+    # change this, if you add more features to galbally_iqm_features module.
+    iqm_len = 18
+    # feature-array to hold features for several frames
+    bobfset = np.zeros([numframes, iqm_len])
+    f = 0
+
+    # process first frame separately, to get the no. of iqm features
+    rgbFrame = video_data[f]
+    iqmSet = iqm.compute_quality_features(rgbFrame)
+    numIQM = len(iqmSet)
+
+    # test: check that numIQM is the same as expected iqm_len (18)
+    nose.tools.eq_(numIQM, iqm_len)
+
+    # store features for first frame in feature-array
+    bobfset[f] = iqmSet
+
+    # now store iqm features for remaining test-frames of input video.
+    for f in range(1, numframes):
+        rgbFrame = video_data[f]
+        bobfset[f] = iqm.compute_quality_features(rgbFrame)
+
+    # test: compare feature-values in bobfset[] with those loaded from hdf5 file
+    nose.tools.assert_true((bobfset == galbally_ref_features).all())
+
+
+def test_msu_feat_extr():
+    # change this, if you change the no. of features in msu_iqa_features module.
+    iqa_len = 121
+    # feature-array to hold features for several frames
+    msufset = np.zeros([numframes, iqa_len])
+    f = 0
+
+    # process first frame separately, to get the no. of iqa features
+    rgbFrame = video_data[f]
+
+    iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
+    numIQA = len(iqaSet)
+
+    # test: check that numIQA matches the expected iqa_len (121)
+    nose.tools.eq_(numIQA, iqa_len)
+
+    # store features for first frame in feature-array
+    msufset[f] = iqaSet
+
+    # now store iqa features for remaining test-frames of input video.
+    for f in range(1, numframes):
+        rgbFrame = video_data[f]
+        msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
+        msufset[f] = msuQFeats
+
+    # test: compare feature-values in msufset[] with those loaded from hdf5 file
+    np.testing.assert_allclose(msufset, msu_ref_features)
diff --git a/bob/pad/face/test/test_transformers.py b/bob/pad/face/test/test_transformers.py
index b527de156af9f6cea655727ce24193c9b3e45d0c..000a64f199d55371f12ad8ed719a45bb80b829ac 100644
--- a/bob/pad/face/test/test_transformers.py
+++ b/bob/pad/face/test/test_transformers.py
@@ -10,85 +10,13 @@ from bob.io.base.test_utils import datafile
 
 from bob.io.base import load
 
-import bob.io.image  # for image loading functionality
-
 import bob.bio.video
 
-from bob.ip.color import rgb_to_gray
-
-from bob.pad.face.extractor import LBPHistogram, ImageQualityMeasure
-
-
-def test_lbp_histogram():
-    lbp = LBPHistogram()
-    img = load(datafile("testimage.jpg", "bob.bio.face.test"))
-    img = rgb_to_gray(img)
-    features = lbp.transform([img])[0]
-    reference = load(datafile("lbp.hdf5", "bob.pad.face.test"))
-    assert np.allclose(features, reference)
-
-
-def notest_video_lbp_histogram():
-    """
-    Test LBPHistogram with Wrapper extractor.
-    """
-
-    from ..preprocessor import FaceCropAlign
-    from bob.bio.video.preprocessor import Wrapper
-
-    FACE_SIZE = 64  # The size of the resulting face
-    RGB_OUTPUT_FLAG = False  # Gray-scale output
-    USE_FACE_ALIGNMENT = False  # use annotations
-    MAX_IMAGE_SIZE = None  # no limiting here
-    FACE_DETECTION_METHOD = None  # use annotations
-    MIN_FACE_SIZE = 50  # skip small faces
-
-    image_preprocessor = FaceCropAlign(
-        face_size=FACE_SIZE,
-        rgb_output_flag=RGB_OUTPUT_FLAG,
-        use_face_alignment=USE_FACE_ALIGNMENT,
-        max_image_size=MAX_IMAGE_SIZE,
-        face_detection_method=FACE_DETECTION_METHOD,
-        min_face_size=MIN_FACE_SIZE,
-    )
+from bob.bio.face.color import rgb_to_gray
 
-    preprocessor = Wrapper(image_preprocessor)
+from bob.pad.face.extractor import ImageQualityMeasure
 
-    image = load(datafile("test_image.png", "bob.pad.face.test"))
-    annotations = {"topleft": (95, 155), "bottomright": (215, 265)}
 
-    video, annotations = convert_image_to_video_data(image, annotations, 20)
-
-    faces = preprocessor(frames=video, annotations=annotations)
-
-    LBPTYPE = "uniform"
-    ELBPTYPE = "regular"
-    RAD = 1
-    NEIGHBORS = 8
-    CIRC = False
-    DTYPE = None
-
-    extractor = bob.bio.video.extractor.Wrapper(
-        LBPHistogram(
-            lbptype=LBPTYPE,
-            elbptype=ELBPTYPE,
-            rad=RAD,
-            neighbors=NEIGHBORS,
-            circ=CIRC,
-            dtype=DTYPE,
-        )
-    )
-
-    lbp_histograms = extractor(faces)
-
-    assert len(lbp_histograms) == 20
-    assert len(lbp_histograms[0][1]) == 59
-    assert (lbp_histograms[0][1] == lbp_histograms[-1][1]).all()
-    assert (lbp_histograms[0][1][0] - 0.12695109261186263) < 0.000001
-    assert (lbp_histograms[0][1][-1] - 0.031737773152965658) < 0.000001
-
-
-# ==============================================================================
 def notest_video_quality_measure():
     """
     Test ImageQualityMeasure with Wrapper extractor.
diff --git a/bob/pad/face/utils/load_utils.py b/bob/pad/face/utils/load_utils.py
index 088385ca85c76be32714a04674d5e7e14bec6f49..04f82a8b76f320efcc905faab5e284da49743bfd 100644
--- a/bob/pad/face/utils/load_utils.py
+++ b/bob/pad/face/utils/load_utils.py
@@ -6,9 +6,94 @@ import bob.io.image
 import numpy
 from bob.bio.face.annotator import bounding_box_from_annotation, min_face_size_validator
 from bob.bio.video.annotator import normalize_annotations
-from bob.ip.base import block, block_generator, block_output_shape, scale
-from bob.ip.color import rgb_to_hsv, rgb_to_yuv
+from bob.bio.face.color import rgb_to_hsv, rgb_to_yuv
 from imageio import get_reader
+from PIL import Image
+
+
+def block(X, block_size, block_overlap, flat=False):
+    """Splits an image into (possibly overlapping) blocks.
+
+    Parameters
+    ----------
+    X : numpy.ndarray
+        The 2D image to be split into blocks.
+    block_size : tuple
+        The (height, width) size of each block.
+    block_overlap : tuple
+        The (height, width) overlap between neighboring blocks.
+    flat : bool
+        If True, the grid of blocks is flattened into a single block axis.
+
+    Returns
+    -------
+    numpy.ndarray
+        The image split into blocks.
+    """
+
+    assert len(block_size) == 2
+    assert len(block_overlap) == 2
+
+    # step between the origins of two consecutive blocks
+    step_h = int(block_size[0] - block_overlap[0])
+    step_w = int(block_size[1] - block_overlap[1])
+    n_blocks_h = int((X.shape[0] - block_overlap[0]) / step_h)
+    n_blocks_w = int((X.shape[1] - block_overlap[1]) / step_w)
+
+    # each block holds the full block_size, not just the step between origins
+    blocks = numpy.zeros(
+        shape=(n_blocks_h, n_blocks_w, block_size[0], block_size[1]), dtype=X.dtype
+    )
+    for h in range(n_blocks_h):
+        for w in range(n_blocks_w):
+
+            blocks[h, w, :, :] = X[
+                h * step_h : h * step_h + block_size[0],
+                w * step_w : w * step_w + block_size[1],
+            ]
+
+    if flat:
+        return blocks.reshape(n_blocks_h * n_blocks_w, blocks.shape[2], blocks.shape[3])
+
+    return blocks
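+
+
+# A quick sanity check of block(), for illustration only (not executed at
+# import time): with a 4x4 input, 2x2 blocks and no overlap there is a 2x2
+# grid of blocks; with a (1, 1) overlap the block grid grows to 3x3.
+#
+#     >>> X = numpy.arange(16).reshape(4, 4)
+#     >>> block(X, (2, 2), (0, 0)).shape
+#     (2, 2, 2, 2)
+#     >>> block(X, (2, 2), (0, 0), flat=True).shape
+#     (4, 2, 2)
+#     >>> block(X, (2, 2), (1, 1)).shape
+#     (3, 3, 2, 2)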
+
+
+def scale(image, scaling_factor):
+    """Scales an image using PIL.
+
+    Parameters
+    ----------
+    image : numpy.ndarray
+        Input image (Bob format) to be scaled.
+    scaling_factor : float or tuple
+        Either a scalar applied to both spatial dimensions, or the
+        (height, width) shape of the rescaled image.
+
+    Returns
+    -------
+    numpy.ndarray
+        The rescaled image, as floats.
+    """
+
+    if isinstance(scaling_factor, float):
+        # scale only the last two (spatial) dimensions, leaving a possible
+        # leading color-channel dimension untouched
+        new_shape = tuple(
+            int(round(s * scaling_factor)) for s in image.shape[-2:]
+        )
+        return scale(image, new_shape)
+
+    elif isinstance(scaling_factor, tuple):
+
+        if len(scaling_factor) > 2:
+            scaling_factor = scaling_factor[1:]
+
+        # PIL expects the target size as (width, height)
+        return bob.io.image.to_bob(
+            numpy.array(
+                Image.fromarray(bob.io.image.to_matplotlib(image)).resize(
+                    size=(scaling_factor[1], scaling_factor[0])
+                ),
+                dtype="float",
+            )
+        )
+    else:
+        raise ValueError(f"Scaling factor not supported: {scaling_factor}")
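+
+
+# Usage sketch, for illustration only, assuming a uint8 Bob-format color
+# image (channels first):
+#
+#     >>> img = numpy.random.randint(0, 256, size=(3, 100, 80), dtype="uint8")
+#     >>> scale(img, 0.5).shape
+#     (3, 50, 40)
+#     >>> scale(img, (64, 64)).shape
+#     (3, 64, 64)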
 
 
 def frames(path):
@@ -127,8 +212,8 @@ def scale_face(face, face_height, face_width=None):
     face_width = face_height if face_width is None else face_width
     shape = list(face.shape)
     shape[-2:] = (face_height, face_width)
-    scaled_face = numpy.empty(shape, dtype="float64")
-    scale(face, scaled_face)
+    scaled_face = scale(face, tuple(shape))
     return scaled_face
 
 
@@ -154,19 +239,21 @@ def blocks(data, block_size, block_overlap=(0, 0)):
     ValueError
         If data dimension is not between 2 and 4 (inclusive).
     """
+
     data = numpy.asarray(data)
     # if a gray scale image:
     if data.ndim == 2:
         output = block(data, block_size, block_overlap, flat=True)
     # if a color image:
     elif data.ndim == 3:
-        out_shape = list(data.shape[0:1]) + list(
-            block_output_shape(data[0], block_size, block_overlap, flat=True)
-        )
 
-        output = numpy.empty(out_shape, dtype=data.dtype)
+        output = []
         for i, img2d in enumerate(data):
-            block(img2d, block_size, block_overlap, output[i], flat=True)
+            output.append(block(img2d, block_size, block_overlap, flat=True))
         output = numpy.moveaxis(output, 0, 1)
     # if a color video:
     elif data.ndim == 4:
diff --git a/conda/meta.yaml b/conda/meta.yaml
index ff4a4f429617722123baeff8ecb931dd4c0c2e96..f11f2c621d7536d04d33cd08f5c6e601b090e772 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -24,10 +24,6 @@ requirements:
     - pip {{ pip }}
     - bob.extension
     - bob.io.base
-    - bob.io.image
-    - bob.ip.base
-    - bob.ip.color
-    - bob.ip.qualitymeasure
     - bob.bio.base
     - bob.bio.face
     - bob.bio.video
diff --git a/conda/yum_requirements.txt b/conda/yum_requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0e16b14e7a64ea16e541639ed1215b3650f1d48b
--- /dev/null
+++ b/conda/yum_requirements.txt
@@ -0,0 +1 @@
+mesa-libGL
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 84cfe4e7fe8b7d28cbbdc5da276b13ced008a92c..55c66bbe9a1c5c2e6a1cbe51fbb30a93c327d630 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -32,7 +32,7 @@ Usually, it is a good idea to have at least verbose level 2 (i.e., calling
    To run the experiments in parallel, you can use an existing (or define a
    new) SGE grid or local-host multiprocessing configuration. To run the
    experiment on the Idiap SGE grid, you can simply add the ``--dask-client sge`` command
-   line option. To run experiments in parallel on the local machine, add the 
+   line option. To run experiments in parallel on the local machine, add the
    ``--dask-client local-parallel`` option.
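+
+   For example (a sketch only; substitute the pipeline and database of your
+   choice, here the ``qm`` pipeline registered in this package's entry
+   points), a parallel run on the local machine could look like:
+
+   .. code-block:: sh
+
+       $ bob pad vanilla-pad replay-attack qm svm-frames \
+         --dask-client local-parallel \
+         --output <PATH_TO_STORE_THE_RESULTS>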
 
    See :any:`this <pipeline_simple_features>` for more
@@ -89,79 +89,6 @@ more detail you can check the corresponding configuration file:
 ``bob/pad/face/config/replay_attack.py``.
 
 
-LBP features of facial region + SVM classifier
-===================================================
-
-Detailed description of this PAD pipe-line is given at
-:ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
-
-To run this baseline on the `replayattack`_ database, using the ``grandtest``
-protocol, execute the following:
-
-.. code-block:: sh
-
-    $ bob pad vanilla-pad replay-attack lbp svm-frames \
-    --output <PATH_TO_STORE_THE_RESULTS>
-
-.. tip::
-
-    If you are at `idiap`_ you can use the SGE grid to speed-up the calculations.
-    Simply add the ``--dask-client sge`` (or ``-l sge``) argument to the above
-    command. For example:
-
-    .. code-block:: sh
-
-        $ bob pad vanilla-pad replay-attack lbp svm-frames \
-        --output <PATH_TO_STORE_THE_RESULTS> \
-        --dask-client sge
-
-To understand the settings of this baseline PAD experiment you can check the
-corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``
-
-To evaluate the results computing EER, HTER and plotting ROC you can use the
-following command:
-
-.. code-block:: sh
-
-    $ bob pad evaluate \
-    <PATH_TO_STORE_THE_RESULTS>/scores-dev.csv  \
-    <PATH_TO_STORE_THE_RESULTS>/scores-eval.csv \
-    --legends "LBP features of facial region + SVM classifier + REPLAY-ATTACK database" \
-    --eval \
-    --criterion eer \
-    --output <PATH_TO_STORE_THE_RESULTS>/evaluation_report.pdf
-
-
-The error rates for `replayattack`_ database are summarized in the table below:
-
-==============  =================  =================
-..              Development        Evaluation
-==============  =================  =================
-APCER (attack)  17.4%              14.2%
-APCER_AP        17.4%              14.2%
-BPCER           17.4%              16.4%
-ACER            17.4%              15.3%
-FTA             0.0%               0.0%
-FPR             17.4% (1045/6000)  14.2% (1134/7999)
-FNR             17.4% (209/1200)   16.4% (262/1600)
-HTER            17.4%              15.3%
-FAR             17.4%              14.2%
-FRR             17.4%              16.4%
-PRECISION       0.5                0.5
-RECALL          0.8                0.8
-F1_SCORE        0.6                0.7
-AUC             0.9                0.9
-AUC-LOG-SCALE   2.0                2.1
-==============  =================  =================
-
-
-The ROC curves for this particular experiment can be downloaded from here:
-
-:download:`ROC curve <img/ROC_lbp_svm_replay_attack.pdf>`
-
-------------
-
-
 Image Quality Measures as features of facial region + SVM classifier
 ========================================================================
 
@@ -235,70 +162,6 @@ This section summarizes the results of baseline face PAD experiments on the `Rep
 The description of the database-related settings, which are used to run face PAD baselines on the Replay-Mobile is given here :ref:`bob.pad.face.resources.databases.replay_mobile`. To understand the settings in more detail you can check the corresponding configuration file : ``bob/pad/face/config/replay_mobile.py``.
 
 
-LBP features of facial region + SVM classifier
-========================================================================
-
-Detailed description of this PAD pipe-line is given at :ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
-Note, that the same PAD pipe-line was used to run experiments on the Replay-Attack database.
-
-To run this baseline on the `Replay-Mobile`_ database, using the ``grandtest`` protocol, execute the following:
-
-.. code-block:: sh
-
-    $ bob pad vanilla-pad replay-mobile lbp svm_frame \
-    --output <PATH_TO_STORE_THE_RESULTS>
-
-.. tip::
-
-    Similarly to the tip above you can run this baseline in parallel with the
-    ``--dask-client`` option.
-
-To understand the settings of this baseline PAD experiment you can check the
-corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``
-
-To evaluate the results computing EER, HTER and plotting ROC you can use the
-following command:
-
-.. code-block:: sh
-
-    $ bob pad evaluate \
-    <PATH_TO_STORE_THE_RESULTS>/scores-dev.csv  \
-    <PATH_TO_STORE_THE_RESULTS>/scores-eval.csv \
-    --legends "LBP features of facial region + SVM classifier + Replay-Mobile database" \
-    --eval \
-    --criterion eer \
-    --output <PATH_TO_STORE_THE_RESULTS>/evaluation_report.pdf
-
-The EER/HTER errors for the `Replay-Mobile`_ database are summarized in the table below:
-
-===================  =================  =================
-..                   Development        Evaluation
-===================  =================  =================
-APCER (mattescreen)  15.60%             25.77%
-APCER (print)        12.97%             8.44%
-APCER_AP             15.60%             25.77%
-BPCER                14.29%             20.03%
-ACER                 14.94%             22.90%
-FTA                  0.00%              0.00%
-FPR                  14.28% (728/5098)  17.02% (647/3802)
-FNR                  14.29% (457/3199)  20.03% (439/2192)
-HTER                 14.28%             18.52%
-FAR                  14.28%             17.02%
-FRR                  14.29%             20.03%
-PRECISION            0.79               0.73
-RECALL               0.86               0.80
-F1_SCORE             0.82               0.76
-AUC                  0.93               0.88
-AUC-LOG-SCALE        1.83               1.76
-===================  =================  =================
-
-
-The ROC curves for the particular experiment can be downloaded from here:
-
-:download:`ROC curve <img/ROC_lbp_svm_replay_mobile.pdf>`
-
-------------
-
 
 Image Quality Measures as features of facial region + SVM classifier
 ========================================================================
diff --git a/doc/resources.rst b/doc/resources.rst
index 5f905694408c2c9ecec117bc9ea3d8865a6b63de..8448bbf0967c84d63563956f5eaf5ee85ecbd9f8 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -59,11 +59,6 @@ The configuration files contain at least the following arguments of the
     * ``pipeline`` containing zero, one, or more Transformers and one Classifier
 
 
-.. _bob.pad.face.resources.face_pad.lbp_svm_replayattack:
-
-LBP features of facial region + SVM for REPLAY-ATTACK
-================================================================================
-
 
 .. _bob.pad.face.resources.face_pad.qm_svm_replayattack:
 
diff --git a/requirements.txt b/requirements.txt
index c0e85a1e8a73e312c6a7a13bef3f77d51518cef5..b9edf490978c937b1533015aedb6860da3b21fdb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,13 +3,9 @@ numpy
 bob.extension
 bob.bio.base
 bob.io.base
-bob.ip.base
 bob.pad.base
 bob.bio.face
 bob.bio.video
-bob.io.image
-bob.ip.color
-bob.ip.qualitymeasure
 scikit-learn
 scikit-image
 imageio-ffmpeg
diff --git a/setup.py b/setup.py
index f14cecf4057cc447451a8941a3699920d5311012..4e5cd0e622cdb72f9dc38af73d82268532322ea6 100644
--- a/setup.py
+++ b/setup.py
@@ -76,8 +76,6 @@ setup(
             "casiasurf = bob.pad.face.config.casiasurf",
             "swan = bob.pad.face.config.swan",
             "oulunpu = bob.pad.face.config.oulunpu",
-            # LBPs
-            "lbp = bob.pad.face.config.lbp_64",
             # quality measure
             "qm = bob.pad.face.config.qm_64",
             # classifiers