diff --git a/MANIFEST.in b/MANIFEST.in
index 9d106fba3c5988e717d2a94d46458436f21c6310..35cfe3094335c5a2afcdf7ba6446438c7d345808 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include README.rst bootstrap-buildout.py buildout.cfg COPYING
 recursive-include doc *.py *.rst
-recursive-include bob/bio/vein/tests *.png *.mat *.txt *.npy
+recursive-include bob/bio/vein/tests *.png *.mat *.txt *.hdf5
diff --git a/bob/bio/vein/algorithm/Correlate.py b/bob/bio/vein/algorithm/Correlate.py
new file mode 100644
index 0000000000000000000000000000000000000000..233854128cf82c4ea4d88a0f894356a8dfa5beab
--- /dev/null
+++ b/bob/bio/vein/algorithm/Correlate.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+import numpy
+import skimage.feature
+
+from bob.bio.base.algorithm import Algorithm
+
+
+class Correlate (Algorithm):
+  """Correlate probe and model without cropping
+
+  The method is based on "cross-correlation" between a model and a probe image.
+  The difference between this and :py:class:`MiuraMatch` is that **no**
+  cropping takes place on this implementation. We simply fill the excess
+  boundary with zeros and extract the valid correlation region between the
+  probe and the model using :py:func:`skimage.feature.match_template`.
+
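+  Example (an illustrative sketch; requires ``bob.bio.base`` to be installed,
+  with input arrays made up for demonstration)::
+
+     >>> import numpy
+     >>> probe = numpy.zeros((30, 30))
+     >>> probe[15, 10:20] = 1.
+     >>> algo = Correlate()
+     >>> model = algo.enroll([probe.copy()])
+     >>> round(float(algo.score(model, probe)), 2)  # identical images
+     1.0
+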
+  """
+
+  def __init__(self):
+
+    # call base class constructor
+    Algorithm.__init__(
+        self,
+        multiple_model_scoring = None,
+        multiple_probe_scoring = None
+    )
+
+
+  def enroll(self, enroll_features):
+    """Enrolls the model by computing an average graph for each model"""
+
+    # return the generated model
+    return numpy.array(enroll_features)
+
+
+  def score(self, model, probe):
+    """Computes the score between the probe and the model.
+
+    Parameters:
+
+      model (numpy.ndarray): The model of the user to test the probe against
+
+      probe (numpy.ndarray): The probe to test
+
+
+    Returns:
+
+      float: Value between -1 and 1, where larger values mean a better match
+
+    """
+
+    I = probe.astype(numpy.float64)
+
+    if len(model.shape) == 2:
+      model = numpy.array([model])
+
+    scores = []
+
+    # iterate over all models for a given individual
+    for md in model:
+
+      R = md.astype(numpy.float64)
+      Nm = skimage.feature.match_template(I, R)
+
+      # figures out where the maximum is on the resulting matrix
+      t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+      # this is our output
+      scores.append(Nm[t0,s0])
+
+    return numpy.mean(scores)
diff --git a/bob/bio/vein/algorithm/MiuraMatch.py b/bob/bio/vein/algorithm/MiuraMatch.py
index 36a834e461d60ef205c137b5d4c55fe003320292..b5ade6196f601e7b692c15078ec72a2f9fc8be03 100644
--- a/bob/bio/vein/algorithm/MiuraMatch.py
+++ b/bob/bio/vein/algorithm/MiuraMatch.py
@@ -47,8 +47,8 @@ class MiuraMatch (Algorithm):
   """
 
   def __init__(self,
-      ch = 8,       # Maximum search displacement in y-direction
-      cw = 5,       # Maximum search displacement in x-direction
+      ch = 80,       # Maximum search displacement in y-direction
+      cw = 90,       # Maximum search displacement in x-direction
       ):
 
     # call base class constructor
@@ -94,8 +94,6 @@ class MiuraMatch (Algorithm):
     if len(model.shape) == 2:
       model = numpy.array([model])
 
-    n_models = model.shape[0]
-
     scores = []
 
     # iterate over all models for a given individual
@@ -103,7 +101,7 @@ class MiuraMatch (Algorithm):
 
       # erode model by (ch, cw)
       R = md.astype(numpy.float64)
-      h, w = R.shape
+      h, w = R.shape  # same shape as the probe image I
       crop_R = R[self.ch:h-self.ch, self.cw:w-self.cw]
 
       # correlates using scipy - fastest option available iff the self.ch and
@@ -127,6 +125,6 @@ class MiuraMatch (Algorithm):
       # normalizes the output by the number of pixels lit on the input
       # matrices, taking into consideration the surface that produced the
       # result (i.e., the eroded model and part of the probe)
-      scores.append(Nmm/(sum(sum(crop_R)) + sum(sum(I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw]))))
+      scores.append(Nmm/(crop_R.sum() + I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw].sum()))
 
     return numpy.mean(scores)
diff --git a/bob/bio/vein/algorithm/MiuraMatchRotationFast.py b/bob/bio/vein/algorithm/MiuraMatchRotationFast.py
new file mode 100644
index 0000000000000000000000000000000000000000..b51ce6c2df44b60844d621a57fc15457b5163155
--- /dev/null
+++ b/bob/bio/vein/algorithm/MiuraMatchRotationFast.py
@@ -0,0 +1,386 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jan 18 10:02:17 2017
+
+@author: onikisins
+"""
+
+import numpy as np
+import scipy.signal
+from scipy import ndimage
+from skimage import morphology
+from skimage import transform as tf
+
+from bob.bio.base.algorithm import Algorithm
+
+
+#==============================================================================
+class MiuraMatchRotationFast (Algorithm):
+    """
+
+    This method is an enhancement of the Miura matching algorithm introduced in:
+
+    N. Miura, A. Nagasaka, and T. Miyatake. Feature extraction of finger
+    vein patterns based on repeated line tracking and its application to personal
+    identification. Machine Vision and Applications, Vol. 15, Num. 4, pp.
+    194--203, 2004
+
+    The algorithm is designed to compensate for both rotation and translation
+    in a computationally efficient manner, prior to score computation.
+
+    This is achieved by computing the cross-correlation of enrolment and probe
+    samples twice. In the first pass, the probe is cross-correlated with an
+    image which is the sum of pre-rotated enroll images.
+    This makes the cross-correlation robust to rotation within a certain
+    range of angles and, with some additional steps, helps to determine the
+    angle between the enrolment and probe samples. The angular range is
+    defined by the ``angle_limit`` parameter of the algorithm.
+
+    Next, the enrolled image is rotated by the obtained angle, thus
+    compensating for the angle between the enrolment and probe samples. After
+    that, the ordinary Miura matching algorithm is applied.
+
+    The matching of both binary and gray-scale vein patterns is possible.
+    Set ``gray_scale_input_flag`` to ``True`` if the input is gray-scale.
+
+    The details of this algorithm are introduced in the following paper:
+
+    Olegs Nikisins, Andre Anjos, Teodors Eglitis, Sebastien Marcel.
+    Fast cross-correlation based wrist vein recognition algorithm with
+    rotation and translation compensation.
+
+
+    **Parameters:**
+
+    ``ch`` : :py:class:`int`
+        Maximum search displacement in y-direction.
+        Default value: 5.
+
+    ``cw`` : :py:class:`int`
+        Maximum search displacement in x-direction.
+        Default value: 5.
+
+    ``angle_limit`` : :py:class:`float`
+        Rotate the probe in the range [-angle_limit, +angle_limit] degrees.
+        Default value: 10.
+
+    ``angle_step`` : :py:class:`float`
+        Rotate the probe with this step in degrees.
+        Default value: 1.
+
+    ``perturbation_matching_flag`` : :py:class:`bool`
+        Compute the score using perturbation_matching method of the class.
+        Default: ``False``.
+
+    ``kernel_radius`` : :py:class:`int`
+        Radius of the circular kernel used in the morphological dilation of
+        the enroll. Only valid when ``perturbation_matching_flag`` is ``True``.
+        Default: 3.
+
+    ``score_fusion_method`` : :py:class:`str`
+        Score fusion method.
+        Default value: 'mean'.
+        Possible options: 'mean', 'max', 'median'.
+
+    ``gray_scale_input_flag`` : :py:class:`bool`
+        Set this flag to ``True`` if the image is grayscale. Default: ``False``.
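+
+    Example (an illustrative sketch, assuming ``bob.bio.base`` is available;
+    ``enroll_image`` and ``probe_image`` are hypothetical binary vein images)::
+
+        algo = MiuraMatchRotationFast(ch=5, cw=5, angle_limit=10, angle_step=1)
+        model = algo.enroll([enroll_image])
+        score = algo.score(model, probe_image)  # larger means a better match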
+    """
+
+
+    #==========================================================================
+    def __init__(self, ch = 5, cw = 5,
+                 angle_limit = 10, angle_step = 1,
+                 perturbation_matching_flag = False,
+                 kernel_radius = 3,
+                 score_fusion_method = 'mean',
+                 gray_scale_input_flag = False):
+
+        # call base class constructor
+        Algorithm.__init__(self,
+                            ch = ch,
+                            cw = cw,
+                            angle_limit = angle_limit,
+                            angle_step = angle_step,
+                            perturbation_matching_flag = perturbation_matching_flag,
+                            kernel_radius = kernel_radius,
+                            score_fusion_method = score_fusion_method,
+                            gray_scale_input_flag = gray_scale_input_flag,
+                            multiple_model_scoring = None,
+                            multiple_probe_scoring = None)
+
+        self.ch = ch
+        self.cw = cw
+        self.angle_limit = angle_limit
+        self.angle_step = angle_step
+        self.perturbation_matching_flag = perturbation_matching_flag
+        self.kernel_radius = kernel_radius
+        self.score_fusion_method = score_fusion_method
+        self.gray_scale_input_flag = gray_scale_input_flag
+
+
+    #==========================================================================
+    def enroll(self, enroll_features):
+        """Enrolls the model by computing an average graph for each model"""
+
+        # return the generated model
+        return enroll_features
+
+
+    #==========================================================================
+    def perturbation_matching(self, enroll, probe, kernel_radius):
+        """
+        Compute the matching score as a normalized intersection of the enroll and
+        probe allowing perturbation of the enroll in the computation
+        of the intersection.
+
+        **Parameters:**
+
+        ``enroll`` : 2D :py:class:`numpy.ndarray`
+            Binary image of the veins representing the enroll.
+
+        ``probe`` : 2D :py:class:`numpy.ndarray`
+            Binary image of the veins representing the probe.
+
+        ``kernel_radius`` : :py:class:`int`
+            Radius of the circular kernel used in the morphological dilation of
+            the enroll.
+
+        **Returns:**
+
+        ``score`` : :py:class:`float`
+            Matching score; larger values mean a better match.
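+
+        Example (an illustrative sketch, assuming ``bob.bio.base`` is
+        available; for identical binary inputs the dilated images coincide,
+        so the score reaches its maximum of 0.5)::
+
+            >>> import numpy
+            >>> enroll = numpy.zeros((9, 9), dtype=bool)
+            >>> enroll[4, 2:7] = True
+            >>> algo = MiuraMatchRotationFast()
+            >>> float(algo.perturbation_matching(enroll, enroll.copy(), 3))
+            0.5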
+        """
+
+        ellipse_kernel = morphology.disk(radius = kernel_radius)
+
+        enroll_dilated = ndimage.morphology.binary_dilation(enroll, structure = ellipse_kernel).astype(np.float64)
+
+        probe_dilated = ndimage.morphology.binary_dilation(probe, structure = ellipse_kernel).astype(np.float64)
+
+        normalizer = np.sum(enroll_dilated) + np.sum(probe_dilated)
+
+        score = np.sum( enroll_dilated * probe_dilated ) / normalizer
+
+        return score
+
+
+    #==========================================================================
+    def miura_match(self, image_enroll, image_probe, ch, cw, compute_score_flag = True,
+                    perturbation_matching_flag = False, kernel_radius = 3):
+        """
+        Match two binary vein images using Miura matching algorithm.
+
+        **Parameters:**
+
+        ``image_enroll`` : 2D :py:class:`numpy.ndarray`
+            Binary image of the veins representing the model.
+
+        ``image_probe`` : 2D :py:class:`numpy.ndarray`
+            Probing binary image of the veins.
+
+        ``ch`` : :py:class:`int`
+            Cropping parameter in Y-direction.
+
+        ``cw`` : :py:class:`int`
+            Cropping parameter in X-direction.
+
+        ``compute_score_flag`` : :py:class:`bool`
+            Compute the score if True. Otherwise only the ``crop_image_probe``
+            is returned. Default: ``True``.
+
+        ``perturbation_matching_flag`` : :py:class:`bool`
+            Compute the score using perturbation_matching method of the class.
+            Only valid if ``compute_score_flag`` is set to ``True``.
+            Default: ``False``.
+
+        ``kernel_radius`` : :py:class:`int`
+            Radius of the circular kernel used in the morphological dilation of
+            the enroll.
+
+        **Returns:**
+
+        ``score`` : :py:class:`float`
+            Matching score between 0 and 0.5; larger values mean a better match.
+            Only returned if ``compute_score_flag`` is set to ``True``.
+
+        ``crop_image_probe`` : 2D :py:class:`numpy.ndarray`
+            Cropped binary image of the probe.
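+
+        Example (an illustrative sketch, assuming ``bob.bio.base`` is
+        available; a pattern lying well inside the cropping window, matched
+        against itself, scores 0.5)::
+
+            >>> import numpy
+            >>> img = numpy.zeros((20, 20))
+            >>> img[10, 8:12] = 1.
+            >>> algo = MiuraMatchRotationFast()
+            >>> score, cropped = algo.miura_match(img, img.copy(), 5, 5)
+            >>> round(float(score), 2)
+            0.5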
+        """
+
+        if image_enroll.dtype != np.float64:
+            image_enroll = image_enroll.astype(np.float64)
+
+        if image_probe.dtype != np.float64:
+            image_probe = image_probe.astype(np.float64)
+
+        h, w = image_enroll.shape
+
+        crop_image_enroll = image_enroll[ ch : h - ch, cw : w - cw ]
+
+        Nm = scipy.signal.fftconvolve(image_probe, np.rot90(crop_image_enroll, k=2), 'valid')
+
+        t0, s0 = np.unravel_index(Nm.argmax(), Nm.shape)
+
+        Nmm = Nm[t0,s0]
+
+        crop_image_probe = image_probe[ t0: t0 + h - 2 * ch, s0: s0 + w - 2 * cw ]
+
+        return_data = crop_image_probe
+
+        if compute_score_flag:
+
+            if perturbation_matching_flag:
+
+                score = self.perturbation_matching(crop_image_enroll, crop_image_probe, kernel_radius)
+
+            else:
+
+                score = Nmm / ( np.sum( crop_image_enroll ) + np.sum( crop_image_probe ) )
+
+            return_data = ( score, crop_image_probe )
+
+        return return_data
+
+
+    #==========================================================================
+    def sum_of_rotated_images(self, image, angle_limit, angle_step, gray_scale_input_flag):
+        """
+        Generate the output image, which is the sum of input images rotated
+        in the specified range with the defined step.
+
+        **Parameters:**
+
+        ``image`` : 2D :py:class:`numpy.ndarray`
+            Input image.
+
+        ``angle_limit`` : :py:class:`float`
+            Rotate the image in the range [-angle_limit, +angle_limit] degrees.
+
+        ``angle_step`` : :py:class:`float`
+            Rotate the image with this step in degrees.
+
+        ``gray_scale_input_flag`` : :py:class:`bool`
+            Set this flag to ``True`` if the image is grayscale. Default: ``False``.
+
+        **Returns:**
+
+        ``output_image`` : 2D :py:class:`numpy.ndarray`
+            Sum of rotated images.
+
+        ``rotated_images`` : 3D :py:class:`numpy.ndarray`
+            A stack of rotated images. Array size:
+            (N_images, Height, Width)
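+
+        Example (an illustrative sketch, assuming ``bob.bio.base`` is
+        available; a single bright pixel is spread over the rotated stack,
+        one plane per tested angle)::
+
+            >>> import numpy
+            >>> img = numpy.zeros((21, 21))
+            >>> img[10, 15] = 1.
+            >>> algo = MiuraMatchRotationFast()
+            >>> total, stack = algo.sum_of_rotated_images(img, 10, 5, False)
+            >>> stack.shape  # angles tested: -10, -5, 0, +5, +10 degrees
+            (5, 21, 21)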
+        """
+
+        offset = np.array(image.shape)/2
+
+        h, w = image.shape
+
+        image_coords = np.argwhere(image) - offset # centered coordinates of the vein (non-zero) pixels
+
+        if gray_scale_input_flag:
+
+            image_val = image[image>0]
+
+        angles = np.arange(-angle_limit, angle_limit + 1, angle_step) / 180. * np.pi # angles in radians
+
+        rotated_images = np.zeros( (angles.shape[0], image.shape[0], image.shape[1]) )
+
+        for idx, angle in enumerate(angles):
+
+            rot_matrix = np.array([[np.cos(angle), - np.sin(angle)],
+                                   [np.sin(angle),  np.cos(angle)]]) # rotation matrix
+
+            rotated_coords = np.round( np.dot( image_coords, rot_matrix ) ).astype(int) + offset
+
+            rotated_coords[rotated_coords < 0] = 0
+            rotated_coords[:, 0][rotated_coords[:, 0] >= h] = h-1
+            rotated_coords[:, 1][rotated_coords[:, 1] >= w] = w-1
+
+            rotated_coords = rotated_coords.astype(int)
+
+            if gray_scale_input_flag:
+
+                rotated_images[idx, rotated_coords[:,0], rotated_coords[:,1]] = image_val
+
+            else:
+
+                rotated_images[idx, rotated_coords[:,0], rotated_coords[:,1]] = 1
+
+        output_image = np.sum(rotated_images, axis = 0)
+
+        return output_image, rotated_images
+
+
+    #==========================================================================
+    def score(self, model, probe):
+        """Computes the score between the probe and the model.
+
+        **Parameters:**
+
+        ``model`` : 2D :py:class:`numpy.ndarray`
+            Binary image of the veins representing the model.
+
+        ``probe`` : 2D :py:class:`numpy.ndarray`
+            Probing binary image of the veins.
+
+        **Returns:**
+
+        ``score_fused`` : :py:class:`float`
+            Matching score between 0 and 0.5; larger values mean a better match.
+        """
+
+        if probe.dtype != np.float64:
+            probe = probe.astype(np.float64)
+
+        scores = []
+
+        angles = np.arange(-self.angle_limit, self.angle_limit + 1, self.angle_step)
+
+        # iterate over all models for a given individual
+        for enroll in model:
+
+            if enroll.dtype != np.float64:
+                enroll = enroll.astype(np.float64)
+
+            sum_of_rotated_img_enroll, rotated_images_enroll = self.sum_of_rotated_images(enroll, self.angle_limit, self.angle_step,
+                                                                                          self.gray_scale_input_flag)
+
+            h, w = enroll.shape
+
+            crop_rotated_images_enroll = rotated_images_enroll[:, self.ch: h - self.ch, self.cw: w - self.cw]
+
+            crop_probe = self.miura_match(sum_of_rotated_img_enroll, probe, self.ch, self.cw, compute_score_flag = False)
+
+            scores_internal = []
+
+            for crop_binary_image_enroll in crop_rotated_images_enroll:
+
+                scores_internal.append( np.sum(crop_binary_image_enroll * crop_probe) )
+
+            idx_selected = np.argmax(scores_internal) # the index of the rotated enroll image having the best match
+
+            if self.gray_scale_input_flag:
+
+                angle = angles[idx_selected]
+
+                enroll_rotated = tf.rotate(enroll, angle = -angle, preserve_range = True)
+
+                score = self.miura_match(enroll_rotated, probe, self.ch, self.cw, compute_score_flag = True,
+                                     perturbation_matching_flag = False,
+                                     kernel_radius = self.kernel_radius)[0]
+
+            else:
+
+                score = self.miura_match(rotated_images_enroll[ idx_selected ], probe, self.ch, self.cw, compute_score_flag = True,
+                                         perturbation_matching_flag = self.perturbation_matching_flag,
+                                         kernel_radius = self.kernel_radius)[0]
+
+            scores.append( score )
+
+        score_fused = getattr( np, self.score_fusion_method )(scores)
+
+        return score_fused
+
+
diff --git a/bob/bio/vein/algorithm/__init__.py b/bob/bio/vein/algorithm/__init__.py
index 44ed84fe74160932dc298b1e32743be6f2d23694..793f107a27ff326af7608be8a189077a39c25ab7 100644
--- a/bob/bio/vein/algorithm/__init__.py
+++ b/bob/bio/vein/algorithm/__init__.py
@@ -1,4 +1,27 @@
 from .MiuraMatch import MiuraMatch
+from .MiuraMatchRotationFast import MiuraMatchRotationFast
+from .Correlate import Correlate
+from .HammingDistance import HammingDistance
 
 # gets sphinx autodoc done right - don't remove it
+def __appropriate__(*args):
+  """Says object was actually declared here, an not on the import module.
+
+  Parameters:
+
+    *args: An iterable of objects to modify
+
+  Resolves `Sphinx referencing issues
+  <https://github.com/sphinx-doc/sphinx/issues/3048>`
+  """
+
+  for obj in args: obj.__module__ = __name__
+
+__appropriate__(
+    MiuraMatch,
+    MiuraMatchRotationFast,
+    Correlate,
+    HammingDistance,
+    )
+
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/vein/configurations/fv3d.py b/bob/bio/vein/configurations/fv3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea14ecaa30df40aeef1eeeb0a026834e796310b6
--- /dev/null
+++ b/bob/bio/vein/configurations/fv3d.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+"""`3D Fingervein`_ is a database for biometric fingervein recognition
+
+The `3D Fingervein`_ Database for finger vein recognition consists of 13614
+images from 141 subjects collected in various acquisition campaigns.
+
+You can download the raw data of the `3D Fingervein`_ database by following
+the link.
+"""
+
+
+from ..database.fv3d import Database
+
+_fv3d_directory = "[YOUR_FV3D_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+database = Database(
+    original_directory = _fv3d_directory,
+    original_extension = '.png',
+    )
+"""The :py:class:`bob.bio.base.database.BioDatabase` derivative with fv3d
+database settings
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   datafiles. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_FV3D_DIRECTORY]``. You
+must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this value
+to the place where you actually installed the `3D Fingervein`_ Database, as
+explained in the section :ref:`bob.bio.vein.baselines`.
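+
+A minimal sketch of such a file, assuming the usual ``bob.bio.base``
+convention of one ``key = value`` assignment per line (the path is a
+placeholder you must adapt)::
+
+  [YOUR_FV3D_DIRECTORY] = /path/to/your/fv3d/copy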
+"""
+
+protocol = 'central'
+"""The default protocol to use for tests
+
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``verify.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/bio/vein/configurations/gridio4g48.py b/bob/bio/vein/configurations/gridio4g48.py
new file mode 100644
index 0000000000000000000000000000000000000000..082adfde74f316d41bc4c24e6ed096b1527ac5a4
--- /dev/null
+++ b/bob/bio/vein/configurations/gridio4g48.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+
+'''Grid configurations for bob.bio.vein'''
+
+
+import bob.bio.base
+
+grid = bob.bio.base.grid.Grid(
+    number_of_preprocessing_jobs = 48,
+    number_of_extraction_jobs    = 48,
+    number_of_projection_jobs    = 48,
+    number_of_enrollment_jobs    = 48,
+    number_of_scoring_jobs       = 48,
+    training_queue               = '4G-io-big',
+    preprocessing_queue          = '4G-io-big',
+    extraction_queue             = '4G-io-big',
+    projection_queue             = '4G-io-big',
+    enrollment_queue             = '4G-io-big',
+    scoring_queue                = '4G-io-big'
+    )
+'''Defines an SGE grid configuration for running at Idiap
+
+This grid configuration will use 48 slots for each of the stages defined below.
+
+The queue ``4G-io-big`` corresponds to the following settings:
+
+  * ``queue``: ``q1d`` (in this queue you have a maximum of 48 slots according
+    to: https://secure.idiap.ch/intranet/system/computing/ComputationGrid)
+  * ``memfree``: ``4G`` (this is the minimum amount of memory you can take -
+    the lower, the more probable your job will be allocated faster)
+  * ``io_big``: SET (this flag must be set so your job runs downstairs and not
+    on people's workstations)
+
+Notice the queue names do not directly correspond to SGE grid queue names. These
+are names only known to :py:mod:`bob.bio.base.grid` and are translated
+from there to settings which are finally passed to ``gridtk``.
+
+To use this configuration file, just add it to your ``verify.py`` commandline.
+
+For example::
+
+  $ verify.py <other-options> gridio4g48
+
+'''
diff --git a/bob/bio/vein/configurations/maximum_curvature.py b/bob/bio/vein/configurations/maximum_curvature.py
index f92b50f2d9ecef0915b46229d5812a21d1af78c6..3bee726bbcfb34dbce8468f0753971a1dcef3605 100644
--- a/bob/bio/vein/configurations/maximum_curvature.py
+++ b/bob/bio/vein/configurations/maximum_curvature.py
@@ -20,13 +20,20 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-from ..preprocessor import FingerCrop
-preprocessor = FingerCrop()
+from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
+    NoFilter, Preprocessor
+
+preprocessor = Preprocessor(
+    crop=NoCrop(),
+    mask=TomesLeeMask(),
+    normalize=HuangNormalization(),
+    filter=NoFilter(),
+    )
 """Preprocessing using gray-level based finger cropping and no post-processing
 """
 
 from ..extractor import MaximumCurvature
-extractor = MaximumCurvature(sigma = 5)
+extractor = MaximumCurvature()
 """Features are the output of the maximum curvature algorithm, as described on
 [MNM05]_.
 
@@ -36,7 +43,7 @@ Defaults taken from [TV13]_.
 # Notice the values of ch and cw are different than those from the
 # repeated-line tracking baseline.
 from ..algorithm import MiuraMatch
-algorithm = MiuraMatch(ch=80, cw=90)
+algorithm = MiuraMatch()
 """Miura-matching algorithm with specific settings for search displacement
 
 Defaults taken from [TV13]_.
diff --git a/bob/bio/vein/configurations/repeated_line_tracking.py b/bob/bio/vein/configurations/repeated_line_tracking.py
index 46d91d7380dc6eac119829a2c2c7b4f3b33dc75d..55702ef160d0fcccaaf75bf69af6ab50cac7189f 100644
--- a/bob/bio/vein/configurations/repeated_line_tracking.py
+++ b/bob/bio/vein/configurations/repeated_line_tracking.py
@@ -20,28 +20,21 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-from ..preprocessor import FingerCrop
-preprocessor = FingerCrop()
+from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
+    NoFilter, Preprocessor
+
+preprocessor = Preprocessor(
+    crop=NoCrop(),
+    mask=TomesLeeMask(),
+    normalize=HuangNormalization(),
+    filter=NoFilter(),
+    )
 """Preprocessing using gray-level based finger cropping and no post-processing
 """
 
 from ..extractor import RepeatedLineTracking
 
-# Maximum number of iterations
-NUMBER_ITERATIONS = 3000
-
-# Distance between tracking point and cross section of profile
-DISTANCE_R = 1
-
-# Width of profile
-PROFILE_WIDTH = 21
-
-extractor = RepeatedLineTracking(
-    iterations=NUMBER_ITERATIONS,
-    r=DISTANCE_R,
-    profile_w=PROFILE_WIDTH,
-    seed=0, #Sets numpy.random.seed() to this value
-    )
+extractor = RepeatedLineTracking()
 """Features are the output of repeated-line tracking, as described on [MNM04]_.
 
 Defaults taken from [TV13]_.
diff --git a/bob/bio/vein/configurations/utfvp.py b/bob/bio/vein/configurations/utfvp.py
index 4481a5b10710063a7da93ad790adb176d86fdf9c..216c485a545c94ee6a43eeef552da7141538cff6 100644
--- a/bob/bio/vein/configurations/utfvp.py
+++ b/bob/bio/vein/configurations/utfvp.py
@@ -19,11 +19,11 @@ You can download the raw data of the `UTFVP`_ database by following the link.
 
 from ..database.utfvp import Database
 
-utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
+_utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
 database = Database(
-    original_directory = utfvp_directory,
+    original_directory = _utfvp_directory,
     original_extension = '.png',
     )
 """The :py:class:`bob.bio.base.database.BioDatabase` derivative with UTFVP settings
diff --git a/bob/bio/vein/configurations/verafinger.py b/bob/bio/vein/configurations/verafinger.py
index 3acfe0cb5c52e802f6fc0d18cda8ecd4b2304494..c7e54ffc8561d3ce9c7052db7f4b0401ab89cb87 100644
--- a/bob/bio/vein/configurations/verafinger.py
+++ b/bob/bio/vein/configurations/verafinger.py
@@ -10,18 +10,16 @@ Occidentale in Sion, in Switzerland. The reference citation is [TVM14]_.
 
 You can download the raw data of the `VERA Fingervein`_ database by following
 the link.
-
-.. include:: links.rst
 """
 
 
 from ..database.verafinger import Database
 
-verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
+_verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
 database = Database(
-    original_directory = verafinger_directory,
+    original_directory = _verafinger_directory,
     original_extension = '.png',
     )
 """The :py:class:`bob.bio.base.database.BioDatabase` derivative with Verafinger
diff --git a/bob/bio/vein/configurations/wide_line_detector.py b/bob/bio/vein/configurations/wide_line_detector.py
index e02509361bf2aba91521576ad4a657d57d0e0bc5..8b2662c08f0ab4ed3e9b1f42b92dcdaddd9e167c 100644
--- a/bob/bio/vein/configurations/wide_line_detector.py
+++ b/bob/bio/vein/configurations/wide_line_detector.py
@@ -20,27 +20,21 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-from ..preprocessor import FingerCrop
-preprocessor = FingerCrop()
+from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
+    NoFilter, Preprocessor
+
+preprocessor = Preprocessor(
+    crop=NoCrop(),
+    mask=TomesLeeMask(),
+    normalize=HuangNormalization(),
+    filter=NoFilter(),
+    )
 """Preprocessing using gray-level based finger cropping and no post-processing
 """
 
 from ..extractor import WideLineDetector
 
-# Radius of the circular neighbourhood region
-RADIUS_NEIGHBOURHOOD_REGION = 5
-NEIGHBOURHOOD_THRESHOLD = 1
-
-#Sum of neigbourhood threshold
-SUM_NEIGHBOURHOOD = 41
-RESCALE = True
-
-extractor = WideLineDetector(
-    radius=RADIUS_NEIGHBOURHOOD_REGION,
-    threshold=NEIGHBOURHOOD_THRESHOLD,
-    g=SUM_NEIGHBOURHOOD,
-    rescale=RESCALE
-    )
+extractor = WideLineDetector()
 """Features are the output of the maximum curvature algorithm, as described on
 [HDLTL10]_.
 
diff --git a/bob/bio/vein/database/__init__.py b/bob/bio/vein/database/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b930169438cf7a8cf242919a536dd7529d5bf52d 100644
--- a/bob/bio/vein/database/__init__.py
+++ b/bob/bio/vein/database/__init__.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+'''Database definitions for Vein Recognition'''
+
+
+import numpy
+
+
+class AnnotatedArray(numpy.ndarray):
+  """Defines a numpy array subclass that can carry its own metadata
+
+  Copied from: https://docs.scipy.org/doc/numpy-1.12.0/user/basics.subclassing.html#slightly-more-realistic-example-attribute-added-to-existing-array
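+
+  Example (a small sketch; the ``roi`` metadata key is just an illustration)::
+
+     >>> import numpy
+     >>> a = AnnotatedArray(numpy.zeros((4, 4)), metadata=dict(roi=[(0, 0)]))
+     >>> b = a[:2]  # views keep the metadata, via __array_finalize__
+     >>> b.metadata['roi']
+     [(0, 0)]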
+  """
+
+  def __new__(cls, input_array, metadata=None):
+      obj = numpy.asarray(input_array).view(cls)
+      obj.metadata = metadata if metadata is not None else dict()
+      return obj
+
+  def __array_finalize__(self, obj):
+      if obj is None: return
+      self.metadata = getattr(obj, 'metadata', dict())
diff --git a/bob/bio/vein/database/fv3d.py b/bob/bio/vein/database/fv3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..04ff2a415ad17e242ba428c362eac3ba885e3979
--- /dev/null
+++ b/bob/bio/vein/database/fv3d.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Fri 13 Jan 2017 14:46:06 CET
+
+
+import numpy
+
+from bob.bio.base.database import BioFile, BioDatabase
+
+from . import AnnotatedArray
+from ..preprocessor.utils import poly_to_mask
+
+
+class File(BioFile):
+    """
+    Implements extra properties of vein files for the 3D Fingervein database
+
+
+    Parameters:
+
+      f (object): Low-level file (or sample) object that is kept inside
+
+    """
+
+    def __init__(self, f):
+
+        super(File, self).__init__(client_id=f.finger.unique_name, path=f.path,
+            file_id=f.id)
+        self.__f = f
+
+
+    def load(self, *args, **kwargs):
+        """(Overrides base method) Loads both image and mask"""
+
+        image = super(File, self).load(*args, **kwargs)
+        image = numpy.rot90(image, -1)
+
+        if not self.__f.has_roi():
+          return image
+
+        else:
+          roi = self.__f.roi()
+
+          # calculates the 90 degrees anti-clockwise rotated RoI points
+          w, h = image.shape
+          roi = [(x,h-y) for (y,x) in roi]
+
+        return AnnotatedArray(image, metadata=dict(roi=roi))
+
+
+class Database(BioDatabase):
+    """
+    Implements verification API for querying the 3D Fingervein database.
+    """
+
+    def __init__(self, **kwargs):
+
+        super(Database, self).__init__(name='fv3d', **kwargs)
+        from bob.db.fv3d.query import Database as LowLevelDatabase
+        self.__db = LowLevelDatabase()
+
+        self.low_level_group_names = ('train', 'dev', 'eval')
+        self.high_level_group_names = ('world', 'dev', 'eval')
+
+
+    def groups(self):
+
+        return self.convert_names_to_highlevel(self.__db.groups(),
+            self.low_level_group_names, self.high_level_group_names)
+
+
+    def client_id_from_model_id(self, model_id, group='dev'):
+        """Required as ``model_id != client_id`` on this database"""
+
+        return self.__db.finger_name_from_model_id(model_id)
+
+
+    def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
+
+        groups = self.convert_names_to_lowlevel(groups,
+            self.low_level_group_names, self.high_level_group_names)
+        return self.__db.model_ids(groups=groups, protocol=protocol)
+
+
+    def objects(self, groups=None, protocol=None, purposes=None,
+        model_ids=None, **kwargs):
+
+        groups = self.convert_names_to_lowlevel(groups,
+            self.low_level_group_names, self.high_level_group_names)
+        retval = self.__db.objects(groups=groups, protocol=protocol,
+            purposes=purposes, model_ids=model_ids, **kwargs)
+
+        return [File(f) for f in retval]
+
+
+    def annotations(self, file):
+        return None
diff --git a/bob/bio/vein/database/utfvp.py b/bob/bio/vein/database/utfvp.py
index e9099b89d70d4f709283104aaecf9f1f18848cec..c699fbd23d56585916fa5d6780285aa65b0bacda 100644
--- a/bob/bio/vein/database/utfvp.py
+++ b/bob/bio/vein/database/utfvp.py
@@ -42,9 +42,14 @@ class Database(BioDatabase):
                 model_ids=None, **kwargs):
 
         retval = self._db.objects(groups=groups, protocol=protocol,
-                                  purposes=purposes, model_ids=model_ids, **kwargs)
+            purposes=purposes, model_ids=model_ids, **kwargs)
 
         return [File(f) for f in retval]
 
     def annotations(self, file):
         return None
+
+    def client_id_from_model_id(self, model_id, group='dev'):
+        """Required as ``model_id != client_id`` on this database"""
+
+        return self._db.get_client_id_from_model_id(model_id)
diff --git a/bob/bio/vein/database/verafinger.py b/bob/bio/vein/database/verafinger.py
index d8cea83446963e276c4f321abe28b1775359d518..a99f1fe2a34c2bfc35a904afd88f495de93885f2 100644
--- a/bob/bio/vein/database/verafinger.py
+++ b/bob/bio/vein/database/verafinger.py
@@ -5,6 +5,9 @@
 
 from bob.bio.base.database import BioFile, BioDatabase
 
+from . import AnnotatedArray
+from ..preprocessor.utils import poly_to_mask
+
 
 class File(BioFile):
     """
@@ -20,23 +23,16 @@ class File(BioFile):
     def __init__(self, f):
 
         super(File, self).__init__(client_id=f.unique_finger_name, path=f.path,
-                                   file_id=f.id)
+            file_id=f.id)
         self.__f = f
 
-    def mask(self):
-        """Returns the binary mask from the ROI annotations available"""
-
-        from ..preprocessor.utils import poly_to_mask
-
-        # The size of images in this database is (250, 665) pixels (h, w)
-        return poly_to_mask((250, 665), self.__f.roi())
 
     def load(self, *args, **kwargs):
         """(Overrides base method) Loads both image and mask"""
 
         image = super(File, self).load(*args, **kwargs)
-
-        return image, self.mask()
+        roi = self.__f.roi()
+        return AnnotatedArray(image, metadata=dict(roi=roi))
 
 
 class Database(BioDatabase):
@@ -56,28 +52,32 @@ class Database(BioDatabase):
     def groups(self):
 
         return self.convert_names_to_highlevel(self._db.groups(),
-                                               self.low_level_group_names, self.high_level_group_names)
+            self.low_level_group_names, self.high_level_group_names)
 
     def client_id_from_model_id(self, model_id, group='dev'):
         """Required as ``model_id != client_id`` on this database"""
 
         return self._db.finger_name_from_model_id(model_id)
 
+
     def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
 
         groups = self.convert_names_to_lowlevel(groups,
-                                                self.low_level_group_names, self.high_level_group_names)
+            self.low_level_group_names, self.high_level_group_names)
         return self._db.model_ids(groups=groups, protocol=protocol)
 
+
     def objects(self, groups=None, protocol=None, purposes=None,
                 model_ids=None, **kwargs):
 
         groups = self.convert_names_to_lowlevel(groups,
-                                                self.low_level_group_names, self.high_level_group_names)
+            self.low_level_group_names, self.high_level_group_names)
         retval = self._db.objects(groups=groups, protocol=protocol,
-                                  purposes=purposes, model_ids=model_ids, **kwargs)
+                                  purposes=purposes, model_ids=model_ids,
+                                  **kwargs)
 
         return [File(f) for f in retval]
 
+
     def annotations(self, file):
-            return None
+        return None
diff --git a/bob/bio/vein/extractor/MaximumCurvature.py b/bob/bio/vein/extractor/MaximumCurvature.py
index 0d891cfd1fe43e984d48707b901f114dcaa36141..c3d98809fd0c8806181eac8f4cd34be07108a471 100644
--- a/bob/bio/vein/extractor/MaximumCurvature.py
+++ b/bob/bio/vein/extractor/MaximumCurvature.py
@@ -3,14 +3,10 @@
 
 import math
 import numpy
-
-import bob.core
+import scipy.ndimage
 import bob.io.base
-
 from bob.bio.base.extractor import Extractor
 
-from .. import utils
-
 
 class MaximumCurvature (Extractor):
   """
@@ -18,12 +14,15 @@ class MaximumCurvature (Extractor):
 
   Based on N. Miura, A. Nagasaka, and T. Miyatake, Extraction of Finger-Vein
   Pattern Using Maximum Curvature Points in Image Profiles. Proceedings on IAPR
-  conference on machine vision applications, 9 (2005), pp. 347--350
+  conference on machine vision applications, 9 (2005), pp. 347--350.
+
 
-  **Parameters:**
+  Parameters:
+
+    sigma (:py:class:`int`, optional): standard deviation for the gaussian
+      smoothing kernel used to denoise the input image. The width of the
+      gaussian kernel will be set automatically to 4x this value (in pixels).
 
-  sigma : :py:class:`int`
-      Optional: Sigma used for determining derivatives.
   """
 
 
@@ -32,227 +31,474 @@ class MaximumCurvature (Extractor):
     self.sigma = sigma
 
 
-  def maximum_curvature(self, image, mask):
-    """Computes and returns the Maximum Curvature features for the given input
-    fingervein image"""
+  def detect_valleys(self, image, mask):
+    """Detects valleys on the image respecting the mask
+
+    This step corresponds to Step 1-1 in the original paper. The objective is,
+    for all 4 cross-sections (z) of the image (horizontal, vertical, 45 and -45
+    diagonals), to compute the following proposed valley detector as defined in
+    Equation 1, page 348:
+
+    .. math::
+
+       \kappa(z) = \\frac{d^2P_f(z)/dz^2}{(1 + (dP_f(z)/dz)^2)^\\frac{3}{2}}
+
+
+    We start the algorithm by smoothing the image with a 2-dimensional gaussian
+    filter. The equation that defines the kernel for the filter is:
+
+    .. math::
+
+       \mathcal{N}(x,y)=\\frac{1}{2\pi\sigma^2}e^\\frac{-(x^2+y^2)}{2\sigma^2}
+
+
+    This is done to avoid noise from the raw data (from the sensor). The
+    maximum curvature method then requires we compute the first and second
+    derivative of the image for all cross-sections, as per the equation above.
+
+    We instead take the following equivalent approach:
+
+    1. construct a gaussian filter
+    2. take the first (dh/dx) and second (d^2h/dx^2) derivatives of the filter
+    3. calculate the first and second derivatives of the smoothed signal by
+       convolving the image with the results from 2. This is done for all
+       directions we're interested in: horizontal, vertical and 2 diagonals,
+       and works because the derivative of a convolved signal equals the
+       signal convolved with the derivative of the kernel (see the identity
+       below)
+
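+    In symbols, the identity used in items 2 and 3 above is that convolution
+    commutes with differentiation:
+
+    .. math::
+
+       \\frac{d}{dz}\\left(P_f * \mathcal{N}\\right) = P_f * \\frac{d\mathcal{N}}{dz}
+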
+    .. note::
+
+       Item 3 above is only possible thanks to the steerable filter property of
+       the gaussian kernel. See "The Design and Use of Steerable Filters" from
+       Freeman and Adelson, IEEE Transactions on Pattern Analysis and Machine
+       Intelligence, Vol. 13, No. 9, September 1991.
+
+
+    Parameters:
+
+      image (numpy.ndarray): an array of 64-bit floats containing the input
+        image
+      mask (numpy.ndarray): an array, of the same size as ``image``, containing
+        a mask (booleans) indicating where the finger is on ``image``.
+
+
+    Returns:
+
+      numpy.ndarray: a 3-dimensional array of 64-bit floats containing
+      $\kappa$ for all considered directions. $\kappa$ has the same shape as
+      ``image``, except for the 3rd dimension, which provides planes for the
+      cross-section valley detections for each of the contemplated directions,
+      in this order: horizontal, vertical, +45 degrees, -45 degrees.
+
+    """
+
+    # 1. constructs the 2D gaussian filter "h" given the window size,
+    # extrapolated from the "sigma" parameter (4x)
+    # N.B.: This is a text-book gaussian filter definition
+    winsize = numpy.ceil(4*self.sigma)  # enough space for the filter
+    window = numpy.arange(-winsize, winsize+1)
+    X, Y = numpy.meshgrid(window, window)
+    G = 1.0 / (2*math.pi*self.sigma**2)
+    G *= numpy.exp(-(X**2 + Y**2) / (2*self.sigma**2))
+
+    # 2. calculates first and second derivatives of "G" with respect to "X"
+    # (0 degrees) and "Y" (90 degrees), plus the mixed derivative ("hxy")
+    # needed to steer the filters onto the 45-degree diagonals
+    G1_0 = (-X/(self.sigma**2))*G
+    G2_0 = ((X**2 - self.sigma**2)/(self.sigma**4))*G
+    G1_90 = G1_0.T
+    G2_90 = G2_0.T
+    hxy = ((X*Y)/(self.sigma**4))*G
+
+    # 3. calculates derivatives w.r.t. to all directions of interest
+    #    stores results in the variable "k". The entries (last dimension) in k
+    #    correspond to curvature detectors in the following directions:
+    #
+    #    [0] horizontal
+    #    [1] vertical
+    #    [2] diagonal \ (45 degrees rotation)
+    #    [3] diagonal / (-45 degrees rotation)
+    image_g1_0  = scipy.ndimage.convolve(image, G1_0, mode='nearest')
+    image_g2_0  = scipy.ndimage.convolve(image, G2_0, mode='nearest')
+    image_g1_90 = scipy.ndimage.convolve(image, G1_90, mode='nearest')
+    image_g2_90 = scipy.ndimage.convolve(image, G2_90, mode='nearest')
+    fxy = scipy.ndimage.convolve(image, hxy, mode='nearest')
+
+    # support calculation for diagonals, given the gaussian kernel is
+    # steerable. To calculate the derivatives for the "\" diagonal, we first
+    # **would** have to rotate the image 45 degrees counter-clockwise (so the
+    # diagonal lies on the horizontal axis). Using the steerable property, we
+    # can evaluate the first derivative like this:
+    #
+    # image_g1_45 = cos(45)*image_g1_0 + sin(45)*image_g1_90
+    #             = sqrt(2)/2*image_g1_0 + sqrt(2)/2*image_g1_90
+    #
+    # to calculate the first derivative for the "/" diagonal, we first
+    # **would** have to rotate the image -45 degrees "counter"-clockwise.
+    # Therefore, we can calculate it like this:
+    #
+    # image_g1_m45 = cos(-45)*image_g1_0 + sin(-45)*image_g1_90
+    #              = sqrt(2)/2*image_g1_0 - sqrt(2)/2*image_g1_90
+    #
+
+    image_g1_45 = 0.5*numpy.sqrt(2)*(image_g1_0 + image_g1_90)
+    image_g1_m45  = 0.5*numpy.sqrt(2)*(image_g1_0 - image_g1_90)
+
+    # NOTE: You can't really get image_g2_45 and image_g2_m45 from the theory
+    # of steerable filters. In contact with B.Ton, he suggested the following
+    # material, where that is explained: Chapter 5.2.3 of van der Heijden, F.
+    # (1994) Image based measurement systems: object recognition and parameter
+    # estimation. John Wiley & Sons Ltd, Chichester. ISBN 978-0-471-95062-2
+
+    # This also shows the same result:
+    # http://www.mif.vu.lt/atpazinimas/dip/FIP/fip-Derivati.html (look for
+    # SDGD)
+
+    # He also suggested to look at slide 75 of the following presentation
+    # indicating it is self-explanatory: http://slideplayer.com/slide/5084635/
+
+    image_g2_45 = 0.5*image_g2_0 + fxy + 0.5*image_g2_90
+    image_g2_m45  = 0.5*image_g2_0 - fxy + 0.5*image_g2_90
+
+    # ######################################################################
+    # [Step 1-1] Calculation of curvature profiles
+    # ######################################################################
+
+    # Peak detection (k or kappa) calculation as per equation (1) page 348 on
+    # Miura's paper
+    finger_mask = mask.astype('float64')
+
+    return numpy.dstack([
+      (image_g2_0   / ((1 + image_g1_0**2)**(1.5))  ) * finger_mask,
+      (image_g2_90  / ((1 + image_g1_90**2)**(1.5)) ) * finger_mask,
+      (image_g2_45  / ((1 + image_g1_45**2)**(1.5)) ) * finger_mask,
+      (image_g2_m45 / ((1 + image_g1_m45**2)**(1.5))) * finger_mask,
+      ])
+
+
+  def eval_vein_probabilities(self, k):
+    '''Evaluates joint vein centre probabilities from cross-sections
+
+    This function will take $\kappa$ and will calculate the vein centre
+    probabilities taking into consideration valley widths and depths. It
+    aggregates the following steps from the paper:
+
+    * [Step 1-2] Detection of the centres of veins
+    * [Step 1-3] Assignment of scores to the centre positions
+    * [Step 1-4] Calculation of all the profiles
+
+    Once the arrays of curvatures (concavities) are calculated, here is how
+    detection works: the code scans the image in a precise direction (vertical,
+    horizontal, diagonal, etc). It tries to find a concavity in that direction
+    and measures its width (see Wr on Figure 3 of the original paper). It then
+    identifies the centre of the concavity and assigns a value to it, which
+    depends on its width (Wr) and maximum depth (where the peak of darkness
+    occurs) in such a concavity. This value is accumulated in a variable (Vt),
+    which is re-used for all directions. Vt represents the vein probabilities
+    from the paper.
+
 
-    finger_mask = numpy.zeros(mask.shape)
-    finger_mask[mask == True] = 1
+    Parameters:
 
-    winsize = numpy.ceil(4*self.sigma)
+      k (numpy.ndarray): a 3-dimensional array of 64-bit floats containing $\kappa$
+        for all considered directions. $\kappa$ has the same shape as
+        ``image``, except for the 3rd. dimension, which provides planes for the
+        cross-section valley detections for each of the contemplated
+        directions, in this order: horizontal, vertical, +45 degrees, -45
+        degrees.
 
-    x = numpy.arange(-winsize, winsize+1)
-    y = numpy.arange(-winsize, winsize+1)
-    X, Y = numpy.meshgrid(x, y)
 
-    h = (1/(2*math.pi*self.sigma**2))*numpy.exp(-(X**2 + Y**2)/(2*self.sigma**2))
-    hx = (-X/(self.sigma**2))*h
-    hxx = ((X**2 - self.sigma**2)/(self.sigma**4))*h
-    hy = hx.T
-    hyy = hxx.T
-    hxy = ((X*Y)/(self.sigma**4))*h
+    Returns:
 
-    # Do the actual filtering
+      numpy.ndarray: The accumulated vein centre probabilities ``V``. This is
+      a 2D array of 64-bit floats with the same dimensions as one plane of the
+      input array ``k``. It corresponds to the variable ``V`` from the paper,
+      with contributions from all four directions already summed in.
 
-    fx = utils.imfilter(image, hx)
-    fxx = utils.imfilter(image, hxx)
-    fy = utils.imfilter(image, hy)
-    fyy = utils.imfilter(image, hyy)
-    fxy = utils.imfilter(image, hxy)
+    '''
 
-    f1  = 0.5*numpy.sqrt(2)*(fx + fy)   # \  #
-    f2  = 0.5*numpy.sqrt(2)*(fx - fy)   # /  #
-    f11 = 0.5*fxx + fxy + 0.5*fyy       # \\ #
-    f22 = 0.5*fxx - fxy + 0.5*fyy       # // #
+    V = numpy.zeros(k.shape[:2], dtype='float64')
 
-    img_h, img_w = image.shape  #Image height and width
+    def _prob_1d(a):
+      '''Finds "vein probabilities" in a 1-D signal
 
-    # Calculate curvatures
-    k = numpy.zeros((img_h, img_w, 4))
-    k[:,:,0] = (fxx/((1 + fx**2)**(3/2)))*finger_mask  # hor #
-    k[:,:,1] = (fyy/((1 + fy**2)**(3/2)))*finger_mask  # ver #
-    k[:,:,2] = (f11/((1 + f1**2)**(3/2)))*finger_mask  # \   #
-    k[:,:,3] = (f22/((1 + f2**2)**(3/2)))*finger_mask  # /   #
+      This function efficiently counts the width and height of concavities in
+      the cross-section (1-D) curvature signal ``a``.
+
+      It works like this:
+
+      1. We create a 1-shift difference between the thresholded signal and
+         itself
+      2. We compensate for starting and ending regions
+      3. For each sequence of start/ends, we compute the maximum in the
+         original signal
+
+      Example (mixed with pseudo-code):
+
+         a = 0 1 2 3 2 1 0 -1 0 0 1 2 5 2 2 2 1
+         b = a > 0 (as type int)
+         b = 0 1 1 1 1 1 0  0 0 0 1 1 1 1 1 1 1
+
+         0 1 1 1 1 1  0 0 0 0 1 1 1 1 1 1 1
+           0 1 1 1 1  1 0 0 0 0 1 1 1 1 1 1 1 (-)
+       -------------------------------------------
+         X 1 0 0 0 0 -1 0 0 0 1 0 0 0 0 0 0 X (length is smaller than orig.)
+
+         starts = numpy.where(diff > 0)
+         ends   = numpy.where(diff < 0)
+
+         -> now the number of starts and ends should match, otherwise, we must
+         compensate
+
+            -> case 1: b starts with 1: add one start in begin of "starts"
+            -> case 2: b ends with 1: add one end in the end of "ends"
+
+         -> iterate over the sequence of starts/ends and find maximums
+
+
+      Parameters:
+
+        a (numpy.ndarray): 1D signal with curvature to explore
+
+
+      Returns:
+
+        numpy.ndarray: 1D container with the vein centre probabilities
+
+      '''
+
+      b = (a > 0).astype(int)
+      diff = b[1:] - b[:-1]
+      starts = numpy.argwhere(diff > 0)
+      starts += 1  # compensates for the shifted difference
+      ends = numpy.argwhere(diff < 0)
+      ends += 1  # compensates for the shifted difference
+      if b[0]: starts = numpy.insert(starts, 0, 0)
+      if b[-1]: ends = numpy.append(ends, len(a))
+
+      z = numpy.zeros_like(a)
+
+      if starts.size == 0 and ends.size == 0: return z
+
+      for start, end in zip(starts, ends):
+        maximum = numpy.argmax(a[int(start):int(end)])
+        z[start+maximum] = a[start+maximum] * (end-start)
+
+      return z
 
-    # Scores
-    Vt = numpy.zeros(image.shape)
-    Wr = 0
 
     # Horizontal direction
-    bla = k[:,:,0] > 0
-    for y in range(0,img_h):
-        for x in range(0,img_w):
-            if (bla[y,x]):
-                Wr = Wr + 1
-            if ( Wr > 0 and (x == (img_w-1) or not bla[y,x]) ):
-                if (x == (img_w-1)):
-                    # Reached edge of image
-                    pos_end = x
-                else:
-                    pos_end = x - 1
-
-                pos_start = pos_end - Wr + 1 # Start pos of concave
-                if (pos_start == pos_end):
-                    I=numpy.argmax(k[y,pos_start,0])
-                else:
-                    I=numpy.argmax(k[y,pos_start:pos_end+1,0])
-
-                pos_max = pos_start + I
-                Scr = k[y,pos_max,0]*Wr
-                Vt[y,pos_max] = Vt[y,pos_max] + Scr
-                Wr = 0
+    for index in range(k.shape[0]):
+      V[index,:] += _prob_1d(k[index,:,0])
+
+    # Vertical direction
+    for index in range(k.shape[1]):
+      V[:,index] += _prob_1d(k[:,index,1])
+
+    # Direction: 45 degrees (\)
+    curv = k[:,:,2]
+    i,j = numpy.indices(curv.shape)
+    for index in range(-curv.shape[0]+1, curv.shape[1]):
+      V[i==(j-index)] += _prob_1d(curv.diagonal(index))
+
+    # Direction: -45 degrees (/)
+    # NOTE: due to the way the access to the diagonals are implemented, in this
+    # loop, we operate bottom-up. To match this behaviour, we also address V
+    # through Vud.
+    curv = numpy.flipud(k[:,:,3]) #required so we get "/" diagonals correctly
+    Vud = numpy.flipud(V) #match above inversion
+    for index in reversed(range(curv.shape[1]-1, -curv.shape[0], -1)):
+      Vud[i==(j-index)] += _prob_1d(curv.diagonal(index))
+
+    return V
+
+
+  def connect_centres(self, V):
+    """Connects vein centres by filtering vein probabilities ``V``
+
+    This function does the equivalent of Step 2 / Equation 4 at Miura's paper.
+
+    The operation is applied on a row from the ``V`` matrix, which may be
+    acquired horizontally, vertically or on a diagonal direction. The pixel
+    value is then reset in the center of a windowing operation (width = 5) with
+    the following value:
+
+      .. math::
 
+         b[w] = min(max(a[w+1], a[w+2]), max(a[w-1], a[w-2]))
+
+
+    Parameters:
+
+      V (numpy.ndarray): The accumulated vein centre probabilities ``V``. This
+        is a 2D array with 64-bit floats and is defined by Equation (3) on the
+        paper.
+
+
+    Returns:
+
+      numpy.ndarray: A 3-dimensional 64-bit array ``Cd`` containing the result
+      of the filtering operation for each of the directions. ``Cd`` has the
+      dimensions of $\kappa$ and $V_i$. Each of the planes correspond to the
+      horizontal, vertical, +45 and -45 directions.
+
+    """
+
+    def _connect_1d(a):
+      '''Connects centres in the given vector
+
+      The strategy we use to vectorize this is to shift ``a`` twice to the
+      left and twice to the right and apply vectorized min/max operations to
+      compute the above.
+
+
+      Parameters:
+
+        a (numpy.ndarray): Input 1D array which will be window scanned
+
+
+      Returns:
+
+        numpy.ndarray: Output 1D array with the filtered (connected) values.
+        Notice that, given the windowing operation, the returned array is 4
+        entries shorter than the input array.
+
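+      Example (illustrative only, since this helper is local to
+      ``connect_centres``; values chosen by hand)::
+
+         a = [0, 1, 2, 1, 0, 3]
+         max(a[3:-1], a[4:])    -> [1, 3]   (elementwise)
+         max(a[1:-3], a[:-4])   -> [1, 2]   (elementwise)
+         min of the two         -> [1, 2]   (4 entries shorter than a)
+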
+      '''
+
+      return numpy.amin([numpy.amax([a[3:-1], a[4:]], axis=0),
+        numpy.amax([a[1:-3], a[:-4]], axis=0)], axis=0)
+
+
+    Cd = numpy.zeros(V.shape + (4,), dtype='float64')
+
+    # Horizontal direction
+    for index in range(V.shape[0]):
+      Cd[index, 2:-2, 0] = _connect_1d(V[index,:])
 
     # Vertical direction
-    bla = k[:,:,1] > 0
-    for x in range(0,img_w):
-        for y in range(0,img_h):
-            if (bla[y,x]):
-                Wr = Wr + 1
-            if ( Wr > 0 and (y == (img_h-1) or not bla[y,x]) ):
-                if (y == (img_h-1)):
-                    # Reached edge of image
-                    pos_end = y
-                else:
-                    pos_end = y - 1
-
-                pos_start = pos_end - Wr + 1 # Start pos of concave
-                if (pos_start == pos_end):
-                    I=numpy.argmax(k[pos_start,x,1])
-                else:
-                    I=numpy.argmax(k[pos_start:pos_end+1,x,1])
-
-                pos_max = pos_start + I
-                Scr = k[pos_max,x,1]*Wr
-
-                Vt[pos_max,x] = Vt[pos_max,x] + Scr
-                Wr = 0
-
-    # Direction: \ #
-    bla = k[:,:,2] > 0
-    for start in range(0,img_w+img_h-1):
-        # Initial values
-        if (start <= img_w-1):
-            x = start
-            y = 0
-        else:
-            x = 0
-            y = start - img_w + 1
-        done = False
-
-        while (not done):
-            if(bla[y,x]):
-                Wr = Wr + 1
-
-            if ( Wr > 0 and (y == img_h-1 or x == img_w-1 or not bla[y,x]) ):
-                if (y == img_h-1 or x == img_w-1):
-                    # Reached edge of image
-                    pos_x_end = x
-                    pos_y_end = y
-                else:
-                    pos_x_end = x - 1
-                    pos_y_end = y - 1
-
-                pos_x_start = pos_x_end - Wr + 1
-                pos_y_start = pos_y_end - Wr + 1
-
-                if (pos_y_start == pos_y_end and pos_x_start == pos_x_end):
-                    d = k[pos_y_start, pos_x_start, 2]
-                elif (pos_y_start == pos_y_end):
-                    d = numpy.diag(k[pos_y_start, pos_x_start:pos_x_end+1, 2])
-                elif (pos_x_start == pos_x_end):
-                    d = numpy.diag(k[pos_y_start:pos_y_end+1, pos_x_start, 2])
-                else:
-                    d = numpy.diag(k[pos_y_start:pos_y_end+1, pos_x_start:pos_x_end+1, 2])
-
-                I = numpy.argmax(d)
-
-                pos_x_max = pos_x_start + I
-                pos_y_max = pos_y_start + I
-
-                Scr = k[pos_y_max,pos_x_max,2]*Wr
-
-                Vt[pos_y_max,pos_x_max] = Vt[pos_y_max,pos_x_max] + Scr
-                Wr = 0
-
-            if((x == img_w-1) or (y == img_h-1)):
-                done = True
-            else:
-                x = x + 1
-                y = y + 1
-
-    # Direction: /
-    bla = k[:,:,3] > 0
-    for start in range(0,img_w+img_h-1):
-        # Initial values
-        if (start <= (img_w-1)):
-            x = start
-            y = img_h-1
-        else:
-            x = 0
-            y = img_w+img_h-start-1
-        done = False
-
-        while (not done):
-            if(bla[y,x]):
-                Wr = Wr + 1
-            if ( Wr > 0 and (y == 0 or x == img_w-1 or not bla[y,x]) ):
-                if (y == 0 or x == img_w-1):
-                    # Reached edge of image
-                    pos_x_end = x
-                    pos_y_end = y
-                else:
-                    pos_x_end = x - 1
-                    pos_y_end = y + 1
-
-                pos_x_start = pos_x_end - Wr + 1
-                pos_y_start = pos_y_end + Wr - 1
-
-                if (pos_y_start == pos_y_end and pos_x_start == pos_x_end):
-                    d = k[pos_y_end, pos_x_start, 3]
-                elif (pos_y_start == pos_y_end):
-                    d = numpy.diag(numpy.flipud(k[pos_y_end, pos_x_start:pos_x_end+1, 3]))
-                elif (pos_x_start == pos_x_end):
-                    d = numpy.diag(numpy.flipud(k[pos_y_end:pos_y_start+1, pos_x_start, 3]))
-                else:
-                    d = numpy.diag(numpy.flipud(k[pos_y_end:pos_y_start+1, pos_x_start:pos_x_end+1, 3]))
-
-                I = numpy.argmax(d)
-                pos_x_max = pos_x_start + I
-                pos_y_max = pos_y_start - I
-                Scr = k[pos_y_max,pos_x_max,3]*Wr
-                Vt[pos_y_max,pos_x_max] = Vt[pos_y_max,pos_x_max] + Scr
-                Wr = 0
-
-            if((x == img_w-1) or (y == 0)):
-                done = True
-            else:
-                x = x + 1
-                y = y - 1
-
-    ## Connection of vein centres
-    Cd = numpy.zeros((img_h, img_w, 4))
-    for x in range(2,img_w-3):
-        for y in range(2,img_h-3):
-            Cd[y,x,0] = min(numpy.amax(Vt[y,x+1:x+3]), numpy.amax(Vt[y,x-2:x]))   # Hor  #
-            Cd[y,x,1] = min(numpy.amax(Vt[y+1:y+3,x]), numpy.amax(Vt[y-2:y,x]))   # Vert #
-            Cd[y,x,2] = min(numpy.amax(Vt[y-2:y,x-2:x]), numpy.amax(Vt[y+1:y+3,x+1:x+3])) # \  #
-            Cd[y,x,3] = min(numpy.amax(Vt[y+1:y+3,x-2:x]), numpy.amax(Vt[y-2:y,x+1:x+3])) # /  #
-
-    #Veins
-    img_veins = numpy.amax(Cd,axis=2)
-
-    # Binarise the vein image
-    md = numpy.median(img_veins[img_veins>0])
-    img_veins_bin = img_veins > md
-
-    return img_veins_bin.astype(numpy.float64)
+    for index in range(V.shape[1]):
+      Cd[2:-2, index, 1] = _connect_1d(V[:,index])
+
+    # Direction: 45 degrees (\)
+    i,j = numpy.indices(V.shape)
+    border = numpy.zeros((2,), dtype='float64')
+    for index in range(-V.shape[0]+5, V.shape[1]-4):
+      # NOTE: hstack **absolutely** necessary here as double indexing after
+      # array indexing is **not** possible with numpy (it returns a copy)
+      Cd[:,:,2][i==(j-index)] = numpy.hstack([border,
+        _connect_1d(V.diagonal(index)), border])
+
+    # Direction: -45 degrees (/)
+    Vud = numpy.flipud(V)
+    Cdud = numpy.flipud(Cd[:,:,3])
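+    # NOTE: numpy.flipud() returns a view, so writing into Cdud below also
+    # updates Cd[:,:,3] in place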
+    for index in reversed(range(V.shape[1]-5, -V.shape[0]+4, -1)):
+      # NOTE: hstack **absolutely** necessary here as double indexing after
+      # array indexing is **not** possible with numpy (it returns a copy)
+      Cdud[:,:][i==(j-index)] = numpy.hstack([border,
+        _connect_1d(Vud.diagonal(index)), border])
+
+    return Cd
+
+
+  def binarise(self, G):
+    """Binarise vein images using a threshold assuming distribution is diphasic
+
+    This function implements Step 3 of the paper. It binarises the 2-D array
+    ``G`` assuming its histogram is mostly diphasic and using a median value.
+
+
+    Parameters:
+
+      G (numpy.ndarray): A 2-dimensional 64-bit array ``G`` containing the
+        result of the filtering operation. ``G`` has the dimensions of the
+        original image.
+
+
+    Returns:
+
+      numpy.ndarray: A 2-dimensional 64-bit float array with the same
+      dimensions as the input image, containing its vein-binarised version.
+      The output of this function corresponds to the output of the method.
+
+    """
+
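+    # e.g., for G = [[0, 1], [3, 5]], the median of the strictly positive
+    # values {1, 3, 5} is 3, so only the pixel valued 5 survives:
+    # binarise(G) == [[0, 0], [0, 1]]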
+    median = numpy.median(G[G>0])
+    Gbool = G > median
+    return Gbool.astype(numpy.float64)
+
+
+  def _view_four(self, k, suptitle):
+    '''Display four plots using matplotlib'''
+
+    import matplotlib.pyplot as plt
+
+    k[k<=0] = 0
+    k /= k.max()
+
+    plt.subplot(2,2,1)
+    plt.imshow(k[...,0], cmap='gray')
+    plt.title('Horizontal')
+
+    plt.subplot(2,2,2)
+    plt.imshow(k[...,1], cmap='gray')
+    plt.title('Vertical')
+
+    plt.subplot(2,2,3)
+    plt.imshow(k[...,2], cmap='gray')
+    plt.title('+45 degrees')
+
+    plt.subplot(2,2,4)
+    plt.imshow(k[...,3], cmap='gray')
+    plt.title('-45 degrees')
+
+    plt.suptitle(suptitle)
+    plt.tight_layout()
+    plt.show()
+
+
+  def _view_single(self, k, title):
+    '''Displays a single plot using matplotlib'''
+
+    import matplotlib.pyplot as plt
+
+    plt.imshow(k, cmap='gray')
+    plt.title(title)
+    plt.tight_layout()
+    plt.show()
 
 
   def __call__(self, image):
-    """Reads the input image, extract the features based on Maximum Curvature of the fingervein image, and writes the resulting template"""
 
-    finger_image = image[0] #Normalized image with or without histogram equalization
+    finger_image = image[0].astype('float64')
     finger_mask = image[1]
 
-    return self.maximum_curvature(finger_image, finger_mask)
+    #import time
+    #start = time.time()
+
+    kappa = self.detect_valleys(finger_image, finger_mask)
+
+    #self._view_four(kappa, "Valley Detectors - $\kappa$")
+
+    #print('filtering took %.2f seconds' % (time.time() - start))
+    #start = time.time()
+
+    V = self.eval_vein_probabilities(kappa)
+
+    #self._view_single(V, "Accumulated Probabilities - V")
+
+    #print('probabilities took %.2f seconds' % (time.time() - start))
+    #start = time.time()
+
+    Cd = self.connect_centres(V)
+
+    #self._view_four(Cd, "Connected Centers - $C_{di}$")
+    #self._view_single(numpy.amax(Cd, axis=2), "Connected Centers - G")
+
+    #print('connections took %.2f seconds' % (time.time() - start))
+    #start = time.time()
+
+    retval = self.binarise(numpy.amax(Cd, axis=2))
+
+    #self._view_single(retval, "Final Binarised Image")
+
+    #print('binarization took %.2f seconds' % (time.time() - start))
+
+    return retval
diff --git a/bob/bio/vein/preprocessor/FingerCrop.py b/bob/bio/vein/preprocessor/FingerCrop.py
deleted file mode 100644
index 5dd355cf0dee1a42b488e8009b4f607d42a7e775..0000000000000000000000000000000000000000
--- a/bob/bio/vein/preprocessor/FingerCrop.py
+++ /dev/null
@@ -1,533 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-import math
-import numpy
-from PIL import Image
-
-import bob.io.base
-
-from bob.bio.base.preprocessor import Preprocessor
-
-from .. import utils
-
-
-class FingerCrop (Preprocessor):
-  """
-  Extracts the mask heuristically and pre-processes fingervein images.
-
-  Based on the implementation: E.C. Lee, H.C. Lee and K.R. Park. Finger vein
-  recognition using minutia-based alignment and local binary pattern-based
-  feature extraction. International Journal of Imaging Systems and
-  Technology. Vol. 19, No. 3, pp. 175-178, September 2009.
-
-  Finger orientation is based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li,
-  Finger-vein authentication based on wide line detector and pattern
-  normalization, Proceedings on 20th International Conference on Pattern
-  Recognition (ICPR), 2010.
-
-  The ``konomask`` option is based on the work of M. Kono, H. Ueki and S.
-  Umemura. Near-infrared finger vein patterns for personal identification,
-  Applied Optics, Vol. 41, Issue 35, pp. 7429-7436 (2002).
-
-  In this implementation, the finger image is (in this order):
-
-    1. The mask is extracted (if ``annotation`` is not chosen as a parameter to
-       ``fingercontour``). Other mask extraction options correspond to
-       heuristics developed by Lee et al. (2009) or Kono et al. (2002)
-    2. The finger is normalized (made horizontal), via a least-squares
-       normalization procedure concerning the center of the annotated area,
-       width-wise. Before normalization, the image is padded to avoid loosing
-       pixels corresponding to veins during the rotation
-    3. (optionally) Post processed with histogram-equalization to enhance vein
-       information. Notice that only the area inside the mask is used for
-       normalization. Areas outside of the mask (where the mask is ``False``
-       are set to black)
-
-
-  Parameters:
-
-    mask_h (:py:obj:`int`, optional): Height of contour mask in pixels, must
-      be an even number (used by the methods ``leemaskMod`` or
-      ``leemaskMatlab``)
-
-    mask_w (:py:obj:`int`, optional): Width of the contour mask in pixels
-      (used by the methods ``leemaskMod`` or ``leemaskMatlab``)
-
-    padding_width (:py:obj:`int`, optional): How much padding (in pixels) to
-      add around the borders of the input image. We normally always keep this
-      value on its default (5 pixels). This parameter is always used before
-      normalizing the finger orientation.
-
-    padding_constant (:py:obj:`int`, optional): What is the value of the pixels
-      added to the padding. This number should be a value between 0 and 255.
-      (From Pedro Tome: for UTFVP (high-quality samples), use 0. For the VERA
-      Fingervein database (low-quality samples), use 51 (that corresponds to
-      0.2 in a float image with values between 0 and 1). This parameter is
-      always used before normalizing the finger orientation.
-
-    fingercontour (:py:obj:`str`, optional): Select between three finger
-      contour implementations: ``"leemaskMod"``, ``"leemaskMatlab"``,
-      ``"konomask"`` or ``annotation``. (From Pedro Tome: the option
-      ``leemaskMatlab`` was just implemented for testing purposes so we could
-      compare with MAT files generated from Matlab code of other authors. He
-      only used it with the UTFVP database, using ``leemaskMod`` with that
-      database yields slight worse results.)
-
-    postprocessing (:py:obj:`str`, optional): Select between ``HE`` (histogram
-      equalization, as with :py:func:`skimage.exposure.equalize_hist`) or
-      ``None`` (the default).
-
-  """
-
-
-  def __init__(self, mask_h = 4, mask_w = 40,
-      padding_width = 5, padding_constant = 51,
-      fingercontour = 'leemaskMod', postprocessing = None, **kwargs):
-
-    Preprocessor.__init__(self,
-        mask_h = mask_h,
-        mask_w = mask_w,
-        padding_width = padding_width,
-        padding_constant = padding_constant,
-        fingercontour = fingercontour,
-        postprocessing = postprocessing,
-        **kwargs
-        )
-
-    self.mask_h = mask_h
-    self.mask_w = mask_w
-
-    self.fingercontour = fingercontour
-    self.postprocessing = postprocessing
-
-    self.padding_width = padding_width
-    self.padding_constant = padding_constant
-
-
-  def __konomask__(self, image, sigma):
-    """
-    Finger vein mask extractor.
-
-    Based on the work of M. Kono, H. Ueki and S. Umemura. Near-infrared finger
-    vein patterns for personal identification, Applied Optics, Vol. 41, Issue
-    35, pp. 7429-7436 (2002).
-
-    """
-
-    padded_image = numpy.pad(image, self.padding_width, 'constant',
-        constant_values = self.padding_constant)
-
-    sigma = 5
-    img_h,img_w = padded_image.shape
-
-    # Determine lower half starting point
-    if numpy.mod(img_h,2) == 0:
-        half_img_h = img_h/2 + 1
-    else:
-        half_img_h = numpy.ceil(img_h/2)
-
-    #Construct filter kernel
-    winsize = numpy.ceil(4*sigma)
-
-    x = numpy.arange(-winsize, winsize+1)
-    y = numpy.arange(-winsize, winsize+1)
-    X, Y = numpy.meshgrid(x, y)
-
-    hy = (-Y/(2*math.pi*sigma**4))*numpy.exp(-(X**2 + Y**2)/(2*sigma**2))
-
-    # Filter the image with the directional kernel
-    fy = utils.imfilter(padded_image, hy)
-
-    # Upper part of filtred image
-    img_filt_up = fy[0:half_img_h,:]
-    y_up = img_filt_up.argmax(axis=0)
-
-    # Lower part of filtred image
-    img_filt_lo = fy[half_img_h-1:,:]
-    y_lo = img_filt_lo.argmin(axis=0)
-
-    # Fill region between upper and lower edges
-    finger_mask = numpy.ndarray(padded_image.shape, numpy.bool)
-    finger_mask[:,:] = False
-
-    for i in range(0,img_w):
-      finger_mask[y_up[i]:y_lo[i]+padded_image.shape[0]-half_img_h+2,i] = True
-
-    if not self.padding_width:
-      return finger_mask
-    else:
-      w = self.padding_width
-      return finger_mask[w:-w,w:-w]
-
-
-  def __leemaskMod__(self, image):
-    """
-    A method to calculate the finger mask.
-
-    Based on the work of Finger vein recognition using minutia-based alignment
-    and local binary pattern-based feature extraction, E.C. Lee, H.C. Lee and
-    K.R. Park, International Journal of Imaging Systems and Technology, Volume
-    19, Issue 3, September 2009, Pages 175--178, doi: 10.1002/ima.20193
-
-    This code is a variant of the Matlab implementation by Bram Ton, available
-    at:
-
-    https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m
-
-    In this variant from Pedro Tome, the technique of filtering the image with
-    a horizontal filter is also applied on the vertical axis.
-
-
-    Parameters:
-
-    image (numpy.ndarray): raw image to use for finding the mask, as 2D array
-        of unsigned 8-bit integers
-
-
-    **Returns:**
-
-    numpy.ndarray: A 2D boolean array with the same shape of the input image
-        representing the cropping mask. ``True`` values indicate where the
-        finger is.
-
-    numpy.ndarray: A 2D array with 64-bit floats indicating the indexes where
-       the mask, for each column, starts and ends on the original image. The
-       same of this array is (2, number of columns on input image).
-
-    """
-
-    padded_image = numpy.pad(image, self.padding_width, 'constant',
-        constant_values = self.padding_constant)
-
-    img_h,img_w = padded_image.shape
-
-    # Determine lower half starting point
-    half_img_h = img_h/2
-    half_img_w = img_w/2
-
-    # Construct mask for filtering (up-bottom direction)
-    mask = numpy.ones((self.mask_h, self.mask_w), dtype='float64')
-    mask[int(self.mask_h/2):,:] = -1.0
-
-    img_filt = utils.imfilter(padded_image, mask)
-
-    # Upper part of filtred image
-    img_filt_up = img_filt[:int(half_img_h),:]
-    y_up = img_filt_up.argmax(axis=0)
-
-    # Lower part of filtred image
-    img_filt_lo = img_filt[int(half_img_h):,:]
-    y_lo = img_filt_lo.argmin(axis=0)
-
-    img_filt = utils.imfilter(padded_image, mask.T)
-
-    # Left part of filtered image
-    img_filt_lf = img_filt[:,:int(half_img_w)]
-    y_lf = img_filt_lf.argmax(axis=1)
-
-    # Right part of filtred image
-    img_filt_rg = img_filt[:,int(half_img_w):]
-    y_rg = img_filt_rg.argmin(axis=1)
-
-    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')
-
-    for i in range(0,y_up.size):
-        finger_mask[y_up[i]:y_lo[i]+img_filt_lo.shape[0]+1,i] = True
-
-    # Left region
-    for i in range(0,y_lf.size):
-        finger_mask[i,0:y_lf[i]+1] = False
-
-    # Right region has always the finger ending, crop the padding with the
-    # meadian
-    finger_mask[:,int(numpy.median(y_rg)+img_filt_rg.shape[1]):] = False
-
-    if not self.padding_width:
-      return finger_mask
-    else:
-      w = self.padding_width
-      return finger_mask[w:-w,w:-w]
-
-
-  def __leemaskMatlab__(self, image):
-    """
-    A method to calculate the finger mask.
-
-    Based on the work of Finger vein recognition using minutia-based alignment
-    and local binary pattern-based feature extraction, E.C. Lee, H.C. Lee and
-    K.R. Park, International Journal of Imaging Systems and Technology, Volume
-    19, Issue 3, September 2009, Pages 175--178, doi: 10.1002/ima.20193
-
-    This code is based on the Matlab implementation by Bram Ton, available at:
-
-    https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m
-
-    In this method, we calculate the mask of the finger independently for each
-    column of the input image. Firstly, the image is convolved with a [1,-1]
-    filter of size ``(self.mask_h, self.mask_w)``. Then, the upper and lower
-    parts of the resulting filtered image are separated. The location of the
-    maxima in the upper part is located. The same goes for the location of the
-    minima in the lower part. The mask is then calculated, per column, by
-    considering it starts in the point where the maxima is in the upper part
-    and goes up to the point where the minima is detected on the lower part.
-
-
-    **Parameters:**
-
-    image (numpy.ndarray): raw image to use for finding the mask, as 2D array
-        of unsigned 8-bit integers
-
-
-    **Returns:**
-
-    numpy.ndarray: A 2D boolean array with the same shape of the input image
-        representing the cropping mask. ``True`` values indicate where the
-        finger is.
-
-    numpy.ndarray: A 2D array with 64-bit floats indicating the indexes where
-       the mask, for each column, starts and ends on the original image. The
-       same of this array is (2, number of columns on input image).
-
-    """
-
-    padded_image = numpy.pad(image, self.padding_width, 'constant',
-        constant_values = self.padding_constant)
-
-    img_h,img_w = padded_image.shape
-
-    # Determine lower half starting point
-    half_img_h = int(img_h/2)
-
-    # Construct mask for filtering
-    mask = numpy.ones((self.mask_h,self.mask_w), dtype='float64')
-    mask[int(self.mask_h/2):,:] = -1.0
-
-    img_filt = utils.imfilter(padded_image, mask)
-
-    # Upper part of filtered image
-    img_filt_up = img_filt[:half_img_h,:]
-    y_up = img_filt_up.argmax(axis=0)
-
-    # Lower part of filtered image
-    img_filt_lo = img_filt[half_img_h:,:]
-    y_lo = img_filt_lo.argmin(axis=0)
-
-    # Translation: for all columns of the input image, set to True all pixels
-    # of the mask from index where the maxima occurred in the upper part until
-    # the index where the minima occurred in the lower part.
-    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')
-    for i in range(img_filt.shape[1]):
-      finger_mask[y_up[i]:(y_lo[i]+img_filt_lo.shape[0]+1), i] = True
-
-    if not self.padding_width:
-      return finger_mask
-    else:
-      w = self.padding_width
-      return finger_mask[w:-w,w:-w]
-
-
-  def __huangnormalization__(self, image, mask):
-    """
-    Simple finger normalization.
-
-    Based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li, Finger-vein
-    authentication based on wide line detector and pattern normalization,
-    Proceedings on 20th International Conference on Pattern Recognition (ICPR),
-    2010.
-
-    This implementation aligns the finger to the centre of the image using an
-    affine transformation. Elliptic projection which is described in the
-    referenced paper is not included.
-
-    In order to defined the affine transformation to be performed, the
-    algorithm first calculates the center for each edge (column wise) and
-    calculates the best linear fit parameters for a straight line passing
-    through those points.
-
-
-    **Parameters:**
-
-    image (numpy.ndarray): raw image to normalize as 2D array of unsigned
-        8-bit integers
-
-    mask (numpy.ndarray): mask to normalize as 2D array of booleans
-
-
-    **Returns:**
-
-    numpy.ndarray: A 2D boolean array with the same shape and data type of
-        the input image representing the newly aligned image.
-
-    numpy.ndarray: A 2D boolean array with the same shape and data type of
-        the input mask representing the newly aligned mask.
-
-    """
-
-    img_h, img_w = image.shape
-
-    # Calculates the mask edges along the columns
-    edges = numpy.zeros((2, mask.shape[1]), dtype=int)
-
-    edges[0,:] = mask.argmax(axis=0) # get upper edges
-    edges[1,:] = len(mask) - numpy.flipud(mask).argmax(axis=0) - 1
-
-    bl = edges.mean(axis=0) #baseline
-    x = numpy.arange(0, edges.shape[1])
-    A = numpy.vstack([x, numpy.ones(len(x))]).T
-
-    # Fit a straight line through the base line points
-    w = numpy.linalg.lstsq(A,bl)[0] # obtaining the parameters
-
-    angle = -1*math.atan(w[0])  # Rotation
-    tr = img_h/2 - w[1]         # Translation
-    scale = 1.0                 # Scale
-
-    #Affine transformation parameters
-    sx=sy=scale
-    cosine = math.cos(angle)
-    sine = math.sin(angle)
-
-    a = cosine/sx
-    b = -sine/sy
-    #b = sine/sx
-    c = 0 #Translation in x
-
-    d = sine/sx
-    e = cosine/sy
-    f = tr #Translation in y
-    #d = -sine/sy
-    #e = cosine/sy
-    #f = 0
-
-    g = 0
-    h = 0
-    #h=tr
-    i = 1
-
-    T = numpy.matrix([[a,b,c],[d,e,f],[g,h,i]])
-    Tinv = numpy.linalg.inv(T)
-    Tinvtuple = (Tinv[0,0],Tinv[0,1], Tinv[0,2], Tinv[1,0],Tinv[1,1],Tinv[1,2])
-
-    def _afftrans(img):
-      '''Applies the affine transform on the resulting image'''
-
-      t = Image.fromarray(img.astype('uint8'))
-      w, h = t.size #pillow image is encoded w, h
-      w += 2*self.padding_width
-      h += 2*self.padding_width
-      t = t.transform(
-          (w,h),
-          Image.AFFINE,
-          Tinvtuple,
-          resample=Image.BICUBIC,
-          fill=self.padding_constant)
-
-      return numpy.array(t).astype(img.dtype)
-
-    return _afftrans(image), _afftrans(mask)
-
-
-  def __HE__(self, image, mask):
-    """
-    Applies histogram equalization on the input image inside the mask.
-
-    In this implementation, only the pixels that lie inside the mask will be
-    used to calculate the histogram equalization parameters. Because of this
-    particularity, we don't use Bob's implementation for histogram equalization
-    and have one based exclusively on scikit-image.
-
-
-    **Parameters:**
-
-    image (numpy.ndarray): raw image to be filtered, as 2D array of
-          unsigned 8-bit integers
-
-    mask (numpy.ndarray): mask of the same size of the image, but composed
-          of boolean values indicating which values should be considered for
-          the histogram equalization
-
-
-    **Returns:**
-
-    numpy.ndarray: normalized image as a 2D array of unsigned 8-bit integers
-
-    """
-    from skimage.exposure import equalize_hist
-    from skimage.exposure import rescale_intensity
-
-    retval = rescale_intensity(equalize_hist(image, mask=mask), out_range = (0, 255))
-
-    # make the parts outside the mask totally black
-    retval[~mask] = 0
-
-    return retval
-
-
-  def __call__(self, data, annotations=None):
-    """Reads the input image or (image, mask) and prepares for fex.
-
-    Parameters:
-
-      data (numpy.ndarray, tuple): Either a :py:class:`numpy.ndarray`
-        containing a gray-scaled image with dtype ``uint8`` or a 2-tuple
-        containing both the gray-scaled image and a mask, with the same size of
-        the image, with dtype ``bool`` containing the points which should be
-        considered part of the finger
-
-
-    Returns:
-
-      numpy.ndarray: The image, preprocessed and normalized
-
-      numpy.ndarray: A mask, of the same size of the image, indicating where
-      the valid data for the object is.
-
-    """
-
-    if isinstance(data, numpy.ndarray):
-      image = data
-      mask = None
-    else:
-      image, mask = data
-
-    ## Finger edges and contour extraction:
-    if self.fingercontour == 'leemaskMatlab':
-      mask = self.__leemaskMatlab__(image) #for UTFVP
-    elif self.fingercontour == 'leemaskMod':
-      mask = self.__leemaskMod__(image) #for VERA
-    elif self.fingercontour == 'konomask':
-      mask = self.__konomask__(image, sigma=5)
-    elif self.fingercontour == 'annotation':
-      if mask is None:
-        raise RuntimeError("Cannot use fingercontour=annotation - the " \
-            "current sample being processed does not provide a mask")
-    else:
-      raise RuntimeError("Please choose between leemaskMod, leemaskMatlab, " \
-          "konomask or annotation for parameter 'fingercontour'. %s is not " \
-          "valid" % self.fingercontour)
-
-    ## Finger region normalization:
-    image_norm, mask_norm = self.__huangnormalization__(image, mask)
-
-    ## veins enhancement:
-    if self.postprocessing == 'HE':
-      image_norm = self.__HE__(image_norm, mask_norm)
-
-    ## returns the normalized image and the finger mask
-    return image_norm, mask_norm
-
-
-  def write_data(self, data, filename):
-    '''Overrides the default method implementation to handle our tuple'''
-
-    f = bob.io.base.HDF5File(filename, 'w')
-    f.set('image', data[0])
-    f.set('mask', data[1])
-
-
-  def read_data(self, filename):
-    '''Overrides the default method implementation to handle our tuple'''
-
-    f = bob.io.base.HDF5File(filename, 'r')
-    return f.read('image'), f.read('mask')
diff --git a/bob/bio/vein/preprocessor/__init__.py b/bob/bio/vein/preprocessor/__init__.py
index 41d1e0433c3551b5e20ba6f059b77011e8360a37..9ad9c7c63c2f4876c23634a57e22511a8a14d19a 100644
--- a/bob/bio/vein/preprocessor/__init__.py
+++ b/bob/bio/vein/preprocessor/__init__.py
@@ -1,4 +1,43 @@
-from .FingerCrop import FingerCrop
+from .crop import Cropper, FixedCrop, NoCrop
+from .mask import Padder, Masker, FixedMask, NoMask, AnnotatedRoIMask
+from .mask import KonoMask, LeeMask, TomesLeeMask, WatershedMask
+from .normalize import Normalizer, NoNormalization, HuangNormalization
+from .filters import Filter, NoFilter, HistogramEqualization
+from .preprocessor import Preprocessor
 
 # gets sphinx autodoc done right - don't remove it
+def __appropriate__(*args):
+  """Says object was actually declared here, an not on the import module.
+
+  Parameters:
+
+    *args: An iterable of objects to modify
+
+  Resolves `Sphinx referencing issues
+  <https://github.com/sphinx-doc/sphinx/issues/3048>`_
+  """
+
+  for obj in args: obj.__module__ = __name__
+
+__appropriate__(
+    Cropper,
+    FixedCrop,
+    NoCrop,
+    Padder,
+    Masker,
+    FixedMask,
+    NoMask,
+    AnnotatedRoIMask,
+    KonoMask,
+    LeeMask,
+    TomesLeeMask,
+    WatershedMask,
+    Normalizer,
+    NoNormalization,
+    HuangNormalization,
+    Filter,
+    NoFilter,
+    HistogramEqualization,
+    Preprocessor,
+    )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/vein/preprocessor/crop.py b/bob/bio/vein/preprocessor/crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfd620debf92e971d249b4cc21b89a1a92733f5d
--- /dev/null
+++ b/bob/bio/vein/preprocessor/crop.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+
+'''Base utilities for pre-cropping images'''
+
+import numpy
+
+
+class Cropper(object):
+    """This is the base class for all croppers
+
+    It defines the minimum requirements for all derived cropper classes.
+
+
+    """
+
+    def __init__(self):
+      pass
+
+
+    def __call__(self, image):
+      """Overwrite this method to implement your masking method
+
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of the same type as the input, with
+        cropped rows and columns as per request
+
+      """
+
+      raise NotImplementedError('You must implement the __call__ slot')
+
+
+class FixedCrop(Cropper):
+  """Implements cropping using a fixed suppression of border pixels
+
+  The defaults suppress no lines from the image and return an image like the
+  original. If an :py:class:`bob.bio.vein.database.AnnotatedArray` is passed,
+  then we also check for its ``.metadata['roi']`` component and correct it so
+  that annotated RoI points are consistent on the cropped image.
+
+
+  .. note::
+
+     Before choosing values, note you're responsible for knowing what is the
+     orientation of images fed into this cropper.
+
+
+  Parameters:
+
+    top (:py:class:`int`, optional): Number of lines to suppress from the top
+      of the image. The top of the image corresponds to ``y = 0``.
+
+    bottom (:py:class:`int`, optional): Number of lines to suppress from the
+      bottom of the image. The bottom of the image corresponds to ``y =
+      height``.
+
+    left (:py:class:`int`, optional): Number of lines to suppress from the left
+      of the image. The left of the image corresponds to ``x = 0``.
+
+    right (:py:class:`int`, optional): Number of lines to suppress from the
+      right of the image. The right of the image corresponds to ``x = width``.
+
+  """
+
+  def __init__(self, top=0, bottom=0, left=0, right=0):
+    self.top = top
+    self.bottom = bottom
+    self.left = left
+    self.right = right
+
+
+  def __call__(self, image):
+    """Returns a big mask
+
+
+    Parameters:
+
+      image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+        input image
+
+
+    Returns:
+
+      numpy.ndarray: A 2D numpy array of the same type as the input, with the
+      requested rows and columns cropped out. If the input carries RoI
+      metadata, the annotated points are adjusted to the cropped geometry.
+
+
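+    Example (a minimal sketch):
+
+      >>> import numpy
+      >>> crop = FixedCrop(top=1, bottom=1)
+      >>> crop(numpy.zeros((10, 4), dtype='uint8')).shape
+      (8, 4)
+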
+    """
+
+    # this should work even if limits are zeros
+    h, w = image.shape
+    retval = image[self.top:h-self.bottom, self.left:w-self.right]
+
+    if hasattr(retval, 'metadata') and 'roi' in retval.metadata:
+      # adjust roi points to new cropping
+      retval = retval.copy() #don't override original
+      h, w = retval.shape
+      points = []
+      for y, x in retval.metadata['roi']:
+        y = max(y-self.top, 0) #adjust
+        y = min(y, h-1) #verify it is not over the limits
+        x = max(x-self.left, 0) #adjust
+        x = min(x, w-1) #verify it is not over the limits
+        points.append((y,x))
+      retval.metadata['roi'] = points
+
+    return retval
+
+
+class NoCrop(FixedCrop):
+  """Convenience: same as FixedCrop()"""
+
+  def __init__(self):
+    super(NoCrop, self).__init__(0, 0, 0, 0)
diff --git a/bob/bio/vein/preprocessor/filters.py b/bob/bio/vein/preprocessor/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1aa814566fe8d3640bd92c4ed6a050f63e92366
--- /dev/null
+++ b/bob/bio/vein/preprocessor/filters.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+'''Base utilities for post-filtering vein images'''
+
+import numpy
+
+
+class Filter(object):
+  '''Objects of this class filter the input image'''
+
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image, mask):
+    '''Inputs image and mask and outputs a filtered version of the image
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to filter as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: A 2D array with the same shape as the input image,
+      representing the filtered image.
+
+    '''
+
+    raise NotImplementedError('You must implement the __call__ slot')
+
+
+class NoFilter(Filter):
+  '''Applies no filtering on the input image, returning it without changes'''
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image, mask):
+    '''Inputs image and mask and outputs the image, without changes
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to filter as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: The input image, returned without changes.
+
+    '''
+
+    return image
+
+
+class HistogramEqualization(Filter):
+  '''Applies histogram equalization on the input image inside the mask.
+
+  In this implementation, only the pixels that lie inside the mask will be
+  used to calculate the histogram equalization parameters. Because of this
+  particularity, we don't use Bob's implementation for histogram equalization
+  and have one based exclusively on scikit-image.
+  '''
+
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image, mask):
+    '''Applies histogram equalization on the input image, returns filtered
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to filter as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: A 2D array with the same shape as the input image, with
+      equalized intensities rescaled to the range :math:`[0, 255]`. Pixels
+      outside the mask are set to zero.
+
+    '''
+
+    from skimage.exposure import equalize_hist
+    from skimage.exposure import rescale_intensity
+
+    retval = rescale_intensity(equalize_hist(image, mask=mask), out_range = (0, 255))
+
+    # make the parts outside the mask totally black
+    retval[~mask] = 0
+
+    return retval
diff --git a/bob/bio/vein/preprocessor/mask.py b/bob/bio/vein/preprocessor/mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..e56439cfe47286576683337ec4ac411844b985a6
--- /dev/null
+++ b/bob/bio/vein/preprocessor/mask.py
@@ -0,0 +1,669 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+'''Base utilities for mask processing'''
+
+import math
+import numpy
+import scipy.ndimage
+import skimage.filters
+import skimage.morphology
+
+from .utils import poly_to_mask
+
+
+class Padder(object):
+  """A class that pads the input image returning a new object
+
+
+  Parameters:
+
+    padding_width (:py:obj:`int`, optional): How much padding (in pixels) to
+      add around the borders of the input image. We normally always keep this
+      value on its default (5 pixels). This parameter is always used before
+      normalizing the finger orientation.
+
+    padding_constant (:py:obj:`int`, optional): The value of the pixels added
+      by the padding. This number should be a value between 0 and 255. (From
+      Pedro Tome: for UTFVP (high-quality samples), use 0. For the VERA
+      Fingervein database (low-quality samples), use 51, which corresponds to
+      0.2 in a float image with values between 0 and 1.) This parameter is
+      always used before normalizing the finger orientation.
+
+  """
+
+  def __init__(self, padding_width = 5, padding_constant = 51):
+
+    self.padding_width = padding_width
+    self.padding_constant = padding_constant
+
+
+  def __call__(self, image):
+    '''Inputs an image, returns a padded (larger) image
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of the same type as the input, but with
+        the extra padding
+
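+      Example (a minimal sketch):
+
+        >>> import numpy
+        >>> padder = Padder(padding_width=1, padding_constant=0)
+        >>> padder(numpy.zeros((2, 2), dtype='uint8')).shape
+        (4, 4)
+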
+    '''
+
+    return numpy.pad(image, self.padding_width, 'constant',
+        constant_values = self.padding_constant)
+
+
+
+class Masker(object):
+    """This is the base class for all maskers
+
+    It defines the minimum requirements for all derived masker classes.
+
+
+    """
+
+    def __init__(self):
+      pass
+
+
+    def __call__(self, image):
+      """Overwrite this method to implement your masking method
+
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        situated
+
+      """
+
+      raise NotImplementedError('You must implement the __call__ slot')
+
+
+class FixedMask(Masker):
+  """Implements masking using a fixed suppression of border pixels
+
+  The defaults mask no lines from the image and return a mask of the same size
+  as the original image where all values are ``True``.
+
+
+  .. note::
+
+     Before choosing values, note you're responsible for knowing what is the
+     orientation of images fed into this masker.
+
+
+  Parameters:
+
+    top (:py:class:`int`, optional): Number of lines to suppress from the top
+      of the image. The top of the image corresponds to ``y = 0``.
+
+    bottom (:py:class:`int`, optional): Number of lines to suppress from the
+      bottom of the image. The bottom of the image corresponds to ``y =
+      height``.
+
+    left (:py:class:`int`, optional): Number of lines to suppress from the left
+      of the image. The left of the image corresponds to ``x = 0``.
+
+    right (:py:class:`int`, optional): Number of lines to suppress from the
+      right of the image. The right of the image corresponds to ``x = width``.
+
+  """
+
+  def __init__(self, top=0, bottom=0, left=0, right=0):
+    self.top = top
+    self.bottom = bottom
+    self.left = left
+    self.right = right
+
+
+  def __call__(self, image):
+    """Returns a big mask
+
+
+    Parameters:
+
+      image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+        input image
+
+
+    Returns:
+
+      numpy.ndarray: A 2D numpy array of type boolean with the calculated
+      mask. ``True`` values correspond to regions where the finger is
+      situated
+
+
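+    Example (a minimal sketch):
+
+      >>> import numpy
+      >>> masker = FixedMask(top=2, bottom=2)
+      >>> m = masker(numpy.zeros((10, 4), dtype='uint8'))
+      >>> bool(m[:2].any()), bool(m[2:8].all()), bool(m[8:].any())
+      (False, True, False)
+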
+    """
+
+    retval = numpy.zeros(image.shape, dtype='bool')
+    h, w = image.shape
+    retval[self.top:h-self.bottom, self.left:w-self.right] = True
+    return retval
+
+
+class NoMask(FixedMask):
+  """Convenience: same as FixedMask()"""
+
+  def __init__(self):
+    super(NoMask, self).__init__(0, 0, 0, 0)
+
+
+class AnnotatedRoIMask(Masker):
+  """Devises the mask from the annotated RoI"""
+
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image):
+    """Returns a mask extrapolated from RoI annotations
+
+
+    Parameters:
+
+      image (bob.bio.vein.database.AnnotatedArray): A 2D numpy array of type
+        ``uint8`` with the input image containing an attribute called
+        ``metadata`` (a python dictionary). The ``metadata`` object just
+        contain a key called ``roi`` containing the annotated points
+
+
+    Returns:
+
+      numpy.ndarray: A 2D numpy array of type boolean with the calculated
+      mask. ``True`` values correspond to regions where the finger is
+      situated
+
+
+    """
+
+    return poly_to_mask(image.shape, image.metadata['roi'])
+
+
+class KonoMask(Masker):
+  """Estimates the finger region given an input NIR image using Kono et al.
+
+  This method is based on the work of M. Kono, H. Ueki and S.  Umemura.
+  Near-infrared finger vein patterns for personal identification, Applied
+  Optics, Vol. 41, Issue 35, pp. 7429-7436 (2002).
+
+
+  Parameters:
+
+    sigma (:py:obj:`float`, optional): The standard deviation of the gaussian
+      blur filter to apply for low-passing the input image (background
+      extraction). Defaults to ``5``.
+
+    padder (:py:class:`Padder`, optional): If passed, will pad the image before
+      evaluating the mask. The returned value will have the padding removed and
+      is, therefore, of the exact size of the input image.
+
+  """
+
+  def __init__(self, sigma=5, padder=Padder()):
+
+    self.sigma = sigma
+    self.padder = padder
+
+
+  def __call__(self, image):
+    '''Inputs an image, returns a mask (numpy boolean array)
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        situated
+
+    '''
+
+    image = image if self.padder is None else self.padder(image)
+    if image.dtype == numpy.uint8: image = image.astype('float64')/255.
+
+    img_h,img_w = image.shape
+
+    # Determine lower half starting point
+    if numpy.mod(img_h,2) == 0:
+        half_img_h = img_h//2 + 1
+    else:
+        half_img_h = int(numpy.ceil(img_h/2))
+
+    #Construct filter kernel
+    winsize = numpy.ceil(4*self.sigma)
+
+    x = numpy.arange(-winsize, winsize+1)
+    y = numpy.arange(-winsize, winsize+1)
+    X, Y = numpy.meshgrid(x, y)
+
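+    # hy is the analytical y-derivative of a 2-D Gaussian with standard
+    # deviation sigma; filtering with it highlights horizontal edges, with
+    # opposite-signed responses at the upper and lower finger boundaries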
+    hy = (-Y/(2*math.pi*self.sigma**4)) * \
+        numpy.exp(-(X**2 + Y**2)/(2*self.sigma**2))
+
+    # Filter the image with the directional kernel
+    fy = scipy.ndimage.convolve(image, hy, mode='nearest')
+
+    # Upper part of filtered image
+    img_filt_up = fy[0:half_img_h,:]
+    y_up = img_filt_up.argmax(axis=0)
+
+    # Lower part of filtered image
+    img_filt_lo = fy[half_img_h-1:,:]
+    y_lo = img_filt_lo.argmin(axis=0)
+
+    # Fill region between upper and lower edges
+    finger_mask = numpy.zeros(image.shape, dtype='bool')
+
+    for i in range(0,img_w):
+      finger_mask[y_up[i]:y_lo[i]+image.shape[0]-half_img_h+2,i] = True
+
+    if not self.padder:
+      return finger_mask
+    else:
+      w = self.padder.padding_width
+      return finger_mask[w:-w,w:-w]
+
+
+class LeeMask(Masker):
+  """Estimates the finger region given an input NIR image using Lee et al.
+
+  This method is based on the work of Finger vein recognition using
+  minutia-based alignment and local binary pattern-based feature extraction,
+  E.C. Lee, H.C. Lee and K.R. Park, International Journal of Imaging Systems
+  and Technology, Volume 19, Issue 3, September 2009, Pages 175--178, doi:
+  10.1002/ima.20193
+
+  This code is based on the Matlab implementation by Bram Ton, available at:
+
+  https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m
+
+  In this method, we calculate the mask of the finger independently for each
+  column of the input image. First, the image is convolved with a [1,-1]
+  step filter of size ``(self.filter_height, self.filter_width)``. The upper
+  and lower halves of the filtered image are then treated separately: the row
+  of the maximum response is located in the upper half and the row of the
+  minimum response in the lower half. The mask for each column then spans from
+  the row of the upper maximum down to the row of the lower minimum.
+
+
+  Parameters:
+
+    filter_height (:py:obj:`int`, optional): Height of contour mask in pixels,
+      must be an even number
+
+    filter_width (:py:obj:`int`, optional): Width of the contour mask in pixels
+
+  """
+
+  def __init__(self, filter_height = 4, filter_width = 40, padder=Padder()):
+    self.filter_height = filter_height
+    self.filter_width = filter_width
+    self.padder = padder
+
+
+  def __call__(self, image):
+    '''Inputs an image, returns a mask (numpy boolean array)
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        situated
+
+    '''
+
+    image = image if self.padder is None else self.padder(image)
+    if image.dtype == numpy.uint8: image = image.astype('float64')/255.
+
+    img_h,img_w = image.shape
+
+    # Determine lower half starting point
+    half_img_h = int(img_h/2)
+
+    # Construct mask for filtering
+    mask = numpy.ones((self.filter_height,self.filter_width), dtype='float64')
+    mask[int(self.filter_height/2.):,:] = -1.0
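+    # the kernel is +1 on its top half and -1 on its bottom half, producing
+    # strong, opposite-signed responses at horizontal intensity transitions;
+    # the argmax/argmin below locate the upper and lower finger boundaries
+    # for each column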
+
+    img_filt = scipy.ndimage.convolve(image, mask, mode='nearest')
+
+    # Upper part of filtered image
+    img_filt_up = img_filt[:half_img_h,:]
+    y_up = img_filt_up.argmax(axis=0)
+
+    # Lower part of filtered image
+    img_filt_lo = img_filt[half_img_h:,:]
+    y_lo = img_filt_lo.argmin(axis=0)
+
+    # Translation: for all columns of the input image, set to True all pixels
+    # of the mask from index where the maxima occurred in the upper part until
+    # the index where the minima occurred in the lower part.
+    finger_mask = numpy.zeros(image.shape, dtype='bool')
+    for i in range(img_filt.shape[1]):
+      finger_mask[y_up[i]:(y_lo[i]+img_filt_lo.shape[0]+1), i] = True
+
+    if not self.padder:
+      return finger_mask
+    else:
+      w = self.padder.padding_width
+      return finger_mask[w:-w,w:-w]
+
+
+class TomesLeeMask(Masker):
+  """Estimates the finger region given an input NIR image using Lee et al.
+
+  This method is based on the work of Finger vein recognition using
+  minutia-based alignment and local binary pattern-based feature extraction,
+  E.C. Lee, H.C. Lee and K.R. Park, International Journal of Imaging Systems
+  and Technology, Volume 19, Issue 3, September 2009, Pages 175--178, doi:
+  10.1002/ima.20193
+
+  This code is a variant of the Matlab implementation by Bram Ton, available
+  at:
+
+  https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m
+
+  In this variant from Pedro Tome, the technique of filtering the image with
+  a horizontal filter is also applied on the vertical axis. The objective is to
+  find better limits on the horizontal axis in case finger images show the
+  finger tip. If that is not your case, you may use the original variant
+  :py:class:`LeeMask` above.
+
+
+  Parameters:
+
+    filter_height (:py:obj:`int`, optional): Height of contour mask in pixels,
+      must be an even number
+
+    filter_width (:py:obj:`int`, optional): Width of the contour mask in pixels
+
+  """
+
+  def __init__(self, filter_height = 4, filter_width = 40, padder=Padder()):
+    self.filter_height = filter_height
+    self.filter_width = filter_width
+    self.padder = padder
+
+
+  def __call__(self, image):
+    '''Inputs an image, returns a mask (numpy boolean array)
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        situated
+
+    '''
+
+    image = image if self.padder is None else self.padder(image)
+    if image.dtype == numpy.uint8: image = image.astype('float64')/255.
+
+    img_h,img_w = image.shape
+
+    # Determine lower half starting point
+    half_img_h = img_h/2
+    half_img_w = img_w/2
+
+    # Construct mask for filtering (up-bottom direction)
+    mask = numpy.ones((self.filter_height, self.filter_width), dtype='float64')
+    mask[int(self.filter_height/2.):,:] = -1.0
+
+    img_filt = scipy.ndimage.convolve(image, mask, mode='nearest')
+
+    # Upper part of filtered image
+    img_filt_up = img_filt[:int(half_img_h),:]
+    y_up = img_filt_up.argmax(axis=0)
+
+    # Lower part of filtered image
+    img_filt_lo = img_filt[int(half_img_h):,:]
+    y_lo = img_filt_lo.argmin(axis=0)
+
+    img_filt = scipy.ndimage.convolve(image, mask.T, mode='nearest')
+
+    # Left part of filtered image
+    img_filt_lf = img_filt[:,:int(half_img_w)]
+    y_lf = img_filt_lf.argmax(axis=1)
+
+    # Right part of filtered image
+    img_filt_rg = img_filt[:,int(half_img_w):]
+    y_rg = img_filt_rg.argmin(axis=1)
+
+    finger_mask = numpy.zeros(image.shape, dtype='bool')
+
+    for i in range(0,y_up.size):
+      finger_mask[y_up[i]:y_lo[i]+img_filt_lo.shape[0]+1,i] = True
+
+    # Left region
+    for i in range(0,y_lf.size):
+      finger_mask[i,0:y_lf[i]+1] = False
+
+    # Right region always contains the finger ending; crop the padding using
+    # the median
+    finger_mask[:,int(numpy.median(y_rg)+img_filt_rg.shape[1]):] = False
+
+    if not self.padder:
+      return finger_mask
+    else:
+      w = self.padder.padding_width
+      return finger_mask[w:-w,w:-w]
+
+
+class WatershedMask(Masker):
+  """Estimates the finger region given an input NIR image using Watershedding
+
+  This method uses the `Watershed Morphological Algorithm
+  <https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_ for
+  determining the finger mask given an input image.
+
+  The masker works first by determining image edges using a simple 2-D Sobel
+  filter. The next step is to determine markers in the image for both the
+  finger region and background. Markers are set on the image by using a
+  pre-trained feed-forward neural network model (multi-layer perceptron or MLP)
+  learned from existing annotations. The model is trained in a separate
+  program and operates on 3x3 regions around the pixel to be predicted for
+  finger/background. The ``(y,x)`` location is also provided as input to the
+  classifier. The feature vector is then composed of 9 pixel values plus the
+  ``y`` and ``x`` (normalized) coordinates of the pixel. The network then
+  provides a prediction that depends on these input parameters. The closer the
+  output is to ``1.0``, the more likely it is from within the finger region.
+
+  Values output by the network are thresholded in order to remove uncertain
+  markers. The ``threshold`` parameter is configurable.
+
+  A series of morphological opening operations is used to, given the neural net
+  markers, remove noise before watershedding the edges from the Sobel'ed
+  original image.
+
+
+  Parameters:
+
+    model (str): Path to the model file to be used for generating
+      finger/background markers. This model should be pre-trained using a
+      separate program.
+
+    foreground_threshold (float): Threshold on the logistic regression output
+      (interval :math:`[0, 1]`) above which we accept finger markers provided
+      by the network.  The higher the value, the more selective the algorithm
+      will be and the fewer (foreground) markers will be used from the network
+      selection. This value should be a floating point number in the open
+      interval :math:`(0.0, 1.0)`.  If ``background_threshold`` is not set,
+      the background threshold will be set to :math:`1.0-T`, where ``T``
+      represents this threshold.
+
+    background_threshold (float): Threshold on the logistic regression output
+      (interval :math:`[0, 1]`) below which we accept background markers
+      provided by the network.  The smaller the value, the more selective the
+      algorithm will be and the fewer (background) markers will be used from
+      the network selection. This value should be a floating point number in
+      the open interval :math:`(0.0, 1.0)`.  If ``foreground_threshold`` is
+      not set, the foreground threshold will be set to :math:`1.0-T`, where
+      ``T`` represents this threshold.
+
+
+  """
+
+
+  def __init__(self, model, foreground_threshold, background_threshold):
+
+    import bob.io.base
+    import bob.learn.mlp
+    import bob.learn.activation
+
+    self.labeller = bob.learn.mlp.Machine((11,10,1))
+    h5f = bob.io.base.HDF5File(model)
+    self.labeller.load(h5f)
+    self.labeller.output_activation = bob.learn.activation.Logistic()
+    del h5f
+
+    # adjust threshold from background and foreground
+    if foreground_threshold is None and background_threshold is not None:
+      foreground_threshold = 1 - background_threshold
+    if background_threshold is None and foreground_threshold is not None:
+      background_threshold = 1 - foreground_threshold
+    if foreground_threshold is None and background_threshold is None:
+      foreground_threshold = 0.5
+      background_threshold = 0.5
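+    # e.g., foreground_threshold=0.8 alone implies background_threshold=0.2;
+    # when neither is given, both default to 0.5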
+
+    self.foreground_threshold = foreground_threshold
+    self.background_threshold = background_threshold
+
+
+  class _filterfun(object):
+    '''Callable for filtering the input image with marker predictions'''
+
+
+    def __init__(self, image, labeller):
+      self.labeller = labeller
+      self.features = numpy.zeros(self.labeller.shape[0], dtype='float64')
+      self.output = numpy.zeros(self.labeller.shape[-1], dtype='float64')
+
+      # builds indexes before hand, based on image dimensions
+      idx = numpy.mgrid[:image.shape[0], :image.shape[1]]
+      self.indexes = numpy.array([idx[0].flatten(), idx[1].flatten()],
+          dtype='float64')
+      self.indexes[0,:] /= image.shape[0]
+      self.indexes[1,:] /= image.shape[1]
+      self.current = 0
+
+
+    def __call__(self, arr):
+
+      self.features[:9] = arr.astype('float64')/255
+      self.features[-2:] = self.indexes[:,self.current]
+      self.current += 1
+      return self.labeller(self.features, self.output)
+
+
+  def run(self, image):
+    '''Fully preprocesses the input image and returns intermediate results
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type ``uint8`` with the markers for
+        foreground and background, selected by the neural network model
+
+        numpy.ndarray: A 2D numpy array of type ``float64`` with the edges used
+        to define the borders of the watershedding process
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        located
+
+    '''
+
+    # applies the pre-trained neural network model to get predictions about
+    # finger/background regions
+    function = WatershedMask._filterfun(image, self.labeller)
+    predictions = numpy.zeros(image.shape, 'float64')
+    scipy.ndimage.filters.generic_filter(image, function,
+        size=3, mode='nearest', output=predictions)
+
+    selector = skimage.morphology.disk(radius=5)
+
+    # applies a morphological "opening" operation
+    # (https://en.wikipedia.org/wiki/Opening_(morphology)) to remove outliers
+    markers_bg = numpy.where(predictions<self.background_threshold, 1, 0)
+    markers_bg = skimage.morphology.opening(markers_bg, selem=selector)
+    markers_fg = numpy.where(predictions>=self.foreground_threshold, 255, 0)
+    markers_fg = skimage.morphology.opening(markers_fg, selem=selector)
+
+    # avoids markers on finger borders
+    selector = skimage.morphology.disk(radius=2)
+    markers_fg = skimage.morphology.erosion(markers_fg, selem=selector)
+
+    # the final markers are a combination of foreground and background markers
+    markers = markers_fg | markers_bg
+
+    # this will determine the natural boundaries in the image where the
+    # flooding will be limited - dilation is applied on the output of the
+    # Sobel filter to well mark the finger boundaries
+    edges = skimage.filters.sobel(image)
+    edges = skimage.morphology.dilation(edges, selem=selector)
+
+    # applies watersheding to get a final estimate of the finger mask
+    segmentation = skimage.morphology.watershed(edges, markers)
+
+    # removes small perturbations and makes the finger region more uniform
+    segmentation[segmentation==1] = 0
+    mask = skimage.morphology.binary_opening(segmentation.astype('bool'),
+        selem=selector)
+
+    return markers, edges, mask
+
+
+  def __call__(self, image):
+    '''Inputs an image, returns a mask (numpy boolean array)
+
+      Parameters:
+
+        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
+          input image
+
+
+      Returns:
+
+        numpy.ndarray: A 2D numpy array of type boolean with the calculated
+        mask. ``True`` values correspond to regions where the finger is
+        located
+
+    '''
+
+    markers, edges, mask = self.run(image)
+    return mask
diff --git a/bob/bio/vein/preprocessor/normalize.py b/bob/bio/vein/preprocessor/normalize.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fce4cb454649df64053a6744387beda483ae6f0
--- /dev/null
+++ b/bob/bio/vein/preprocessor/normalize.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+'''Base utilities for normalization'''
+
+import math
+import numpy
+from PIL import Image
+
+
+class Normalizer(object):
+  '''Objects of this class normalize the input image orientation and scale'''
+
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image, mask):
+    '''Inputs image and mask and outputs a normalized version of those
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to normalize as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: A 2D array with the same shape and data type as the
+      input image, representing the newly aligned image.
+
+      numpy.ndarray: A 2D boolean array with the same shape as the input
+      mask, representing the newly aligned mask.
+
+    '''
+
+    raise NotImplementedError('You must implement the __call__ slot')
+
+
+
+class NoNormalization(Normalizer):
+  '''Trivial implementation with no normalization'''
+
+
+  def __init__(self):
+    pass
+
+
+  def __call__(self, image, mask):
+    '''Returns the input parameters, without changing them
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to normalize as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: A 2D array with the same shape and data type as the
+      input image, representing the newly aligned image.
+
+      numpy.ndarray: A 2D boolean array with the same shape as the input
+      mask, representing the newly aligned mask.
+
+    '''
+
+    return image, mask
+
+
+
+class HuangNormalization(Normalizer):
+  '''Simple finger normalization from Huang et al.
+
+  Based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li, Finger-vein
+  authentication based on wide line detector and pattern normalization,
+  Proceedings on 20th International Conference on Pattern Recognition (ICPR),
+  2010.
+
+  This implementation aligns the finger to the centre of the image using an
+  affine transformation. The elliptic projection described in the referenced
+  paper is **not** included.
+
+  In order to define the affine transformation to be performed, the
+  algorithm first calculates the centre of the finger for each column (the
+  mean of the upper and lower mask edges) and then finds the best linear fit
+  parameters for a straight line passing through those points.
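+
+  Example (a minimal sketch; assumes ``image`` is a 2D ``uint8`` array and
+  ``mask`` a 2D boolean array of the same shape):
+
+  .. code-block:: python
+
+     normalizer = HuangNormalization(padding_width=5, padding_constant=51)
+     norm_image, norm_mask = normalizer(image, mask)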
+  '''
+
+  def __init__(self, padding_width=5, padding_constant=51):
+    self.padding_width = padding_width
+    self.padding_constant = padding_constant
+
+
+  def __call__(self, image, mask):
+    '''Inputs image and mask and outputs a normalized version of those
+
+
+    Parameters:
+
+      image (numpy.ndarray): raw image to normalize as 2D array of unsigned
+          8-bit integers
+
+      mask (numpy.ndarray): mask to normalize as 2D array of booleans
+
+
+    Returns:
+
+      numpy.ndarray: A 2D array with the same shape and data type as the
+      input image, representing the newly aligned image.
+
+      numpy.ndarray: A 2D boolean array with the same shape as the input
+      mask, representing the newly aligned mask.
+
+    '''
+
+    img_h, img_w = image.shape
+
+    # Calculates the mask edges along the columns
+    edges = numpy.zeros((2, mask.shape[1]), dtype=int)
+
+    edges[0,:] = mask.argmax(axis=0) # get upper edges
+    edges[1,:] = len(mask) - numpy.flipud(mask).argmax(axis=0) - 1
+
+    bl = edges.mean(axis=0) #baseline
+    x = numpy.arange(0, edges.shape[1])
+    A = numpy.vstack([x, numpy.ones(len(x))]).T
+
+    # Fit a straight line through the base line points
+    w = numpy.linalg.lstsq(A,bl)[0] # obtaining the parameters
+
+    angle = -1*math.atan(w[0])  # Rotation
+    tr = img_h/2 - w[1]         # Translation
+    scale = 1.0                 # Scale
+
+    #Affine transformation parameters
+    sx=sy=scale
+    cosine = math.cos(angle)
+    sine = math.sin(angle)
+
+    a = cosine/sx
+    b = -sine/sy
+    #b = sine/sx
+    c = 0 #Translation in x
+
+    d = sine/sx
+    e = cosine/sy
+    f = tr #Translation in y
+    #d = -sine/sy
+    #e = cosine/sy
+    #f = 0
+
+    g = 0
+    h = 0
+    #h=tr
+    i = 1
+
+    T = numpy.matrix([[a,b,c],[d,e,f],[g,h,i]])
+    Tinv = numpy.linalg.inv(T)
+    Tinvtuple = (Tinv[0,0],Tinv[0,1], Tinv[0,2], Tinv[1,0],Tinv[1,1],Tinv[1,2])
+
+    def _afftrans(img):
+      '''Applies the affine transform on the resulting image'''
+
+      t = Image.fromarray(img.astype('uint8'))
+      w, h = t.size #pillow image is encoded w, h
+      w += 2*self.padding_width
+      h += 2*self.padding_width
+      t = t.transform(
+          (w,h),
+          Image.AFFINE,
+          Tinvtuple,
+          resample=Image.BICUBIC,
+          fill=self.padding_constant)
+
+      return numpy.array(t).astype(img.dtype)
+
+    return _afftrans(image), _afftrans(mask)
diff --git a/bob/bio/vein/preprocessor/preprocessor.py b/bob/bio/vein/preprocessor/preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..be5463ee8ba30de39aec4f59163378710a8c9969
--- /dev/null
+++ b/bob/bio/vein/preprocessor/preprocessor.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+import bob.io.base
+from bob.bio.base.preprocessor import Preprocessor as BasePreprocessor
+
+
+class Preprocessor (BasePreprocessor):
+  """
+  Extracts the mask and pre-processes fingervein images.
+
+  In this implementation, the finger image is (in this order):
+
+    #. The image is pre-cropped to remove obvious non-finger image parts
+    #. The mask is extrapolated from the image using one of our
+       :py:class:`Masker`'s concrete implementations
+    #. The image is normalized with one of our :py:class:`Normalizer`'s
+    #. The image is filtered with one of our :py:class:`Filter`'s
+
+
+  Parameters:
+
+    crop (:py:class:`Cropper`): An object that will perform pre-cropping on
+      the input image before a mask can be estimated. It removes parts of the
+      image which are surely not part of the finger region you'll want to
+      consider for the next steps.
+
+    mask (:py:class:`Masker`): An object representing a Masker instance which
+      will extrapolate the mask from the input image.
+
+    normalize (:py:class:`Normalizer`): An object representing a Normalizer
+      instance which will normalize the input image and its mask returning a
+      new image mask pair.
+
+    filter (:py:class:`Filter`): An object representing a Filter instance will
+      will filter the input image and return a new filtered image. The filter
+      instance also receives the extrapolated mask so it can, if desired, only
+      apply the filtering operation where the mask has a value of ``True``
+
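+  Example (a minimal sketch; ``NoCrop``, ``LeeMask``, ``HuangNormalization``
+  and ``NoFilter`` are concrete implementations used by this package's test
+  suite):
+
+  .. code-block:: python
+
+     from bob.bio.vein.preprocessor import (Preprocessor, NoCrop, LeeMask,
+         HuangNormalization, NoFilter)
+
+     processor = Preprocessor(
+         crop=NoCrop(),
+         mask=LeeMask(filter_height=40, filter_width=4),
+         normalize=HuangNormalization(padding_width=0, padding_constant=0),
+         filter=NoFilter(),
+         )
+     image_pp, mask = processor(image)
+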
+  """
+
+
+  def __init__(self, crop, mask, normalize, filter, **kwargs):
+
+    BasePreprocessor.__init__(self,
+        crop = crop,
+        mask = mask,
+        normalize = normalize,
+        filter = filter,
+        **kwargs
+        )
+
+    self.crop = crop
+    self.mask = mask
+    self.normalize = normalize
+    self.filter = filter
+
+
+  def __call__(self, data, annotations=None):
+    """Reads the input image or (image, mask) and prepares for fex.
+
+    Parameters:
+
+      data (numpy.ndarray): A 2D numpy array containing a gray-scaled image
+        with dtype ``uint8``. The image may be annotated with an RoI.
+
+
+    Returns:
+
+      numpy.ndarray: The image, preprocessed and normalized
+
+      numpy.ndarray: A mask, of the same size as the image, indicating where
+      the valid data for the object is.
+
+    """
+
+    data = self.crop(data)
+    mask = self.mask(data)
+    data, mask = self.normalize(data, mask)
+    data = self.filter(data, mask)
+    return data, mask
+
+
+  def write_data(self, data, filename):
+    '''Overrides the default method implementation to handle our tuple'''
+
+    f = bob.io.base.HDF5File(filename, 'w')
+    f.set('image', data[0])
+    f.set('mask', data[1])
+
+
+  def read_data(self, filename):
+    '''Overrides the default method implementation to handle our tuple'''
+
+    f = bob.io.base.HDF5File(filename, 'r')
+    return f.read('image'), f.read('mask')
diff --git a/bob/bio/vein/preprocessor/utils.py b/bob/bio/vein/preprocessor/utils.py
index 067956f2f0ef034c4a28df6ff6cd80eba10c23ed..723b2458352f35b263e3c4d420fe734245676ac2 100644
--- a/bob/bio/vein/preprocessor/utils.py
+++ b/bob/bio/vein/preprocessor/utils.py
@@ -100,7 +100,7 @@ def poly_to_mask(shape, points):
   # n.b.: PIL images are (x, y), while Bob shapes are represented in (y, x)!
   mask = Image.new('L', (shape[1], shape[0]))
 
-  # coverts whatever comes in into a list of tuples for PIL
+  # converts whatever comes in into a list of tuples for PIL
   fixed = tuple(map(tuple, numpy.roll(fix_points(shape, points), 1, 1)))
 
   # draws polygon
diff --git a/bob/bio/vein/script/blame.py b/bob/bio/vein/script/blame.py
new file mode 100644
index 0000000000000000000000000000000000000000..19e68fc0833e879219ef0d4b61cfb4388b131177
--- /dev/null
+++ b/bob/bio/vein/script/blame.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Wed 18 Jan 2017 09:40:25 CET
+
+
+"""Evaluates best/worst performers in a run given original scores
+
+Usage: %(prog)s [-v...] [options] <score-file> [<score-file> ...]
+       %(prog)s --help
+       %(prog)s --version
+
+
+Arguments:
+  <score-file>  Path to model-by-model score files for analysis
+
+
+Options:
+  -h, --help           Shows this help message and exits
+  -V, --version        Prints the version and exits
+  -v, --verbose        Increases the output verbosity level
+  -c INT, --cases=INT  Number of worst/best cases to show [default: 5]
+
+
+Examples:
+
+  1. Simple trial:
+
+     $ %(prog)s -vv model1.txt model2.txt
+
+  2. Change the number of cases to show:
+
+     $ %(prog)s -vv --cases=5 model*.txt
+
+"""
+
+
+import os
+import sys
+import numpy
+
+import bob.core
+logger = bob.core.log.setup("bob.bio.vein")
+
+
+def main(user_input=None):
+
+  if user_input is not None:
+    argv = user_input
+  else:
+    argv = sys.argv[1:]
+
+  import docopt
+  import pkg_resources
+
+  completions = dict(
+      prog=os.path.basename(sys.argv[0]),
+      version=pkg_resources.require('bob.measure')[0].version
+      )
+
+  args = docopt.docopt(
+      __doc__ % completions,
+      argv=argv,
+      version=completions['version'],
+      )
+
+  # Sets-up logging
+  verbosity = int(args['--verbose'])
+  bob.core.log.set_verbosity_level(logger, verbosity)
+
+  # validates number of cases
+  cases = int(args['--cases'])
+
+  # loads all score files and stacks them into one big structured array
+  from bob.measure.load import load_score, get_negatives_positives
+  scores = []
+  names = {}
+
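+  # first pass: find the longest model name so the fixed-width string column
+  # ('<U%d') declared below can hold any of them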
+  length = 0
+  for k in args['<score-file>']:
+    model = os.path.splitext(os.path.basename(k))[0]
+    length = max(length, len(model))
+
+  for k in args['<score-file>']:
+    model = os.path.splitext(os.path.basename(k))[0]
+    names[model] = k
+    logger.info("Loading score file `%s' for model `%s'..." % (k, model))
+    s = load_score(k)
+
+    # append a column with the model name
+    m = numpy.array(len(s)*[model], dtype='<U%d' % length)
+    new_dt = numpy.dtype(s.dtype.descr + [('model', m.dtype.descr)])
+    sp = numpy.zeros(s.shape, dtype=new_dt)
+    sp['claimed_id'] = s['claimed_id']
+    sp['real_id'] = s['real_id']
+    sp['test_label'] = s['test_label']
+    sp['score'] = s['score']
+    sp['model'] = m
+
+    # stack into the existing scores set
+    scores.append(sp)
+
+  scores = numpy.concatenate(scores)
+  genuines = scores[scores['claimed_id'] == scores['real_id']]
+  genuines.sort(order='score') #ascending
+  impostors = scores[scores['claimed_id'] != scores['real_id']]
+  impostors.sort(order='score') #ascending
+
+  # print
+  print('The %d worst genuine scores:' % cases)
+  for k in range(cases):
+    print(' %d. model %s -> %s (%f)' % (k+1, genuines[k]['model'][0],
+      genuines[k]['test_label'], genuines[k]['score']))
+
+  print('The %d best genuine scores:' % cases)
+  for k in range(cases):
+    pos = len(genuines)-k-1
+    print(' %d. model %s -> %s (%f)' % (k+1, genuines[pos]['model'][0],
+      genuines[pos]['test_label'], genuines[pos]['score']))
+
+  print('The %d worst impostor scores:' % cases)
+  for k in range(cases):
+    pos = len(impostors)-k-1
+    print(' %d. model %s -> %s (%f)' % (k+1, impostors[pos]['model'][0],
+      impostors[pos]['test_label'], impostors[pos]['score']))
+
+  print('The %d best impostor scores:' % cases)
+  for k in range(cases):
+    print(' %d. model %s -> %s (%f)' % (k+1, impostors[k]['model'][0],
+      impostors[k]['test_label'], impostors[k]['score']))
+
+  return 0
diff --git a/bob/bio/vein/script/compare_rois.py b/bob/bio/vein/script/compare_rois.py
index f8578619dfa4d21df458088a42d1432f3a36c2a4..126fa3f1628c306d81866941c7e87191bd727090 100644
--- a/bob/bio/vein/script/compare_rois.py
+++ b/bob/bio/vein/script/compare_rois.py
@@ -51,7 +51,7 @@ import operator
 import numpy
 
 import bob.core
-logger = bob.core.log.setup("bob.measure")
+logger = bob.core.log.setup("bob.bio.vein")
 
 import bob.io.base
 
@@ -171,7 +171,6 @@ def main(user_input=None):
   from ..preprocessor import utils
   metrics = []
   for k in gt:
-    logger.info("Evaluating metrics for `%s'..." % k)
     gt_file = os.path.join(args['<ground-truth>'], k)
     db_file = os.path.join(args['<database>'], k)
     gt_roi = bob.io.base.HDF5File(gt_file).read('mask')
@@ -182,6 +181,7 @@ def main(user_input=None):
       utils.intersect_ratio(gt_roi, db_roi),
       utils.intersect_ratio_of_complement(gt_roi, db_roi),
       ))
+    logger.info("%s: JI = %.5g, M1 = %.5g, M2 = %5.g" % metrics[-1])
 
   # Print statistics
   names = (
diff --git a/bob/bio/vein/script/markdet.py b/bob/bio/vein/script/markdet.py
new file mode 100644
index 0000000000000000000000000000000000000000..824e3ca9cb25d89bda3de0e8cbcb32de86fd98af
--- /dev/null
+++ b/bob/bio/vein/script/markdet.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+
+"""Trains a new MLP to perform pre-watershed marker detection
+
+Usage: %(prog)s [-v...] [--samples=N] [--model=PATH] [--points=N] [--hidden=N]
+                [--batch=N] [--iterations=N] <database> <protocol> <group>
+       %(prog)s --help
+       %(prog)s --version
+
+
+Arguments:
+
+  <database>  Name of the database to use for creating the model (options are:
+              "fv3d" or "verafinger")
+  <protocol>  Name of the protocol to use for creating the model (options
+              depend on the database chosen)
+  <group>     Name of the group to use on the database/protocol with the
+              samples to use for training the model (options are: "train",
+              "dev" or "eval")
+
+Options:
+
+  -h, --help             Shows this help message and exits
+  -V, --version          Prints the version and exits
+  -v, --verbose          Increases the output verbosity level. Using "-vv"
+                         allows the program to output informational messages as
+                         it goes along.
+  -m PATH, --model=PATH  Path to the generated model file [default: model.hdf5]
+  -s N, --samples=N      Maximum number of samples to use for training. If not
+                         set, use all samples
+  -p N, --points=N       Maximum number of samples to use for plotting
+                         ground-truth and classification errors. The more
+                         points, the less responsive the plot becomes
+                         [default: 1000]
+  -H N, --hidden=N       Number of neurons on the hidden layer of the
+                         multi-layer perceptron [default: 5]
+  -b N, --batch=N        Number of samples to use for every batch [default: 1]
+  -i N, --iterations=N   Number of iterations to train the neural net for
+                         [default: 2000]
+
+
+Examples:
+
+  Trains on the 3D Fingervein database:
+
+     $ %(prog)s -vv fv3d central dev
+
+  Saves the model to a different file, use only 100 samples:
+
+    $ %(prog)s -vv -s 100 --model=/path/to/saved-model.hdf5 fv3d central dev
+
+"""
+
+
+import os
+import sys
+import schema
+import docopt
+import numpy
+import skimage
+
+
+def validate(args):
+  '''Validates command-line arguments, returns parsed values
+
+  This function uses :py:mod:`schema` for validating :py:mod:`docopt`
+  arguments. The logging level is not checked by this procedure (it is, in
+  fact, ignored) and must be previously set up, as some of the elements here
+  may use logging to output information.
+
+
+  Parameters:
+
+    args (dict): Dictionary of arguments as defined by the help message and
+      returned by :py:mod:`docopt`
+
+
+  Returns:
+
+    dict: Validated dictionary with the same keys as the input and with values
+      possibly transformed by the validation procedure
+
+
+  Raises:
+
+    schema.SchemaError: in case one of the checked options does not validate.
+
+  '''
+
+  from .validate import check_model_does_not_exist, validate_protocol, \
+      validate_group
+
+  sch = schema.Schema({
+    '--model': check_model_does_not_exist,
+    '--samples': schema.Or(schema.Use(int), None),
+    '--points': schema.Use(int),
+    '--hidden': schema.Use(int),
+    '--batch': schema.Use(int),
+    '--iterations': schema.Use(int),
+    '<database>': lambda n: n in ('fv3d', 'verafinger'),
+    '<protocol>': validate_protocol(args['<database>']),
+    '<group>': validate_group(args['<database>']),
+    str: object, #ignores strings we don't care about
+    }, ignore_extra_keys=True)
+
+  return sch.validate(args)
+
+
+def main(user_input=None):
+
+  if user_input is not None:
+    argv = user_input
+  else:
+    argv = sys.argv[1:]
+
+  import pkg_resources
+
+  completions = dict(
+      prog=os.path.basename(sys.argv[0]),
+      version=pkg_resources.require('bob.bio.vein')[0].version
+      )
+
+  args = docopt.docopt(
+      __doc__ % completions,
+      argv=argv,
+      version=completions['version'],
+      )
+
+  try:
+    from .validate import setup_logger
+    logger = setup_logger('bob.bio.vein', args['--verbose'])
+    args = validate(args)
+  except schema.SchemaError as e:
+    sys.exit(e)
+
+  if args['<database>'] == 'fv3d':
+    from ..configurations.fv3d import database as db
+  elif args['<database>'] == 'verafinger':
+    from ..configurations.verafinger import database as db
+  else:
+    raise schema.SchemaError('Database %s is not supported' % \
+        args['<database>'])
+
+  database_replacement = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
+  db.replace_directories(database_replacement)
+  objects = db.objects(protocol=args['<protocol>'], groups=args['<group>'])
+  if args['--samples'] is None:
+    args['--samples'] = len(objects)
+
+  from ..preprocessor.utils import poly_to_mask
+  features = None
+  target = None
+  loaded = 0
+  for k, sample in enumerate(objects):
+
+    if args['--samples'] is not None and loaded >= args['--samples']:
+      break
+    path = sample.make_path(directory=db.original_directory,
+        extension=db.original_extension)
+    logger.info('Loading sample %d/%d (%s)...', k+1, len(objects), path)
+    image = sample.load(directory=db.original_directory,
+        extension=db.original_extension)
+    if not (hasattr(image, 'metadata') and 'roi' in image.metadata):
+      logger.info('Skipping sample (no ROI)')
+      continue
+    loaded += 1
+
+    # copy() required by skimage.util.shape.view_as_windows()
+    image = image.copy().astype('float64') / 255.
+    windows = skimage.util.shape.view_as_windows(image, (3,3))
+
+    if features is None and target is None:
+      features = numpy.zeros(
+          (args['--samples']*windows.shape[0]*windows.shape[1],
+            windows.shape[2]*windows.shape[3]+2), dtype='float64')
+      target = numpy.zeros(args['--samples']*windows.shape[0]*windows.shape[1],
+          dtype='bool')
+
+    mask = poly_to_mask(image.shape, image.metadata['roi'])
+
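+    # each row of "features" is a flattened 3x3 window plus the (y, x)
+    # position of its centre pixel; "target" holds the annotated mask value
+    # at that centre. Crop a 1-pixel border off the mask so its entries
+    # align with the window centres produced by view_as_windows()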
+    mask = mask[1:-1, 1:-1]
+    for y in range(windows.shape[0]):
+      for x in range(windows.shape[1]):
+        idx = ((loaded-1)*windows.shape[0]*windows.shape[1]) + \
+            (y*windows.shape[1]) + x
+        features[idx,:-2] = windows[y,x].flatten()
+        features[idx,-2] = y+1
+        features[idx,-1] = x+1
+        target[idx] = mask[y,x]
+
+  # if number of loaded samples is smaller than expected, clip features array
+  features = features[:loaded*windows.shape[0]*windows.shape[1]]
+  target = target[:loaded*windows.shape[0]*windows.shape[1]]
+
+  # normalize w.r.t. dimensions
+  features[:,-2] /= image.shape[0]
+  features[:,-1] /= image.shape[1]
+
+  target_float = target.astype('float64')
+  target_float[~target] = -1.0
+  target_float = target_float.reshape(len(target), 1)
+  positives = features[target]
+  negatives = features[~target]
+  logger.info('There are %d samples on input dataset', len(target))
+  logger.info('  %d are negatives', len(negatives))
+  logger.info('  %d are positives', len(positives))
+
+  import bob.learn.mlp
+
+  # by default, machine uses hyperbolic tangent output
+  machine = bob.learn.mlp.Machine((features.shape[1], args['--hidden'], 1))
+  machine.randomize() #initialize weights randomly
+  loss = bob.learn.mlp.SquareError(machine.output_activation)
+  train_biases = True
+  trainer = bob.learn.mlp.RProp(args['--batch'], loss, machine, train_biases)
+  trainer.reset()
+  shuffler = bob.learn.mlp.DataShuffler([negatives, positives],
+      [[-1.0], [+1.0]])
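+  # the shuffler draws class-balanced batches; targets are -1/+1 to match
+  # the range of the machine's hyperbolic tangent output activation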
+
+  # start cost
+  output = machine(features)
+  cost = loss.f(output, target_float)
+  logger.info('[initial] MSE = %g', cost.mean())
+
+  # trains the network until the error is near zero
+  for i in range(args['--iterations']):
+    try:
+      _feats, _tgts = shuffler.draw(args['--batch'])
+      trainer.train(machine, _feats, _tgts)
+      logger.info('[%d] MSE = %g', i, trainer.cost(_tgts))
+    except KeyboardInterrupt:
+      print() #avoids the ^C line
+      logger.info('Gracefully stopping training before limit (%d iterations)',
+          args['--iterations'])
+      break
+
+  # describe errors
+  neg_output = machine(negatives)
+  pos_output = machine(positives)
+  neg_errors = neg_output >= 0
+  pos_errors = pos_output < 0
+  hter_train = ((sum(neg_errors) / float(len(negatives))) + \
+      (sum(pos_errors)) / float(len(positives))) / 2.0
+  logger.info('Training set HTER: %.2f%%', 100*hter_train)
+  logger.info('  Errors on negatives: %d / %d', sum(neg_errors), len(negatives))
+  logger.info('  Errors on positives: %d / %d', sum(pos_errors), len(positives))
+
+  threshold = 0.8
+  neg_errors = neg_output >= threshold
+  pos_errors = pos_output < -threshold
+  hter_train = ((sum(neg_errors) / float(len(negatives))) + \
+      (sum(pos_errors)) / float(len(positives))) / 2.0
+  logger.info('Training set HTER (threshold=%g): %.2f%%', threshold,
+      100*hter_train)
+  logger.info('  Errors on negatives: %d / %d', sum(neg_errors), len(negatives))
+  logger.info('  Errors on positives: %d / %d', sum(pos_errors), len(positives))
+  # plot separation threshold
+  import matplotlib.pyplot as plt
+  from mpl_toolkits.mplot3d import Axes3D
+
+  # only plot N random samples otherwise it makes it too slow
+  N = numpy.random.randint(min(len(negatives), len(positives)),
+      size=min(len(negatives), len(positives), args['--points']))
+
+  fig = plt.figure()
+
+  ax = fig.add_subplot(211, projection='3d')
+  ax.scatter(image.shape[1]*negatives[N,-1], image.shape[0]*negatives[N,-2],
+      255*negatives[N,4], label='negatives', color='blue', marker='.')
+  ax.scatter(image.shape[1]*positives[N,-1], image.shape[0]*positives[N,-2],
+      255*positives[N,4], label='positives', color='red', marker='.')
+  ax.set_xlabel('Width')
+  ax.set_xlim(0, image.shape[1])
+  ax.set_ylabel('Height')
+  ax.set_ylim(0, image.shape[0])
+  ax.set_zlabel('Intensity')
+  ax.set_zlim(0, 255)
+  ax.legend()
+  ax.grid()
+  ax.set_title('Ground Truth')
+  plt.tight_layout()
+
+  ax = fig.add_subplot(212, projection='3d')
+  neg_plot = negatives[neg_output[:,0]>=threshold]
+  pos_plot = positives[pos_output[:,0]<-threshold]
+  N = numpy.random.randint(min(len(neg_plot), len(pos_plot)),
+      size=min(len(neg_plot), len(pos_plot), args['--points']))
+  ax.scatter(image.shape[1]*neg_plot[N,-1], image.shape[0]*neg_plot[N,-2],
+      255*neg_plot[N,4], label='negatives', color='red', marker='.')
+  ax.scatter(image.shape[1]*pos_plot[N,-1], image.shape[0]*pos_plot[N,-2],
+      255*pos_plot[N,4], label='positives', color='blue', marker='.')
+  ax.set_xlabel('Width')
+  ax.set_xlim(0, image.shape[1])
+  ax.set_ylabel('Height')
+  ax.set_ylim(0, image.shape[0])
+  ax.set_zlabel('Intensity')
+  ax.set_zlim(0, 255)
+  ax.legend()
+  ax.grid()
+  ax.set_title('Classifier Errors')
+  plt.tight_layout()
+
+  print('Close plot window to save model and end program...')
+  plt.show()
+  import bob.io.base
+  h5f = bob.io.base.HDF5File(args['--model'], 'w')
+  machine.save(h5f)
+  del h5f
+  logger.info('Saved MLP model to %s', args['--model'])
diff --git a/bob/bio/vein/script/validate.py b/bob/bio/vein/script/validate.py
new file mode 100644
index 0000000000000000000000000000000000000000..e404f592f1b915a379b934da79b3dbd22232d303
--- /dev/null
+++ b/bob/bio/vein/script/validate.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+
+'''Utilities for command-line option validation'''
+
+
+import os
+import glob
+import schema
+import logging
+logger = logging.getLogger(__name__)
+
+
+def setup_logger(name, level):
+  '''Sets up and checks a verbosity level respects min and max boundaries
+
+
+  Parameters:
+
+    name (str): The name of the logger to setup
+
+    level (int): A value indicating the verbosity level that must be set
+
+
+  Returns:
+
+    logging.Logger: A standard Python logger that can be used to log messages
+
+
+  Raises:
+
+    schema.SchemaError: If the verbosity level is outside the allowed range
+      (0 to 3)
+
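+  Example (a minimal sketch):
+
+  .. code-block:: python
+
+     logger = setup_logger('bob.bio.vein', 2)  # 2 is what "-vv" produces
+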
+  '''
+
+  import bob.core
+  logger = bob.core.log.setup(name)
+
+  if not (0 <= level < 4):
+    raise schema.SchemaError("there can be only up to 3 -v's in a command-line")
+
+  # Sets-up logging
+  bob.core.log.set_verbosity_level(logger, level)
+
+  return logger
+
+
+def make_dir(p):
+  '''Checks if a path exists, if it doesn't, creates it
+
+
+  Parameters:
+
+    p (str): The path to check
+
+
+  Returns:
+
+    bool: ``True``, always
+
+  '''
+
+  if not os.path.exists(p):
+    logger.info("Creating directory `%s'...", p)
+    os.makedirs(p)
+
+  return True
+
+
+def check_path_does_not_exist(p):
+  '''Checks if a path exists, if it does, raises an exception
+
+
+  Parameters:
+
+    p (str): The path to check
+
+
+  Returns:
+
+    bool: ``True``, always
+
+
+  Raises:
+
+    schema.SchemaError: if the path exists
+
+  '''
+
+  if os.path.exists(p):
+    raise schema.SchemaError("path to {} exists".format(p))
+
+  return True
+
+
+def check_path_exists(p):
+  '''Checks if a path exists, if it doesn't, raises an exception
+
+
+  Parameters:
+
+    p (str): The path to check
+
+
+  Returns:
+
+    bool: ``True``, always
+
+
+  Raises:
+
+    schema.SchemaError: if the path doesn't exist
+
+  '''
+
+  if not os.path.exists(p):
+    raise schema.SchemaError("path to {} does not exist".format(p))
+
+  return True
+
+
+def check_model_does_not_exist(p):
+  '''Checks if the path to any potential model file does not exist
+
+
+  Parameters:
+
+    p (str): The path to check
+
+
+  Returns:
+
+    bool: ``True``, always
+
+
+  Raises:
+
+    schema.SchemaError: if the path exists
+
+  '''
+
+  files = glob.glob(p + '.*')
+  if files:
+    raise schema.SchemaError("{} already exists".format(files))
+
+  return True
+
+
+def open_multipage_pdf_file(s):
+  '''Returns an opened matplotlib multi-page file
+
+
+  Parameters:
+
+    s (str): The path to the file to open
+
+
+  Returns:
+
+    matplotlib.backends.backend_pdf.PdfPages: with the handle to the multipage
+    PDF file
+
+
+  '''
+  import matplotlib.pyplot as mpl
+  from matplotlib.backends.backend_pdf import PdfPages
+  return PdfPages(s)
+
+
+class validate_protocol(object):
+  '''Validates the protocol for a given database
+
+
+  Parameters:
+
+    name (str): The name of the database to validate the protocol for
+
+
+  Raises:
+
+    schema.SchemaError: if the database is not supported
+
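+  Example (a minimal sketch, as used inside a :py:mod:`schema` validation
+  map):
+
+  .. code-block:: python
+
+     sch = schema.Schema({
+       '<protocol>': validate_protocol('verafinger'),
+       }, ignore_extra_keys=True)
+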
+  '''
+
+  def __init__(self, name):
+
+    self.dbname = name
+
+    if name == 'fv3d':
+      import bob.db.fv3d
+      self.valid_names = bob.db.fv3d.Database().protocol_names()
+    elif name == 'verafinger':
+      import bob.db.verafinger
+      self.valid_names = bob.db.verafinger.Database().protocol_names()
+    else:
+      raise schema.SchemaError("do not support database {}".format(name))
+
+
+  def __call__(self, name):
+
+    if name not in self.valid_names:
+      msg = "{} is not a valid protocol for database {}"
+      raise schema.SchemaError(msg.format(name, self.dbname))
+
+    return True
+
+
+class validate_group(object):
+  '''Validates the group for a given database
+
+
+  Parameters:
+
+    name (str): The name of the database to validate the group for
+
+
+  Raises:
+
+    schema.SchemaError: if the database is not supported
+
+  '''
+
+  def __init__(self, name):
+
+    self.dbname = name
+
+    if name == 'fv3d':
+      import bob.db.fv3d
+      self.valid_names = bob.db.fv3d.Database().groups()
+    elif name == 'verafinger':
+      import bob.db.verafinger
+      self.valid_names = bob.db.verafinger.Database().groups()
+    else:
+      raise schema.SchemaError("do not support database {}".format(name))
+
+
+  def __call__(self, name):
+
+    if name not in self.valid_names:
+      msg = "{} is not a valid group for database {}"
+      raise schema.SchemaError(msg.format(name, self.dbname))
+
+    return True
diff --git a/bob/bio/vein/script/view_mask.py b/bob/bio/vein/script/view_mask.py
deleted file mode 100644
index 2156039f7184cbc3c6284c53dda765d383065803..0000000000000000000000000000000000000000
--- a/bob/bio/vein/script/view_mask.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Mon 07 Nov 2016 15:20:26 CET
-
-
-"""Visualizes masks applied to vein imagery
-
-Usage: %(prog)s [-v...] [options] <file> [<file>...]
-       %(prog)s --help
-       %(prog)s --version
-
-
-Arguments:
-  <file>  The HDF5 file to load image and mask from
-
-
-Options:
-  -h, --help             Shows this help message and exits
-  -V, --version          Prints the version and exits
-  -v, --verbose          Increases the output verbosity level
-  -s path, --save=path   If set, saves image into a file instead of displaying
-                         it
-
-
-Examples:
-
-  Visualize the mask on a single image:
-
-     $ %(prog)s data.hdf5
-
-  Visualize multiple masks (like in a proof-sheet):
-
-     $ %(prog)s *.hdf5
-
-"""
-
-
-import os
-import sys
-
-import bob.core
-logger = bob.core.log.setup("bob.bio.vein")
-
-from ..preprocessor import utils
-
-
-def main(user_input=None):
-
-  if user_input is not None:
-    argv = user_input
-  else:
-    argv = sys.argv[1:]
-
-  import docopt
-  import pkg_resources
-
-  completions = dict(
-      prog=os.path.basename(sys.argv[0]),
-      version=pkg_resources.require('bob.bio.vein')[0].version
-      )
-
-  args = docopt.docopt(
-      __doc__ % completions,
-      argv=argv,
-      version=completions['version'],
-      )
-
-  # Sets-up logging
-  verbosity = int(args['--verbose'])
-  bob.core.log.set_verbosity_level(logger, verbosity)
-
-  # Loads the image, the mask and save it to a PNG file
-  from ..preprocessor import utils
-  for filename in args['<file>']:
-    f = bob.io.base.HDF5File(filename)
-    image = f.read('image')
-    mask  = f.read('mask')
-    img = utils.draw_mask_over_image(image, mask)
-    if args['--save']:
-      img.save(args['--save'])
-    else:
-      img.show()
diff --git a/bob/bio/vein/script/view_sample.py b/bob/bio/vein/script/view_sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dd0a54df742e9788e8e2ae29772ff079d7e09fb
--- /dev/null
+++ b/bob/bio/vein/script/view_sample.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Mon 07 Nov 2016 15:20:26 CET
+
+
+"""Visualizes a particular sample throughout many processing stages
+
+Usage: %(prog)s [-v...] [-s <path>] <database> <processed> <stem> [<stem>...]
+       %(prog)s --help
+       %(prog)s --version
+
+
+Arguments:
+
+  <database>   Name of the database to use (options are: "fv3d" or
+               "verafinger")
+  <processed>  Path with the directory holding the preprocessed and extracted
+               sub-directories containing the processing results of a
+               bob.bio.vein toolchain
+  <stem>       Name of the object on the database to display, without the root
+               or the extension
+
+
+Options:
+
+  -h, --help                Shows this help message and exits
+  -V, --version             Prints the version and exits
+  -v, --verbose             Increases the output verbosity level
+  -s <path>, --save=<path>  If set, saves image into a file instead of
+                            displaying it
+
+
+Examples:
+
+  Visualize the processing toolchain over a single image of VERA finger vein:
+
+     $ %(prog)s verafinger /mc client/sample
+
+  Visualize multiple masks (like in a proof-sheet):
+
+     $ %(prog)s verafinger /mc client/sample1 client/sample2
+
+"""
+
+
+import os
+import sys
+
+import numpy
+
+import schema
+import docopt
+
+import bob.core
+logger = bob.core.log.setup("bob.bio.vein")
+
+import matplotlib.pyplot as mpl
+from ..preprocessor import utils
+
+import bob.io.base
+import bob.io.image
+
+
+def save_figures(title, image, mask, image_pp, binary):
+  '''Saves individual images on a directory
+
+
+  Parameters:
+
+    title (str): A title for this plot
+
+    image (numpy.ndarray): The original image representing the finger vein (2D
+      array with dtype = ``uint8``)
+
+    mask (numpy.ndarray): A 2D boolean array with the same size of the original
+      image containing the pixels in which the image is valid (``True``) or
+      invalid (``False``).
+
+    image_pp (numpy.ndarray): A version of the original image, pre-processed by
+      one of the available algorithms
+
+    binary (numpy.ndarray): A binarized version of the original image in which
+      all pixels (should) represent vein (``True``) or not-vein (``False``)
+
+  '''
+
+  os.makedirs(title)
+  bob.io.base.save(image, os.path.join(title, 'original.png'))
+
+  # add preprocessed image
+  from ..preprocessor import utils
+  img = utils.draw_mask_over_image(image_pp, mask)
+  img = numpy.array(img).transpose(2,0,1)
+  bob.io.base.save(img[:3], os.path.join(title, 'preprocessed.png'))
+
+  # add binary image
+  bob.io.base.save(binary.astype('uint8')*255, os.path.join(title,
+    'binarized.png'))
+
+
+def proof_figure(title, image, mask, image_pp, binary=None):
+  '''Builds a proof canvas out of individual images
+
+
+  Parameters:
+
+    title (str): A title for this plot
+
+    image (numpy.ndarray): The original image representing the finger vein (2D
+      array with dtype = ``uint8``)
+
+    mask (numpy.ndarray): A 2D boolean array with the same size of the original
+      image containing the pixels in which the image is valid (``True``) or
+      invalid (``False``).
+
+    image_pp (numpy.ndarray): A version of the original image, pre-processed by
+      one of the available algorithms
+
+    binary (numpy.ndarray, Optional): A binarized version of the original image
+      in which all pixels (should) represent vein (``True``) or not-vein
+      (``False``)
+
+
+  Returns:
+
+    matplotlib.pyplot.Figure: A figure canvas containing the proof for the
+    particular sample on the database
+
+  '''
+
+  fig = mpl.figure(figsize=(6,9), dpi=100)
+
+  images = 3 if binary is not None else 2
+
+  # add original image
+  mpl.subplot(images, 1, 1)
+  mpl.title('%s - original' % title)
+  mpl.imshow(image, cmap="gray")
+
+  # add preprocessed image
+  from ..preprocessor import utils
+  img = utils.draw_mask_over_image(image_pp, mask)
+  mpl.subplot(images, 1, 2)
+  mpl.title('Preprocessed')
+  mpl.imshow(img)
+
+  if binary is not None:
+    # add binary image
+    mpl.subplot(3, 1, 3)
+    mpl.title('Binarized')
+    mpl.imshow(binary.astype('uint8')*255, cmap="gray")
+
+  return fig
+
+
+def validate(args):
+  '''Validates command-line arguments, returns parsed values
+
+  This function uses :py:mod:`schema` for validating :py:mod:`docopt`
+  arguments. The logging level is not checked by this procedure (it is, in
+  fact, ignored) and must be previously set up, as some of the elements here
+  may use logging to output information.
+
+
+  Parameters:
+
+    args (dict): Dictionary of arguments as defined by the help message and
+      returned by :py:mod:`docopt`
+
+
+  Returns:
+
+    dict: Validated dictionary with the same keys as the input and with values
+      possibly transformed by the validation procedure
+
+
+  Raises:
+
+    schema.SchemaError: in case one of the checked options does not validate.
+
+  '''
+
+  valid_databases = ('fv3d', 'verafinger')
+
+  sch = schema.Schema({
+    '<database>': schema.And(lambda n: n in valid_databases,
+      error='<database> must be one of %s' % ', '.join(valid_databases)),
+    str: object, #ignores strings we don't care about
+    }, ignore_extra_keys=True)
+
+  return sch.validate(args)
+
+
+def main(user_input=None):
+
+  if user_input is not None:
+    argv = user_input
+  else:
+    argv = sys.argv[1:]
+
+  import pkg_resources
+
+  completions = dict(
+      prog=os.path.basename(sys.argv[0]),
+      version=pkg_resources.require('bob.bio.vein')[0].version
+      )
+
+  args = docopt.docopt(
+      __doc__ % completions,
+      argv=argv,
+      version=completions['version'],
+      )
+
+  try:
+    from .validate import setup_logger
+    logger = setup_logger('bob.bio.vein', args['--verbose'])
+    args = validate(args)
+  except schema.SchemaError as e:
+    sys.exit(e)
+
+  if args['<database>'] == 'fv3d':
+    from ..configurations.fv3d import database as db
+  elif args['<database>'] == 'verafinger':
+    from ..configurations.verafinger import database as db
+
+  database_replacement = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
+  db.replace_directories(database_replacement)
+  all_files = db.objects()
+
+  # Loads the image and the mask, then displays or saves the proof sheet
+  for stem in args['<stem>']:
+    f = [k for k in all_files if k.path == stem]
+    if len(f) == 0:
+      raise RuntimeError('File with stem "%s" does not exist on "%s"' % \
+          (stem, args['<database>']))
+    f = f[0]
+    image = f.load(db.original_directory, db.original_extension)
+    pp_name = f.make_path(os.path.join(args['<processed>'], 'preprocessed'),
+        extension='.hdf5')
+    pp = bob.io.base.HDF5File(pp_name)
+    mask  = pp.read('mask')
+    image_pp = pp.read('image')
+    try:
+      binary = f.load(os.path.join(args['<processed>'], 'extracted'))
+    except Exception:
+      binary = None
+    fig = proof_figure(stem, image, mask, image_pp, binary)
+    if args['--save']:
+      save_figures(args['--save'], image, mask, image_pp, binary)
+    else:
+      mpl.show()
+      print('Close window to continue...')
diff --git a/bob/bio/vein/script/watershed_mask.py b/bob/bio/vein/script/watershed_mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..009f734315b2cfea9c37769d53b0fb774122b36c
--- /dev/null
+++ b/bob/bio/vein/script/watershed_mask.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Wed  4 Oct 11:23:52 2017 CEST
+
+
+"""Preprocesses a fingervein image with a watershed/neural-net seeded mask
+
+Usage: %(prog)s [-v...] [-s <path>] [-f <float>] [-b <float>] [--scan]
+                <model> <database> [<stem>...]
+       %(prog)s --help
+       %(prog)s --version
+
+
+Arguments:
+
+  <model>     Path to the model used to find watershed markers
+  <database>  Name of the database to use (options are: "fv3d" or
+              "verafinger")
+  <stem>      Name of the object on the database to display, without the root
+              or the extension. If none provided, run for all possible stems on
+              the database
+
+
+Options:
+
+  -h, --help                  Shows this help message and exits
+  -V, --version               Prints the version and exits
+  -v, --verbose               Increases the output verbosity level
+  -f, --fg-threshold=<float>  Foreground threshold value. Should be set to a
+                              number between 0.5 and 1.0. The higher, the
+                              fewer markers for the foreground watershed
+                              process will be produced. [default: 0.7]
+  -b, --bg-threshold=<float>  Background threshold value. Should be set to a
+                              number between 0.0 and 0.5. The smaller, the
+                              fewer markers for the background watershed
+                              process will be produced. [default: 0.3]
+  -S, --scan                  If set, ignores the threshold settings and
+                              scans the whole range of thresholds, printing
+                              the Jaccard, M1 and M2 merit figures
+  -s <path>, --save=<path>    If set, saves image into a file instead of
+                              displaying it
+
+
+Examples:
+
+  Visualize the preprocessing toolchain over a single image
+
+     $ %(prog)s model.hdf5 verafinger sample-stem
+
+  Save the results of the preprocessing to a file. In this case, the program
+  runs non-interactively:
+
+     $ %(prog)s -s graphics.png model.hdf5 verafinger sample-stem
+
+  Scans the set of possible thresholds printing Jaccard, M1 and M2 indexes:
+
+     $ %(prog)s --scan model.hdf5 verafinger sample-stem
+
+"""
+
+
+import os
+import sys
+import time
+
+import numpy
+
+import schema
+import docopt
+
+import bob.core
+logger = bob.core.log.setup("bob.bio.vein")
+
+import matplotlib.pyplot as plt
+
+import bob.io.base
+import bob.io.image
+
+
+def validate(args):
+  '''Validates command-line arguments, returns parsed values
+
+  This function uses :py:mod:`schema` for validating :py:mod:`docopt`
+  arguments. The logging level is not checked by this procedure (it is, in
+  fact, ignored) and must be previously set up, as some of the elements here
+  may use logging to output information.
+
+
+  Parameters:
+
+    args (dict): Dictionary of arguments as defined by the help message and
+      returned by :py:mod:`docopt`
+
+
+  Returns:
+
+    dict: Validated dictionary with the same keys as the input and with values
+      possibly transformed by the validation procedure
+
+
+  Raises:
+
+    schema.SchemaError: in case one of the checked options does not validate.
+
+  '''
+
+  valid_databases = ('fv3d', 'verafinger')
+
+  sch = schema.Schema({
+    '<model>': schema.And(os.path.exists,
+      error='<model> should point to an existing path'),
+    '<database>': schema.And(lambda n: n in valid_databases,
+      error='<database> must be one of %s' % ', '.join(valid_databases)),
+    '--fg-threshold': schema.And(
+      schema.Use(float), lambda n: 0.5 < n < 1.0,
+      error='--fg-threshold should be a float between 0.5 and 1.0',
+      ),
+    '--bg-threshold': schema.And(
+      schema.Use(float), lambda n: 0.0 < n < 0.5,
+      error='--bg-threshold should be a float between 0.0 and 0.5',
+      ),
+    str: object, #ignores strings we don't care about
+    }, ignore_extra_keys=True)
+
+  return sch.validate(args)
+
+
+def make_figure(image, markers, edges, mask):
+  '''Returns a matplotlib figure with the detailed processing result'''
+
+  plt.clf() #completely clears the current figure
+  figure = plt.gcf()
+  plt.subplot(2,2,1)
+  _ = markers.copy().astype('uint8')
+  _[_==1] = 128
+  plt.imshow(_, cmap='gray')
+  plt.title('Markers')
+
+  plt.subplot(2,2,2)
+  _ = numpy.dstack([
+      (_ | (2550*edges).astype('uint8')),
+      _,
+      _,
+      ])
+  plt.imshow(_)
+  plt.title('Edges')
+
+  plt.subplot(2,2,3)
+  plt.imshow(mask.astype('uint8')*255, cmap='gray')
+  plt.title('Mask')
+
+  plt.subplot(2,2,4)
+  plt.imshow(image, cmap='gray')
+  red_mask = numpy.dstack([
+      (~mask).astype('uint8')*255,
+      numpy.zeros_like(image),
+      numpy.zeros_like(image),
+      ])
+  plt.imshow(red_mask, alpha=0.15)
+  plt.title('Image (masked)')
+
+  return figure
+
+
+def process_one(args, image, path):
+  '''Processes a single image'''
+
+  from bob.bio.vein.preprocessor import WatershedMask, AnnotatedRoIMask
+
+  # loads the processor once - avoids re-reading weights from the disk
+  processor = WatershedMask(
+      model=args['<model>'],
+      foreground_threshold=args['--fg-threshold'],
+      background_threshold=args['--bg-threshold'],
+      )
+
+  annotator = AnnotatedRoIMask()
+
+  from bob.bio.vein.preprocessor.utils import \
+      jaccard_index, intersect_ratio, intersect_ratio_of_complement
+
+  start = time.time()
+  markers, edges, mask = processor.run(image)
+  total_time = time.time() - start
+
+  # error
+  annotated_mask = annotator(image)
+  ji = jaccard_index(annotated_mask, mask)
+  m1 = intersect_ratio(annotated_mask, mask)
+  m2 = intersect_ratio_of_complement(annotated_mask, mask)
+  logger.debug('%s, %.2f, %.2f, %.2f, %g, %g, %g', path, total_time,
+    args['--fg-threshold'], args['--bg-threshold'], ji, m1, m2)
+
+  if not args['--scan']:
+
+    fig = make_figure(image, markers, edges, mask)
+    fig.suptitle('%s @ %s - JI=%.4f, M1=%.4f, M2=%.4f\n' \
+        '($\\tau_{FG}$ = %.2f - $\\tau_{BG}$ = %.2f)' % \
+        (path, args['<database>'], ji, m1, m2, args['--fg-threshold'],
+          args['--bg-threshold']), fontsize=12)
+
+    if args['--save']:
+      fig.savefig(args['--save'])
+    else:
+      print('Close the figure to continue...')
+      plt.show()
+
+  return (path, total_time, args['--fg-threshold'], args['--bg-threshold'],
+      ji, m1, m2)
+
+
+def eval_best_thresholds(results):
+  '''Evaluates the best thresholds taking into consideration various indexes'''
+
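+  # m1 (intersect ratio with the annotated RoI) rewards covering the finger
+  # region, while m2 (intersect ratio with its complement) penalises spilling
+  # into the background; the best threshold pair maximises their ratio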
+  m1 = numpy.array([k[-2] for k in results])
+  m2 = numpy.array([k[-1] for k in results])
+  index = m1/m2
+  return index.argmax()
+
+
+def main(user_input=None):
+
+  if user_input is not None:
+    argv = user_input
+  else:
+    argv = sys.argv[1:]
+
+  import pkg_resources
+
+  completions = dict(
+      prog=os.path.basename(sys.argv[0]),
+      version=pkg_resources.require('bob.bio.vein')[0].version
+      )
+
+  args = docopt.docopt(
+      __doc__ % completions,
+      argv=argv,
+      version=completions['version'],
+      )
+
+  try:
+    from .validate import setup_logger
+    logger = setup_logger('bob.bio.vein', args['--verbose'])
+    args = validate(args)
+  except schema.SchemaError as e:
+    sys.exit(e)
+
+  if args['<database>'] == 'fv3d':
+    from ..configurations.fv3d import database as db
+  elif args['<database>'] == 'verafinger':
+    from ..configurations.verafinger import database as db
+
+  database_replacement = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
+  db.replace_directories(database_replacement)
+  all_files = db.objects()
+
+  # if a specific <stem> was not provided, run for all possible stems
+  if not args['<stem>']:
+    args['<stem>'] = [k.path for k in all_files]
+
+  # Loads each image and estimates its mask with the watershed processor
+  for stem in args['<stem>']:
+    f = [k for k in all_files if k.path == stem]
+    if len(f) == 0:
+      raise RuntimeError('File with stem "%s" does not exist on "%s"' % \
+          (stem, args['<database>']))
+
+    f = f[0]
+    image = f.load(db.original_directory, db.original_extension)
+
+    if args['--scan']:
+      results = []
+      logger.debug('stem, time, fg_thres, bg_thres, jaccard, m1, m2')
+      for fg_threshold in numpy.arange(0.6, 1.0, step=0.1):
+        for bg_threshold in numpy.arange(0.1, 0.5, step=0.1):
+          args['--fg-threshold'] = fg_threshold
+          args['--bg-threshold'] = bg_threshold
+          results.append(process_one(args, image, f.path))
+      best_thresholds = eval_best_thresholds(results)
+      logger.info('%s: FG = %.2f | BG = %.2f | M1/M2 = %.2f', f.path,
+        results[best_thresholds][2], results[best_thresholds][3],
+        results[best_thresholds][-2]/results[best_thresholds][-1])
+    else:
+      process_one(args, image, f.path)
diff --git a/bob/bio/vein/tests/extractors/image.hdf5 b/bob/bio/vein/tests/extractors/image.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..bafea75e24b675df88c5879c589fec4f2d31ee7f
Binary files /dev/null and b/bob/bio/vein/tests/extractors/image.hdf5 differ
diff --git a/bob/bio/vein/tests/extractors/mask.hdf5 b/bob/bio/vein/tests/extractors/mask.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e0770189463109fa56b0b21d6672bc9fb56862c2
Binary files /dev/null and b/bob/bio/vein/tests/extractors/mask.hdf5 differ
diff --git a/bob/bio/vein/tests/extractors/mc_bin_matlab.hdf5 b/bob/bio/vein/tests/extractors/mc_bin_matlab.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..df5dac3b258381a796b27087ba934c21a9b5520f
Binary files /dev/null and b/bob/bio/vein/tests/extractors/mc_bin_matlab.hdf5 differ
diff --git a/bob/bio/vein/tests/extractors/mc_g_matlab.hdf5 b/bob/bio/vein/tests/extractors/mc_g_matlab.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..64aaa64d820e71d74b9c6991ba586f9d97e9005e
Binary files /dev/null and b/bob/bio/vein/tests/extractors/mc_g_matlab.hdf5 differ
diff --git a/bob/bio/vein/tests/extractors/mc_vt_matlab.hdf5 b/bob/bio/vein/tests/extractors/mc_vt_matlab.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e0fcd7d21c735cbce780ac541cbbdbb7dc9ccdaa
Binary files /dev/null and b/bob/bio/vein/tests/extractors/mc_vt_matlab.hdf5 differ
diff --git a/bob/bio/vein/tests/extractors/miuramax_input_fvr.mat b/bob/bio/vein/tests/extractors/miuramax_input_fvr.mat
deleted file mode 100644
index 2ab2dfd863819a018b46b64259a4b70b2ab97461..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/extractors/miuramax_input_fvr.mat and /dev/null differ
diff --git a/bob/bio/vein/tests/extractors/miuramax_input_img.mat b/bob/bio/vein/tests/extractors/miuramax_input_img.mat
deleted file mode 100644
index 95e874fb7d0429b14d4543cee964fef7eb64abd5..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/extractors/miuramax_input_img.mat and /dev/null differ
diff --git a/bob/bio/vein/tests/extractors/miuramax_output.mat b/bob/bio/vein/tests/extractors/miuramax_output.mat
deleted file mode 100644
index 6a3de98b8c500a73078a21dc44e5acc8ddb99563..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/extractors/miuramax_output.mat and /dev/null differ
diff --git a/bob/bio/vein/tests/test.py b/bob/bio/vein/tests/test.py
index a43ca7bddf50cd29b80a37a6ad3e54c052847e30..8a594b22dfc5335b6091cd279b85d30ece16f113 100644
--- a/bob/bio/vein/tests/test.py
+++ b/bob/bio/vein/tests/test.py
@@ -14,7 +14,6 @@ the generated sphinx documentation)
 
 import os
 import numpy
-import numpy as np
 import nose.tools
 
 import pkg_resources
@@ -30,9 +29,92 @@ def F(parts):
   """Returns the test file path"""
 
   return pkg_resources.resource_filename(__name__, os.path.join(*parts))
- 
 
-def test_finger_crop():
+
+def test_cropping():
+
+  # tests if the cropping stage at preprocessors works as planned
+
+  from ..preprocessor.crop import FixedCrop, NoCrop
+
+  shape = (20, 17)
+  test_image = numpy.random.randint(0, 1000, size=shape, dtype=int)
+
+  dont_crop = NoCrop()
+  cropped = dont_crop(test_image)
+  nose.tools.eq_(test_image.shape, cropped.shape)
+  nose.tools.eq_((test_image-cropped).sum(), 0)
+
+  top = 5; bottom = 2; left=3; right=7
+  fixed_crop = FixedCrop(top, bottom, left, right)
+  cropped = fixed_crop(test_image)
+  nose.tools.eq_(cropped.shape, (shape[0]-(top+bottom), shape[1]-(left+right)))
+  nose.tools.eq_((test_image[top:-bottom,left:-right]-cropped).sum(), 0)
+
+  # tests metadata survives after cropping (and it is corrected)
+  from ..database import AnnotatedArray
+  annotations = [
+      (top-2, left+2), #slightly above and to the right
+      (top+3, shape[1]-(right+1)+3), #slightly down and to the right
+      (shape[0]-(bottom+1)+4, shape[1]-(right+1)-2),
+      (shape[0]-(bottom+1)+1, left),
+      ]
+  annotated_image = AnnotatedArray(test_image, metadata=dict(roi=annotations))
+  assert hasattr(annotated_image, 'metadata')
+  cropped = fixed_crop(annotated_image)
+  assert hasattr(cropped, 'metadata')
+  assert numpy.allclose(cropped.metadata['roi'], [
+    (0, 2),
+    (3, cropped.shape[1]-1),
+    (cropped.shape[0]-1, 4),
+    (cropped.shape[0]-1, 0),
+    ])
+
+
+def test_masking():
+
+  # tests if the masking stage at preprocessors works as planned
+
+  from ..preprocessor.mask import FixedMask, NoMask, AnnotatedRoIMask
+  from ..database import AnnotatedArray
+
+  shape = (17, 20)
+  test_image = numpy.random.randint(0, 1000, size=shape, dtype=int)
+
+  masker = NoMask()
+  mask = masker(test_image)
+  nose.tools.eq_(mask.dtype, numpy.dtype('bool'))
+  nose.tools.eq_(mask.shape, test_image.shape)
+  nose.tools.eq_(mask.sum(), numpy.prod(shape))
+
+  top = 4; bottom = 2; left=3; right=1
+  masker = FixedMask(top, bottom, left, right)
+  mask = masker(test_image)
+  nose.tools.eq_(mask.dtype, numpy.dtype('bool'))
+  nose.tools.eq_(mask.sum(), (shape[0]-(top+bottom)) * (shape[1]-(left+right)))
+  nose.tools.eq_(mask[top:-bottom,left:-right].sum(), mask.sum())
+
+  # this matches the previous "fixed" mask - notice we consider the pixels
+  # under the polygon line to be **part** of the RoI (mask position == True)
+  shape = (10, 10)
+  test_image = numpy.random.randint(0, 1000, size=shape, dtype=int)
+  annotations = [
+      (top, left),
+      (top, shape[1]-(right+1)),
+      (shape[0]-(bottom+1), shape[1]-(right+1)),
+      (shape[0]-(bottom+1), left),
+      ]
+  image = AnnotatedArray(test_image, metadata=dict(roi=annotations))
+  masker = AnnotatedRoIMask()
+  mask = masker(image)
+  nose.tools.eq_(mask.dtype, numpy.dtype('bool'))
+  nose.tools.eq_(mask.sum(), (shape[0]-(top+bottom)) * (shape[1]-(left+right)))
+  nose.tools.eq_(mask[top:-bottom,left:-right].sum(), mask.sum())
+
+
+def test_preprocessor():
+
+  # tests the whole preprocessing mechanism, compares to matlab source
 
   input_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
   output_img_filename  = F(('preprocessors',
@@ -42,9 +124,16 @@ def test_finger_crop():
 
   img = bob.io.base.load(input_filename)
 
-  from bob.bio.vein.preprocessor.FingerCrop import FingerCrop
-  preprocess = FingerCrop(fingercontour='leemaskMatlab', padding_width=0)
-  preproc, mask = preprocess(img)
+  from ..preprocessor import Preprocessor, NoCrop, LeeMask, \
+      HuangNormalization, NoFilter
+
+  processor = Preprocessor(
+      NoCrop(),
+      LeeMask(filter_height=40, filter_width=4),
+      HuangNormalization(padding_width=0, padding_constant=0),
+      NoFilter(),
+      )
+  preproc, mask = processor(img)
   #preprocessor_utils.show_mask_over_image(preproc, mask)
 
   mask_ref = bob.io.base.load(output_fvr_filename).astype('bool')
@@ -53,7 +142,7 @@ def test_finger_crop():
 
   assert numpy.mean(numpy.abs(mask - mask_ref)) < 1e-2
 
- # Very loose comparison!
+  # Very loose comparison!
   #preprocessor_utils.show_image(numpy.abs(preproc.astype('int16') - preproc_ref.astype('int16')).astype('uint8'))
   assert numpy.mean(numpy.abs(preproc - preproc_ref)) < 1.3e2
 
@@ -62,25 +151,40 @@ def test_max_curvature():
 
   #Maximum Curvature method against Matlab reference
 
-  input_img_filename  = F(('extractors', 'miuramax_input_img.mat'))
-  input_fvr_filename  = F(('extractors', 'miuramax_input_fvr.mat'))
-  output_filename     = F(('extractors', 'miuramax_output.mat'))
-
-  # Load inputs
-  input_img = bob.io.base.load(input_img_filename)
-  input_fvr = bob.io.base.load(input_fvr_filename)
+  image = bob.io.base.load(F(('extractors', 'image.hdf5')))
+  image = image.T
+  image = image.astype('float64')/255.
+  mask  = bob.io.base.load(F(('extractors', 'mask.hdf5')))
+  mask  = mask.T
+  mask  = mask.astype('bool')
+  vt_ref = bob.io.base.load(F(('extractors', 'mc_vt_matlab.hdf5')))
+  vt_ref = vt_ref.T
+  g_ref = bob.io.base.load(F(('extractors', 'mc_g_matlab.hdf5')))
+  g_ref = g_ref.T
+  bin_ref = bob.io.base.load(F(('extractors', 'mc_bin_matlab.hdf5')))
+  bin_ref = bin_ref.T
 
   # Apply Python implementation
-  from bob.bio.vein.extractor.MaximumCurvature import MaximumCurvature
-  MC = MaximumCurvature(5)
-  output_img = MC((input_img, input_fvr))
-
-  # Load Matlab reference
-  output_img_ref = bob.io.base.load(output_filename)
-
-  # Compare output of python's implementation to matlab reference
-  # (loose comparison!)
-  assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 8e-3
+  from ..extractor.MaximumCurvature import MaximumCurvature
+  MC = MaximumCurvature(3) #value used to create references
+
+  kappa = MC.detect_valleys(image, mask)
+  Vt = MC.eval_vein_probabilities(kappa)
+  Cd = MC.connect_centres(Vt)
+  G = numpy.amax(Cd, axis=2)
+  bina = MC.binarise(G)
+
+  assert numpy.allclose(Vt, vt_ref, 1e-3, 1e-4), \
+      'Vt differs from reference by %s' % numpy.abs(Vt-vt_ref).sum()
+  # Note: due to Matlab implementation bug, can only compare in a limited
+  # range with a 3-pixel around frame
+  assert numpy.allclose(G[2:-3,2:-3], g_ref[2:-3,2:-3]), \
+      'G differs from reference by %s' % numpy.abs(G-g_ref).sum()
+  # We require that no more than 30 pixels (out of a total of 63'840) differ
+  # between our implementation and the Matlab one
+  assert numpy.abs(bin_ref-bina).sum() < 30, \
+      'Binarized image differs from reference by %s' % \
+      numpy.abs(bin_ref-bina).sum()
 
 
 def test_max_curvature_HE():
@@ -89,16 +193,23 @@ def test_max_curvature_HE():
   # Read in input image
   input_img_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
   input_img = bob.io.base.load(input_img_filename)
-  
+
   # Preprocess the data and apply Histogram Equalization postprocessing (same parameters as in maximum_curvature.py configuration file + postprocessing)
-  from bob.bio.vein.preprocessor.FingerCrop import FingerCrop
-  FC = FingerCrop(postprocessing = 'HE')
-  preproc_data = FC(input_img)
+  from ..preprocessor import Preprocessor, NoCrop, LeeMask, \
+      HuangNormalization, HistogramEqualization
+  processor = Preprocessor(
+      NoCrop(),
+      LeeMask(filter_height=40, filter_width=4),
+      HuangNormalization(padding_width=0, padding_constant=0),
+      HistogramEqualization(),
+      )
+  preproc_data = processor(input_img)
 
   # Extract features from preprocessed and histogram equalized data using MC extractor (same parameters as in maximum_curvature.py configuration file)
-  from bob.bio.vein.extractor.MaximumCurvature import MaximumCurvature
+  from ..extractor.MaximumCurvature import MaximumCurvature
   MC = MaximumCurvature(sigma = 5)
   extr_data = MC(preproc_data)
+  #preprocessor_utils.show_image((255.*extr_data).astype('uint8'))
 
 
 def test_repeated_line_tracking():
@@ -114,7 +225,7 @@ def test_repeated_line_tracking():
   input_fvr = bob.io.base.load(input_fvr_filename)
 
   # Apply Python implementation
-  from bob.bio.vein.extractor.RepeatedLineTracking import RepeatedLineTracking
+  from ..extractor.RepeatedLineTracking import RepeatedLineTracking
   RLT = RepeatedLineTracking(3000, 1, 21, False)
   output_img = RLT((input_img, input_fvr))
 
@@ -132,14 +243,20 @@ def test_repeated_line_tracking_HE():
   # Read in input image
   input_img_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
   input_img = bob.io.base.load(input_img_filename)
-  
+
   # Preprocess the data and apply Histogram Equalization postprocessing (same parameters as in repeated_line_tracking.py configuration file + postprocessing)
-  from bob.bio.vein.preprocessor.FingerCrop import FingerCrop
-  FC = FingerCrop(postprocessing = 'HE')
-  preproc_data = FC(input_img)
+  from ..preprocessor import Preprocessor, NoCrop, LeeMask, \
+      HuangNormalization, HistogramEqualization
+  processor = Preprocessor(
+      NoCrop(),
+      LeeMask(filter_height=40, filter_width=4),
+      HuangNormalization(padding_width=0, padding_constant=0),
+      HistogramEqualization(),
+      )
+  preproc_data = processor(input_img)
 
   # Extract features from preprocessed and histogram equalized data using RLT extractor (same parameters as in repeated_line_tracking.py configuration file)
-  from bob.bio.vein.extractor.RepeatedLineTracking import RepeatedLineTracking
+  from ..extractor.RepeatedLineTracking import RepeatedLineTracking
   # Maximum number of iterations
   NUMBER_ITERATIONS = 3000
   # Distance between tracking point and cross section of profile
@@ -163,7 +280,7 @@ def test_wide_line_detector():
   input_fvr = bob.io.base.load(input_fvr_filename)
 
   # Apply Python implementation
-  from bob.bio.vein.extractor.WideLineDetector import WideLineDetector
+  from ..extractor.WideLineDetector import WideLineDetector
   WL = WideLineDetector(5, 1, 41, False)
   output_img = WL((input_img, input_fvr))
 
@@ -180,14 +297,20 @@ def test_wide_line_detector_HE():
   # Read in input image
   input_img_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
   input_img = bob.io.base.load(input_img_filename)
-  
+
   # Preprocess the data and apply Histogram Equalization postprocessing (same parameters as in wide_line_detector.py configuration file + postprocessing)
-  from bob.bio.vein.preprocessor.FingerCrop import FingerCrop
-  FC = FingerCrop(postprocessing = 'HE')
-  preproc_data = FC(input_img)
+  from ..preprocessor import Preprocessor, NoCrop, LeeMask, \
+      HuangNormalization, HistogramEqualization
+  processor = Preprocessor(
+      NoCrop(),
+      LeeMask(filter_height=40, filter_width=4),
+      HuangNormalization(padding_width=0, padding_constant=0),
+      HistogramEqualization(),
+      )
+  preproc_data = processor(input_img)
 
   # Extract features from preprocessed and histogram equalized data using WLD extractor (same parameters as in wide_line_detector.py configuration file)
-  from bob.bio.vein.extractor.WideLineDetector import WideLineDetector
+  from ..extractor.WideLineDetector import WideLineDetector
   # Radius of the circular neighbourhood region
   RADIUS_NEIGHBOURHOOD_REGION = 5
   NEIGHBOURHOOD_THRESHOLD = 1
@@ -210,7 +333,7 @@ def test_miura_match():
   probe_gen_vein = bob.io.base.load(probe_gen_filename)
   probe_imp_vein = bob.io.base.load(probe_imp_filename)
 
-  from bob.bio.vein.algorithm.MiuraMatch import MiuraMatch
+  from ..algorithm.MiuraMatch import MiuraMatch
   MM = MiuraMatch(ch=18, cw=28)
   score_gen = MM.score(template_vein, probe_gen_vein)
 
@@ -220,6 +343,25 @@ def test_miura_match():
   assert numpy.isclose(score_imp, 0.172906739278421)
 
 
+def test_correlate():
+
+  # Correlate algorithm - there is no Matlab reference for this method
+
+  template_filename = F(('algorithms', '0001_2_1_120509-135338.mat'))
+  probe_gen_filename = F(('algorithms', '0001_2_2_120509-135558.mat'))
+  probe_imp_filename = F(('algorithms', '0003_2_1_120509-141255.mat'))
+
+  template_vein = bob.io.base.load(template_filename)
+  probe_gen_vein = bob.io.base.load(probe_gen_filename)
+  probe_imp_vein = bob.io.base.load(probe_imp_filename)
+
+  from ..algorithm.Correlate import Correlate
+  C = Correlate()
+  score_gen = C.score(template_vein, probe_gen_vein)
+
+  # we don't assert on the score here - no reference values are available
+
+
 def test_assert_points():
 
   # Tests that point assertion works as expected
diff --git a/bob/bio/vein/tests/test_databases.py b/bob/bio/vein/tests/test_databases.py
index 08c7c5563b6635d677175f5f6b85dc6f311513bf..7b60e953b64ab74273ca4c1e076b9fc771cbad40 100644
--- a/bob/bio/vein/tests/test_databases.py
+++ b/bob/bio/vein/tests/test_databases.py
@@ -31,3 +31,14 @@ def test_verafinger():
     except IOError as e:
         raise SkipTest(
             "The database could not queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
+
+
+@db_available('fv3d')
+def test_fv3d():
+    module = bob.bio.base.load_resource('fv3d', 'config',
+        preferred_package='bob.bio.vein')
+    try:
+        check_database(module.database, protocol='central', groups=('dev',))
+    except IOError as e:
+        raise SkipTest(
+            "The database could not queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
diff --git a/bob/bio/vein/utils.py b/bob/bio/vein/utils.py
deleted file mode 100644
index f4fdb3bddba4c5daf1f019f147d9e36008d15f42..0000000000000000000000000000000000000000
--- a/bob/bio/vein/utils.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-import numpy
-import scipy.signal
-import bob.ip.base
-import bob.sp
-import bob.core
-
-
-def imfilter(a, b):
-  """Applies a 2D filtering between images
-
-  This implementation was created to work similarly like the Matlab one.
-
-
-  Parameters:
-
-    a (numpy.ndarray): A 2-dimensional :py:class:`numpy.ndarray` which
-      represents the image to be filtered. The dtype of the array is supposed
-      to be 64-floats. You can also pass an 8-bit unsigned integer array,
-      loaded from a file (for example). In this case it will be scaled as
-      with :py:func:`bob.core.convert` and the range reset to ``[0.0, 1.0]``.
-
-    b (numpy.ndarray): A 64-bit float 2-dimensional :py:class:`numpy.ndarray`
-      which represents the filter to be applied to the image. The input filter
-      has to be rotated by 180 degrees as we use
-      :py:func:`scipy.signal.convolve2d` to apply it. You can rotate your
-      filter ``b`` with the help of :py:func:`bob.ip.base.rotate`.
-
-  """
-
-  if a.dtype == numpy.uint8:
-      a = bob.core.convert(a, numpy.float64, (0,1))
-
-  shape = (a.shape[0] + b.shape[0] - 1, a.shape[1] + b.shape[1] - 1)
-  a_ext = numpy.ndarray(shape=shape, dtype=numpy.float64)
-  bob.sp.extrapolate_nearest(a, a_ext)
-
-  return scipy.signal.convolve2d(a_ext, b, 'valid')
diff --git a/develop.cfg b/develop.cfg
index 78534f116ead196a688d8695ba3eb96a310c296d..34c27f28f2916dcb9396fb0c5bba32e538ed5116 100644
--- a/develop.cfg
+++ b/develop.cfg
@@ -27,14 +27,16 @@ develop = src/bob.extension
           src/bob.db.verafinger
           src/bob.db.utfvp
           src/bob.db.putvein
+          src/bob.db.fv3d
           src/bob.learn.activation
           src/bob.learn.linear
+          src/bob.learn.mlp
           src/bob.learn.em
           src/bob.bio.base
           .
 
 ; options for bob.buildout
-debug = true
+debug = false
 verbose = true
 newest = false
 
@@ -53,8 +55,10 @@ bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base
 bob.db.verafinger = git git@gitlab.idiap.ch:bob/bob.db.verafinger
 bob.db.utfvp = git git@gitlab.idiap.ch:bob/bob.db.utfvp
 bob.db.putvein = git git@gitlab.idiap.ch:bob/bob.db.putvein
+bob.db.fv3d = git git@gitlab.idiap.ch:bob/bob.db.fv3d
 bob.learn.activation = git git@gitlab.idiap.ch:bob/bob.learn.activation
 bob.learn.linear = git git@gitlab.idiap.ch:bob/bob.learn.linear
+bob.learn.mlp = git git@gitlab.idiap.ch:bob/bob.learn.mlp
 bob.learn.em = git git@gitlab.idiap.ch:bob/bob.learn.em
 bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base
 
diff --git a/doc/api.rst b/doc/api.rst
index 80510bde729cb75e3bd8771f9cb6fc88a6d8b10d..9ea9c7804991d2c91c29b5ef78e7edb175ff15ca 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -15,6 +15,11 @@ which can be used for vein experiments.
 Database Interfaces
 -------------------
 
+Common Utilities
+================
+
+.. automodule:: bob.bio.vein.database
+
 Vera Fingervein Database
 ========================
 
diff --git a/doc/baselines.rst b/doc/baselines.rst
index a0bc3ddb6703c687eff61280ce6350e70faa8349..ca135d709766ab65632d9a1f173911b4aff04064 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -53,7 +53,7 @@ is available on the section :ref:`bob.bio.vein.resources`.
    instructions described in this guide. You **need first** to procure yourself
    the raw data files that correspond to *each* database used here in order to
    correctly run experiments with those data. Biometric data is considered
-   private date and, under EU regulations, cannot be distributed without a
+   private data and, under EU regulations, cannot be distributed without a
    consent or license. You may consult our
    :ref:`bob.bio.vein.resources.databases` resources section for checking
    currently supported databases and accessing download links for the raw data
@@ -74,6 +74,7 @@ is available on the section :ref:`bob.bio.vein.resources`.
 
       [YOUR_VERAFINGER_DIRECTORY] = /complete/path/to/verafinger
       [YOUR_UTFVP_DIRECTORY] = /complete/path/to/utfvp
+      [YOUR_FV3D_DIRECTORY] = /complete/path/to/fv3d
 
    Notice it is rather important to use the strings as described above,
    otherwise ``bob.bio.base`` will not be able to correctly load your images.
@@ -115,6 +116,18 @@ protocol, do the following:
 
       $ verify.py verafinger rlt parallel -vv
 
+   To run on the Idiap SGE grid using our stock
+   io-big-48-slots-4G-memory-enabled configuration (see
+   :py:mod:`bob.bio.vein.configurations.gridio4g48`), use:
+
+   .. code-block:: sh
+
+      $ verify.py verafinger rlt grid -vv
+
+   You may also, optionally, use the configuration resource ``gridio4g48``,
+   which is just an alias of ``grid`` in this package.
+
+
 
 This command line selects and runs the following implementations for the
 toolchain:
@@ -214,34 +227,28 @@ This package may generate results for other combinations of protocols and
 databases. Here is a summary table for some variants (results expressed
 correspond to the equal-error rate on the development set, in percentage):
 
-======================== ================= ====== ====== ====== ====== ======
-               Approach                     Vera Finger             UTFVP
------------------------------------------- -------------------- -------------
-   Feature Extractor      Post Processing   Full     B    Nom   1vsall  nom
-======================== ================= ====== ====== ====== ====== ======
-Repeated Line Tracking        None          23.9   24.1   24.9   1.7    1.4
-Repeated Line Tracking     Histogram Eq.    26.2   23.6   24.9   2.1    0.9
-Maximum Curvature             None           3.2    3.2    3.1   0.4    0.
-Maximum Curvature          Histogram Eq.     3.0    2.7    2.7   0.4    0.
-Wide Line Detector            None          10.2   10.2   10.5   2.3    1.7
-Wide Line Detector         Histogram Eq.     8.0    9.7    7.3   1.7    0.9
-======================== ================= ====== ====== ====== ====== ======
+======================== ====== ====== ====== ====== ======
+       Toolchain              Vera Finger         UTFVP
+------------------------ -------------------- -------------
+   Feature Extractor      Full     B    Nom   1vsall  nom
+======================== ====== ====== ====== ====== ======
+Repeated Line Tracking    23.9   24.1   24.9   1.7    1.4
+Wide Line Detector        10.2   10.2   10.5   2.3    1.7
+Maximum Curvature          3.2    3.2    3.1   0.4    0.
+======================== ====== ====== ====== ====== ======
 
 In a machine with 48 cores, running these baselines took the following time
 (hh:mm):
 
-======================== ================= ====== ====== ====== ====== ======
-               Approach                     Vera Finger             UTFVP
------------------------------------------- -------------------- -------------
-   Feature Extractor      Post Processing   Full     B    Nom   1vsall  nom
-======================== ================= ====== ====== ====== ====== ======
-Repeated Line Tracking        None          01:16  00:23  00:23  12:44  00:35
-Repeated Line Tracking     Histogram Eq.    00:50  00:23  00:23  13:00  00:35
-Maximum Curvature             None          03:28  00:54  00:59  58:34  01:48
-Maximum Curvature          Histogram Eq.    02:45  00:54  00:59  49:03  01:49
-Wide Line Detector            None          00:07  00:01  00:01  02:25  00:05
-Wide Line Detector         Histogram Eq.    00:04  00:01  00:01  02:04  00:06
-======================== ================= ====== ====== ====== ====== ======
+======================== ====== ====== ====== ====== ======
+       Toolchain              Vera Finger         UTFVP
+------------------------ -------------------- -------------
+   Feature Extractor      Full     B    Nom   1vsall  nom
+======================== ====== ====== ====== ====== ======
+Repeated Line Tracking    01:16  00:23  00:23  12:44  00:35
+Wide Line Detector        00:07  00:01  00:01  02:25  00:05
+Maximum Curvature         03:28  00:54  00:59  58:34  01:48
+======================== ====== ====== ====== ====== ======
 
 
 Modifying Baseline Experiments
@@ -313,6 +320,49 @@ This package contains other resources that can be used to evaluate different
 bits of the vein processing toolchain.
 
 
+Training the Watershed Finger region detector
+=============================================
+
+The correct detection of the finger boundaries is an important step of many
+finger vein recognition algorithms. It makes it possible to compensate for
+potential rotation and scaling issues one may find when comparing models and
+probes. In this package, we propose a novel finger boundary detector based on
+the `Watershed Morphological Algorithm
+<https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_. Watershedding
+works in three steps:
+
+1. Determine markers on the original image indicating the types of areas one
+   would like to detect (e.g. "finger" or "background")
+2. Determine a 2D (gray-scale) surface representing the original image in which
+   darker spots (representing valleys) are more likely to be filled by
+   surrounding markers. This is normally achieved by filtering the image with a
+   high-pass filter like Sobel or using an edge detector such as Canny.
+3. Run the watershed algorithm (see the generic sketch below)
+
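+These three steps can be illustrated with generic scikit-image tools. The
+sketch below is **not** this package's trained marker detector: the intensity
+thresholds used to seed the markers are arbitrary assumptions, for
+illustration purposes only:
+
+.. code-block:: python
+
+   import numpy
+   from skimage.filters import sobel
+   from skimage.segmentation import watershed
+   # note: older scikit-image versions expose skimage.morphology.watershed
+
+   def toy_finger_mask(image):
+       # step 1: markers from (arbitrary) conservative intensity thresholds
+       markers = numpy.zeros(image.shape, dtype='int32')
+       markers[image < 30] = 1    # almost surely background
+       markers[image > 150] = 2   # almost surely finger
+       # step 2: elevation surface on which edges become "ridges"
+       elevation = sobel(image.astype('float64'))
+       # step 3: flood the surface, starting from the markers
+       return watershed(elevation, markers) == 2  # boolean finger mask
+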
+In order to determine markers for step 1, we train a neural network which
+outputs the likelihood of a point being part of a finger, given its coordinates
+and values of surrounding pixels.
+
+When used to run an experiment,
+:py:class:`bob.bio.vein.preprocessor.WatershedMask` requires that you provide
+a *pre-trained* neural network model that presets the markers before
+watershedding takes place. In order to create one, you can run the program
+``markdet.py``:
+
+.. code-block:: sh
+
+   $ markdet.py --hidden=20 --samples=500 fv3d central dev
+
+You pass, as arguments to this application, the database, protocol and subset
+name you wish to use for training the network. The data is loaded observing a
+maximum total number of samples from the dataset (passed with ``--samples=N``),
+the network is trained, and the result is recorded into an HDF5 file (by
+default, the file is called ``model.hdf5``, but the name can be changed with
+the option ``--model=``). Once you have a model, you can use this mask by
+constructing a :py:class:`bob.bio.vein.preprocessor.WatershedMask` object and
+attaching it to the :py:class:`bob.bio.vein.preprocessor.Preprocessor` entry
+of your configuration, as in the sketch below.
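+
+The following is a minimal configuration sketch. The ``model`` keyword passed
+to :py:class:`bob.bio.vein.preprocessor.WatershedMask` is an illustrative
+assumption - consult the class documentation for the exact constructor
+arguments:
+
+.. code-block:: python
+
+   from bob.bio.vein.preprocessor import (Preprocessor, NoCrop,
+       WatershedMask, HuangNormalization, NoFilter)
+
+   preprocessor = Preprocessor(
+       NoCrop(),
+       WatershedMask(model='model.hdf5'),  # keyword name is an assumption
+       HuangNormalization(padding_width=0, padding_constant=0),
+       NoFilter(),
+       )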
+
+
 Region of Interest Goodness of Fit
 ==================================
 
@@ -345,23 +395,34 @@ mask
 can use the option ``-n 5`` to print the 5 worst cases according to each of the
 metrics.
 
-You can use the program ``view_mask.py`` to display the images after the
-preprocessing step using:
+
+Pipeline Display
+================
+
+You can use the program ``view_sample.py`` to display the images after
+full processing using:
 
 .. code-block:: sh
 
-   $ view_mask.py /path/to/verafinger/mc/preprocessed/098-F/098_R_1.hdf5 --save=example.png
-   $ # open example.png
+   $ ./bin/view_sample.py --save=output-dir verafinger /path/to/processed/directory 030-M/030_L_1
+   $ # open output-dir
 
-And you should be able to view an image like this (example taken from the Vera
-fingervein database, using the automatic annotator):
+And you should be able to view images like these (example taken from the Vera
+fingervein database, using the automatic annotator and Maximum Curvature
+feature extractor):
 
-.. figure:: img/vein-mask.*
+.. figure:: img/preprocessed.*
    :scale: 50%
 
    Example RoI overlayed on finger vein image of the Vera fingervein database,
-   as produced by the script ``view_mask.py``.
+   as produced by the script ``view_sample.py``.
+
+
+.. figure:: img/binarized.*
+   :scale: 50%
 
+   Example fingervein image from the Vera fingervein database, binarized using
+   Maximum Curvature after pre-processing.
 
 
 .. include:: links.rst
diff --git a/doc/img/binarized.png b/doc/img/binarized.png
new file mode 100644
index 0000000000000000000000000000000000000000..7935a42d1bb8bc0b9abe649c0f5851e66b07762d
Binary files /dev/null and b/doc/img/binarized.png differ
diff --git a/doc/img/preprocessed.png b/doc/img/preprocessed.png
new file mode 100644
index 0000000000000000000000000000000000000000..7803a73b46667f959d6b6b242b4fa10b8c6370da
Binary files /dev/null and b/doc/img/preprocessed.png differ
diff --git a/doc/img/vein-mask.png b/doc/img/vein-mask.png
deleted file mode 100644
index 9a93206e3a09b02f76ade59e696af35a318bf9b5..0000000000000000000000000000000000000000
Binary files a/doc/img/vein-mask.png and /dev/null differ
diff --git a/doc/links.rst b/doc/links.rst
index 30721102cc8aaf4896ae8725614b2f244a45be26..0deb5c4989e717613218cd8fd7b3889645607bd6 100644
--- a/doc/links.rst
+++ b/doc/links.rst
@@ -20,3 +20,4 @@
 .. _mailing list: https://www.idiap.ch/software/bob/discuss
 .. _bob.bio.base: https://pypi.python.org/pypi/bob.bio.base
 .. _jaccard index: https://en.wikipedia.org/wiki/Jaccard_index
+.. _watershed: https://en.wikipedia.org/wiki/Watershed_(image_processing)
diff --git a/doc/resources.rst b/doc/resources.rst
index 297294373f0f88df5146d9bdd510bd73d76411b0..8e4730082a4ed08fe8dea9a1c1e90d54d1c393dc 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -100,3 +100,10 @@ Parallel Running
 .. automodule:: bob.bio.vein.configurations.parallel
    :members:
 
+
+Using SGE at Idiap
+==================
+
+.. automodule:: bob.bio.vein.configurations.gridio4g48
+   :members:
+
diff --git a/matlab/README.md b/matlab/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2135fe6927006832f3ce00b91d7c7d69d703067
--- /dev/null
+++ b/matlab/README.md
@@ -0,0 +1,69 @@
+# Maximum Curvature and Miura Matching (Cross-correlation)
+
+This directory contains scripts to generate references from the Matlab library
+for Repeated Line Tracking (RLT) and Maximum Curvature extraction by B. Ton.
+The original source code can be downloaded from: http://ch.mathworks.com/matlabcentral/fileexchange/35716-miura-et-al-vein-extraction-methods
+
+
+## Usage Instructions
+
+Make sure you have the `matlab` binary available on your path. At Idiap, this
+can be done by executing:
+
+```sh
+$ SETSHELL matlab
+$ which matlab
+/idiap/resource/software/matlab/stable/bin/matlab
+```
+
+Call `run.sh`, which will run the maximum curvature extraction on the provided
+image and mask, and output various control variables to several HDF5 files:
+
+```sh
+$ run.sh ../bob/bio/vein/tests/extractors/image.hdf5 ../bob/bio/vein/tests/extractors/mask.hdf5 mc
+...
+```
+
+Or, more quickly and without polluting your environment:
+
+```sh
+$ setshell.py matlab ./run.sh ../bob/bio/vein/tests/extractors/image.hdf5 ../bob/bio/vein/tests/extractors/mask.hdf5 mc
+...
+```
+
+The program `run.sh` calls the function `lib/miura_max_curvature.m`, which
+processes the inputs and dumps results to the output files.
+
+After running, this program produces 6 files (see the loading sketch after
+this list):
+
+* `*_kappa_matlab.hdf5`: contains the kappa variable
+* `*_v_matlab.hdf5`: contains the V variable
+* `*_vt_matlab.hdf5`: contains the accumulated Vt variable
+* `*_cd_matlab.hdf5`: contains the Cd variable
+* `*_g_matlab.hdf5`: contains the accumulated Cd variable called G (paper)
+* `*_bin_matlab.hdf5`: the final binarised result (G thresholded)
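+
+Each of these files stores a single array. As a minimal sketch (assuming the
+references were generated with the `mc` stem in the current directory), such a
+file can be loaded in Python as follows; the transposition is needed because
+Matlab stores arrays in column-major order:
+
+```python
+import bob.io.base
+
+# load the accumulated vein probabilities and transpose to row-major order
+vt = bob.io.base.load('mc_vt_matlab.hdf5').T
+print(vt.shape)
+```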
+
+Once you have generated the references, you can compare the Maximum Curvature
+algorithm implemented in Python against the one in Matlab with the following
+command:
+
+```sh
+$ ../bin/python compare.py
+Comparing kappa[0]: 2.51624726501e-14
+Comparing kappa[1]: 2.10662186414e-13
+Comparing kappa[2]: 1.09901515494e-13
+Comparing kappa[3]: 1.0902340027e-13
+Comparing V[0]: 1.04325752096e-14
+Comparing V[1]: 8.5519523202e-14
+Comparing V[2]: 9.20009110336e-05
+Comparing V[3]: 4.02339072347e-14
+Comparing Vt: 9.20009111675e-05
+Comparing Cd[0]: 1.08636347658e-13
+Comparing Cd[1]: 2.8698038577e-14
+Comparing Cd[2]: 3.40774680626e-14
+Comparing Cd[3]: 3.2892922132e-14
+Comparing G: 1.57966982699e-13
+```
+
+The program displays the sum of absolute differences between each Matlab
+reference and the corresponding Python output.
diff --git a/matlab/compare.py b/matlab/compare.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c1de228ade9a93c22cc40a74a02e134f6292a0
--- /dev/null
+++ b/matlab/compare.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+# Run this script to output a debugging comparison of the Python implementation
+# against matlab references you just extracted
+
+import numpy
+import bob.io.base
+import bob.io.matlab
+from bob.bio.vein.extractor import MaximumCurvature
+
+# Load inputs
+image  = bob.io.base.load('../bob/bio/vein/tests/extractors/image.hdf5')
+image  = image.T.astype('float64')/255.
+region = bob.io.base.load('../bob/bio/vein/tests/extractors/mask.hdf5')
+region = region.T.astype('bool')
+
+# Loads matlab references
+kappa_matlab = bob.io.base.load('mc_kappa_matlab.hdf5')
+kappa_matlab = kappa_matlab.transpose(2,1,0)
+V_matlab = bob.io.base.load('mc_v_matlab.hdf5')
+V_matlab = V_matlab.transpose(2,1,0)
+Vt_matlab = bob.io.base.load('mc_vt_matlab.hdf5')
+Vt_matlab = Vt_matlab.T
+Cd_matlab = bob.io.base.load('mc_cd_matlab.hdf5')
+Cd_matlab = Cd_matlab.transpose(2,1,0)
+G_matlab = bob.io.base.load('mc_g_matlab.hdf5')
+G_matlab = G_matlab.T
+
+# Apply Python implementation (MaximumCurvature is already imported above)
+MC = MaximumCurvature(3) # sigma=3, the value used to create the references
+
+kappa = MC.detect_valleys(image, region) #OK
+Vt = MC.eval_vein_probabilities(kappa) #OK
+Cd = MC.connect_centres(Vt) #OK
+G = numpy.amax(Cd, axis=2) #OK
+
+# Compare outputs
+for k in range(4):
+  print('Comparing kappa[%d]: %s' % (k,
+    numpy.abs(kappa[...,k]-kappa_matlab[...,k]).sum()))
+
+print('Comparing Vt: %s' % numpy.abs(Vt-Vt_matlab).sum())
+
+for k in range(4):
+  print('Comparing Cd[%d]: %s' % (k,
+    numpy.abs(Cd[2:-3,2:-3,k]-Cd_matlab[2:-3,2:-3,k]).sum()))
+
+print('Comparing G: %s' % numpy.abs(G[2:-3,2:-3]-G_matlab[2:-3,2:-3]).sum())
diff --git a/matlab/lib/license.txt b/matlab/lib/license.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0225a2b84323c48b07c435b3fda6abc2b533eba4
--- /dev/null
+++ b/matlab/lib/license.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2012, Bram Ton
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the distribution
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/matlab/lib/miura_match.m b/matlab/lib/miura_match.m
new file mode 100644
index 0000000000000000000000000000000000000000..ccc040659f30e576eff0a3d7a8e8f19558ce9be1
--- /dev/null
+++ b/matlab/lib/miura_match.m
@@ -0,0 +1,52 @@
+function score = miura_match(I, R, cw, ch)
+% This is the matching procedure described by Miura et al. in their paper.
+% A small difference is that this matching function calculates the match 
+% ratio instead of the mismatch ratio.
+
+% Parameters:
+%  I  - Input image
+%  R  - Registered template image
+%  cw - Maximum search displacement in x-direction
+%  ch - Maximum search displacement in y-direction
+
+% Returns:
+%  score - Value between 0 and 0.5, larger value is better match
+
+% Reference:
+% Feature extraction of finger vein patterns based on iterative line 
+%    tracking and its application to personal identification
+% N. Miura, A. Nagasaka, and T. Miyatake
+% Syst. Comput. Japan 35 (7 June 2004), pp. 61--71
+% doi: 10.1002/scj.v35:7
+
+% Author:  Bram Ton <b.t.ton@alumnus.utwente.nl>
+% Date:    20th December 2011
+% License: Simplified BSD License
+
+[h w] = size(R); % Get height and width of registered data
+
+% Determine value of match, just cross-correlation, see also xcorr2
+Nm = conv2(I, rot90(R(ch+1:h-ch, cw+1:w-cw),2), 'valid');
+
+% Maximum value of match
+[Nmm,mi] = max(Nm(:)); % (what about multiple maximum values ?)
+[t0,s0] = ind2sub(size(Nm),mi);
+
+% Normalize
+score = Nmm/(sum(sum(R(ch+1:h-ch, cw+1:w-cw))) + sum(sum(I(t0:t0+h-2*ch-1, s0:s0+w-2*cw-1))));
+
+% %% Bram Test
+% Ipad = zeros(h+2*ch,w+2*cw);
+% Ipad(ch+1:ch+h,cw+1:cw+w) = I;
+% 
+% Nm = conv2(Ipad, rot90(R,2), 'valid');
+% 
+% % Maximum value of match
+% [Nmm,mi] = max(Nm(:)); % (what about multiple maximum values ?)
+% [t0,s0] = ind2sub(size(Nm),mi);
+% 
+% % Normalize
+% score = Nmm/(sum(sum(R(ch+1:h-ch, cw+1:w-cw))) + sum(sum(Ipad(t0:t0+h-2*ch-1, s0:s0+w-2*cw-1))));
+% %score = max(max(normxcorr2(R(ch+1:h-ch, cw+1:w-cw),I)));
+
+
diff --git a/matlab/lib/miura_max_curvature.m b/matlab/lib/miura_max_curvature.m
new file mode 100644
index 0000000000000000000000000000000000000000..ee83ee23c59eeb1f54cdae2c0b026ab7a52b6ff3
--- /dev/null
+++ b/matlab/lib/miura_max_curvature.m
@@ -0,0 +1,311 @@
+function veins = miura_max_curvature(img, fvr, sigma, stem)
+% Maximum curvature method
+
+% Parameters:
+%  img    - Input vascular image
+%  fvr    - Finger vein region
+%  sigma  - Sigma used for determining derivatives
+%  stem   - (optional) name of the stem where to save partial results
+
+% Returns:
+%  veins - Vein image
+
+% Reference:
+% Extraction of finger-vein patterns using maximum curvature points in
+%   image profiles
+% N. Miura, A. Nagasaka, and T. Miyatake
+% IAPR conference on machine vision applications 9 (2005), pp. 347--350
+
+% Author:  Bram Ton <b.t.ton@alumnus.utwente.nl>
+% Date:    20th December 2011
+% License: Simplified BSD License
+
+% Changelog:
+% 2012/01/10 - Speed enhancement by rewriting diag() functions
+%              with linear indices.
+
+% Construct filter kernels
+winsize = ceil(4*sigma);
+[X,Y] = meshgrid(-winsize:winsize, -winsize:winsize);
+
+h = (1/(2*pi*sigma^2)).*exp(-(X.^2 + Y.^2)/(2*sigma^2));
+hx = (-X/(sigma^2)).*h;
+hxx = ((X.^2 - sigma^2)/(sigma^4)).*h;
+hy = hx';
+hyy = hxx';
+hxy = ((X.*Y)/(sigma^4)).*h;
+
+% Do the actual filtering
+fx  = imfilter(img, hx,  'replicate', 'conv');
+fxx = imfilter(img, hxx, 'replicate', 'conv');
+fy  = imfilter(img, hy,  'replicate', 'conv');
+fyy = imfilter(img, hyy, 'replicate', 'conv');
+fxy = imfilter(img, hxy, 'replicate', 'conv');
+f1  = 0.5*sqrt(2)*(fx + fy); % \
+f2  = 0.5*sqrt(2)*(fx - fy); % /
+f11 = 0.5*fxx + fxy + 0.5*fyy; % \\
+f22 = 0.5*fxx - fxy + 0.5*fyy; % //
+
+[img_h, img_w] = size(img); % Image height and width
+
+%% Calculate curvatures
+k = zeros(img_h, img_w, 4);
+k(:,:,1) = (fxx./((1 + fx.^2).^(3/2))).*fvr; % hor
+k(:,:,2) = (fyy./((1 + fy.^2).^(3/2))).*fvr; % ver
+k(:,:,3) = (f11./((1 + f1.^2).^(3/2))).*fvr; % \
+k(:,:,4) = (f22./((1 + f2.^2).^(3/2))).*fvr; % /
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_kappa_matlab.hdf5'), '/kappa', k);
+end
+
+%% Scores
+V = zeros(img_h, img_w, 4);
+Vt = zeros(img_h, img_w);
+Wr = 0;
+
+% Horizontal direction
+bla = k(:,:,1) > 0;
+for y=1:img_h
+    for x=1:img_w
+        if(bla(y,x))
+            Wr = Wr + 1;
+        end
+
+        if ( Wr > 0 && (x == img_w || ~bla(y,x)) )
+            if (x == img_w)
+                % Reached edge of image
+                pos_end = x;
+            else
+                pos_end = x - 1;
+            end
+
+            pos_start = pos_end - Wr + 1; % Start pos of concave
+            [~, I] = max(k(y, pos_start:pos_end,1));
+            pos_max = pos_start + I - 1;
+            Scr = k(y,pos_max,1)*Wr;
+            V(y,pos_max,1) = V(y,pos_max,1) + Scr;
+            Vt(y,pos_max) = Vt(y,pos_max) + Scr;
+            Wr = 0;
+        end
+    end
+end
+
+% Vertical direction
+bla = k(:,:,2) > 0;
+for x=1:img_w
+    for y=1:img_h
+        if(bla(y,x))
+            Wr = Wr + 1;
+        end
+
+        if ( Wr > 0 && (y == img_h || ~bla(y,x)) )
+            if (x == img_h) % NOTE: likely a bug (should test y); kept as-is to match the generated references
+                % Reached edge of image
+                pos_end = y;
+            else
+                pos_end = y - 1;
+            end
+
+            pos_start = pos_end - Wr + 1; % Start pos of concave
+            [~, I] = max(k(pos_start:pos_end,x,2));
+            pos_max = pos_start + I - 1;
+            Scr = k(pos_max,x,2)*Wr;
+            V(pos_max,x,2) = V(pos_max,x,2) + Scr;
+            Vt(pos_max,x) = Vt(pos_max,x) + Scr;
+            Wr = 0;
+        end
+    end
+end
+
+% Direction: \
+bla = k(:,:,3) > 0;
+for start=1:(img_w+img_h-1)
+    % Initial values
+    if (start <= img_w)
+        x = start;
+        y = 1;
+    else
+        x = 1;
+        y = start - img_w + 1;
+    end
+    done = false;
+
+    while ~done
+        if(bla(y,x))
+            Wr = Wr + 1;
+        end
+
+        if ( Wr > 0 && (y == img_h || x == img_w || ~bla(y,x)) )
+            if (y == img_h || x == img_w)
+                % Reached edge of image
+                pos_x_end = x;
+                pos_y_end = y;
+            else
+                pos_x_end = x - 1;
+                pos_y_end = y - 1;
+            end
+            pos_x_start = pos_x_end - Wr + 1;
+            pos_y_start = pos_y_end - Wr + 1;
+
+            %d = diag(k(pos_y_start:pos_y_end, pos_x_start:pos_x_end, 3));
+            % More efficient implementation than diag(..)
+            d = k(((pos_x_start-1)*img_h + pos_y_start + 2*img_w*img_h):(img_h + 1):((pos_x_end-1)*img_h + pos_y_end + 2*img_w*img_h));
+            [~, I] = max(d);
+            pos_x_max = pos_x_start + I - 1;
+            pos_y_max = pos_y_start + I - 1;
+            Scr = k(pos_y_max,pos_x_max,3)*Wr;
+            V(pos_y_max,pos_x_max,3) = V(pos_y_max,pos_x_max,3) + Scr;
+            Vt(pos_y_max,pos_x_max) = Vt(pos_y_max,pos_x_max) + Scr;
+            Wr = 0;
+        end
+
+        if((x == img_w) || (y == img_h))
+            done = true;
+        else
+            x = x + 1;
+            y = y + 1;
+        end
+    end
+end
+
+% Direction: /
+bla = k(:,:,4) > 0;
+for start=1:(img_w+img_h-1)
+    % Initial values
+    if (start <= img_w)
+        x = start;
+        y = img_h;
+    else
+        x = 1;
+        y = img_w+img_h-start;
+    end
+    done = false;
+
+    while ~done
+        if(bla(y,x))
+            Wr = Wr + 1;
+        end
+
+        if ( Wr > 0 && (y == 1 || x == img_w || ~bla(y,x)) )
+            if (y == 1 || x == img_w)
+                % Reached edge of image
+                pos_x_end = x;
+                pos_y_end = y;
+            else
+                pos_x_end = x - 1;
+                pos_y_end = y + 1;
+            end
+            pos_x_start = pos_x_end - Wr + 1;
+            pos_y_start = pos_y_end + Wr - 1;
+
+            %d = diag(flipud(k(pos_y_end:pos_y_start, pos_x_start:pos_x_end, 4)));
+            % More efficient implementation than diag(flipud(..))
+            d = k(((pos_x_start-1)*img_h + pos_y_start + 3*img_w*img_h):(img_h - 1):((pos_x_end-1)*img_h + pos_y_end + 3*img_w*img_h));
+            [~, I] = max(d);
+            pos_x_max = pos_x_start + I - 1;
+            pos_y_max = pos_y_start - I + 1;
+            Scr = k(pos_y_max,pos_x_max,4)*Wr;
+            V(pos_y_max,pos_x_max,4) = V(pos_y_max,pos_x_max,4) + Scr;
+            Vt(pos_y_max,pos_x_max) = Vt(pos_y_max,pos_x_max) + Scr;
+            Wr = 0;
+        end
+
+        if((x == img_w) || (y == 1))
+            done = true;
+        else
+            x = x + 1;
+            y = y - 1;
+        end
+    end
+end
+
+
+%Vt = V(:,:,1) + V(:,:,2) + V(:,:,3) + V(:,:,4);
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_v_matlab.hdf5'), '/V', V);
+end
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_vt_matlab.hdf5'), '/Vt', Vt);
+end
+
+%% Connection of vein centres
+Cd = zeros(img_h, img_w, 4);
+for x=3:img_w-3
+    for y=3:img_h-3
+        Cd(y,x,1) = min(max(Vt(y,x+1),  Vt(y,x+2))  ,...
+            max(Vt(y,x-1),  Vt(y,x-2)));   % Hor
+        Cd(y,x,2) = min(max(Vt(y+1,x),  Vt(y+2,x))  ,...
+            max(Vt(y-1,x),  Vt(y-2,x)));   % Vert
+        Cd(y,x,3) = min(max(Vt(y-1,x-1),Vt(y-2,x-2)),...
+            max(Vt(y+1,x+1),Vt(y+2,x+2))); % \
+        Cd(y,x,4) = min(max(Vt(y+1,x-1),Vt(y+2,x-2)),...
+            max(Vt(y-1,x+1),Vt(y-2,x+2))); % /
+    end
+end
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_cd_matlab.hdf5'), '/Cd', Cd);
+end
+
+veins = max(Cd,[],3);
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_g_matlab.hdf5'), '/G', veins);
+end
+
+md = median(veins(veins>0));
+veins_bin = veins > md;
+
+if nargin >= 4
+  hdf5write(strcat(stem, '_bin_matlab.hdf5'), '/binarised', uint8(veins_bin));
+end
+
+% %% Plot results
+% figure('Name', 'Second order derivatives');
+% subplot(2,2,1);
+%   imshow(fxx, []);
+%   title('Horizontal');
+% subplot(2,2,2);
+%   imshow(fyy, []);
+%   title('Vertical');
+% subplot(2,2,3);
+%   imshow(f11, []);
+%   title('\');
+% subplot(2,2,4);
+%   imshow(f22, []);
+%   title('/');
+%
+% figure('Name', 'Curvatures');
+% subplot(2,2,1);
+%   %imshow(log(k(:,:,1) + 1), []);
+%   imshow(k(:,:,1) > 0, []);
+%   title('Horizontal');
+% subplot(2,2,2);
+%   %imshow(log(k(:,:,2) + 1), []);
+%   imshow(k(:,:,2) > 0, []);
+%   title('Vertical');
+% subplot(2,2,3);
+%   %imshow(log(k(:,:,3) + 1), []);
+%   imshow(k(:,:,3) > 0, []);
+%   title('\');
+% subplot(2,2,4);
+%   %imshow(log(k(:,:,4) + 1), []);
+%   imshow(k(:,:,4) > 0, []);
+%   title('/');
+%
+% figure('Name', 'Scores');
+% subplot(2,2,1);
+%   imshow(V(:,:,1));
+%   title('Horizontal');
+% subplot(2,2,2);
+%   imshow(V(:,:,2));
+%   title('Vertical');
+% subplot(2,2,3);
+%   imshow(V(:,:,3));
+%   title('\');
+% subplot(2,2,4);
+%   imshow(V(:,:,3));
+%   title('/');
diff --git a/matlab/lib/miura_repeated_line_tracking.m b/matlab/lib/miura_repeated_line_tracking.m
new file mode 100644
index 0000000000000000000000000000000000000000..08b3eb55786c9686ef5e4a339fc2c6b16baf12a7
--- /dev/null
+++ b/matlab/lib/miura_repeated_line_tracking.m
@@ -0,0 +1,186 @@
+function veins = miura_repeated_line_tracking(img, fvr, iterations, r, W)
+% Repeated line tracking
+
+% Parameters:
+%  img        - Input vascular image
+%  fvr        - Binary image of the finger region
+%  iterations - Maximum number of iterations
+%  r - Distance between tracking point and cross section of the profile
+%  W - Width of profile
+
+% Returns:
+%  veins - Vein image
+
+% Reference:
+% Feature extraction of finger vein patterns based on repeated line 
+%    tracking and its application to personal identification
+% N. Miura, A. Nagasaka, and T. Miyatake
+% Machine Vision and Applications, Volume 15, Number 4 (2004), pp. 194--203
+% doi: 10.1007/s00138-004-0149-2
+
+% Author:  Bram Ton <b.t.ton@alumnus.utwente.nl>
+% Date:    20th December 2011
+% License: Simplified BSD License
+
+p_lr = 0.5;  % Probability of going left or right
+p_ud = 0.25; % Probability of going up or down
+% writerObj = VideoWriter('peaks.avi');
+% open(writerObj);
+Tr = zeros(size(img));     % Locus space
+bla = [-1,-1; -1,0; -1,1; 0,-1; 0,0; 0,1; 1,-1; 1,0; 1,1];
+
+% Check if W is even
+if (mod(W,2) == 0)
+    disp('Error: W must be odd')
+end
+ro = round(r*sqrt(2)/2);   % r for oblique directions
+hW = (W-1)/2;              % half width for horz. and vert. directions
+hWo = round(hW*sqrt(2)/2); % half width for oblique directions
+
+% Omit unreachable borders
+fvr(1:r+hW,:) = 0;
+fvr(end-(r+hW-1):end,:) = 0;
+fvr(:,1:r+hW) = 0;
+fvr(:,end-(r+hW-1):end) = 0;
+
+%% Uniformly distributed starting points
+indices = find(fvr > 0);
+a = randperm(length(indices));
+a = a(1:iterations); % Limit to number of iterations
+[ys, xs] = ind2sub(size(img), indices(a));
+%ys = [200;20];% LET OP
+%xs=  [200;200];% LET OP
+
+%% Iterate through all starting points
+for it = 1:size(ys,1)
+    xc = xs(it); % Current tracking point, x
+    yc = ys(it); % Current tracking point, y
+    
+    % Determine the moving-direction attributes
+    % Going left or right ?
+    if rand() >= 0.5
+        Dlr = -1;  % Going left
+    else
+        Dlr = 1; % Going right 
+    end
+
+    % Going up or down ?
+    if rand() >= 0.5
+        Dud = -1;  % Going up
+    else
+        Dud = 1; % Going down 
+    end
+    
+    % Initialize locus-positition table Tc
+    Tc = false(size(img));
+
+    %Dlr = -1; Dud=-1;% LET OP
+    Vl = 1;
+    while Vl > 0;
+        %% Determine the moving candidate point set Nc
+        Nr = false(3);
+        Rnd = rand();
+        %Rnd = 0.8;% LET OP
+        if Rnd < p_lr
+            % Going left or right
+            Nr(:,2+Dlr) = true;
+        elseif (Rnd >= p_lr) && (Rnd < p_lr + p_ud)  
+            % Going up or down
+            Nr(2+Dud,:) = true;
+        else
+            % Going any direction
+            Nr = true(3);
+            Nr(2,2) = false;
+        end
+
+        tmp = find( ~Tc(yc-1:yc+1,xc-1:xc+1) & Nr & fvr(yc-1:yc+1,xc-1:xc+1) );
+        Nc =[xc + bla(tmp,1), yc + bla(tmp,2)];
+        
+        if size(Nc,1)==0
+            Vl=-1;
+            continue
+        end
+
+        %% Detect dark line direction near current tracking point
+        Vdepths = zeros(size(Nc,1),1); % Valley depths
+        for i = 1:size(Nc,1)
+            % Horizontal or vertical 
+            if Nc(i,2) == yc
+                % Horizontal plane
+                yp = Nc(i,2);
+                if Nc(i,1) > xc
+                    % Right direction
+                    xp = Nc(i,1) + r;
+                else
+                    % Left direction
+                    xp = Nc(i,1) - r;
+                end
+
+                Vdepths(i) = img(yp + hW, xp) - ...
+                    2*img(yp,xp) + ...
+                    img(yp - hW, xp);
+            elseif Nc(i,1) == xc
+                % Vertical plane
+                xp = Nc(i,1);
+                if Nc(i,2) > yc
+                    % Down direction
+                    yp = Nc(i,2) + r;
+                else
+                    % Up direction
+                    yp = Nc(i,2) - r;
+                end
+
+                Vdepths(i) = img(yp, xp + hW) - ...
+                    2*img(yp,xp) + ...
+                    img(yp, xp - hW);
+            end
+            
+            % Oblique directions
+            if ((Nc(i,1) > xc) && (Nc(i,2) < yc)) || ((Nc(i,1) < xc) && (Nc(i,2) > yc))
+                % Diagonal, up /
+                if Nc(i,1) > xc && Nc(i,2) < yc
+                    % Top right
+                    xp = Nc(i,1) + ro;
+                    yp = Nc(i,2) - ro;
+                else
+                    % Bottom left
+                    xp = Nc(i,1) - ro;
+                    yp = Nc(i,2) + ro;
+                end
+
+                Vdepths(i) = img(yp - hWo, xp - hWo) - ...
+                    2*img(yp,xp) + ...
+                    img(yp + hWo, xp + hWo);
+            else
+                % Diagonal, down \
+                if Nc(i,1) < xc && Nc(i,2) < yc
+                    % Top left
+                    xp = Nc(i,1) - ro;
+                    yp = Nc(i,2) - ro;
+                else
+                    % Bottom right
+                    xp = Nc(i,1) + ro;
+                    yp = Nc(i,2) + ro;
+                end
+                
+                Vdepths(i) = img(yp + hWo, xp - hWo) - ...
+                    2*img(yp,xp) + ...
+                    img(yp - hWo, xp + hWo);
+            end
+        end %  End search of candidates
+
+        [~, index] = max(Vdepths); %  Determine best candidate
+
+        % Register tracking information
+        Tc(yc, xc) = true;
+
+        % Increase value of tracking space
+        Tr(yc, xc) = Tr(yc, xc) + 1;
+        %writeVideo(writerObj,Tr);
+        
+        % Move tracking point
+        xc = Nc(index, 1);
+        yc = Nc(index, 2); 
+    end
+end
+veins = Tr;
\ No newline at end of file
diff --git a/matlab/lib/miura_usage.m b/matlab/lib/miura_usage.m
new file mode 100644
index 0000000000000000000000000000000000000000..51c815c993dddf069ee587fa24529cceb7fd7f64
--- /dev/null
+++ b/matlab/lib/miura_usage.m
@@ -0,0 +1,66 @@
+% Howto use the miura_* scripts.
+
+img = im2double(imread('finger.png')); % Read the image
+img = imresize(img,0.5);               % Downscale image
+
+% Get the valid region, this is a binary mask which indicates the region of 
+% the finger. For quick testing it is possible to use something like:
+% fvr = ones(size(img));
+% The lee_region() function can be found here:
+% http://www.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation
+fvr = lee_region(img,4,40);    % Get finger region
+
+%% Extract veins using maximum curvature method
+sigma = 3; % Parameter
+v_max_curvature = miura_max_curvature(img,fvr,sigma);
+
+% Binarise the vein image
+md = median(v_max_curvature(v_max_curvature>0));
+v_max_curvature_bin = v_max_curvature > md; 
+
+%% Extract veins using repeated line tracking method
+max_iterations = 3000; r=1; W=17; % Parameters
+v_repeated_line = miura_repeated_line_tracking(img,fvr,max_iterations,r,W);
+
+% Binarise the vein image
+md = median(v_repeated_line(v_repeated_line>0));
+v_repeated_line_bin = v_repeated_line > md; 
+
+%% Match
+cw = 80; ch=30;
+% Note that the match score is between 0 and 0.5
+score = miura_match(double(v_repeated_line_bin), double(v_max_curvature_bin), cw, ch);
+fprintf('Match score: %6.4f %%\n', score);
+
+%% Visualise
+% Overlay the extracted veins on the original image
+overlay_max_curvature = zeros([size(img) 3]);
+overlay_max_curvature(:,:,1) = img;
+overlay_max_curvature(:,:,2) = img + 0.4*v_max_curvature_bin;
+overlay_max_curvature(:,:,3) = img;
+
+% Overlay the extracted veins on the original image
+overlay_repeated_line = zeros([size(img) 3]);
+overlay_repeated_line(:,:,1) = img;
+overlay_repeated_line(:,:,2) = img + 0.4*v_repeated_line_bin;
+overlay_repeated_line(:,:,3) = img;
+
+figure;
+subplot(3,2,1)
+  imshow(img,[])
+  title('Original captured image')
+subplot(3,2,2)
+  imshow(fvr)
+  title('Detected finger region')
+subplot(3,2,3)
+  imshow(v_max_curvature_bin)
+  title('Binarised veins extracted by maximum curvature method')
+subplot(3,2,4)
+  imshow(overlay_max_curvature)
+  title('Maximum curvature method')  
+subplot(3,2,5)
+  imshow(v_repeated_line_bin)
+  title('Binarised veins extracted by repeated line tracking method')
+subplot(3,2,6)
+  imshow(overlay_repeated_line)
+  title('Repeated line tracking method')
\ No newline at end of file
diff --git a/matlab/run.sh b/matlab/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b95ff2dd8e7cb6b8ce0d8d0883157d0e5d0842bd
--- /dev/null
+++ b/matlab/run.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+if [ $# -ne 3 ]; then
+  echo "usage: $0 input_image.hdf5 input_region.hdf5 output_stem"
+  exit 1
+fi
+
+_matlab=`which matlab`;
+
+if [ -z "${_matlab}" ]; then
+  echo "I cannot find a matlab binary to execute, please setup first"
+  exit 1
+fi
+
+# Does some environment manipulation to get paths right before we start
+_basedir="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
+_matlabpath=${_basedir}/lib
+unset _basedir;
+if [ -n "${MATLABPATH}" ]; then
+  _matlabpath=${_matlabpath}:${MATLABPATH}
+fi
+export MATLABPATH=${_matlabpath};
+unset _matlabpath;
+
+# Calls matlab with our inputs
+${_matlab} -nodisplay -nosplash -nodesktop -r "image = im2double(hdf5read('${1}','/array')); region = double(hdf5read('${2}','/array')); miura_max_curvature(image, region, 3, '${3}'); quit;"
diff --git a/requirements.txt b/requirements.txt
index 4e9fece6314c48e6f4361cffad564d363cccc8c8..e0618a96aca0634a6f2ec76bfc274bd0792a2d7b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,6 +2,10 @@ setuptools
 numpy
 scipy
 pillow
+schema
+docopt
+scikit-image
+matplotlib
 bob.extension
 bob.core
 bob.io.base
@@ -12,4 +16,7 @@ bob.bio.base
 bob.db.utfvp
 bob.db.verafinger
 bob.db.putvein
-scikit-image
+bob.db.fv3d
+bob.learn.linear
+bob.learn.activation
+bob.learn.mlp
diff --git a/setup.py b/setup.py
index 78a4761d545589f9b4db2ec974b8ac990587dff6..26e7228bb904122345a50f1fab9fa05b93a14739 100644
--- a/setup.py
+++ b/setup.py
@@ -35,6 +35,7 @@ setup(
         # databases
         'verafinger = bob.bio.vein.configurations.verafinger',
         'utfvp = bob.bio.vein.configurations.utfvp',
+        'fv3d = bob.bio.vein.configurations.fv3d',
 
         # baselines
         'mc = bob.bio.vein.configurations.maximum_curvature',
@@ -43,11 +44,16 @@ setup(
 
         # other
         'parallel = bob.bio.vein.configurations.parallel',
+        'gridio4g48 = bob.bio.vein.configurations.gridio4g48',
+        'grid = bob.bio.vein.configurations.gridio4g48',
         ],
 
       'console_scripts': [
         'compare_rois.py = bob.bio.vein.script.compare_rois:main',
-        'view_mask.py = bob.bio.vein.script.view_mask:main',
+        'view_sample.py = bob.bio.vein.script.view_sample:main',
+        'blame.py = bob.bio.vein.script.blame:main',
+        'markdet.py = bob.bio.vein.script.markdet:main',
+        'watershed_mask.py = bob.bio.vein.script.watershed_mask:main',
         ]
       },