diff --git a/README.rst b/README.rst
index 36a84ba0cf13e771590b874bf748593906dcae9d..f9b3efbd9ec87753960ebe782d0e0b49e3c91f37 100644
--- a/README.rst
+++ b/README.rst
@@ -5,8 +5,8 @@
    :target: http://pythonhosted.org/bob.bio.vein/index.html
 .. image:: http://img.shields.io/badge/docs-latest-orange.png
    :target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.bio.vein/master/index.html
-.. image:: https://gitlab.idiap.ch/bob/bob.bio.vein/badges/master/build.svg
-   :target: https://gitlab.idiap.ch/bob/bob.bio.vein/commits/master
+.. image:: https://gitlab.idiap.ch/bob/bob.bio.vein/badges/v2.1.0/build.svg
+   :target: https://gitlab.idiap.ch/bob/bob.bio.vein/commits/v2.1.0
 .. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
    :target: https://gitlab.idiap.ch/bob/bob.bio.vein
 .. image:: http://img.shields.io/pypi/v/bob.bio.vein.png
diff --git a/bob/bio/vein/algorithm/MiuraMatch.py b/bob/bio/vein/algorithm/MiuraMatch.py
index 83ad2ae5b7fe77d46bbe60e972d842058b68ce1b..65b2e307a0040515d5906470f7b08c594d1c1152 100644
--- a/bob/bio/vein/algorithm/MiuraMatch.py
+++ b/bob/bio/vein/algorithm/MiuraMatch.py
@@ -1,34 +1,49 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
 
-import bob.sp
-import bob.ip.base
-
 import numpy
-import math
 import scipy.signal
 
 from bob.bio.base.algorithm import Algorithm
 
 
 class MiuraMatch (Algorithm):
-  """Finger vein matching: match ratio
+  """Finger vein matching: match ratio via cross-correlation
+
+  The method is based on "cross-correlation" between a model and a probe image.
+  It convolves the binary image(s) representing the model with the binary image
+  representing the probe (rotated by 180 degrees), and evaluates how they
+  cross-correlate. If the model and probe are very similar, the output of the
+  correlation corresponds to a single scalar and approaches a maximum. The
+  value is then normalized by the sum of the pixels lit in both binary images.
+  Therefore, the output of this method is a floating-point number in the range
+  :math:`[0, 0.5]`. The higher the value, the better the match.
+
+  In case model and probe represent images from the same vein structure, but
+  are misaligned, the output is not guaranteed to be accurate. To mitigate this
+  aspect, Miura et al. proposed to add a *small* cropping factor to the model
+  image, assuming not much information is available on the borders (``ch``, for
+  the vertical direction and ``cw``, for the horizontal direction). This allows
+  the convolution to yield searches for different areas in the probe image. The
+  maximum value is then taken from the resulting operation. The convolution
+  result is normalized by the pixels lit in both the cropped model image and
+  the matching pixels on the probe that yield the maximum on the resulting
+  convolution.
+
+  For this to work properly, input images are supposed to be binary in nature,
+  with zeros and ones.
 
   Based on N. Miura, A. Nagasaka, and T. Miyatake. Feature extraction of finger
   vein patterns based on repeated line tracking and its application to personal
   identification. Machine Vision and Applications, Vol. 15, Num. 4, pp.
   194--203, 2004
 
+  Parameters:
 
-  **Parameters:**
+    ch (:py:class:`int`, optional): Maximum search displacement in y-direction.
 
-  ch : :py:class:`int`
-      Optional : Maximum search displacement in y-direction. Different
-      defult values based on the different features.
+    cw (:py:class:`int`, optional): Maximum search displacement in x-direction.
 
-  cw : :py:class:`int`
-      Optional : Maximum search displacement in x-direction. Different
-      defult values based on the different features.
   """
 
   def __init__(self,
@@ -58,39 +73,21 @@ class MiuraMatch (Algorithm):
     return numpy.array(enroll_features)
 
 
-  def convfft(self, t, a):
-    # Determine padding size in x and y dimension
-    size_t  = numpy.array(t.shape)
-    size_a  = numpy.array(a.shape)
-    outsize = size_t + size_a - 1
-
-    # Determine 2D cross correlation in Fourier domain
-    taux = numpy.zeros(outsize)
-    taux[0:size_t[0],0:size_t[1]] = t
-    Ft = bob.sp.fft(taux.astype(numpy.complex128))
-    aaux = numpy.zeros(outsize)
-    aaux[0:size_a[0],0:size_a[1]] = a
-    Fa = bob.sp.fft(aaux.astype(numpy.complex128))
+  def score(self, model, probe):
+    """Computes the score between the probe and the model.
 
-    convta = numpy.real(bob.sp.ifft(Ft*Fa))
+    Parameters:
 
-    [w, h] = size_t-size_a+1
-    output = convta[size_a[0]-1:size_a[0]-1+w, size_a[1]-1:size_a[1]-1+h]
+      model (numpy.ndarray): The model of the user to test the probe against
 
-    return output
+      probe (numpy.ndarray): The probe to test
 
 
-  def score(self, model, probe):
-    """
-    Computes the score of the probe and the model.
+    Returns:
 
-    **Parameters:**
+      score (float): Value between 0 and 0.5, larger value means a better match
 
-    score : :py:class:`float`
-        Value between 0 and 0.5, larger value is better match
     """
-    #print model.shape
-    #print probe.shape
 
     I=probe.astype(numpy.float64)
 
@@ -100,22 +97,36 @@ class MiuraMatch (Algorithm):
     n_models = model.shape[0]
 
     scores = []
-    for i in range(n_models):
-      R=model[i,:].astype(numpy.float64)
+
+    # iterate over all models for a given individual
+    for md in model:
+
+      # erode model by (ch, cw)
+      R = md.astype(numpy.float64)
       h, w = R.shape
       crop_R = R[self.ch:h-self.ch, self.cw:w-self.cw]
-      rotate_R = numpy.zeros((crop_R.shape[0], crop_R.shape[1]))
-      bob.ip.base.rotate(crop_R, rotate_R, 180)
-      #FFT for scoring!
-      #Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
-      Nm = self.convfft(I, rotate_R)
-      #Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
 
+      # correlates using scipy - fastest option available iff the self.ch and
+      # self.cw are high (>30). In this case, the number of components
+      # returned by the convolution is high and using an FFT-based method
+      # yields best results. Otherwise, you may try the other options below
+      # -> check our test_correlation() method on the test units for more
+      # details and benchmarks.
+      Nm = scipy.signal.fftconvolve(I, numpy.rot90(crop_R, k=2), 'valid')
+      # 2nd best: use convolve2d or correlate2d directly;
+      # Nm = scipy.signal.convolve2d(I, numpy.rot90(crop_R, k=2), 'valid')
+      # 3rd best: use correlate2d
+      # Nm = scipy.signal.correlate2d(I, crop_R, 'valid')
+
+      # figures out where the maximum is on the resulting matrix
       t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+      # this is our output
       Nmm = Nm[t0,s0]
-      #Nmm = Nm.max()
-      #mi = numpy.argwhere(Nmm == Nm)
-      #t0, s0 = mi.flatten()[:2]
+
+      # normalizes the output by the number of pixels lit on the input
+      # matrices, taking into consideration the surface that produced the
+      # result (i.e., the eroded model and part of the probe)
       scores.append(Nmm/(sum(sum(crop_R)) + sum(sum(I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw]))))
 
     return numpy.mean(scores)
diff --git a/bob/bio/vein/configurations/utfvp.py b/bob/bio/vein/configurations/utfvp.py
index 6b40cc56f670662c451852e13f734ce83a0bc817..0a58bed17d1e8d61b806bad8f71f55cbb7b3e5fa 100644
--- a/bob/bio/vein/configurations/utfvp.py
+++ b/bob/bio/vein/configurations/utfvp.py
@@ -17,12 +17,12 @@ You can download the raw data of the `UTFVP`_ database by following the link.
 .. include:: links.rst
 """
 
-from bob.bio.vein.database import UtfvpBioDatabase
+from bob.bio.vein.database.utfvp import Database
 
 utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-database = UtfvpBioDatabase(
+database = Database(
     original_directory = utfvp_directory,
     original_extension = '.png',
     )
diff --git a/bob/bio/vein/configurations/verafinger.py b/bob/bio/vein/configurations/verafinger.py
index 9b642101ecfc00ea4879a2e6e1c2541c3652f2fc..21c332018459fa8314c0b5237984eddc091ff35f 100644
--- a/bob/bio/vein/configurations/verafinger.py
+++ b/bob/bio/vein/configurations/verafinger.py
@@ -15,12 +15,12 @@ the link.
 """
 
 
-from bob.bio.vein.database import VerafingerBioDatabase
+from bob.bio.vein.database.verafinger import Database
 
 verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-database = VerafingerBioDatabase(
+database = Database(
     original_directory = verafinger_directory,
     original_extension = '.png',
     )
diff --git a/bob/bio/vein/database/__init__.py b/bob/bio/vein/database/__init__.py
index e4da7db23a776c7d7ee84297a86009e46fd04959..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/bob/bio/vein/database/__init__.py
+++ b/bob/bio/vein/database/__init__.py
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-from .database import VeinBioFile
-from .verafinger import VerafingerBioDatabase
-from .utfvp import UtfvpBioDatabase
-
-# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/vein/database/database.py b/bob/bio/vein/database/database.py
index 6e78633b77722d413a3334f7c8a6ff968f997083..2d43d4331eb1dd9783860c487b5a8945258e1240 100644
--- a/bob/bio/vein/database/database.py
+++ b/bob/bio/vein/database/database.py
@@ -1,19 +1,29 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Wed 20 July 14:43:22 CEST 2016
+# Thu 03 Nov 2016 12:23:52 CET
+
+"""Single sample API"""
 
-"""
-  Verification API for bob.db.voxforge
-"""
 
 from bob.bio.base.database.file import BioFile
 
 
 class VeinBioFile(BioFile):
-    def __init__(self, client_id, path, file_id):
-        """
-        Initializes this File object with an File equivalent for
-        VoxForge database.
-        """
-        super(VeinBioFile, self).__init__(client_id=client_id, path=path, file_id=file_id)
+    """A "sample" object that is specific to vein recognition experiments
+
+
+    Parameters:
+
+      f (object): Low-level file (or sample) object that is kept inside
+
+    """
+
+    def __init__(self, f):
+        super(VeinBioFile, self).__init__(
+            client_id=f.model_id,
+            path=f.path,
+            file_id=f.id,
+            )
+
+        # keep copy of original low-level database file object
+        self.f = f
diff --git a/bob/bio/vein/database/utfvp.py b/bob/bio/vein/database/utfvp.py
index b8f09a049d3cfea5a8216b27d4ea6ccc4cb75aa6..ee924674f8564396c439980cd87104ce70bb3e1f 100644
--- a/bob/bio/vein/database/utfvp.py
+++ b/bob/bio/vein/database/utfvp.py
@@ -1,30 +1,49 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
-# Tue 27 Sep 2016 16:49:05 CEST
+# Fri 04 Nov 2016 14:46:53 CET
 
-from .database import VeinBioFile
-from bob.bio.base.database import BioDatabase
+from bob.bio.base.database import BioFile, BioDatabase
 
 
-class UtfvpBioDatabase(BioDatabase):
+class File(BioFile):
+    """
+    Implements extra properties of vein files for the UTFVP Fingervein database
+
+
+    Parameters:
+
+      f (object): Low-level file (or sample) object that is kept inside
+
+    """
+
+    def __init__(self, f):
+
+        super(File, self).__init__(client_id=f.client_id, path=f.path,
+            file_id=f.id)
+        self.__f = f
+
+
+class Database(BioDatabase):
     """
     Implements verification API for querying UTFVP Fingervein database.
     """
 
     def __init__(self, **kwargs):
 
-        super(UtfvpBioDatabase, self).__init__(name='utfvp',
-            **kwargs)
+        super(Database, self).__init__(name='utfvp', **kwargs)
         from bob.db.utfvp.query import Database as LowLevelDatabase
         self.__db = LowLevelDatabase()
 
+
     def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
         protocol = protocol if protocol is not None else self.protocol
         return self.__db.model_ids(groups=groups, protocol=protocol)
 
+
     def objects(self, groups=None, protocol=None, purposes=None,
         model_ids=None, **kwargs):
 
         retval = self.__db.objects(groups=groups, protocol=protocol,
             purposes=purposes, model_ids=model_ids, **kwargs)
-        return [VeinBioFile(client_id=f.client_id, path=f.path, file_id=f.id) for f in retval]
+
+        return [File(f) for f in retval]
diff --git a/bob/bio/vein/database/verafinger.py b/bob/bio/vein/database/verafinger.py
index 23d3ff269d74257aeeb20af97616b10633fb7fb8..69d1b025881c06f9e97b8a7fdd7570c58f2612fa 100644
--- a/bob/bio/vein/database/verafinger.py
+++ b/bob/bio/vein/database/verafinger.py
@@ -3,46 +3,85 @@
 # Tue 27 Sep 2016 16:48:57 CEST
 
 
-from .database import VeinBioFile
-from bob.bio.base.database import BioDatabase
+from bob.bio.base.database import BioFile, BioDatabase
 
 
-class VerafingerBioDatabase(BioDatabase):
-    """
-    Implements verification API for querying Vera Fingervein database.
-    """
+class File(BioFile):
+  """
+  Implements extra properties of vein files for the Vera Fingervein database
 
-    def __init__(self, **kwargs):
 
-        super(VerafingerBioDatabase, self).__init__(name='verafinger',
-            **kwargs)
-        from bob.db.verafinger.query import Database as LowLevelDatabase
-        self.__db = LowLevelDatabase()
+  Parameters:
 
-        self.low_level_group_names = ('train', 'dev')
-        self.high_level_group_names = ('world', 'dev')
+    f (object): Low-level file (or sample) object that is kept inside
 
-    def groups(self):
+  """
 
-        return self.convert_names_to_highlevel(self.__db.groups(),
-            self.low_level_group_names, self.high_level_group_names)
+  def __init__(self, f):
 
-    def client_id_from_model_id(self, model_id, group='dev'):
-        """Required as ``model_id != client_id`` on this database"""
+    super(File, self).__init__(client_id=f.unique_finger_name, path=f.path,
+        file_id=f.id)
+    self.__f = f
 
-        return self.__db.finger_name_from_model_id(model_id)
 
-    def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
+  def mask(self):
+    """Returns the binary mask from the ROI annotations available"""
 
-        groups = self.convert_names_to_lowlevel(groups,
-            self.low_level_group_names, self.high_level_group_names)
-        return self.__db.model_ids(groups=groups, protocol=protocol)
+    from ..preprocessor.utils import poly_to_mask
 
-    def objects(self, groups=None, protocol=None, purposes=None,
-        model_ids=None, **kwargs):
+    # The size of images in this database is (250, 665) pixels (h, w)
+    return poly_to_mask((250, 665), self.__f.roi())
 
-        groups = self.convert_names_to_lowlevel(groups,
-            self.low_level_group_names, self.high_level_group_names)
-        retval = self.__db.objects(groups=groups, protocol=protocol,
-            purposes=purposes, model_ids=model_ids, **kwargs)
-        return [VeinBioFile(client_id=f.model_id, path=f.path, file_id=f.id) for f in retval]
+
+  def load(self, *args, **kwargs):
+    """(Overrides base method) Loads both image and mask"""
+
+    image = super(File, self).load(*args, **kwargs)
+
+    return image, self.mask()
+
+
+
+class Database(BioDatabase):
+  """
+  Implements verification API for querying Vera Fingervein database.
+  """
+
+  def __init__(self, **kwargs):
+
+    super(Database, self).__init__(name='verafinger', **kwargs)
+    from bob.db.verafinger.query import Database as LowLevelDatabase
+    self.__db = LowLevelDatabase()
+
+    self.low_level_group_names = ('train', 'dev')
+    self.high_level_group_names = ('world', 'dev')
+
+
+  def groups(self):
+
+    return self.convert_names_to_highlevel(self.__db.groups(),
+        self.low_level_group_names, self.high_level_group_names)
+
+
+  def client_id_from_model_id(self, model_id, group='dev'):
+    """Required as ``model_id != client_id`` on this database"""
+
+    return self.__db.finger_name_from_model_id(model_id)
+
+
+  def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
+
+    groups = self.convert_names_to_lowlevel(groups,
+        self.low_level_group_names, self.high_level_group_names)
+    return self.__db.model_ids(groups=groups, protocol=protocol)
+
+
+  def objects(self, groups=None, protocol=None, purposes=None,
+      model_ids=None, **kwargs):
+
+    groups = self.convert_names_to_lowlevel(groups,
+        self.low_level_group_names, self.high_level_group_names)
+    retval = self.__db.objects(groups=groups, protocol=protocol,
+        purposes=purposes, model_ids=model_ids, **kwargs)
+
+    return [File(f) for f in retval]
diff --git a/bob/bio/vein/extractor/RepeatedLineTracking.py b/bob/bio/vein/extractor/RepeatedLineTracking.py
index c3c5d6ad2923cd298c7357dab6cc40924a6b9708..84074aff73c3760795626b6e516f00aff9a1fcef 100644
--- a/bob/bio/vein/extractor/RepeatedLineTracking.py
+++ b/bob/bio/vein/extractor/RepeatedLineTracking.py
@@ -85,10 +85,11 @@ class RepeatedLineTracking (Extractor):
     hWo = numpy.round(hW*math.sqrt(2)/2)       # half width for oblique directions
 
     # Omit unreachable borders
-    finger_mask[0:self.r+hW,:] = 0
-    finger_mask[finger_mask.shape[0]-(self.r+hW):,:] = 0
-    finger_mask[:,0:self.r+hW] = 0
-    finger_mask[:,finger_mask.shape[1]-(self.r+hW):] = 0
+    border = int(self.r+hW)
+    finger_mask[0:border,:] = 0
+    finger_mask[finger_mask.shape[0]-border:,:] = 0
+    finger_mask[:,0:border] = 0
+    finger_mask[:,finger_mask.shape[1]-border:] = 0
 
     ## Uniformly distributed starting points
     aux = numpy.argwhere( (finger_mask > 0) == True )
@@ -153,7 +154,7 @@ class RepeatedLineTracking (Extractor):
                     else:
                         # Left direction
                         xp = Nc[i,0] - self.r
-                    Vdepths[i] = finger_image[yp + hW, xp] - 2*finger_image[yp,xp] + finger_image[yp - hW, xp]
+                    Vdepths[i] = finger_image[int(yp + hW), int(xp)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp - hW), int(xp)]
                 elif (Nc[i,0] == xc):
                     # Vertical plane
                     xp = Nc[i,0]
@@ -163,7 +164,7 @@ class RepeatedLineTracking (Extractor):
                     else:
                         # Up direction
                         yp = Nc[i,1] - self.r
-                    Vdepths[i] = finger_image[yp, xp + hW] - 2*finger_image[yp,xp] + finger_image[yp, xp - hW]
+                    Vdepths[i] = finger_image[int(yp), int(xp + hW)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp), int(xp - hW)]
 
                 ## Oblique directions
                 if ( (Nc[i,0] > xc) and (Nc[i,1] < yc) ) or ( (Nc[i,0] < xc) and (Nc[i,1] > yc) ):
@@ -176,7 +177,7 @@ class RepeatedLineTracking (Extractor):
                         # Bottom left
                         xp = Nc[i,0] - ro
                         yp = Nc[i,1] + ro
-                    Vdepths[i] = finger_image[yp - hWo, xp - hWo] - 2*finger_image[yp,xp] + finger_image[yp + hWo, xp + hWo]
+                    Vdepths[i] = finger_image[int(yp - hWo), int(xp - hWo)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp + hWo), int(xp + hWo)]
                 else:
                     # Diagonal, down \
                     if (Nc[i,0] < xc and Nc[i,1] < yc):
@@ -187,7 +188,7 @@ class RepeatedLineTracking (Extractor):
                         # Bottom right
                         xp = Nc[i,0] + ro
                         yp = Nc[i,1] + ro
-                    Vdepths[i] = finger_image[yp + hWo, xp - hWo] - 2*finger_image[yp,xp] + finger_image[yp - hWo, xp + hWo]
+                    Vdepths[i] = finger_image[int(yp + hWo), int(xp - hWo)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp - hWo), int(xp + hWo)]
             # End search of candidates
             index = numpy.argmax(Vdepths)  #Determine best candidate
             # Register tracking information
diff --git a/bob/bio/vein/preprocessor/FingerCrop.py b/bob/bio/vein/preprocessor/FingerCrop.py
index f6dbd2e636c299df9be8d183a0aeaff1760c32e8..dcac900520e12d806a5b832d11dce06ece16ee6e 100644
--- a/bob/bio/vein/preprocessor/FingerCrop.py
+++ b/bob/bio/vein/preprocessor/FingerCrop.py
@@ -6,10 +6,6 @@ import numpy
 from PIL import Image
 
 import bob.io.base
-import bob.io.image
-import bob.ip.base
-import bob.sp
-import bob.core
 
 from bob.bio.base.preprocessor import Preprocessor
 
@@ -18,57 +14,70 @@ from .. import utils
 
 class FingerCrop (Preprocessor):
   """
-  Extracts the mask and pre-processes fingervein images.
+  Extracts the mask heuristically and pre-processes fingervein images.
 
   Based on the implementation: E.C. Lee, H.C. Lee and K.R. Park. Finger vein
   recognition using minutia-based alignment and local binary pattern-based
   feature extraction. International Journal of Imaging Systems and
   Technology. Vol. 19, No. 3, pp. 175-178, September 2009.
 
+  Finger orientation is based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li,
+  Finger-vein authentication based on wide line detector and pattern
+  normalization, Proceedings on 20th International Conference on Pattern
+  Recognition (ICPR), 2010.
+
+  The ``konomask`` option is based on the work of M. Kono, H. Ueki and S.
+  Umemura. Near-infrared finger vein patterns for personal identification,
+  Applied Optics, Vol. 41, Issue 35, pp. 7429-7436 (2002).
+
   In this implementation, the finger image is (in this order):
 
-    1. Padded
-    2. The mask is extracted
-    3. The finger is normalized (made horizontal)
-    4. (optionally) Post processed
+    1. The mask is extracted (if ``annotation`` is not chosen as a parameter to
+       ``fingercontour``). Other mask extraction options correspond to
+       heuristics developed by Lee et al. (2009) or Kono et al. (2002)
+    2. The finger is normalized (made horizontal), via a least-squares
+       normalization procedure concerning the center of the annotated area,
+       width-wise. Before normalization, the image is padded to avoid losing
+       pixels corresponding to veins during the rotation
+    3. (optionally) Post processed with histogram-equalization to enhance vein
+       information. Notice that only the area inside the mask is used for
+       normalization. Areas outside of the mask (where the mask is ``False``)
+       are set to black
 
 
-  **Parameters:**
+  Parameters:
 
-  mask_h : :py:class:`int`
-      Optional,  Height of contour mask in pixels, must be an even
-      number
+    mask_h (:py:obj:`int`, optional): Height of contour mask in pixels, must
+      be an even number (used by the methods ``leemaskMod`` or
+      ``leemaskMatlab``)
 
-  mask_w : :py:class:`int`
-      Optional,  Width of the contour mask in pixels
+    mask_w (:py:obj:`int`, optional): Width of the contour mask in pixels
+      (used by the methods ``leemaskMod`` or ``leemaskMatlab``)
 
-  padding_width : :py:class:`int`
-      Optional,  How much padding (in pixels) to add around
-      the borders of the input image. We normally always keep this value on its
-      default (5 pixels).
+    padding_width (:py:obj:`int`, optional): How much padding (in pixels) to
+      add around the borders of the input image. We normally always keep this
+      value on its default (5 pixels). This parameter is always used before
+      normalizing the finger orientation.
 
-  padding_constant : :py:class:`int`
-      Optional,  What is the value of the pixels added
-      to the padding. This number should be a value between 0 and 255. (From
-      Pedro Tome: for UTFVP (high-quality samples), use 0. For the VERA
+    padding_constant (:py:obj:`int`, optional): What is the value of the pixels
+      added to the padding. This number should be a value between 0 and 255.
+      (From Pedro Tome: for UTFVP (high-quality samples), use 0. For the VERA
       Fingervein database (low-quality samples), use 51 (that corresponds to
-      0.2 in a float image with values between 0 and 1).
-
-  fingercontour : :py:class:`str`
-      Optional,  Select between three finger contour
-      implementations: ``"leemaskMod"``, ``"leemaskMatlab"`` or ``"konomask"``.
-      (From Pedro Tome: the option ``leemaskMatlab`` was just implemented for
-      testing purposes so we could compare with MAT files generated from Matlab
-      code of other authors. He only used it with the UTFVP database, using
-      ``leemaskMod`` with that database yields slight worse results.)
-
-  postprocessing : :py:class:`str`
-      Optional,  Select between ``HE`` (histogram
-      equalization, as with :py:func:`bob.ip.base.histogram_equalization`),
-      ``HFE`` (high-frequency emphasis filter, with hard-coded parameters - see
-      implementation) or ``CircGabor`` (circular Gabor filter with band-width
-      1.12 octaves and standard deviation of 5 pixels (this is hard-coded)). By
-      default, no postprocessing is applied on the image.
+      0.2 in a float image with values between 0 and 1). This parameter is
+      always used before normalizing the finger orientation.
+
+    fingercontour (:py:obj:`str`, optional): Select between four finger
+      contour implementations: ``"leemaskMod"``, ``"leemaskMatlab"``,
+      ``"konomask"`` or ``"annotation"``. (From Pedro Tome: the option
+      ``leemaskMatlab`` was just implemented for testing purposes so we could
+      compare with MAT files generated from Matlab code of other authors. He
+      only used it with the UTFVP database, using ``leemaskMod`` with that
+      database yields slight worse results.)
+
+    postprocessing (:py:obj:`str`, optional): Select between ``HE`` (histogram
+      equalization, as with :py:func:`skimage.exposure.equalize_hist`) or
+      ``None`` (the default).
+
   """
 
 
@@ -106,8 +115,11 @@ class FingerCrop (Preprocessor):
 
     """
 
+    padded_image = numpy.pad(image, self.padding_width, 'constant',
+        constant_values = self.padding_constant)
+
     sigma = 5
-    img_h,img_w = image.shape
+    img_h,img_w = padded_image.shape
 
     # Determine lower half starting point
     if numpy.mod(img_h,2) == 0:
@@ -125,7 +137,7 @@ class FingerCrop (Preprocessor):
     hy = (-Y/(2*math.pi*sigma**4))*numpy.exp(-(X**2 + Y**2)/(2*sigma**2))
 
     # Filter the image with the directional kernel
-    fy = utils.imfilter(image, hy)
+    fy = utils.imfilter(padded_image, hy)
 
     # Upper part of filtred image
     img_filt_up = fy[0:half_img_h,:]
@@ -136,18 +148,17 @@ class FingerCrop (Preprocessor):
     y_lo = img_filt_lo.argmin(axis=0)
 
     # Fill region between upper and lower edges
-    finger_mask = numpy.ndarray(image.shape, numpy.bool)
+    finger_mask = numpy.ndarray(padded_image.shape, numpy.bool)
     finger_mask[:,:] = False
 
     for i in range(0,img_w):
-      finger_mask[y_up[i]:y_lo[i]+image.shape[0]-half_img_h+2,i] = True
+      finger_mask[y_up[i]:y_lo[i]+padded_image.shape[0]-half_img_h+2,i] = True
 
-    # Extract y-position of finger edges
-    edges = numpy.zeros((2,img_w))
-    edges[0,:] = y_up
-    edges[1,:] = y_lo + image.shape[0] - half_img_h + 1
-
-    return (finger_mask, edges)
+    if not self.padding_width:
+      return finger_mask
+    else:
+      w = self.padding_width
+      return finger_mask[w:-w,w:-w]
 
 
   def __leemaskMod__(self, image):
@@ -168,7 +179,7 @@ class FingerCrop (Preprocessor):
     a horizontal filter is also applied on the vertical axis.
 
 
-    **Parameters:**
+    Parameters:
 
     image (numpy.ndarray): raw image to use for finding the mask, as 2D array
         of unsigned 8-bit integers
@@ -186,8 +197,10 @@ class FingerCrop (Preprocessor):
 
     """
 
+    padded_image = numpy.pad(image, self.padding_width, 'constant',
+        constant_values = self.padding_constant)
 
-    img_h,img_w = image.shape
+    img_h,img_w = padded_image.shape
 
     # Determine lower half starting point
     half_img_h = img_h/2
@@ -195,29 +208,29 @@ class FingerCrop (Preprocessor):
 
     # Construct mask for filtering (up-bottom direction)
     mask = numpy.ones((self.mask_h, self.mask_w), dtype='float64')
-    mask[(self.mask_h/2):,:] = -1.0
+    mask[int(self.mask_h/2):,:] = -1.0
 
-    img_filt = utils.imfilter(image, mask)
+    img_filt = utils.imfilter(padded_image, mask)
 
     # Upper part of filtred image
-    img_filt_up = img_filt[:half_img_h,:]
+    img_filt_up = img_filt[:int(half_img_h),:]
     y_up = img_filt_up.argmax(axis=0)
 
     # Lower part of filtred image
-    img_filt_lo = img_filt[half_img_h:,:]
+    img_filt_lo = img_filt[int(half_img_h):,:]
     y_lo = img_filt_lo.argmin(axis=0)
 
-    img_filt = utils.imfilter(image, mask.T)
+    img_filt = utils.imfilter(padded_image, mask.T)
 
     # Left part of filtered image
-    img_filt_lf = img_filt[:,:half_img_w]
+    img_filt_lf = img_filt[:,:int(half_img_w)]
     y_lf = img_filt_lf.argmax(axis=1)
 
     # Right part of filtred image
-    img_filt_rg = img_filt[:,half_img_w:]
+    img_filt_rg = img_filt[:,int(half_img_w):]
     y_rg = img_filt_rg.argmin(axis=1)
 
-    finger_mask = numpy.zeros(image.shape, dtype='bool')
+    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')
 
     for i in range(0,y_up.size):
         finger_mask[y_up[i]:y_lo[i]+img_filt_lo.shape[0]+1,i] = True
@@ -230,15 +243,11 @@ class FingerCrop (Preprocessor):
     # meadian
     finger_mask[:,int(numpy.median(y_rg)+img_filt_rg.shape[1]):] = False
 
-    # Extract y-position of finger edges
-    edges = numpy.zeros((2,img_w))
-    edges[0,:] = y_up
-    edges[0,0:int(round(numpy.mean(y_lf))+1)] = edges[0,int(round(numpy.mean(y_lf))+1)]
-
-    edges[1,:] = numpy.round(y_lo + img_filt_lo.shape[0])
-    edges[1,0:int(round(numpy.mean(y_lf))+1)] = edges[1,int(round(numpy.mean(y_lf))+1)]
-
-    return finger_mask, edges
+    if not self.padding_width:
+      return finger_mask
+    else:
+      w = self.padding_width
+      return finger_mask[w:-w,w:-w]
 
 
   def __leemaskMatlab__(self, image):
@@ -282,16 +291,19 @@ class FingerCrop (Preprocessor):
 
     """
 
-    img_h,img_w = image.shape
+    padded_image = numpy.pad(image, self.padding_width, 'constant',
+        constant_values = self.padding_constant)
+
+    img_h,img_w = padded_image.shape
 
     # Determine lower half starting point
-    half_img_h = img_h/2
+    half_img_h = int(img_h/2)
 
     # Construct mask for filtering
     mask = numpy.ones((self.mask_h,self.mask_w), dtype='float64')
-    mask[(self.mask_h/2):,:] = -1.0
+    mask[int(self.mask_h/2):,:] = -1.0
 
-    img_filt = utils.imfilter(image, mask)
+    img_filt = utils.imfilter(padded_image, mask)
 
     # Upper part of filtered image
     img_filt_up = img_filt[:half_img_h,:]
@@ -304,19 +316,18 @@ class FingerCrop (Preprocessor):
     # Translation: for all columns of the input image, set to True all pixels
     # of the mask from index where the maxima occurred in the upper part until
     # the index where the minima occurred in the lower part.
-    finger_mask = numpy.zeros(image.shape, dtype='bool')
+    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')
     for i in range(img_filt.shape[1]):
       finger_mask[y_up[i]:(y_lo[i]+img_filt_lo.shape[0]+1), i] = True
 
-    # Extract y-position of finger edges
-    edges = numpy.zeros((2,img_w), dtype='float64')
-    edges[0,:] = y_up
-    edges[1,:] = numpy.round(y_lo + img_filt_lo.shape[0])
-
-    return finger_mask, edges
+    if not self.padding_width:
+      return finger_mask
+    else:
+      w = self.padding_width
+      return finger_mask[w:-w,w:-w]
 
 
-  def __huangnormalization__(self, image, mask, edges):
+  def __huangnormalization__(self, image, mask):
     """
     Simple finger normalization.
 
@@ -342,10 +353,6 @@ class FingerCrop (Preprocessor):
 
     mask (numpy.ndarray): mask to normalize as 2D array of booleans
 
-    edges (numpy.ndarray): edges of the mask, 2D array with 2 rows and as
-        many columns as the input image, containing the start of the mask and
-        the end of the mask.
-
 
     **Returns:**
 
@@ -354,12 +361,19 @@ class FingerCrop (Preprocessor):
 
     numpy.ndarray: A 2D boolean array with the same shape and data type of
         the input mask representing the newly aligned mask.
+
     """
 
     img_h, img_w = image.shape
 
+    # Calculates the mask edges along the columns
+    edges = numpy.zeros((2, mask.shape[1]), dtype=int)
+
+    edges[0,:] = mask.argmax(axis=0) # get upper edges
+    edges[1,:] = len(mask) - numpy.flipud(mask).argmax(axis=0) - 1
+
     bl = edges.mean(axis=0) #baseline
-    x = numpy.arange(0,img_w)
+    x = numpy.arange(0, edges.shape[1])
     A = numpy.vstack([x, numpy.ones(len(x))]).T
 
     # Fit a straight line through the base line points
@@ -395,18 +409,23 @@ class FingerCrop (Preprocessor):
     Tinv = numpy.linalg.inv(T)
     Tinvtuple = (Tinv[0,0],Tinv[0,1], Tinv[0,2], Tinv[1,0],Tinv[1,1],Tinv[1,2])
 
-    img=Image.fromarray(image)
-    image_norm = img.transform(img.size, Image.AFFINE, Tinvtuple, resample=Image.BICUBIC)
-    image_norm = numpy.array(image_norm)
+    def _afftrans(img):
+      '''Applies the affine transform on the resulting image'''
 
-    finger_mask = numpy.zeros(mask.shape)
-    finger_mask[mask] = 1
+      t = Image.fromarray(img.astype('uint8'))
+      w, h = t.size #pillow image is encoded w, h
+      w += 2*self.padding_width
+      h += 2*self.padding_width
+      t = t.transform(
+          (w,h),
+          Image.AFFINE,
+          Tinvtuple,
+          resample=Image.BICUBIC,
+          fill=self.padding_constant)
 
-    img_mask=Image.fromarray(finger_mask)
-    mask_norm = img_mask.transform(img_mask.size, Image.AFFINE, Tinvtuple, resample=Image.BICUBIC)
-    mask_norm = numpy.array(mask_norm).astype('bool')
+      return numpy.array(t).astype(img.dtype)
 
-    return (image_norm, mask_norm)
+    return _afftrans(image), _afftrans(mask)
 
 
   def __HE__(self, image, mask):
@@ -416,7 +435,7 @@ class FingerCrop (Preprocessor):
     In this implementation, only the pixels that lie inside the mask will be
     used to calculate the histogram equalization parameters. Because of this
     particularity, we don't use Bob's implementation for histogram equalization
-    and have one based exclusively on NumPy.
+    and have one based exclusively on scikit-image.
 
 
     **Parameters:**
@@ -434,142 +453,65 @@ class FingerCrop (Preprocessor):
     numpy.ndarray: normalized image as a 2D array of unsigned 8-bit integers
 
     """
+    from skimage.exposure import equalize_hist
 
-    image_histogram, bins = numpy.histogram(image[mask], 256, normed=True)
-    cdf = image_histogram.cumsum() # cumulative distribution function
-    cdf = 255 * cdf / cdf[-1] # normalize
+    retval = equalize_hist(image, mask=mask)
 
-    # use linear interpolation of cdf to find new pixel values
-    image_equalized = numpy.interp(image.flatten(), bins[:-1], cdf)
-    image_equalized = image_equalized.reshape(image.shape)
-
-    # normalized image to be returned is a composition of the original image
-    # (background) and the equalized image (finger area)
-    retval = image.copy()
-    retval[mask] = image_equalized[mask]
+    # make the parts outside the mask totally black
+    retval[~mask] = 0
 
     return retval
 
 
-  def __circularGabor__(self, image, bw, sigma):
-    """
-    Applies a circular gabor filter on the input image, with parameters.
-
+  def __call__(self, data, annotations=None):
+    """Reads the input image or (image, mask) and prepares for fex.
 
-    **Parameters:**
+    Parameters:
 
-    image (numpy.ndarray): raw image to be filtered, as 2D array of
-          unsigned 8-bit integers
+      data (numpy.ndarray, tuple): Either a :py:class:`numpy.ndarray`
+        containing a gray-scaled image with dtype ``uint8`` or a 2-tuple
+        containing both the gray-scaled image and a mask, with the same size of
+        the image, with dtype ``bool`` containing the points which should be
+        considered part of the finger
 
-    bw (float): bandwidth (1.12 octave)
 
-    sigma (int): standard deviation (5  pixels)
+    Returns:
 
+      numpy.ndarray: The image, preprocessed and normalized
 
-    **Returns:**
+      numpy.ndarray: A mask, of the same size of the image, indicating where
+      the valid data for the object is.
 
-    numpy.ndarray: normalized image as a 2D array of unsigned 8-bit integers
     """
 
-    # Converts image to doubles
-    image_new = bob.core.convert(image,numpy.float64,(0,1),(0,255))
-    img_h, img_w = image_new.shape
-
-    fc = (1/math.pi * math.sqrt(math.log(2)/2) * (2**bw+1)/(2**bw-1))/sigma
-
-    sz = numpy.fix(8*numpy.max([sigma,sigma]))
-
-    if numpy.mod(sz,2) == 0: sz = sz+1
-
-    #Constructs filter kernel
-    winsize = numpy.fix(sz/2)
-
-    x = numpy.arange(-winsize, winsize+1)
-    y = numpy.arange(winsize, numpy.fix(-sz/2)-1, -1)
-    X, Y = numpy.meshgrid(x, y)
-    # X (right +)
-    # Y (up +)
-
-    gaborfilter = numpy.exp(-0.5*(X**2/sigma**2+Y**2/sigma**2))*numpy.cos(2*math.pi*fc*numpy.sqrt(X**2+Y**2))*(1/(2*math.pi*sigma))
-
-    # Without normalisation
-    #gaborfilter = numpy.exp(-0.5*(X**2/sigma**2+Y**2/sigma**2))*numpy.cos(2*math.pi*fc*numpy.sqrt(X**2+Y**2))
-
-    imageEnhance = utils.imfilter(image, gaborfilter)
-    imageEnhance = numpy.abs(imageEnhance)
-
-    return bob.core.convert(imageEnhance,numpy.uint8, (0,255),
-        (imageEnhance.min(),imageEnhance.max()))
-
-
-  def __HFE__(self,image):
-    """
-    High Frequency Emphasis Filtering (HFE).
-    """
-
-    ### Hard-coded parameters for the HFE filtering
-    D0 = 0.025
-    a = 0.6
-    b = 1.2
-    n = 2.0
-
-    # converts image to doubles
-    image_new = bob.core.convert(image,numpy.float64, (0,1), (0,255))
-    img_h, img_w = image_new.shape
-
-    # DFT
-    Ffreq = bob.sp.fftshift(bob.sp.fft(image_new.astype(numpy.complex128))/math.sqrt(img_h*img_w))
-
-    row = numpy.arange(1,img_w+1)
-    x = (numpy.tile(row,(img_h,1)) - (numpy.fix(img_w/2)+1)) /img_w
-    col = numpy.arange(1,img_h+1)
-    y =  (numpy.tile(col,(img_w,1)).T - (numpy.fix(img_h/2)+1))/img_h
-
-    # D  is  the  distance  from  point  (u,v)  to  the  centre  of the
-    # frequency rectangle.
-    radius = numpy.sqrt(x**2 + y**2)
-
-    f = a + b / (1.0 + (D0 / radius)**(2*n))
-    Ffreq = Ffreq * f
-
-    # implements the inverse DFT
-    imageEnhance = bob.sp.ifft(bob.sp.ifftshift(Ffreq))
-
-    # skips complex part
-    imageEnhance = numpy.abs(imageEnhance)
-
-    # renormalizes and returns
-    return bob.core.convert(imageEnhance, numpy.uint8, (0, 255),
-        (imageEnhance.min(), imageEnhance.max()))
-
-
-  def __call__(self, image, annotations=None):
-    """
-    Reads the input image, extract the mask of the fingervein, postprocesses.
-    """
-
-    # 1. Pads the input image if any padding should be added
-    image = numpy.pad(image, self.padding_width, 'constant',
-        constant_values = self.padding_constant)
+    if isinstance(data, numpy.ndarray):
+      image = data
+      mask = None
+    else:
+      image, mask = data
 
     ## Finger edges and contour extraction:
     if self.fingercontour == 'leemaskMatlab':
-      mask, edges = self.__leemaskMatlab__(image) #for UTFVP
+      mask = self.__leemaskMatlab__(image) #for UTFVP
     elif self.fingercontour == 'leemaskMod':
-      mask, edges = self.__leemaskMod__(image) #for VERA
+      mask = self.__leemaskMod__(image) #for VERA
     elif self.fingercontour == 'konomask':
-      mask, edges = self.__konomask__(image, sigma=5)
+      mask = self.__konomask__(image, sigma=5)
+    elif self.fingercontour == 'annotation':
+      if mask is None:
+        raise RuntimeError("Cannot use fingercontour=annotation - the " \
+            "current sample being processed does not provide a mask")
+    else:
+      raise RuntimeError("Please choose between leemaskMod, leemaskMatlab, " \
+          "konomask or annotation for parameter 'fingercontour'. %s is not " \
+          "valid" % self.fingercontour)
 
     ## Finger region normalization:
-    image_norm, mask_norm = self.__huangnormalization__(image, mask, edges)
+    image_norm, mask_norm = self.__huangnormalization__(image, mask)
 
     ## veins enhancement:
     if self.postprocessing == 'HE':
       image_norm = self.__HE__(image_norm, mask_norm)
-    elif self.postprocessing == 'HFE':
-      image_norm = self.__HFE__(image_norm)
-    elif self.postprocessing == 'CircGabor':
-      image_norm = self.__circularGabor__(image_norm, 1.12, 5)
 
     ## returns the normalized image and the finger mask
     return image_norm, mask_norm
@@ -580,13 +522,11 @@ class FingerCrop (Preprocessor):
 
     f = bob.io.base.HDF5File(filename, 'w')
     f.set('image', data[0])
-    f.set('finger_mask', data[1])
+    f.set('mask', data[1])
 
 
   def read_data(self, filename):
     '''Overrides the default method implementation to handle our tuple'''
 
     f = bob.io.base.HDF5File(filename, 'r')
-    image = f.read('image')
-    finger_mask = f.read('finger_mask')
-    return (image, finger_mask)
+    return f.read('image'), f.read('mask')
diff --git a/bob/bio/vein/preprocessor/utils.py b/bob/bio/vein/preprocessor/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..84cce9a92bcd05361ed24b1d38af42172602ee66
--- /dev/null
+++ b/bob/bio/vein/preprocessor/utils.py
@@ -0,0 +1,312 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+"""Utilities for preprocessing vein imagery"""
+
+import numpy
+
+
+def assert_points(area, points):
+  """Checks all points fall within the determined shape region, inclusively
+
+  This assertion function tests that all points given in ``points`` fall
+  within a certain area provided in ``area``.
+
+
+  Parameters:
+
+    area (tuple): A tuple containing the size of the limiting area where the
+      points should all be in.
+
+    points (numpy.ndarray): A 2D numpy ndarray with any number of rows (points)
+      and 2 columns (representing ``y`` and ``x`` coordinates respectively), or
+      any type convertible to this format. This array contains the points that
+      will be checked for conformity. In case one of the points doesn't fall
+      into the determined area an assertion is raised.
+
+
+  Raises:
+
+    AssertionError: In case one of the input points does not fall within the
+      area defined.
+
+  """
+
+  for k in points:
+    assert 0 <= k[0] < area[0] and 0 <= k[1] < area[1], \
+        "Point (%d, %d) is not inside the region determined by area " \
+        "(%d, %d)" % (k[0], k[1], area[0], area[1])
+
+
+def fix_points(area, points):
+  """Checks/fixes all points so they fall within the determined shape region
+
+  Points which are lying outside the determined area will be brought into the
+  area by moving the offending coordinate to the border of the said area.
+
+
+  Parameters:
+
+    area (tuple): A tuple containing the size of the limiting area where the
+      points should all be in.
+
+    points (numpy.ndarray): A 2D :py:class:`numpy.ndarray` with any number of
+      rows (points) and 2 columns (representing ``y`` and ``x`` coordinates
+      respectively), or any type convertible to this format. This array
+      contains the points that will be checked/fixed for conformity. In case
+      one of the points doesn't fall into the determined area, it is silently
+      corrected so it does.
+
+
+  Returns:
+
+    numpy.ndarray: A **new** array of points with corrected coordinates
+
+  """
+
+  retval = numpy.array(points).copy()
+
+  retval[retval<0] = 0 #floor at 0 for both axes
+  y, x = retval[:,0], retval[:,1]
+  y[y>=area[0]] = area[0] - 1
+  x[x>=area[1]] = area[1] - 1
+
+  return retval
+
+
+def poly_to_mask(shape, points):
+  """Generates a binary mask from a set of 2D points
+
+
+  Parameters:
+
+    shape (tuple): A tuple containing the size of the output mask in height and
+      width, for Bob compatibility ``(y, x)``.
+
+    points (list): A list of tuples containing the polygon points that form a
+      region on the target mask. A line connecting these points will be drawn
+      and all the points in the mask that fall on or within the polygon line,
+      will be set to ``True``. All other points will have a value of ``False``.
+
+
+  Returns:
+
+    numpy.ndarray: A 2D numpy ndarray with ``dtype=bool`` with the mask
+    generated with the determined shape, using the points for the polygon.
+
+  """
+  from PIL import Image, ImageDraw
+
+  # n.b.: PIL images are (x, y), while Bob shapes are represented in (y, x)!
+  mask = Image.new('L', (shape[1], shape[0]))
+
+  # converts whatever comes in into a list of tuples for PIL
+  fixed = tuple(map(tuple, numpy.roll(fix_points(shape, points), 1, 1)))
+
+  # draws polygon
+  ImageDraw.Draw(mask).polygon(fixed, fill=255)
+
+  return numpy.array(mask, dtype=numpy.bool)
+
+
+def mask_to_image(mask, dtype=numpy.uint8):
+  """Converts a binary (boolean) mask into an integer or floating-point image
+
+  This function converts a boolean binary mask into an image of the desired
+  type by setting the points where ``False`` is set to 0 and points where
+  ``True`` is set to the most adequate value taking into consideration the
+  destination data type ``dtype``. Here are support types and their ranges:
+
+    * numpy.uint8: ``[0, (2^8)-1]``
+    * numpy.uint16: ``[0, (2^16)-1]``
+    * numpy.uint32: ``[0, (2^32)-1]``
+    * numpy.uint64: ``[0, (2^64)-1]``
+    * numpy.float32: ``[0, 1.0]`` (fixed)
+    * numpy.float64: ``[0, 1.0]`` (fixed)
+    * numpy.float128: ``[0, 1.0]`` (fixed)
+
+  All other types are currently unsupported.
+
+
+  Parameters:
+
+    mask (numpy.ndarray): A 2D numpy ndarray with boolean data type, containing
+      the mask that will be converted into an image.
+
+    dtype (numpy.dtype): A valid numpy data-type from the list above for the
+      resulting image
+
+
+  Returns:
+
+    numpy.ndarray: With the designated data type, containing the binary image
+    formed from the mask.
+
+
+  Raises:
+
+    TypeError: If the type is not supported by this function
+
+  """
+
+  dtype = numpy.dtype(dtype)
+  retval = mask.astype(dtype)
+
+  if dtype in (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64):
+    retval[retval == 1] = numpy.iinfo(dtype).max
+
+  elif dtype in (numpy.float32, numpy.float64, numpy.float128):
+    pass
+
+  else:
+    raise TypeError("Data type %s is unsupported" % dtype)
+
+  return retval
+
+
+def show_image(image):
+  """Shows a single image
+
+  Parameters:
+
+    image (numpy.ndarray): A 2D numpy.ndarray composed of 8-bit unsigned
+      integers containing the original image
+
+  """
+
+  from PIL import Image
+  img = Image.fromarray(image)
+  img.show()
+
+
+def show_mask_over_image(image, mask, color='red'):
+  """Plots the mask over the image of a finger, for debugging purposes
+
+  Parameters:
+
+    image (numpy.ndarray): A 2D numpy.ndarray composed of 8-bit unsigned
+      integers containing the original image
+
+    mask (numpy.ndarray): A 2D numpy.ndarray composed of boolean values
+      containing the calculated mask
+
+  """
+
+  from PIL import Image
+
+  img = Image.fromarray(image).convert(mode='RGBA')
+  msk = Image.fromarray((~mask).astype('uint8')*80)
+  red = Image.new('RGBA', img.size, color=color)
+  img.paste(red, mask=msk)
+  img.show()
+
+
+def jaccard_index(a, b):
+  """Calculates the intersection over union for two masks
+
+  This function calculates the Jaccard index:
+
+  .. math::
+
+     J(A,B) &= \\frac{|A \\cap B|}{|A \\cup B|} \\\\
+            &= \\frac{|A \\cap B|}{|A|+|B|-|A \\cap B|}
+
+
+  Parameters:
+
+    a (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`
+
+    b (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`
+
+
+  Returns:
+
+    float: The floating point number that corresponds to the Jaccard index. The
+    float value lies inside the interval :math:`[0, 1]`. If ``a`` and ``b`` are
+    equal, then the similarity is maximum and the value output is ``1.0``. If
+    the areas are exclusive, then the value output by this function is ``0.0``.
+
+  """
+
+  return (a & b).sum().astype(float) / (a | b).sum().astype(float)
+
+
+def intersect_ratio(a, b):
+  """Calculates the intersection ratio between a probe and ground-truth
+
+  This function calculates the intersection ratio between a probe mask
+  (:math:`B`) and a ground-truth mask (:math:`A`; probably generated from an
+  annotation), and returns the ratio of overlap when the probe is compared to
+  the ground-truth data:
+
+  .. math::
+
+     R(A,B) = \\frac{|A \\cap B|}{|A|}
+
+  So, if the probe occupies the entirety of the ground-truth data, then the
+  output of this function is ``1.0``, otherwise, if areas are exclusive, then
+  this function returns ``0.0``. The output of this function should be analyzed
+  against the output of :py:func:`intersect_ratio_of_complement`, which
+  provides the complementary information about the intersection of the areas
+  being analyzed.
+
+
+  Parameters:
+
+    a (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`, that
+      corresponds to the **ground-truth object**
+
+    b (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`, that
+      corresponds to the probe object that will be compared to the ground-truth
+
+
+  Returns:
+
+    float: The floating point number that corresponds to the overlap ratio. The
+    float value lies inside the interval :math:`[0, 1]`.
+
+  """
+
+  return (a & b).sum().astype(float) / a.sum().astype(float)
+
+
+def intersect_ratio_of_complement(a, b):
+  """Calculates the intersection ratio between a probe and the ground-truth
+  complement
+
+  This function calculates the intersection ratio between a probe mask
+  (:math:`B`) and *the complement* of a ground-truth mask (:math:`A`; probably
+  generated from an annotation), and returns the ratio of overlap when the
+  probe is compared to the ground-truth data:
+
+  .. math::
+
+     R(A,B) = \\frac{|A^c \\cap B|}{|A|} = \\frac{|B \\setminus A|}{|A|}
+
+
+  So, if the probe is totally inside the ground-truth data, then the output of
+  this function is ``0.0``, otherwise, if areas are exclusive for example, then
+  this function outputs greater than zero. The output of this function should
+  be analyzed against the output of :py:func:`intersect_ratio`, which provides
+  the complementary information about the intersection of the areas being
+  analyzed.
+
+  Parameters:
+
+    a (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`, that
+      corresponds to the **ground-truth object**
+
+    b (numpy.ndarray): A 2D numpy array with dtype :py:obj:`bool`, that
+      corresponds to the probe object that will be compared to the ground-truth
+
+
+  Returns:
+
+    float: The floating point number that corresponds to the overlap ratio
+    between the probe area and the *complement* of the ground-truth area.
+    There are no bounds for the float value on the right side:
+    :math:`[0, +\\infty)`.
+
+  """
+
+  return ((~a) & b).sum().astype(float) / a.sum().astype(float)
diff --git a/bob/bio/vein/preprocessor/utils/__init__.py b/bob/bio/vein/preprocessor/utils/__init__.py
deleted file mode 100644
index 9992cb1d2637003c25066c4930c6b9962eac2333..0000000000000000000000000000000000000000
--- a/bob/bio/vein/preprocessor/utils/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .utils import ManualRoiCut
-from .utils import ConstructVeinImage
-from .utils import NormalizeImageRotation
-
-# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/vein/preprocessor/utils/utils.py b/bob/bio/vein/preprocessor/utils/utils.py
deleted file mode 100644
index 00d749c5e433f1a32742d73e2140e70137c866d6..0000000000000000000000000000000000000000
--- a/bob/bio/vein/preprocessor/utils/utils.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Aug  5 17:12:41 2016
-"""
-
-# import what is needed:
-import numpy as np
-from PIL import Image, ImageDraw, ImageFilter
-import scipy.ndimage
-from scipy.signal import convolve2d
-import scipy.ndimage.filters as fi
-import os
-import six
-
-
-class ManualRoiCut():
-  """
-  Class for manual roi extraction -- ``ManualRoiCut``.
-  
-  Args:
-    annotation (``File``, :py:class:`list`): The name of annotation file, with full path containing 
-      ROI annotation data (``Bob`` format, ``(x, y)``) **or** the list of annotation 
-      points (tuples) in ``Bob`` format -- ``(x, y)``. A *fail-safe* operation is implemented
-      ensuring that the annotation points are inside the image to be annotated.
-    image (``File``, :py:class:`numpy.ndarray`), optional: The name of the image to be annotation -  
-      full path or image data as :py:class:`numpy.ndarray`. Image is an optional parameter
-      because it isn't needed to generate ROI binary mask.
-    sizes (``tuple``): optional - a tuple of image size in ``Bob`` format ``(x,y)``.
-      This parameter is used **if** no image is given to generate binary mask.
-  
-  Returns:
-    A ``uint8`` :py:class:`numpy.ndarray` 2D array (image) containing ROI mask.
-    Value ``1`` determines ROI area, value ``0`` -- outside ROI area. ``uint8``
-    is chosen so that annotations could be used in the ``bob.bio.vein`` platform
-    (there seems to be problems when saving / loading ``bool`` objects).
-    
-  Examples:
-    -  generate ROI mask::
-    
-        from bob.bio.vein.preprocessors.utils import ManualRoiCut
-        roi = ManualRoiCut(roi_annotation_points).roi_mask()
-  
-    - replace image's outside-ROI with value ``pixel_value``::
-    
-        from bob.bio.vein.preprocessors.utils import ManualRoiCut
-        image_cutted = ManualRoiCut(roi_annotation_points, image).roi_image(pixel_value=0)
-  """
-  def __init__(self,annotation, image = None, sizes = (480, 480)):
-    if image is not None:
-      if isinstance(image, six.string_types):
-        if os.path.exists(image):
-          image = Image.open(image)
-          self.image = np.array(image)
-        else:
-          raise IOError("Doesn't exist file: {}".format(annotation))
-          return 1
-      else:
-        self.image = np.array(image)
-      self.size_y = self.image.shape[0]
-      self.size_x = self.image.shape[1]
-    else:
-      self.image = None
-      self.size_y = sizes[1]
-      self.size_x = sizes[0]
-    if isinstance(annotation, six.string_types):
-      if os.path.exists(annotation):
-        with open(annotation,'r') as f:
-          retval = np.loadtxt(f, ndmin=2)
-        self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in retval])
-      else:
-        raise IOError("Doesn' t exist file: {}".format(annotation))
-        return 1
-    else :
-      # Convert from Bob format(x,y) to regular (y, x)
-      self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in annotation])
-
-
-  def __test_size__(self, test_value, max_value):
-    if test_value >= 0 and test_value < max_value:
-      return test_value
-    elif test_value >= 0 and test_value < 60000:
-      return max_value
-    else:
-      return 0
-
-
-  def roi_mask(self):
-    """Method ``roi_mask`` - generates ROI mask.
-    
-    Returns:
-      A ``uint8`` :py:class:`numpy.ndarray` 2D array (image)
-      containing ROI mask. Value ``1`` determines ROI area, ``0`` -- outside
-      ROI area.
-    """
-    mask = Image.new('L', (self.size_x, self.size_y), 0)
-    ImageDraw.Draw(mask).polygon(self.annotation, outline=1, fill=1)
-    mask = np.array(mask, dtype = np.uint8)
-    mask = 0 < mask
-    return mask
-
-
-  def roi_image(self, pixel_value = 0):
-    """Method roi_image - replaces outside ROI pixel values with ``pixel_value``
-    (default - 0).
-    
-    Args:
-      pixel_value (``integer``): if given, outside-ROI region is replaced with this 
-        value. By default replaced with 0.
-      
-    Returns:
-      A copy of image that class was initialized with, outside ROI pixel
-      values are replaced with ``pixel_value``.
-    """
-    if self.image is not None:
-        mask = self.roi_mask()
-        self.image[mask == 0] = pixel_value
-        return self.image
-    else:
-        raise IOError("No input image given, can't perform non-ROI region removal")
-        return 1
-
-
-def ConstructVeinImage(annotation_dictionary, center = False):
-  """
-  Constructs a binary image from manual annotations. The class is made to be used with
-  the ``bob.db.biowave_v1`` database.
-  
-  The returned 2D array (see ``return value``, below) corresponds to a person's
-  vein pattern, marked by human-expert.
-  
-  Args:
-    annotation_dictionary (:py:class:`dict`): Dictionary containing image and annotation data.
-      Such :py:class:`dict` can be returned by the high level ``bob.db.biowave_v1`` 
-      implementation of the ``bob.db.biowave_v1`` database. It is supposed to contain
-      fields (as can be returned by the ``bob.db.biowave_v1`` high level implementation):
-
-      - ``image``
-      - ``roi_annotations``
-      - ``vein_annotations``
-      
-      Although only the ``image.shape[0]``, ``image.shape[1]`` and variable 
-      ``vein_annotations`` are used.
-    center (:py:class:`bool`): Flag, if set to ``True``, annotations are centered.
-  
-  Returns:
-    :py:class:`numpy.ndarray` : A 2D array with ``uint8`` values - value ``1``
-    represents annotated vein object. The output image is constructed using
-    annotation information - points.
-    Each line's points are connected and 5 pixels wide line is drawn. After 
-    all lines are drawn, lines are smoothed using Median filter with 
-    size 5x5 pixels.
-    
-  Examples:
-    Example to import the utils and run the function::
-        
-        from bob.bio.vein.preprocessors.utils import ConstructVeinImage
-        vein_image = ConstructVeinImage(annotation_dictionary, center = True)
-  """
-  image            = annotation_dictionary["image"]
-  #roi_annotations  = annotation_dictionary["roi_annotations"]
-  vein_annotations = annotation_dictionary["vein_annotations"]
-
-  im = Image.new('L', (image.shape[0], image.shape[1]), (0)) 
-  draw = ImageDraw.Draw(im)
-  if center == True:
-    xes_all = [point[1] for line in vein_annotations for point in line]
-    yes_all = [point[0] for line in vein_annotations for point in line]
-    for line in vein_annotations:
-      xes = [point[1] - np.round(np.mean(xes_all)) + 239 for point in line]
-      yes = [point[0] - np.round(np.mean(yes_all)) + 239 for point in line]
-      for point in range(len(line) - 1):
-        draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=1, width = 5)
-  else:
-    for line in vein_annotations:
-      xes = [point[1] for point in line]
-      yes = [point[0] for point in line]
-      for point in range(len(line) - 1):
-        draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=1, width = 5)
-  im = im.filter(ImageFilter.MedianFilter(5))
-  im = np.array(im, dtype = np.uint8)
-  return im
-
-
-# help functions for the ``NormalizeImageRotation`` function
-def __rotate_point__(x,y, angle):
-  """
-  [xp, yp] = __rotate_point__(x,y, angle)
-  """
-  if type(x) is list:
-    if len(x) != len(y):
-      raise IOError("Length of x and y should be equal")
-    xp = []
-    yp = []
-    for nr in range(len(x)):
-      xp.append(x[nr] * np.cos(np.radians(angle)) - y[nr] * np.sin(np.radians(angle)))
-      yp.append(y[nr] * np.cos(np.radians(angle)) + x[nr] * np.sin(np.radians(angle)))
-  else:
-    xp = x * np.cos(np.radians(angle)) - y * np.sin(np.radians(angle))
-    yp = y * np.cos(np.radians(angle)) + x * np.sin(np.radians(angle))
-  
-  return int(np.round(xp)), int(np.round(yp))
-
-
-def __guss_mask__(guss_size=27, sigma=6):
-    """Returns a 2D Gaussian kernel array."""
-    inp = np.zeros((guss_size, guss_size))
-    inp[guss_size//2, guss_size//2] = 1
-    return fi.gaussian_filter(inp, sigma)
-
-
-def __ramp__(a):
-  a = np.array(a)
-  a[a < 0]=0 
-  return a
-
-
-def __vein_filter__(image, a = 3, b = 4, sigma = 4, guss_size = 15, only_lines = True, dark_lines = True):
-  """
-  Vein filter
-  """
-  if dark_lines == True:
-    Z = 1
-  else:
-    Z = -1
-  
-  if type(image) != np.ndarray:
-    image = np.array(image, dtype = np.float)
-  
-  padsize = 2*a+b
-  gaussian_mask = __guss_mask__(guss_size, sigma)
-  
-  f2 = np.lib.pad(image, ((padsize, padsize), (padsize, padsize)), 'edge')
-  f2 = convolve2d(f2, gaussian_mask, mode='same')
-  
-  result = np.zeros(image.shape)
-  
-  for angle in np.arange(0,179,11.25 / 2):
-    [ap, bp] = __rotate_point__(-b,-2*a, angle)
-    mask_1 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(-b,-1*a, angle)
-    mask_2 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(-b,   0, angle)
-    mask_3 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(-b, 1*a, angle)
-    mask_4 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(-b, 2*a, angle)
-    mask_5 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(+b,-2*a, angle)
-    mask_6 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp] 
-    
-    [ap, bp] = __rotate_point__(+b,-1*a, angle)
-    mask_7 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(+b,   0, angle)
-    mask_8 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    [ap, bp] = __rotate_point__(+b, 1*a, angle)
-    mask_9 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-    [ap, bp] = __rotate_point__(+b, 2*a, angle)
-    mask_10 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-    
-    amplitude_rez = __ramp__(Z*(mask_1+mask_5+mask_6+mask_10)*3 \
-                     -Z*(mask_2+mask_3+mask_4+mask_7+mask_8+mask_9)*2)
-                     
-    if only_lines == True:
-      col = np.zeros((6,image.shape[0], image.shape[1]))
-      col[0] = np.minimum(__ramp__(-Z*mask_2+Z*mask_1),__ramp__(-Z*mask_2+Z*mask_5))
-      col[1] = np.minimum(__ramp__(-Z*mask_3+Z*mask_1),__ramp__(-Z*mask_3+Z*mask_5))
-      col[2] = np.minimum(__ramp__(-Z*mask_4+Z*mask_1),__ramp__(-Z*mask_4+Z*mask_5))
-      col[3] = np.minimum(__ramp__(-Z*mask_7+Z*mask_6),__ramp__(-Z*mask_7+Z*mask_10))
-      col[4] = np.minimum(__ramp__(-Z*mask_8+Z*mask_6),__ramp__(-Z*mask_8+Z*mask_10))
-      col[5] = np.minimum(__ramp__(-Z*mask_9+Z*mask_6),__ramp__(-Z*mask_9+Z*mask_10))
-      angle_rez = np.min(col, axis = 0)
-      amplitude_rez[angle_rez==0] = 0
-      
-    result = result + amplitude_rez*np.exp(1j*2*(angle - 90)*np.pi/180)
-    
-  result = np.abs(result) * np.exp(1j*np.angle(result)/2)
-  return result
-
-
-def __get_rotatation_angle__(image, dark_lines = False):
-  """
-  angle = get_rotatation_angle(image)
-  
-  Returns the rotation angle in deg.
-  """
-  result = __vein_filter__(image, a = 4, b = 1, sigma = 2, guss_size = 15, only_lines = True, dark_lines = False)
-  result_nonzero = result[np.abs(result) > np.abs(result).max() / 2]
-  result_angle = np.angle(result_nonzero, deg=True)
-  angle = result_angle.mean()
-  return angle
-
-
-def __rotate_image__(image, angle):
-  """
-  image = rotate_image(image, angle)
-  """
-  image = scipy.ndimage.rotate(image, angle, reshape = False, cval=0)
-  image[image > 255] = 255
-  image[image < 0]   = 0
-  return image
-
-
-def __align_image__(image, precision = 0.5, iterations = 25, dark_lines = False):
-  """
-  [image, rotation_angle, angle_error] = align_image(image, precision = 0.5, iterations = 25)
-  """
-  rotation_angle = 0
-  angle_error = __get_rotatation_angle__(image, dark_lines)
-  if abs(angle_error) <= precision:
-    return image, rotation_angle, angle_error
-  for k in range(iterations):
-    rotation_angle = rotation_angle + (angle_error * 0.33)
-    image = __rotate_image__(image, angle_error * 0.33)
-    angle_error = __get_rotatation_angle__(image, dark_lines)
-    #print(rotation_angle)
-    if abs(angle_error) <= precision or k == iterations - 1:
-      return image, rotation_angle, angle_error
-
-
-def NormalizeImageRotation(image, dark_lines = False):
-  """
-  function ``NormalizeImageRotation`` - automatically rotates image by a self-defined angle.
-  
-  So far tested only with annotations (binary images). Algorithm iteratively
-  searches for rotation angle such that when image is filtered with the 
-  ``vein filter`` (As published in the BIOSIG 2015), the ``mean`` filtered 
-  image's vector angle (for the pixels in filtered image with a magnitude at least 1/2 of the 
-  maximal value of the filtered image) is ``+/- 0.5`` [deg].
-  
-  Args:
-    image (:py:class:`numpy.ndarray`) : A 2D array containing input image. 
-      Currently tested only with binary images.
-    dark_lines (:py:class:`bool`) : A flag (default value - ``False``)
-      that determines what kind of lines algorithm is going to search for to 
-      align the image. With default value ``False`` it will search for *whiter than
-      background* lines (as is the case with annotations). If set 
-      to ``True`` -- will search for *darker than background* lines 
-      (as is the case with vein images).
-      
-  Returns:
-    :py:class:`numpy.ndarray` : A 2D array with rotated input image
-  
-  Examples:
-    Example to import the utils and use the function::
-        
-        from bob.bio.vein.preprocessors.utils import NormalizeImageRotation
-        image = NormalizeImageRotation(image, dark_lines = False)
-  """
-  [rotated_image, rotation_angle, angle_error] = __align_image__(image = image, dark_lines = dark_lines)
-  rotated_image = np.array(rotated_image, dtype = image.dtype)
-  return rotated_image
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy
deleted file mode 100644
index 33c4f26d2a7bb119f5bea4750e7daa67d3759744..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy and /dev/null differ
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png
deleted file mode 100644
index 3c87424906f75834b90c9158454101dc27bed444..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png and /dev/null differ
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt
deleted file mode 100644
index 83beedf262d2f40b46b17c4b0d248f33574954c6..0000000000000000000000000000000000000000
--- a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-11 91
-8 322
-76 320
-114 307
-140 300
-176 292
-225 292
-269 288
-330 287
-405 288
-436 290
-456 288
-468 276
-473 242
-472 208
-470 184
-466 146
-455 116
-440 93
-424 77
-397 69
-358 64
-298 60
-247 52
-201 38
-160 25
-130 7
-106 7
-81 16
-46 46
-22 71
diff --git a/bob/bio/vein/tests/test.py b/bob/bio/vein/tests/test.py
index e55d10537c97622c6c5006856ccb2830e3534187..ab9b76f9c6c3adddcba10568226bf8a4bd242766 100644
--- a/bob/bio/vein/tests/test.py
+++ b/bob/bio/vein/tests/test.py
@@ -23,6 +23,8 @@ import bob.io.base
 import bob.io.matlab
 import bob.io.image
 
+from ..preprocessor import utils as preprocessor_utils
+
 
 def F(parts):
   """Returns the test file path"""
@@ -30,42 +32,6 @@ def F(parts):
   return pkg_resources.resource_filename(__name__, os.path.join(*parts))
 
 
-def _show_image(image):
-  """Shows a single image
-
-  Parameters:
-
-    image (numpy.ndarray): A 2D numpy.ndarray compose of 8-bit unsigned
-      integers containing the original image
-
-  """
-
-  from PIL import Image
-  img = Image.fromarray(image)
-  img.show()
-
-
-def _show_mask_over_image(image, mask, color='red'):
-  """Plots the mask over the image of a finger, for debugging purposes
-
-  Parameters:
-
-    image (numpy.ndarray): A 2D numpy.ndarray compose of 8-bit unsigned
-      integers containing the original image
-
-    mask (numpy.ndarray): A 2D numpy.ndarray compose of boolean values
-      containing the calculated mask
-
-  """
-
-  from PIL import Image
-  img = Image.fromarray(image).convert(mode='RGBA')
-  msk = Image.fromarray((~mask).astype('uint8')*80)
-  red = Image.new('RGBA', img.size, color=color)
-  img.paste(red, mask=msk)
-  img.show()
-
-
 def test_finger_crop():
 
   input_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
@@ -80,7 +46,7 @@ def test_finger_crop():
   preprocess = FingerCrop(fingercontour='leemaskMatlab', padding_width=0)
 
   preproc, mask = preprocess(img)
-  #_show_mask_over_image(preproc, mask)
+  #preprocessor_utils.show_mask_over_image(preproc, mask)
 
   mask_ref = bob.io.base.load(output_fvr_filename).astype('bool')
   preproc_ref = bob.core.convert(bob.io.base.load(output_img_filename),
@@ -88,12 +54,12 @@ def test_finger_crop():
 
   assert numpy.mean(numpy.abs(mask - mask_ref)) < 1e-2
 
-  # Very loose comparison!
-  #_show_image(numpy.abs(preproc.astype('int16') - preproc_ref.astype('int16')).astype('uint8'))
+  # Very loose comparison!
+  #preprocessor_utils.show_image(numpy.abs(preproc.astype('int16') - preproc_ref.astype('int16')).astype('uint8'))
   assert numpy.mean(numpy.abs(preproc - preproc_ref)) < 1.3e2
 
 
-def test_miuramax():
+def test_max_curvature():
 
   #Maximum Curvature method against Matlab reference
 
@@ -118,7 +84,7 @@ def test_miuramax():
   assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 8e-3
 
 
-def test_miurarlt():
+def test_repeated_line_tracking():
 
   #Repeated Line Tracking method against Matlab reference
 
@@ -143,7 +109,7 @@ def test_miurarlt():
   assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 0.5
 
 
-def test_huangwl():
+def test_wide_line_detector():
 
   #Wide Line Detector method against Matlab reference
 
@@ -187,67 +153,328 @@ def test_miura_match():
 
   score_imp = MM.score(template_vein, probe_imp_vein)
   assert numpy.isclose(score_imp, 0.172906739278421)
-  
-def test_manualRoiCut():
-    """
-    Test ManualRoitCut
-    """
-    from bob.bio.vein.preprocessor.utils import ManualRoiCut
-    image_path      = F(('preprocessors', '0019_3_1_120509-160517.png'))
-    annotation_path  = F(('preprocessors', '0019_3_1_120509-160517.txt'))
-
-    c = ManualRoiCut(annotation_path, image_path)
-    mask_1 = c.roi_mask()
-    image_1 = c.roi_image()
-    # create mask using size:
-    c = ManualRoiCut(annotation_path, sizes=(672,380))
-    mask_2 = c.roi_mask()
-    
-    # loading image:
-    image = bob.io.base.load(image_path)
-    c = ManualRoiCut(annotation_path, image)
-    mask_3 = c.roi_mask()
-    image_3 = c.roi_image()
-    # load text file:
-    with open(annotation_path,'r') as f:
-        retval = numpy.loadtxt(f, ndmin=2)
-        
-    # carefully -- this is BOB format --- (x,y)
-    annotation = list([tuple([k[0], k[1]]) for k in retval])
-    c = ManualRoiCut(annotation, image)
-    mask_4 = c.roi_mask()
-    image_4 = c.roi_image()
-    
-    assert (mask_1 == mask_2).all()
-    assert (mask_1 == mask_3).all()
-    assert (mask_1 == mask_4).all()
-    assert (image_1 == image_3).all()
-    assert (image_1 == image_4).all()
-    
-def test_ConstructAnnotations():
-  """
-  Test ConstructAnnotations preprocessor
-  """
-  image_filename = F( ( 'preprocessors', 'ConstructAnnotations.png' ) )
-  roi_annotations_filename = F( ( 'preprocessors', 'ConstructAnnotations.txt' ) )
-  vein_annotations_filename = F( ( 'preprocessors', 'ConstructAnnotations.npy' ) )
-
-  image = bob.io.base.load( image_filename )
-  roi_annotations = np.loadtxt(roi_annotations_filename, dtype='uint16')
-  roi_annotations =  [tuple([point[0], point[1]]) for point in roi_annotations]
-  fp = open(vein_annotations_filename, 'rb')
-  vein_annotations = np.load(fp)
-  vein_annotations = vein_annotations['arr_0'].tolist()
-  fp.close()
-  vein_annotations = [[tuple([point[0], point[1]]) for point in line] for line in vein_annotations]
-  
-  annotation_dictionary = {"image" : image, "roi_annotations" : roi_annotations, "vein_annotations" : vein_annotations}
-  from bob.bio.vein.preprocessor.utils import ConstructVeinImage
-  from bob.bio.vein.preprocessor.utils import NormalizeImageRotation
-  output = ConstructVeinImage(annotation_dictionary, center = True)
-  output = NormalizeImageRotation(output, dark_lines = False)
-  assert np.array_equal(output, image)
-  
-  
-  
-  
+
+
+def test_assert_points():
+
+  # Tests that point assertion works as expected
+  area = (10, 5)
+  inside = [(0,0), (3,2), (9, 4)]
+  preprocessor_utils.assert_points(area, inside) #should not raise
+
+  def _check_outside(point):
+    # should raise, otherwise it is an error
+    try:
+      preprocessor_utils.assert_points(area, [point])
+    except AssertionError as e:
+      assert str(point) in str(e)
+    else:
+      raise AssertionError("Did not assert %s is outside of %s" % (point, area))
+
+  outside = [(-1, 0), (10, 0), (0, 5), (10, 5), (15,12)]
+  for k in outside: _check_outside(k)
+
+
+def test_fix_points():
+
+  # Tests that point clipping works as expected
+  area = (10, 5)
+  inside = [(0,0), (3,2), (9, 4)]
+  fixed = preprocessor_utils.fix_points(area, inside)
+  assert numpy.array_equal(inside, fixed), '%r != %r' % (inside, fixed)
+
+  fixed = preprocessor_utils.fix_points(area, [(-1, 0)])
+  assert numpy.array_equal(fixed, [(0, 0)])
+
+  fixed = preprocessor_utils.fix_points(area, [(10, 0)])
+  assert numpy.array_equal(fixed, [(9, 0)])
+
+  fixed = preprocessor_utils.fix_points(area, [(0, 5)])
+  assert numpy.array_equal(fixed, [(0, 4)])
+
+  fixed = preprocessor_utils.fix_points(area, [(10, 5)])
+  assert numpy.array_equal(fixed, [(9, 4)])
+
+  fixed = preprocessor_utils.fix_points(area, [(15, 12)])
+  assert numpy.array_equal(fixed, [(9, 4)])
+
+
+def test_poly_to_mask():
+
+  # Tests we can generate a mask out of a polygon correctly
+  area = (10, 9) #10 rows, 9 columns
+  polygon = [(2, 2), (2, 7), (7, 7), (7, 2)] #square shape, (y, x) format
+  mask = preprocessor_utils.poly_to_mask(area, polygon)
+  nose.tools.eq_(mask.dtype, numpy.bool)
+
+  # This should be the output:
+  expected = numpy.array([
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, True,  True,  True,  True,  True,  True,  False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      ])
+  assert numpy.array_equal(mask, expected)
+
+  polygon = [(3, 2), (5, 7), (8, 7), (7, 3)] #trapezoid, (y, x) format
+  mask = preprocessor_utils.poly_to_mask(area, polygon)
+  nose.tools.eq_(mask.dtype, numpy.bool)
+
+  # This should be the output:
+  expected = numpy.array([
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, True,  False, False, False, False, False, False],
+      [False, False, True,  True,  True,  False, False, False, False],
+      [False, False, False, True,  True,  True,  True,  True,  False],
+      [False, False, False, True,  True,  True,  True,  True,  False],
+      [False, False, False, True,  True,  True,  True,  True,  False],
+      [False, False, False, False, False, False, False, True,  False],
+      [False, False, False, False, False, False, False, False, False],
+      ])
+  assert numpy.array_equal(mask, expected)
+
+
+def test_mask_to_image():
+
+  # Tests we can correctly convert a boolean array into an image
+  # that makes sense according to the data types
+  sample = numpy.array([False, True])
+  nose.tools.eq_(sample.dtype, numpy.bool)
+
+  def _check_uint(n):
+    conv = preprocessor_utils.mask_to_image(sample, 'uint%d' % n)
+    nose.tools.eq_(conv.dtype, getattr(numpy, 'uint%d' % n))
+    target = [0, (2**n)-1]
+    assert numpy.array_equal(conv, target), '%r != %r' % (conv, target)
+
+  _check_uint(8)
+  _check_uint(16)
+  _check_uint(32)
+  _check_uint(64)
+
+  def _check_float(n):
+    conv = preprocessor_utils.mask_to_image(sample, 'float%d' % n)
+    nose.tools.eq_(conv.dtype, getattr(numpy, 'float%d' % n))
+    assert numpy.array_equal(conv, [0, 1.0]), '%r != %r' % (conv, [0, 1.0])
+
+  _check_float(32)
+  _check_float(64)
+  _check_float(128)
+
+
+  # This should be unsupported
+  try:
+    conv = preprocessor_utils.mask_to_image(sample, 'int16')
+  except TypeError as e:
+    assert 'int16' in str(e)
+  else:
+    raise AssertionError('Conversion to int16 did not trigger a TypeError')
+
+
+def test_jaccard_index():
+
+  # Tests to verify the Jaccard index calculation is accurate
+  a = numpy.array([
+    [False, False],
+    [True, True],
+    ])
+
+  b = numpy.array([
+    [True, True],
+    [True, False],
+    ])
+
+  nose.tools.eq_(preprocessor_utils.jaccard_index(a, b), 1.0/4.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(a, a), 1.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(b, b), 1.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(a, numpy.ones(a.shape, dtype=bool)), 2.0/4.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(a, numpy.zeros(a.shape, dtype=bool)), 0.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(b, numpy.ones(b.shape, dtype=bool)), 3.0/4.0)
+  nose.tools.eq_(preprocessor_utils.jaccard_index(b, numpy.zeros(b.shape, dtype=bool)), 0.0)
+
+
+def test_intersection_ratio():
+
+  # Tests to verify the intersection ratio calculation is accurate
+  a = numpy.array([
+    [False, False],
+    [True, True],
+    ])
+
+  b = numpy.array([
+    [True, False],
+    [True, False],
+    ])
+
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(a, b), 1.0/2.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(a, a), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(b, b), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(a, numpy.ones(a.shape, dtype=bool)), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(a, numpy.zeros(a.shape, dtype=bool)), 0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(b, numpy.ones(b.shape, dtype=bool)), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio(b, numpy.zeros(b.shape, dtype=bool)), 0)
+
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(a, b), 1.0/2.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(a, a), 0.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(b, b), 0.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(a, numpy.ones(a.shape, dtype=bool)), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(a, numpy.zeros(a.shape, dtype=bool)), 0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(b, numpy.ones(b.shape, dtype=bool)), 1.0)
+  nose.tools.eq_(preprocessor_utils.intersect_ratio_of_complement(b, numpy.zeros(b.shape, dtype=bool)), 0)
+
+
+def test_correlation():
+
+  # A test for convolution performance. Correlations are used on the Miura
+  # Match algorithm, therefore we want to make sure we can perform them as fast
+  # as possible.
+  import numpy
+  import scipy.signal
+  import bob.sp
+
+  # Rough example from Vera fingervein database
+  Y = 250
+  X = 600
+  CH = 80
+  CW = 90
+
+  def gen_ab():
+    a = numpy.random.randint(256, size=(Y, X)).astype(float)
+    b = numpy.random.randint(256, size=(Y-CH, X-CW)).astype(float)
+    return a, b
+
+
+  def bob_function(a, b):
+
+    # rotate input image by 180 degrees
+    b = numpy.rot90(b, k=2)
+
+    # Determine padding size in x and y dimension
+    size_a  = numpy.array(a.shape)
+    size_b  = numpy.array(b.shape)
+    outsize = size_a + size_b - 1
+
+    # Determine 2D cross correlation in Fourier domain
+    a2 = numpy.zeros(outsize)
+    a2[0:size_a[0],0:size_a[1]] = a
+    Fa = bob.sp.fft(a2.astype(numpy.complex128))
+
+    b2 = numpy.zeros(outsize)
+    b2[0:size_b[0],0:size_b[1]] = b
+    Fb = bob.sp.fft(b2.astype(numpy.complex128))
+
+    conv_ab = numpy.real(bob.sp.ifft(Fa*Fb))
+
+    h, w = size_a - size_b + 1
+
+    Nm = conv_ab[size_b[0]-1:size_b[0]-1+h, size_b[1]-1:size_b[1]-1+w]
+
+    t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+    # this is our output
+    Nmm = Nm[t0,s0]
+
+    # normalizes the output by the number of pixels lit on the input
+    # matrices, taking into consideration the surface that produced the
+    # result (i.e., the eroded model and part of the probe)
+    h, w = b.shape
+    return Nmm/(sum(sum(b)) + sum(sum(a[t0:t0+h-2*CH, s0:s0+w-2*CW])))
+
+
+  def scipy_function(a, b):
+    b = numpy.rot90(b, k=2)
+
+    Nm = scipy.signal.convolve2d(a, b, 'valid')
+
+    # figures out where the maximum is on the resulting matrix
+    t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+    # this is our output
+    Nmm = Nm[t0,s0]
+
+    # normalizes the output by the number of pixels lit on the input
+    # matrices, taking into consideration the surface that produced the
+    # result (i.e., the eroded model and part of the probe)
+    h, w = b.shape
+    return Nmm/(sum(sum(b)) + sum(sum(a[t0:t0+h-2*CH, s0:s0+w-2*CW])))
+
+
+  def scipy2_function(a, b):
+    b = numpy.rot90(b, k=2)
+    Nm = scipy.signal.fftconvolve(a, b, 'valid')
+
+    # figures out where the maximum is on the resulting matrix
+    t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+    # this is our output
+    Nmm = Nm[t0,s0]
+
+    # normalizes the output by the number of pixels lit on the input
+    # matrices, taking into consideration the surface that produced the
+    # result (i.e., the eroded model and part of the probe)
+    h, w = b.shape
+    return Nmm/(sum(sum(b)) + sum(sum(a[t0:t0+h-2*CH, s0:s0+w-2*CW])))
+
+
+  def scipy3_function(a, b):
+    Nm = scipy.signal.correlate2d(a, b, 'valid')
+
+    # figures out where the maximum is on the resulting matrix
+    t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
+
+    # this is our output
+    Nmm = Nm[t0,s0]
+
+    # normalizes the output by the number of pixels lit on the input
+    # matrices, taking into consideration the surface that produced the
+    # result (i.e., the eroded model and part of the probe)
+    h, w = b.shape
+    return Nmm/(sum(sum(b)) + sum(sum(a[t0:t0+h-2*CH, s0:s0+w-2*CW])))
+
+  a, b = gen_ab()
+
+  assert numpy.allclose(bob_function(a, b), scipy_function(a, b))
+  assert numpy.allclose(scipy_function(a, b), scipy2_function(a, b))
+  assert numpy.allclose(scipy2_function(a, b), scipy3_function(a, b))
+
+  # if you want to test timings, uncomment the following section
+  '''
+  import time
+
+  start = time.clock()
+  N = 10
+  for i in range(N):
+    a, b = gen_ab()
+    bob_function(a, b)
+  total = time.clock() - start
+  print('bob implementation, %d iterations - %.2e per iteration' % (N, total/N))
+
+  start = time.clock()
+  for i in range(N):
+    a, b = gen_ab()
+    scipy_function(a, b)
+  total = time.clock() - start
+  print('scipy+convolve, %d iterations - %.2e per iteration' % (N, total/N))
+
+  start = time.clock()
+  for i in range(N):
+    a, b = gen_ab()
+    scipy2_function(a, b)
+  total = time.clock() - start
+  print('scipy+fftconvolve, %d iterations - %.2e per iteration' % (N, total/N))
+
+  start = time.clock()
+  for i in range(N):
+    a, b = gen_ab()
+    scipy3_function(a, b)
+  total = time.clock() - start
+  print('scipy+correlate2d, %d iterations - %.2e per iteration' % (N, total/N))
+  '''
diff --git a/doc/api.rst b/doc/api.rst
index 69648f71996570fd8df3468082a962348322d57a..80510bde729cb75e3bd8771f9cb6fc88a6d8b10d 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -15,7 +15,15 @@ which can be used for vein experiments.
 Database Interfaces
 -------------------
 
-.. automodule:: bob.bio.vein.database
+Vera Fingervein Database
+========================
+
+.. automodule:: bob.bio.vein.database.verafinger
+
+UTFVP Database
+==============
+
+.. automodule:: bob.bio.vein.database.utfvp
 
 
 Pre-processors
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 35a55a1f276cbf09678319999ff698d57a6460be..b9ce77a1dad621a4d7cb50314d532e32ebbef0a8 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -107,10 +107,10 @@ protocol, do the following:
    submit your job for SGE execution, you can run it in parallel (using 4
    parallel tasks) by adding the options ``--parallel=4 --nice=10``.
 
-   Optionally, you may use the ``parallel` resource configuration which already
-   sets the number of parallel jobs to the number of hardware cores you have
-   installed on your machine (as with :py:func:`multiprocessing.cpu_count`) and
-   sets ``nice=10``. For example:
+   Optionally, you may use the ``parallel`` resource configuration which
+   already sets the number of parallel jobs to the number of hardware cores you
+   have installed on your machine (as with
+   :py:func:`multiprocessing.cpu_count`) and sets ``nice=10``. For example:
 
    .. code-block:: sh
 
diff --git a/doc/conf.py b/doc/conf.py
index 0bfbf6f46b3e36506391f3b0f660e010cc27f533..79293bccd89fe0646494daad2f0999b2798e983a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -243,6 +243,11 @@ if os.path.exists(sphinx_requirements):
 else:
     intersphinx_mapping = link_documentation()
 
+# Add scikit-image link
+skimage_version = pkg_resources.require('scikit-image')[0].version
+skimage_version = '.'.join(skimage_version.split('.')[:2])
+intersphinx_mapping['http://scikit-image.org/docs/%s.x' % skimage_version] = \
+    None
 
 # We want to remove all private (i.e. _. or __.__) members
 # that are not in the list of accepted functions
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
index 915108c8a1e41e17b4fbf7b332a0d4791ee2002b..635fc7d6aa4135600d0e928d58e3934ef7b44f3b 100644
--- a/doc/nitpick-exceptions.txt
+++ b/doc/nitpick-exceptions.txt
@@ -1,3 +1,4 @@
-
 # These are not documented at all in Python 2.7, but works for 3.x
+py:exc TypeError
 py:exc ValueError
+py:exc AssertionError
diff --git a/requirements.txt b/requirements.txt
index 368de5ebe5467d8db073beb022872e39906a91b1..4e9fece6314c48e6f4361cffad564d363cccc8c8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,4 +11,5 @@ bob.ip.base
 bob.bio.base
 bob.db.utfvp
 bob.db.verafinger
-bob.db.putvein
\ No newline at end of file
+bob.db.putvein
+scikit-image
diff --git a/version.txt b/version.txt
index 32408126ec211aba4d227f21a0485a408cb23bbb..50aea0e7aba1ab64fce04e96fb64bf9599a1c2a5 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-2.0.0a0
\ No newline at end of file
+2.1.0
\ No newline at end of file