diff --git a/bob/bio/vein/preprocessor/__init__.py b/bob/bio/vein/preprocessor/__init__.py
index 9ad9c7c63c2f4876c23634a57e22511a8a14d19a..d0afb7398d206f1b9a02428de63d5aa60e92a9c4 100644
--- a/bob/bio/vein/preprocessor/__init__.py
+++ b/bob/bio/vein/preprocessor/__init__.py
@@ -1,6 +1,6 @@
 from .crop import Cropper, FixedCrop, NoCrop
 from .mask import Padder, Masker, FixedMask, NoMask, AnnotatedRoIMask
-from .mask import KonoMask, LeeMask, TomesLeeMask, WatershedMask
+from .mask import KonoMask, LeeMask, TomesLeeMask
 from .normalize import Normalizer, NoNormalization, HuangNormalization
 from .filters import Filter, NoFilter, HistogramEqualization
 from .preprocessor import Preprocessor
@@ -31,7 +31,6 @@ __appropriate__(
     KonoMask,
     LeeMask,
     TomesLeeMask,
-    WatershedMask,
     Normalizer,
     NoNormalization,
     HuangNormalization,
diff --git a/bob/bio/vein/preprocessor/mask.py b/bob/bio/vein/preprocessor/mask.py
index e56439cfe47286576683337ec4ac411844b985a6..45b615df874d0bc9a7da93dca89aa783d4c14456 100644
--- a/bob/bio/vein/preprocessor/mask.py
+++ b/bob/bio/vein/preprocessor/mask.py
@@ -477,193 +477,3 @@ class TomesLeeMask(Masker):
     else:
       w = self.padder.padding_width
       return finger_mask[w:-w,w:-w]
-
-
-class WatershedMask(Masker):
-  """Estimates the finger region given an input NIR image using Watershedding
-
-  This method uses the `Watershedding Morphological Algorithm
-  <https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_ for determining
-  the finger mask given an input image.
-
-  The masker works first by determining image edges using a simple 2-D Sobel
-  filter. The next step is to determine markers in the image for both the
-  finger region and background. Markers are set on the image by using a
-  pre-trained feed-forward neural network model (multi-layer perceptron or MLP)
-  learned from existing annotations. The model is trained in a separate
-  program and operates on 3x3 regions around the pixel to be predicted for
-  finger/background. The ``(y,x)`` location is also provided as input to the
-  classifier. The feature vector is thus composed of the 9 pixel values plus
-  the (normalized) ``y`` and ``x`` coordinates of the pixel. The network then
-  outputs a prediction based on these inputs: the closer the output is to
-  ``1.0``, the more likely the pixel lies within the finger region.
-
-  Values output by the network are thresholded in order to remove uncertain
-  markers. Both the foreground and background thresholds are configurable.
-
-  Given the neural-net markers, a series of morphological opening operations
-  is used to remove noise before watershedding the edges obtained from the
-  Sobel-filtered original image.
-
-
-  Parameters:
-
-    model (str): Path to the model file to be used for generating
-      finger/background markers. This model should be pre-trained using a
-      separate program.
-
-    foreground_threshold (float): Threshold applied to the network output
-      (interval :math:`[0, 1]`) above which a pixel is considered a finger
-      (foreground) marker.  The higher the value, the more selective the
-      algorithm will be and the fewer (foreground) markers will be taken
-      from the network selection. This value should be a floating point
-      number in the open interval :math:`(0.0, 1.0)`.  If
-      ``background_threshold`` is not set, the threshold for background
-      selection is set to :math:`1.0-T`, where ``T`` represents this threshold.
-
-    background_threshold (float): Threshold applied to the network output
-      (interval :math:`[0, 1]`) below which a pixel is considered a background
-      marker.  The smaller the value, the more selective the algorithm will
-      be and the fewer (background) markers will be taken from the network
-      selection. This value should be a floating point number in the open
-      interval :math:`(0.0, 1.0)`.  If ``foreground_threshold`` is not set,
-      the threshold for foreground selection is set to :math:`1.0-T`, where
-      ``T`` represents this threshold.
-
-
-  """
-
-
-  def __init__(self, model, foreground_threshold, background_threshold):
-
-    import bob.io.base
-    import bob.learn.mlp
-    import bob.learn.activation
-
-    self.labeller = bob.learn.mlp.Machine((11,10,1))
-    h5f = bob.io.base.HDF5File(model)
-    self.labeller.load(h5f)
-    self.labeller.output_activation = bob.learn.activation.Logistic()
-    del h5f
-
-    # adjust threshold from background and foreground
-    if foreground_threshold is None and background_threshold is not None:
-      foreground_threshold = 1 - background_threshold
-    if background_threshold is None and foreground_threshold is not None:
-      background_threshold = 1 - foreground_threshold
-    if foreground_threshold is None and background_threshold is None:
-      foreground_threshold = 0.5
-      background_threshold = 0.5
-
-    self.foreground_threshold = foreground_threshold
-    self.background_threshold = background_threshold
-
-
-  class _filterfun(object):
-    '''Callable for filtering the input image with marker predictions'''
-
-
-    def __init__(self, image, labeller):
-      self.labeller = labeller
-      self.features = numpy.zeros(self.labeller.shape[0], dtype='float64')
-      self.output = numpy.zeros(self.labeller.shape[-1], dtype='float64')
-
-      # builds indexes beforehand, based on image dimensions
-      idx = numpy.mgrid[:image.shape[0], :image.shape[1]]
-      self.indexes = numpy.array([idx[0].flatten(), idx[1].flatten()],
-          dtype='float64')
-      self.indexes[0,:] /= image.shape[0]
-      self.indexes[1,:] /= image.shape[1]
-      self.current = 0
-
-
-    def __call__(self, arr):
-
-      self.features[:9] = arr.astype('float64')/255
-      self.features[-2:] = self.indexes[:,self.current]
-      self.current += 1
-      return self.labeller(self.features, self.output)
-
-
-  def run(self, image):
-    '''Fully preprocesses the input image and returns intermediate results
-
-      Parameters:
-
-        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
-          input image
-
-
-      Returns:
-
-        numpy.ndarray: A 2D numpy array of type ``uint8`` with the markers for
-        foreground and background, selected by the neural network model
-
-        numpy.ndarray: A 2D numpy array of type ``float64`` with the edges used
-        to define the borders of the watershedding process
-
-        numpy.ndarray: A 2D numpy array of type boolean with the calculated
-        mask. ``True`` values correspond to regions where the finger is
-        located
-
-    '''
-
-    # applies the pre-trained neural network model to get predictions about
-    # finger/background regions
-    function = WatershedMask._filterfun(image, self.labeller)
-    predictions = numpy.zeros(image.shape, 'float64')
-    scipy.ndimage.filters.generic_filter(image, function,
-        size=3, mode='nearest', output=predictions)
-
-    selector = skimage.morphology.disk(radius=5)
-
-    # applies a morphological "opening" operation
-    # (https://en.wikipedia.org/wiki/Opening_(morphology)) to remove outliers
-    markers_bg = numpy.where(predictions<self.background_threshold, 1, 0)
-    markers_bg = skimage.morphology.opening(markers_bg, selem=selector)
-    markers_fg = numpy.where(predictions>=self.foreground_threshold, 255, 0)
-    markers_fg = skimage.morphology.opening(markers_fg, selem=selector)
-
-    # avoids markers on finger borders
-    selector = skimage.morphology.disk(radius=2)
-    markers_fg = skimage.morphology.erosion(markers_fg, selem=selector)
-
-    # the final markers are a combination of foreground and background markers
-    markers = markers_fg | markers_bg
-
-    # this will determine the natural boundaries in the image where the
-    # flooding will be limited - dilation is applied on the output of the
-    # Sobel filter to clearly mark the finger boundaries
-    edges = skimage.filters.sobel(image)
-    edges = skimage.morphology.dilation(edges, selem=selector)
-
-    # applies watershedding to get a final estimate of the finger mask
-    segmentation = skimage.morphology.watershed(edges, markers)
-
-    # removes small perturbations and makes the finger region more uniform
-    segmentation[segmentation==1] = 0
-    mask = skimage.morphology.binary_opening(segmentation.astype('bool'),
-        selem=selector)
-
-    return markers, edges, mask
-
-
-  def __call__(self, image):
-    '''Inputs an image, returns a mask (numpy boolean array)
-
-      Parameters:
-
-        image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
-          input image
-
-
-      Returns:
-
-        numpy.ndarray: A 2D numpy array of type boolean with the calculated
-        mask. ``True`` values correspond to regions where the finger is
-        located
-
-    '''
-
-    markers, edges, mask = self.run(image)
-    return mask
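
The ``WatershedMask`` docstring above describes a marker-based watershed
recipe: Sobel edges as the flooding surface, neural-net-seeded
foreground/background markers, and morphological clean-up. A minimal
standalone sketch of that recipe, using only NumPy and scikit-image, is shown
below; the per-pixel ``predictions`` map stands in for the MLP output, and the
function name and threshold defaults are illustrative rather than part of any
released API.

import numpy
import skimage.filters
import skimage.morphology
import skimage.segmentation


def watershed_finger_mask(image, predictions, fg_threshold=0.7,
    bg_threshold=0.3):
  '''Returns a boolean mask (``True`` = finger) for a 2D ``uint8`` NIR image

  ``predictions`` is a float array with the same shape as ``image``, holding,
  for every pixel, the likelihood (in ``[0, 1]``) of belonging to the finger.
  '''

  # seed markers: label 2 where we are confident about "finger", label 1 where
  # we are confident about "background", 0 elsewhere (left for the flooding)
  footprint = skimage.morphology.disk(5)
  fg = skimage.morphology.binary_opening(predictions >= fg_threshold, footprint)
  bg = skimage.morphology.binary_opening(predictions < bg_threshold, footprint)

  # shrinks foreground seeds so they do not touch the finger borders
  small = skimage.morphology.disk(2)
  fg = skimage.morphology.binary_erosion(fg, small)

  markers = numpy.zeros(image.shape, dtype='int32')
  markers[bg] = 1
  markers[fg] = 2

  # flooding surface: dilated Sobel magnitude emphasizes the finger boundaries
  edges = skimage.morphology.dilation(skimage.filters.sobel(image), small)

  # floods the edge map from the seeds and keeps only the "finger" label
  labels = skimage.segmentation.watershed(edges, markers)
  return skimage.morphology.binary_opening(labels == 2, small)
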
diff --git a/bob/bio/vein/script/markdet.py b/bob/bio/vein/script/markdet.py
deleted file mode 100644
index 687eb5621d6e0d0970b0fa95ca3e5cdcb0fd10d5..0000000000000000000000000000000000000000
--- a/bob/bio/vein/script/markdet.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-
-"""Trains a new MLP to perform pre-watershed marker detection
-
-Usage: %(prog)s [-v...] [--samples=N] [--model=PATH] [--points=N] [--hidden=N]
-                [--batch=N] [--iterations=N] <database> <protocol> <group>
-       %(prog)s --help
-       %(prog)s --version
-
-
-Arguments:
-
-  <database>  Name of the database to use for creating the model (options are:
-              "fv3d" or "verafinger")
-  <protocol>  Name of the protocol to use for creating the model (options
-              depend on the database chosen)
-  <group>     Name of the group to use on the database/protocol with the
-              samples to use for training the model (options are: "train",
-              "dev" or "eval")
-
-Options:
-
-  -h, --help             Shows this help message and exits
-  -V, --version          Prints the version and exits
-  -v, --verbose          Increases the output verbosity level. Using "-vv"
-                         allows the program to output informational messages as
-                         it goes along.
-  -m PATH, --model=PATH  Path to the generated model file [default: model.hdf5]
-  -s N, --samples=N      Maximum number of samples to use for training. If not
-                         set, use all samples
-  -p N, --points=N       Maximum number of samples to use for plotting
-                         ground-truth and classification errors. The more
-                         points, the less responsive the plot becomes
-                         [default: 1000]
-  -H N, --hidden=N       Number of neurons on the hidden layer of the
-                         multi-layer perceptron [default: 5]
-  -b N, --batch=N        Number of samples to use for every batch [default: 1]
-  -i N, --iterations=N   Number of iterations to train the neural net for
-                         [default: 2000]
-
-
-Examples:
-
-  Trains on the 3D Fingervein database:
-
-     $ %(prog)s -vv fv3d central dev
-
-  Saves the model to a different file, use only 100 samples:
-
-    $ %(prog)s -vv -s 100 --model=/path/to/saved-model.hdf5 fv3d central dev
-
-"""
-
-
-import os
-import sys
-import schema
-import docopt
-import numpy
-import skimage
-
-
-def validate(args):
-  '''Validates command-line arguments, returns parsed values
-
-  This function uses :py:mod:`schema` for validating :py:mod:`docopt`
-  arguments. Logging level is not checked by this procedure (actually, it is
-  ignored) and must be set up beforehand, as some of the elements here may use
-  logging for outputting information.
-
-
-  Parameters:
-
-    args (dict): Dictionary of arguments as defined by the help message and
-      returned by :py:mod:`docopt`
-
-
-  Returns:
-
-    dict: Validated dictionary with the same keys as the input and with values
-      possibly transformed by the validation procedure
-
-
-  Raises:
-
-    schema.SchemaError: in case one of the checked options does not validate.
-
-  '''
-
-  from .validate import check_model_does_not_exist, validate_protocol, \
-      validate_group
-
-  sch = schema.Schema({
-    '--model': check_model_does_not_exist,
-    '--samples': schema.Or(schema.Use(int), None),
-    '--points': schema.Use(int),
-    '--hidden': schema.Use(int),
-    '--batch': schema.Use(int),
-    '--iterations': schema.Use(int),
-    '<database>': lambda n: n in ('fv3d', 'verafinger'),
-    '<protocol>': validate_protocol(args['<database>']),
-    '<group>': validate_group(args['<database>']),
-    str: object, #ignores strings we don't care about
-    }, ignore_extra_keys=True)
-
-  return sch.validate(args)
-
-
-def main(user_input=None):
-
-  if user_input is not None:
-    argv = user_input
-  else:
-    argv = sys.argv[1:]
-
-  import pkg_resources
-
-  completions = dict(
-      prog=os.path.basename(sys.argv[0]),
-      version=pkg_resources.require('bob.bio.vein')[0].version
-      )
-
-  args = docopt.docopt(
-      __doc__ % completions,
-      argv=argv,
-      version=completions['version'],
-      )
-
-  try:
-    from .validate import setup_logger
-    logger = setup_logger('bob.bio.vein', args['--verbose'])
-    args = validate(args)
-  except schema.SchemaError as e:
-    sys.exit(e)
-
-  if args['<database>'] == 'fv3d':
-    from bob.bio.vein.config.fv3d import database as db
-  elif args['<database>'] == 'verafinger':
-    from bob.bio.vein.config.verafinger import database as db
-  else:
-    raise schema.SchemaError('Database %s is not supported' % \
-        args['<database>'])
-
-  database_replacement = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
-  db.replace_directories(database_replacement)
-  objects = db.objects(protocol=args['<protocol>'], groups=args['<group>'])
-  if args['--samples'] is None:
-    args['--samples'] = len(objects)
-
-  from ..preprocessor.utils import poly_to_mask
-  features = None
-  target = None
-  loaded = 0
-  for k, sample in enumerate(objects):
-
-    if args['--samples'] is not None and loaded >= args['--samples']:
-      break
-    path = sample.make_path(directory=db.original_directory,
-        extension=db.original_extension)
-    logger.info('Loading sample %d/%d (%s)...', loaded, len(objects), path)
-    image = sample.load(directory=db.original_directory,
-        extension=db.original_extension)
-    if not (hasattr(image, 'metadata') and 'roi' in image.metadata):
-      logger.info('Skipping sample (no ROI)')
-      continue
-    loaded += 1
-
-    # copy() required by skimage.util.shape.view_as_windows()
-    image = image.copy().astype('float64') / 255.
-    windows = skimage.util.shape.view_as_windows(image, (3,3))
-
-    if features is None and target is None:
-      features = numpy.zeros(
-          (args['--samples']*windows.shape[0]*windows.shape[1],
-            windows.shape[2]*windows.shape[3]+2), dtype='float64')
-      target = numpy.zeros(args['--samples']*windows.shape[0]*windows.shape[1],
-          dtype='bool')
-
-    mask = poly_to_mask(image.shape, image.metadata['roi'])
-
-    mask = mask[1:-1, 1:-1]
-    for y in range(windows.shape[0]):
-      for x in range(windows.shape[1]):
-        idx = ((loaded-1)*windows.shape[0]*windows.shape[1]) + \
-            (y*windows.shape[1]) + x
-        features[idx,:-2] = windows[y,x].flatten()
-        features[idx,-2] = y+1
-        features[idx,-1] = x+1
-        target[idx] = mask[y,x]
-
-  # if number of loaded samples is smaller than expected, clip features array
-  features = features[:loaded*windows.shape[0]*windows.shape[1]]
-  target = target[:loaded*windows.shape[0]*windows.shape[1]]
-
-  # normalize w.r.t. dimensions
-  features[:,-2] /= image.shape[0]
-  features[:,-1] /= image.shape[1]
-
-  target_float = target.astype('float64')
-  target_float[~target] = -1.0
-  target_float = target_float.reshape(len(target), 1)
-  positives = features[target]
-  negatives = features[~target]
-  logger.info('There are %d samples on input dataset', len(target))
-  logger.info('  %d are negatives', len(negatives))
-  logger.info('  %d are positives', len(positives))
-
-  import bob.learn.mlp
-
-  # by default, machine uses hyperbolic tangent output
-  machine = bob.learn.mlp.Machine((features.shape[1], args['--hidden'], 1))
-  machine.randomize() #initialize weights randomly
-  loss = bob.learn.mlp.SquareError(machine.output_activation)
-  train_biases = True
-  trainer = bob.learn.mlp.RProp(args['--batch'], loss, machine, train_biases)
-  trainer.reset()
-  shuffler = bob.learn.mlp.DataShuffler([negatives, positives],
-      [[-1.0], [+1.0]])
-
-  # start cost
-  output = machine(features)
-  cost = loss.f(output, target_float)
-  logger.info('[initial] MSE = %g', cost.mean())
-
-  # trains the network for the requested number of iterations
-  for i in range(args['--iterations']):
-    try:
-      _feats, _tgts = shuffler.draw(args['--batch'])
-      trainer.train(machine, _feats, _tgts)
-      logger.info('[%d] MSE = %g', i, trainer.cost(_tgts))
-    except KeyboardInterrupt:
-      print() #avoids the ^C line
-      logger.info('Gracefully stopping training before limit (%d iterations)',
-          args['--iterations'])
-      break
-
-  # describe errors
-  neg_output = machine(negatives)
-  pos_output = machine(positives)
-  neg_errors = neg_output >= 0
-  pos_errors = pos_output < 0
-  hter_train = ((sum(neg_errors) / float(len(negatives))) + \
-      (sum(pos_errors)) / float(len(positives))) / 2.0
-  logger.info('Training set HTER: %.2f%%', 100*hter_train)
-  logger.info('  Errors on negatives: %d / %d', sum(neg_errors), len(negatives))
-  logger.info('  Errors on positives: %d / %d', sum(pos_errors), len(positives))
-
-  threshold = 0.8
-  neg_errors = neg_output >= threshold
-  pos_errors = pos_output < -threshold
-  hter_train = ((sum(neg_errors) / float(len(negatives))) + \
-      (sum(pos_errors)) / float(len(positives))) / 2.0
-  logger.info('Training set HTER (threshold=%g): %.2f%%', threshold,
-      100*hter_train)
-  logger.info('  Errors on negatives: %d / %d', sum(neg_errors), len(negatives))
-  logger.info('  Errors on positives: %d / %d', sum(pos_errors), len(positives))
-  # plot separation threshold
-  import matplotlib.pyplot as plt
-  from mpl_toolkits.mplot3d import Axes3D
-
-  # only plot N random samples otherwise it makes it too slow
-  N = numpy.random.randint(min(len(negatives), len(positives)),
-      size=min(len(negatives), len(positives), args['--points']))
-
-  fig = plt.figure()
-
-  ax = fig.add_subplot(211, projection='3d')
-  ax.scatter(image.shape[1]*negatives[N,-1], image.shape[0]*negatives[N,-2],
-      255*negatives[N,4], label='negatives', color='blue', marker='.')
-  ax.scatter(image.shape[1]*positives[N,-1], image.shape[0]*positives[N,-2],
-      255*positives[N,4], label='positives', color='red', marker='.')
-  ax.set_xlabel('Width')
-  ax.set_xlim(0, image.shape[1])
-  ax.set_ylabel('Height')
-  ax.set_ylim(0, image.shape[0])
-  ax.set_zlabel('Intensity')
-  ax.set_zlim(0, 255)
-  ax.legend()
-  ax.grid()
-  ax.set_title('Ground Truth')
-  plt.tight_layout()
-
-  ax = fig.add_subplot(212, projection='3d')
-  neg_plot = negatives[neg_output[:,0]>=threshold]
-  pos_plot = positives[pos_output[:,0]<-threshold]
-  N = numpy.random.randint(min(len(neg_plot), len(pos_plot)),
-      size=min(len(neg_plot), len(pos_plot), args['--points']))
-  ax.scatter(image.shape[1]*neg_plot[N,-1], image.shape[0]*neg_plot[N,-2],
-      255*neg_plot[N,4], label='negatives', color='red', marker='.')
-  ax.scatter(image.shape[1]*pos_plot[N,-1], image.shape[0]*pos_plot[N,-2],
-      255*pos_plot[N,4], label='positives', color='blue', marker='.')
-  ax.set_xlabel('Width')
-  ax.set_xlim(0, image.shape[1])
-  ax.set_ylabel('Height')
-  ax.set_ylim(0, image.shape[0])
-  ax.set_zlabel('Intensity')
-  ax.set_zlim(0, 255)
-  ax.legend()
-  ax.grid()
-  ax.set_title('Classifier Errors')
-  plt.tight_layout()
-
-  print('Close plot window to save model and end program...')
-  plt.show()
-  import bob.io.base
-  h5f = bob.io.base.HDF5File(args['--model'], 'w')
-  machine.save(h5f)
-  del h5f
-  logger.info('Saved MLP model to %s', args['--model'])
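
The training script above assembles its dataset from 3x3 pixel neighbourhoods
plus normalized centre coordinates, labelled by the annotated region of
interest. A compact, vectorized sketch of that feature construction for a
single image follows; the function name and the ``roi_mask`` input are
illustrative (in the script, the mask is obtained via ``poly_to_mask`` from
the sample's ``roi`` metadata).

import numpy
import skimage.util


def features_for_image(image, roi_mask):
  '''Builds the (features, target) pair for the marker-detection MLP

  ``image`` is a 2D ``uint8`` array; ``roi_mask`` is a boolean array of the
  same shape with ``True`` inside the annotated finger region.  Each feature
  row holds the 9 values of a 3x3 neighbourhood (scaled to ``[0, 1]``),
  followed by the normalized ``y`` and ``x`` coordinates of its centre.
  '''

  image = image.astype('float64') / 255.
  windows = skimage.util.view_as_windows(image, (3, 3))  # (H-2, W-2, 3, 3)
  ny, nx = windows.shape[:2]

  features = numpy.zeros((ny * nx, 11), dtype='float64')
  features[:, :9] = windows.reshape(ny * nx, 9)

  # centre coordinates of every window, normalized w.r.t. image dimensions
  yy, xx = numpy.mgrid[1:ny + 1, 1:nx + 1]
  features[:, 9] = yy.flatten() / image.shape[0]
  features[:, 10] = xx.flatten() / image.shape[1]

  # the label of each sample is the annotation at the window centre
  target = roi_mask[1:-1, 1:-1].flatten()
  return features, target
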
diff --git a/bob/bio/vein/script/watershed_mask.py b/bob/bio/vein/script/watershed_mask.py
deleted file mode 100644
index 924cd6935c48b52f0448c1bb0a6671fdecbf9c33..0000000000000000000000000000000000000000
--- a/bob/bio/vein/script/watershed_mask.py
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Wed  4 Oct 11:23:52 2017 CEST
-
-
-"""Preprocesses a fingervein image with a watershed/neural-net seeded mask
-
-Usage: %(prog)s [-v...] [-s <path>] [-f <float>] [-b <float>] [--scan]
-                <model> <database> [<stem>...]
-       %(prog)s --help
-       %(prog)s --version
-
-
-Arguments:
-
-  <model>     Path to the model to use for finding watershed markers
-  <database>  Name of the database to use for creating the model (options are:
-              "fv3d" or "verafinger")
-  <stem>      Name of the object in the database to display, without the root
-              or the extension. If none is provided, run for all possible
-              stems in the database
-
-
-Options:
-
-  -h, --help                  Shows this help message and exits
-  -V, --version               Prints the version and exits
-  -v, --verbose               Increases the output verbosity level
-  -f, --fg-threshold=<float>  Foreground threshold value. Should be set to a
-                              number that is between 0.5 and 1.0. The higher,
-                              the fewer markers for the foreground watershed
-                              process will be produced. [default: 0.7]
-  -b, --bg-threshold=<float>  Background threshold value. Should be set to a
-                              number that is between 0.0 and 0.5. The smaller,
-                              the fewer markers for the background watershed
-                              process will be produced. [default: 0.3]
-  -S, --scan                  If set, ignores the threshold settings and scans
-                              the whole range of thresholds, printing the
-                              Jaccard, M1 and M2 merit figures
-  -s <path>, --save=<path>    If set, saves individual images to files instead
-                              of displaying the result of processing. Pass the
-                              name of a directory that will be created and
-                              suffixed with the paths of the original images.
-
-
-Examples:
-
-  Visualize the preprocessing toolchain over a single image
-
-     $ %(prog)s model.hdf5 verafinger sample-stem
-
-  Save the results of the preprocessing to several files. In this case, the
-  program runs non-interactively:
-
-     $ %(prog)s -s graphics model.hdf5 verafinger sample-stem
-
-  Scan the set of possible thresholds, printing the Jaccard, M1 and M2 indexes:
-
-     $ %(prog)s --scan model.hdf5 verafinger sample-stem
-
-"""
-
-
-import os
-import sys
-import time
-
-import numpy
-
-import schema
-import docopt
-
-import bob.core
-logger = bob.core.log.setup("bob.bio.vein")
-
-import matplotlib.pyplot as plt
-
-import bob.io.base
-import bob.io.image
-
-
-def validate(args):
-  '''Validates command-line arguments, returns parsed values
-
-  This function uses :py:mod:`schema` for validating :py:mod:`docopt`
-  arguments. Logging level is not checked by this procedure (actually, it is
-  ignored) and must be set up beforehand, as some of the elements here may use
-  logging for outputting information.
-
-
-  Parameters:
-
-    args (dict): Dictionary of arguments as defined by the help message and
-      returned by :py:mod:`docopt`
-
-
-  Returns:
-
-    dict: Validated dictionary with the same keys as the input and with values
-      possibly transformed by the validation procedure
-
-
-  Raises:
-
-    schema.SchemaError: in case one of the checked options does not validate.
-
-  '''
-
-  valid_databases = ('fv3d', 'verafinger')
-
-  sch = schema.Schema({
-    '<model>': schema.And(os.path.exists,
-      error='<model> should point to an existing path'),
-    '<database>': schema.And(lambda n: n in valid_databases,
-      error='<database> must be one of %s' % ', '.join(valid_databases)),
-    '--fg-threshold': schema.And(
-      schema.Use(float), lambda n: 0.5 < n < 1.0,
-      error='--fg-threshold should be a float between 0.5 and 1.0',
-      ),
-    '--bg-threshold': schema.And(
-      schema.Use(float), lambda n: 0.0 < n < 0.5,
-      error='--bg-threshold should be a float between 0.0 and 0.5',
-      ),
-    str: object, #ignores strings we don't care about
-    }, ignore_extra_keys=True)
-
-  return sch.validate(args)
-
-
-def save_figures(title, image, markers, edges, mask):
-  '''Saves individual images to a directory
-  '''
-
-  dirname = os.path.dirname(title)
-  if not os.path.exists(dirname): os.makedirs(dirname)
-  bob.io.base.save(image, os.path.join(title, 'original.png'))
-
-  _ = markers.copy().astype('uint8')
-  _[_==1] = 128
-  bob.io.base.save(_, os.path.join(title, 'markers.png'))
-
-  bob.io.base.save((255*edges).astype('uint8'), os.path.join(title,'edges.png'))
-
-  bob.io.base.save(mask.astype('uint8')*255, os.path.join(title, 'mask.png'))
-
-  from ..preprocessor.utils import draw_mask_over_image
-
-  masked_image = draw_mask_over_image(image, mask)
-  masked_image.save(os.path.join(title, 'masked.png'))
-
-
-def make_figure(image, markers, edges, mask):
-  '''Returns a matplotlib figure with the detailed processing result'''
-
-  plt.clf() #completely clears the current figure
-  figure = plt.gcf()
-  plt.subplot(2,2,1)
-  _ = markers.copy().astype('uint8')
-  _[_==1] = 128
-  plt.imshow(_, cmap='gray')
-  plt.title('Markers')
-
-  plt.subplot(2,2,2)
-  _ = numpy.dstack([
-      (_ | (255*edges).astype('uint8')),
-      _,
-      _,
-      ])
-  plt.imshow(_)
-  plt.title('Edges')
-
-  plt.subplot(2,2,3)
-  plt.imshow(mask.astype('uint8')*255, cmap='gray')
-  plt.title('Mask')
-
-  plt.subplot(2,2,4)
-  plt.imshow(image, cmap='gray')
-  red_mask = numpy.dstack([
-      (~mask).astype('uint8')*255,
-      numpy.zeros_like(image),
-      numpy.zeros_like(image),
-      ])
-  plt.imshow(red_mask, alpha=0.15)
-  plt.title('Image (masked)')
-
-  return figure
-
-
-def process_one(args, image, path):
-  '''Processes a single image'''
-
-  from bob.bio.vein.preprocessor import WatershedMask, AnnotatedRoIMask
-
-  # loads the processor once - avoids re-reading weights from the disk
-  processor = WatershedMask(
-      model=args['<model>'],
-      foreground_threshold=args['--fg-threshold'],
-      background_threshold=args['--bg-threshold'],
-      )
-
-  annotator = AnnotatedRoIMask()
-
-  from bob.bio.vein.preprocessor.utils import \
-      jaccard_index, intersect_ratio, intersect_ratio_of_complement
-
-  start = time.time()
-  markers, edges, mask = processor.run(image)
-  total_time = time.time() - start
-
-  # error
-  annotated_mask = annotator(image)
-  ji = jaccard_index(annotated_mask, mask)
-  m1 = intersect_ratio(annotated_mask, mask)
-  m2 = intersect_ratio_of_complement(annotated_mask, mask)
-  logger.debug('%s, %.2f, %.2f, %.2f, %g, %g, %g', path, total_time,
-    args['--fg-threshold'], args['--bg-threshold'], ji, m1, m2)
-
-  if not args['--scan']:
-
-    if args['--save']:
-      dest = os.path.join(args['--save'], path)
-      save_figures(dest, image, markers, edges, mask)
-    else:
-      fig = make_figure(image, markers, edges, mask)
-      fig.suptitle('%s @ %s - JI=%.4f, M1=%.4f, M2=%.4f\n' \
-          '($\\tau_{FG}$ = %.2f - $\\tau_{BG}$ = %.2f)' % \
-          (path, args['<database>'], ji, m1, m2, args['--fg-threshold'],
-            args['--bg-threshold']), fontsize=12)
-      print('Close the figure to continue...')
-      plt.show()
-
-  return (path, total_time, args['--fg-threshold'], args['--bg-threshold'],
-      ji, m1, m2)
-
-
-def eval_best_thresholds(results):
-  '''Evaluates the best thresholds taking into consideration various indexes'''
-
-  m1 = numpy.array([k[-2] for k in results])
-  m2 = numpy.array([k[-1] for k in results])
-  index = m1/m2
-  return index.argmax()
-
-
-def main(user_input=None):
-
-  if user_input is not None:
-    argv = user_input
-  else:
-    argv = sys.argv[1:]
-
-  import pkg_resources
-
-  completions = dict(
-      prog=os.path.basename(sys.argv[0]),
-      version=pkg_resources.require('bob.bio.vein')[0].version
-      )
-
-  args = docopt.docopt(
-      __doc__ % completions,
-      argv=argv,
-      version=completions['version'],
-      )
-
-  try:
-    from .validate import setup_logger
-    logger = setup_logger('bob.bio.vein', args['--verbose'])
-    args = validate(args)
-  except schema.SchemaError as e:
-    sys.exit(e)
-
-  if args['<database>'] == 'fv3d':
-    from bob.bio.vein.config.fv3d import database as db
-  elif args['<database>'] == 'verafinger':
-    from bob.bio.vein.config.verafinger import database as db
-
-  database_replacement = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
-  db.replace_directories(database_replacement)
-  all_files = db.objects()
-
-  # if a specific <stem> was not provided, run for all possible stems
-  if not args['<stem>']:
-    args['<stem>'] = [k.path for k in all_files]
-
-  # Loads each image, computes its mask and displays or saves the results
-  for stem in args['<stem>']:
-    f = [k for k in all_files if k.path == stem]
-    if len(f) == 0:
-      raise RuntimeError('File with stem "%s" does not exist in "%s"' % \
-          (stem, args['<database>']))
-
-    f = f[0]
-    image = f.load(db.original_directory, db.original_extension)
-
-    if args['--scan']:
-      results = []
-      logger.debug('stem, time, fg_thres, bg_thres, jaccard, m1, m2')
-      for fg_threshold in numpy.arange(0.6, 1.0, step=0.1):
-        for bg_threshold in numpy.arange(0.1, 0.5, step=0.1):
-          args['--fg-threshold'] = fg_threshold
-          args['--bg-threshold'] = bg_threshold
-          results.append(process_one(args, image, f.path))
-      best_thresholds = eval_best_thresholds(results)
-      logger.info('%s: FG = %.2f | BG = %.2f | M1/M2 = %.2f', f.path,
-        results[best_thresholds][2], results[best_thresholds][3],
-        results[best_thresholds][-2]/results[best_thresholds][-1])
-    else:
-      process_one(args, image, f.path)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 4b4822849ab11a306ffd16f1a12806cbffb35226..01a0051fb02f430478164a8254d45eb47c49877a 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -10,8 +10,6 @@ build:
     - compare_rois.py = bob.bio.vein.script.compare_rois:main
     - view_sample.py = bob.bio.vein.script.view_sample:main
     - blame.py = bob.bio.vein.script.blame:main
-    - markdet.py = bob.bio.vein.script.markdet:main
-    - watershed_mask.py = bob.bio.vein.script.watershed_mask:main
   number: {{ environ.get('BOB_BUILD_NUMBER', 0) }}
   run_exports:
     - {{ pin_subpackage(name) }}
@@ -41,8 +39,6 @@ requirements:
     - bob.ip.color
     - bob.bio.base
     - bob.learn.linear
-    - bob.learn.activation
-    - bob.learn.mlp
 
   run:
     - python
@@ -62,8 +58,6 @@ test:
     - compare_rois.py --help
     - view_sample.py --help
     - blame.py --help
-    - markdet.py --help
-    - watershed_mask.py --help
     - nosetests --with-coverage --cover-package={{ name }} -sv {{ name }}
     - sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx
     - sphinx-build -aEb doctest {{ project_dir }}/doc sphinx
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 28c21c3625e539dcb691d2d52cfab344afe66abd..61018d91f1d077be827c234b5c44e8fadbf58732 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -17,7 +17,7 @@ $ bob bio pipelines vanilla-biometrics [DATABASE_NAME] [BASELINE]
   Both, `[DATABASE_NAME]` and `[BASELINE]` can be either python resources or
   python files.
 
-  Please, refer to :ref:`bob.bio.base <bob.bio.base>` for more information.  
+  Please refer to :ref:`bob.bio.base <bob.bio.base>` for more information.
 
 
 Repeated Line-Tracking with Miura Matching
@@ -37,8 +37,8 @@ protocol, do the following:
 .. tip::
 
    If you have more processing cores on your local machine and don't want to
-   submit your job for SGE execution, you can run it in parallel by adding the options ``-l local-parallel``. 
-   
+   submit your job for SGE execution, you can run it in parallel by adding the option ``-l local-parallel``.
+
    .. code-block:: sh
 
       $ bob bio pipelines vanilla-biometrics verafinger rlt -vv -c -l local-parallel
@@ -261,49 +261,6 @@ This package contains other resources that can be used to evaluate different
 bits of the vein processing toolchain.
 
 
-Training the Watershed Finger region detector
-=============================================
-
-The correct detection of the finger boundaries is an important step in many
-algorithms for the recognition of finger veins. It makes it possible to
-compensate for rotation and scaling issues one might find when comparing
-models and probes. In this package, we propose a novel finger boundary
-detector based on the `Watershedding Morphological Algorithm
-<https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_. Watershedding
-works in three steps:
-
-1. Determine markers on the original image indicating the types of areas one
-   would like to detect (e.g. "finger" or "background")
-2. Determine a 2D (gray-scale) surface representing the original image in which
-   darker spots (representing valleys) are more likely to be filled by
-   surrounding markers. This is normally achieved by filtering the image with a
-   high-pass filter like Sobel or using an edge detector such as Canny.
-3. Run the watershed algorithm
-
-In order to determine markers for step 1, we train a neural network which
-outputs the likelihood of a point being part of a finger, given its coordinates
-and values of surrounding pixels.
-
-When used to run an experiment,
-:py:class:`bob.bio.vein.preprocessor.WatershedMask` requires that you provide
-a *pre-trained* neural network model that sets the markers before
-watershedding takes place. In order to create one, you can run the program
-``bob_bio_vein_markdet.py``:
-
-.. code-block:: sh
-
-   $ bob_bio_vein_markdet.py --hidden=20 --samples=500 fv3d central dev
-
-As arguments to this application you pass the database, protocol and subset
-name you wish to use for training the network. The data is loaded up to a
-maximum number of samples from the dataset (passed with ``--samples=N``),
-the network is trained and saved to an HDF5 file (by default, the file is
-called ``model.hdf5``, but the name can be changed with the option
-``--model=``).  Once you have a model, you can use this masker by
-constructing an object and attaching it to the
-:py:class:`bob.bio.vein.preprocessor.Preprocessor` entry of your configuration.
-
-
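
The last paragraph above mentions constructing a masker object and attaching
it to the :py:class:`bob.bio.vein.preprocessor.Preprocessor` entry of your
configuration. A hypothetical sketch of such a configuration follows; the
``crop``/``mask``/``normalize``/``filter`` keywords mirror the configuration
style used elsewhere in this package, but treat the exact signatures as
assumptions to be checked against your installed version.

.. code-block:: python

   from bob.bio.vein.preprocessor import (
       NoCrop, WatershedMask, HuangNormalization, NoFilter, Preprocessor)

   # the model file is the output of bob_bio_vein_markdet.py; the threshold
   # values are illustrative
   preprocessor = Preprocessor(
       crop=NoCrop(),
       mask=WatershedMask(
           model='model.hdf5',
           foreground_threshold=0.7,
           background_threshold=0.3,
           ),
       normalize=HuangNormalization(padding_width=0, padding_constant=0),
       filter=NoFilter(),
       )
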
 Region of Interest Goodness of Fit
 ==================================
 
diff --git a/doc/links.rst b/doc/links.rst
index b3b9b2f7d2928d59ef95295977082a0ee66b589a..9f45ed7da06ec483438c111cd75054aabd415464 100644
--- a/doc/links.rst
+++ b/doc/links.rst
@@ -21,5 +21,4 @@
 .. _mailing list: https://www.idiap.ch/software/bob/discuss
 .. _bob.bio.base: https://pypi.python.org/pypi/bob.bio.base
 .. _jaccard index: https://en.wikipedia.org/wiki/Jaccard_index
-.. _watershed: https://en.wikipedia.org/wiki/Watershed_(image_processing)
 .. _gridtk: https://pypi.python.org/pypi/gridtk
diff --git a/requirements.txt b/requirements.txt
index e010b07b3ea0f3f385cd4565ea5978b4354f0572..fa88f2e7f70b101265dcab29c3a48dc14d596844 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,5 +14,3 @@ bob.ip.base
 bob.ip.color
 bob.bio.base
 bob.learn.linear
-bob.learn.activation
-bob.learn.mlp
diff --git a/setup.py b/setup.py
index c178b58ae38804880595d59cff9b7808f67ec70e..fbbe5a436ac65f84cedbffc656f40a2860e7bd45 100644
--- a/setup.py
+++ b/setup.py
@@ -70,8 +70,6 @@ setup(
             "bob_bio_vein_compare_rois.py = bob.bio.vein.script.compare_rois:main",
             "bob_bio_vein_view_sample.py = bob.bio.vein.script.view_sample:main",
             "bob_bio_vein_blame.py = bob.bio.vein.script.blame:main",
-            "bob_bio_vein_markdet.py = bob.bio.vein.script.markdet:main",
-            "bob_bio_vein_watershed_mask.py = bob.bio.vein.script.watershed_mask:main",
         ],
     },
     classifiers=[