diff --git a/LICENSE b/COPYING
similarity index 100%
rename from LICENSE
rename to COPYING
diff --git a/MANIFEST.in b/MANIFEST.in
index 4a6bfd23ab0b5beb5c0da50537912ceef7183e02..946a661e098c89b0b5b505c5b22594f474fd8b9a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,2 @@
-include README.rst buildout.cfg LICENSE version.txt requirements.txt
+include README.rst buildout.cfg COPYING version.txt requirements.txt
 recursive-include doc *.rst *.png *.ico *.txt
diff --git a/bob/ip/binseg/configs/datasets/imagefolderinference.py b/bob/ip/binseg/configs/datasets/imagefolderinference.py
index ba760e876811ef5bc80259a7008e6e66e23958f4..634566b9c0f8c899474494651044f138e610f724 100644
--- a/bob/ip/binseg/configs/datasets/imagefolderinference.py
+++ b/bob/ip/binseg/configs/datasets/imagefolderinference.py
@@ -7,7 +7,8 @@ from bob.ip.binseg.data.imagefolderinference import ImageFolderInference
 #### Config ####
 
 # add your transforms below
-transforms = Compose([  
+transforms = Compose([
+                        ToRGB(),
                         CenterCrop((544,544))
                         ,ToTensor()
                     ])
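
A hedged usage sketch (not part of the patch) of the composed transform stack above, assuming a PIL image as input; the ``Compose`` used in this package passes samples as positional arguments and returns a list::

    # Illustration only: the file name and shapes below are assumptions.
    from PIL import Image
    from bob.ip.binseg.data.transforms import Compose, ToRGB, CenterCrop, ToTensor

    transforms = Compose([
        ToRGB(),                 # adaptive conversion to RGB (see transforms.py)
        CenterCrop((544, 544)),  # crop to the expected network input size
        ToTensor(),              # PIL.Image -> torch.Tensor in [0, 1]
    ])

    img = Image.open("fundus_image.png")  # hypothetical input file
    sample = transforms(img)              # list of transformed samples
    tensor = sample[0]                    # torch.Tensor of shape (3, 544, 544)
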
diff --git a/bob/ip/binseg/data/imagefolderinference.py b/bob/ip/binseg/data/imagefolderinference.py
index 79e57e245ffbc3bd369f29523f36f1d09cabc833..c4218755f099e5455e474b67fcaa004f99f46ca4 100644
--- a/bob/ip/binseg/data/imagefolderinference.py
+++ b/bob/ip/binseg/data/imagefolderinference.py
@@ -8,24 +8,45 @@ import torch
 import torchvision.transforms.functional as VF
 import bob.io.base
 
-def get_file_lists(data_path):
+def get_file_lists(data_path, glob):
+    """
+    Recursively retrieves a list of files from a given path, matching a given glob
+
+    This function uses :py:meth:`pathlib.Path.rglob` together with the
+    provided glob pattern to search for filenames matching the desired pattern.
+    """
+
     data_path = Path(data_path)
-    image_file_names = np.array(sorted(list(data_path.glob('*'))))
+    image_file_names = np.array(sorted(list(data_path.rglob(glob))))
     return image_file_names
 
 class ImageFolderInference(Dataset):
     """
     Generic ImageFolder containing images for inference
-    
+
+    Notice that this implementation, contrary to its sister
+    :py:class:`.ImageFolder`, does not *automatically*
+    convert the input image to RGB before passing it to the transforms, so it
+    is possible to accommodate a wider range of input types (e.g. 16-bit PNG
+    images).
+
     Parameters
     ----------
     path : str
         full path to root of dataset
-    
+
+    glob : str
+        glob pattern used to filter down the files to be loaded from the
+        provided path
+
+    transform : list
+        List of transformations to apply to every input sample
+
     """
-    def __init__(self, path, transform = None):
+    def __init__(self, path, glob='*', transform = None):
         self.transform = transform
-        self.img_file_list = get_file_lists(path)
+        self.path = path
+        self.img_file_list = get_file_lists(path, glob)
 
     def __len__(self):
         """
@@ -35,27 +56,27 @@ class ImageFolderInference(Dataset):
             size of the dataset
         """
         return len(self.img_file_list)
-    
+
     def __getitem__(self,index):
         """
         Parameters
         ----------
         index : int
-        
+
         Returns
         -------
         list
             dataitem [img_name, img]
         """
         img_path = self.img_file_list[index]
-        img_name = img_path.name
-        img = Image.open(img_path).convert(mode='RGB')
-    
+        img_name = img_path.relative_to(self.path).as_posix()
+        img = Image.open(img_path)
+
         sample = [img]
-        
+
         if self.transform :
             sample = self.transform(*sample)
-        
+
         sample.insert(0,img_name)
-        
+
         return sample
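
For reference, a hedged sketch of how the updated dataset could be instantiated with the new ``glob`` parameter (the paths and pattern below are illustrative only; :py:meth:`pathlib.Path.rglob` already searches recursively)::

    from bob.ip.binseg.data.imagefolderinference import ImageFolderInference
    from bob.ip.binseg.data.transforms import Compose, ToRGB, CenterCrop, ToTensor

    dataset = ImageFolderInference(
        path="/path/to/images",   # hypothetical root folder
        glob="*.png",             # matched recursively under ``path``
        transform=Compose([ToRGB(), CenterCrop((544, 544)), ToTensor()]),
    )

    # Each item is ``[name, tensor]``; ``name`` is now the path relative to
    # ``path`` (POSIX style), which may contain sub-directories.
    name, tensor = dataset[0]
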
diff --git a/bob/ip/binseg/data/transforms.py b/bob/ip/binseg/data/transforms.py
index 1336ff135c89f4e33db6add57910f3894cbe5dcb..b97fe7959ba95e571462d6f6d9d2ddade9fad226 100644
--- a/bob/ip/binseg/data/transforms.py
+++ b/bob/ip/binseg/data/transforms.py
@@ -11,6 +11,7 @@ import math
 from math import floor
 import warnings
 import collections
+import bob.core
 
 _pil_interpolation_to_str = {
     Image.NEAREST: 'PIL.Image.NEAREST',
@@ -22,7 +23,7 @@ _pil_interpolation_to_str = {
 }
 Iterable = collections.abc.Iterable
 
-# Compose 
+# Compose
 
 class Compose:
     """Composes several transforms.
@@ -62,7 +63,7 @@ class CenterCrop:
     """
     def __init__(self, size):
         self.size = size
-        
+
     def __call__(self, *args):
         return [VF.center_crop(img, self.size) for img in args]
 
@@ -70,24 +71,24 @@ class CenterCrop:
 class Crop:
     """
     Crop at the given coordinates.
-    
+
     Attributes
     ----------
-    i : int 
+    i : int
         upper pixel coordinate.
-    j : int 
+    j : int
         left pixel coordinate.
-    h : int 
+    h : int
         height of the cropped image.
-    w : int 
+    w : int
         width of the cropped image.
     """
     def __init__(self, i, j, h, w):
         self.i = i
         self.j = j
-        self.h = h 
-        self.w = w 
-    
+        self.h = h
+        self.w = w
+
     def __call__(self, *args):
         return [img.crop((self.j, self.i, self.j + self.w, self.i + self.h)) for img in args]
 
@@ -97,34 +98,61 @@ class Pad:
 
     Attributes
     ----------
-    padding : int or tuple 
-        padding on each border. If a single int is provided this is used to pad all borders. 
+    padding : int or tuple
+        padding on each border. If a single int is provided this is used to pad all borders.
         If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively.
         If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively.
-    
+
     fill : int
-        pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. 
-        This value is only used when the padding_mode is constant   
+        pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
+        This value is only used when the padding_mode is constant
     """
     def __init__(self, padding, fill=0):
         self.padding = padding
         self.fill = fill
-        
+
     def __call__(self, *args):
         return [VF.pad(img, self.padding, self.fill, padding_mode='constant') for img in args]
-    
+
+class AutoLevel16to8:
+    """Converts a 16-bit image to 8-bit representation using "auto-level"
+
+    This transform assumes that the input images are gray-scale.
+
+    To auto-level, we calculate the minimum and the maximum of the image,
+    and consider that such a range should be mapped to the [0, 255] range
+    of the destination image.
+    """
+    def _process_one(self, img):
+        return Image.fromarray(bob.core.convert(img, 'uint8', (0,255),
+            img.getextrema()))
+
+    def __call__(self, *args):
+        return [self._process_one(img) for img in args]
+
+class ToRGB:
+    """Converts from any input format to RGB, using an ADAPTIVE conversion.
+
+    This transform takes the input image and converts it to RGB using
+    :py:meth:`PIL.Image.Image.convert`, with ``mode='RGB'`` and using all other
+    defaults.  This may be aggressive if applied to 16-bit images without
+    further considerations.
+    """
+    def __call__(self, *args):
+        return [img.convert(mode="RGB") for img in args]
+
 class ToTensor:
     """Converts :py:class:`PIL.Image.Image` to :py:class:`torch.Tensor` """
     def __call__(self, *args):
         return [VF.to_tensor(img) for img in args]
 
-        
+
 # Augmentations
 
 class RandomHFlip:
     """
     Flips horizontally
-    
+
     Attributes
     ----------
     prob : float
@@ -132,50 +160,50 @@ class RandomHFlip:
     """
     def __init__(self, prob = 0.5):
         self.prob = prob
-        
+
     def __call__(self, *args):
         if random.random() < self.prob:
             return [VF.hflip(img) for img in args]
-            
+
         else:
             return args
-    
-    
+
+
 class RandomVFlip:
     """
     Flips vertically
-    
+
     Attributes
     ----------
-    prob : float 
+    prob : float
         probability at which imgage is flipped. Defaults to ``0.5``
     """
     def __init__(self, prob = 0.5):
         self.prob = prob
-        
+
     def __call__(self, *args):
         if random.random() < self.prob:
             return [VF.vflip(img) for img in args]
-            
+
         else:
             return args
-    
+
 
 class RandomRotation:
     """
     Rotates by degree
-    
+
     Attributes
     ----------
     degree_range : tuple
         range of degrees in which image and ground truth are rotated. Defaults to ``(-15, +15)``
-    prob : float 
+    prob : float
         probability at which imgage is rotated. Defaults to ``0.5``
     """
     def __init__(self, degree_range = (-15, +15), prob = 0.5):
         self.prob = prob
         self.degree_range = degree_range
-        
+
     def __call__(self, *args):
         if random.random() < self.prob:
             degree = random.randint(*self.degree_range)
@@ -184,21 +212,21 @@ class RandomRotation:
             return args
 
 class ColorJitter(object):
-    """ 
+    """
     Randomly change the brightness, contrast, saturation and hue
-    
+
     Attributes
     ----------
-    brightness : float 
+    brightness : float
         how much to jitter brightness. brightness_factor
         is chosen uniformly from ``[max(0, 1 - brightness), 1 + brightness]``.
     contrast : float
         how much to jitter contrast. contrast_factor
         is chosen uniformly from ``[max(0, 1 - contrast), 1 + contrast]``.
-    saturation : float 
+    saturation : float
         how much to jitter saturation. saturation_factor
         is chosen uniformly from ``[max(0, 1 - saturation), 1 + saturation]``.
-    hue : float 
+    hue : float
         how much to jitter hue. hue_factor is chosen uniformly from
         ``[-hue, hue]``. Should be >=0 and <= 0.5
     prob : float
@@ -247,21 +275,21 @@ class ColorJitter(object):
 
 class RandomResizedCrop:
     """Crop to random size and aspect ratio.
-    A crop of random size of the original size and a random aspect ratio of 
-    the original aspect ratio is made. This crop is finally resized to 
+    A crop of random size of the original size and a random aspect ratio of
+    the original aspect ratio is made. This crop is finally resized to
     given size. This is popularly used to train the Inception networks.
-    
+
     Attributes
     ----------
-    size : int 
+    size : int
         expected output size of each edge
-    scale : tuple 
+    scale : tuple
         range of size of the origin size cropped. Defaults to ``(0.08, 1.0)``
     ratio : tuple
         range of aspect ratio of the origin aspect ratio cropped. Defaults to ``(3. / 4., 4. / 3.)``
     interpolation :
         Defaults to ``PIL.Image.BILINEAR``
-    prob : float 
+    prob : float
         probability at which the operation is applied. Defaults to ``0.5``
     """
 
@@ -332,7 +360,7 @@ class RandomResizedCrop:
 
 class Resize:
     """Resize to given size.
-    
+
     Attributes
     ----------
     size : tuple or int
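
A hedged sketch (not part of the patch) showing how the two new transforms above could be chained to feed a 16-bit, single-channel PNG to a model trained on 8-bit RGB inputs::

    from PIL import Image
    from bob.ip.binseg.data.transforms import Compose, AutoLevel16to8, ToRGB, ToTensor

    transforms = Compose([
        AutoLevel16to8(),  # map the image's own min/max onto [0, 255] (via bob.core.convert)
        ToRGB(),           # the image is now 8-bit gray; convert it to RGB
        ToTensor(),
    ])

    img = Image.open("retina_16bit.png")  # hypothetical 16-bit grayscale image
    tensor = transforms(img)[0]
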
diff --git a/bob/ip/binseg/engine/inferencer.py b/bob/ip/binseg/engine/inferencer.py
index c2dd6443152cef96fba9dfdbe5a3907f948e8ebf..c1ed5fafa2aa20b46a749fcdca3f29a46a003455 100644
--- a/bob/ip/binseg/engine/inferencer.py
+++ b/bob/ip/binseg/engine/inferencer.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import os 
+import os
 import logging
 import time
 import datetime
@@ -30,7 +30,7 @@ def batch_metrics(predictions, ground_truths, names, output_folder, logger):
     ground_truths : :py:class:`torch.Tensor`
         tensor with binary ground-truth
     names : list
-        list of file names 
+        list of file names
     output_folder : str
         output path
     logger : :py:class:`logging.Logger`
@@ -38,7 +38,7 @@ def batch_metrics(predictions, ground_truths, names, output_folder, logger):
 
     Returns
     -------
-    list 
+    list
         list containing batch metrics: ``[name, threshold, precision, recall, specificity, accuracy, jaccard, f1_score]``
     """
     step_size = 0.01
@@ -50,25 +50,25 @@ def batch_metrics(predictions, ground_truths, names, output_folder, logger):
 
         file_name = "{}.csv".format(names[j])
         logger.info("saving {}".format(file_name))
-        
+
         with open (os.path.join(output_folder,file_name), "w+") as outfile:
 
             outfile.write("threshold, precision, recall, specificity, accuracy, jaccard, f1_score\n")
 
-            for threshold in np.arange(0.0,1.0,step_size):        
+            for threshold in np.arange(0.0,1.0,step_size):
                 # threshold
                 binary_pred = torch.gt(predictions[j], threshold).byte()
 
                 # equals and not-equals
                 equals = torch.eq(binary_pred, gts) # tensor
                 notequals = torch.ne(binary_pred, gts) # tensor
-                
-                # true positives 
+
+                # true positives
                 tp_tensor = (gts * binary_pred ) # tensor
                 tp_count = torch.sum(tp_tensor).item() # scalar
 
-                # false positives 
-                fp_tensor = torch.eq((binary_pred + tp_tensor), 1) 
+                # false positives
+                fp_tensor = torch.eq((binary_pred + tp_tensor), 1)
                 fp_count = torch.sum(fp_tensor).item()
 
                 # true negatives
@@ -80,14 +80,14 @@ def batch_metrics(predictions, ground_truths, names, output_folder, logger):
                 fn_count = torch.sum(fn_tensor).item()
 
                 # calc metrics
-                metrics = base_metrics(tp_count, fp_count, tn_count, fn_count)    
-                
-                # write to disk 
+                metrics = base_metrics(tp_count, fp_count, tn_count, fn_count)
+
+                # write to disk
                 outfile.write("{:.2f},{:.5f},{:.5f},{:.5f},{:.5f},{:.5f},{:.5f} \n".format(threshold, *metrics))
-                
+
                 batch_metrics.append([names[j],threshold, *metrics ])
 
-    
+
     return batch_metrics
 
 
@@ -100,19 +100,21 @@ def save_probability_images(predictions, names, output_folder, logger):
     predictions : :py:class:`torch.Tensor`
         tensor with pixel-wise probabilities
     names : list
-        list of file names 
+        list of file names
     output_folder : str
         output path
     logger : :py:class:`logging.Logger`
         python logger
     """
-    images_subfolder = os.path.join(output_folder,'images') 
-    if not os.path.exists(images_subfolder): os.makedirs(images_subfolder)
+    images_subfolder = os.path.join(output_folder,'images')
     for j in range(predictions.size()[0]):
         img = VF.to_pil_image(predictions.cpu().data[j])
         filename = '{}.png'.format(names[j].split(".")[0])
-        logger.info("saving {}".format(filename))
-        img.save(os.path.join(images_subfolder, filename))
+        fullpath = os.path.join(images_subfolder, filename)
+        logger.info("saving {}".format(fullpath))
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir): os.makedirs(fulldir)
+        img.save(fullpath)
 
 def save_hdf(predictions, names, output_folder, logger):
     """
@@ -123,19 +125,22 @@ def save_hdf(predictions, names, output_folder, logger):
     predictions : :py:class:`torch.Tensor`
         tensor with pixel-wise probabilities
     names : list
-        list of file names 
+        list of file names
     output_folder : str
         output path
     logger : :py:class:`logging.Logger`
         python logger
     """
-    hdf5_subfolder = os.path.join(output_folder,'hdf5') 
+    hdf5_subfolder = os.path.join(output_folder,'hdf5')
     if not os.path.exists(hdf5_subfolder): os.makedirs(hdf5_subfolder)
     for j in range(predictions.size()[0]):
         img = predictions.cpu().data[j].squeeze(0).numpy()
         filename = '{}.hdf5'.format(names[j].split(".")[0])
+        fullpath = os.path.join(hdf5_subfolder, filename)
         logger.info("saving {}".format(filename))
-        bob.io.base.save(img, os.path.join(hdf5_subfolder, filename))
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir): os.makedirs(fulldir)
+        bob.io.base.save(img, fullpath)
 
 def do_inference(
     model,
@@ -146,7 +151,7 @@ def do_inference(
 
     """
     Run inference and calculate metrics
-    
+
     Parameters
     ---------
     model : :py:class:`torch.nn.Module`
@@ -159,18 +164,18 @@ def do_inference(
     logger = logging.getLogger("bob.ip.binseg.engine.inference")
     logger.info("Start evaluation")
     logger.info("Output folder: {}, Device: {}".format(output_folder, device))
-    results_subfolder = os.path.join(output_folder,'results') 
+    results_subfolder = os.path.join(output_folder,'results')
     os.makedirs(results_subfolder,exist_ok=True)
-    
+
     model.eval().to(device)
-    # Sigmoid for probabilities 
-    sigmoid = torch.nn.Sigmoid() 
+    # Sigmoid for probabilities
+    sigmoid = torch.nn.Sigmoid()
 
     # Setup timers
     start_total_time = time.time()
     times = []
 
-    # Collect overall metrics 
+    # Collect overall metrics
     metrics = []
 
     for samples in tqdm(data_loader):
@@ -181,50 +186,50 @@ def do_inference(
             start_time = time.perf_counter()
 
             outputs = model(images)
-            
-            # necessary check for hed architecture that uses several outputs 
+
+            # necessary check for the HED architecture, which uses several outputs
             # for loss calculation instead of just the last concatfuse block
             if isinstance(outputs,list):
                 outputs = outputs[-1]
-            
+
             probabilities = sigmoid(outputs)
-            
+
             batch_time = time.perf_counter() - start_time
             times.append(batch_time)
             logger.info("Batch time: {:.5f} s".format(batch_time))
-            
+
             b_metrics = batch_metrics(probabilities, ground_truths, names,results_subfolder, logger)
             metrics.extend(b_metrics)
-            
+
             # Create probability images
             save_probability_images(probabilities, names, output_folder, logger)
             # save hdf5
             save_hdf(probabilities, names, output_folder, logger)
 
-    # DataFrame 
+    # DataFrame
     df_metrics = pd.DataFrame(metrics,columns= \
                            ["name",
                             "threshold",
-                            "precision", 
-                            "recall", 
-                            "specificity", 
-                            "accuracy", 
-                            "jaccard", 
+                            "precision",
+                            "recall",
+                            "specificity",
+                            "accuracy",
+                            "jaccard",
                             "f1_score"])
 
     # Report and Averages
     metrics_file = "Metrics.csv".format(model.name)
     metrics_path = os.path.join(results_subfolder, metrics_file)
     logger.info("Saving average over all input images: {}".format(metrics_file))
-    
+
     avg_metrics = df_metrics.groupby('threshold').mean()
     std_metrics = df_metrics.groupby('threshold').std()
 
-    # Uncomment below for F1-score calculation based on average precision and metrics instead of 
+    # Uncomment below for F1-score calculation based on average precision and recall instead of
     # F1-scores of individual images. This method is in line with Maninis et. al. (2016)
     #avg_metrics["f1_score"] =  (2* avg_metrics["precision"]*avg_metrics["recall"])/ \
     #    (avg_metrics["precision"]+avg_metrics["recall"])
-    
+
     avg_metrics["std_pr"] = std_metrics["precision"]
     avg_metrics["pr_upper"] = avg_metrics['precision'] + avg_metrics["std_pr"]
     avg_metrics["pr_lower"] = avg_metrics['precision'] - avg_metrics["std_pr"]
@@ -232,13 +237,13 @@ def do_inference(
     avg_metrics["re_upper"] = avg_metrics['recall'] + avg_metrics["std_re"]
     avg_metrics["re_lower"] = avg_metrics['recall'] - avg_metrics["std_re"]
     avg_metrics["std_f1"] = std_metrics["f1_score"]
-    
+
     avg_metrics.to_csv(metrics_path)
     maxf1 = avg_metrics['f1_score'].max()
     optimal_f1_threshold = avg_metrics['f1_score'].idxmax()
-    
+
     logger.info("Highest F1-score of {:.5f}, achieved at threshold {}".format(maxf1, optimal_f1_threshold))
-    
+
     # Plotting
     np_avg_metrics = avg_metrics.to_numpy().T
     fig_name = "precision_recall.pdf"
@@ -246,7 +251,7 @@ def do_inference(
     fig = precision_recall_f1iso_confintval([np_avg_metrics[0]],[np_avg_metrics[1]],[np_avg_metrics[7]],[np_avg_metrics[8]],[np_avg_metrics[10]],[np_avg_metrics[11]], [model.name,None], title=output_folder)
     fig_filename = os.path.join(results_subfolder, fig_name)
     fig.savefig(fig_filename)
-    
+
     # Report times
     total_inference_time = str(datetime.timedelta(seconds=int(sum(times))))
     average_batch_inference_time = np.mean(times)
@@ -256,7 +261,7 @@ def do_inference(
 
     times_file = "Times.txt"
     logger.info("saving {}".format(times_file))
- 
+
     with open (os.path.join(results_subfolder,times_file), "w+") as outfile:
         date = datetime.datetime.now()
         outfile.write("Date: {} \n".format(date.strftime("%Y-%m-%d %H:%M:%S")))
@@ -264,7 +269,7 @@ def do_inference(
         outfile.write("Average batch inference time: {} \n".format(average_batch_inference_time))
         outfile.write("Total inference time: {} \n".format(total_inference_time))
 
-    # Save model summary 
+    # Save model summary
     summary_file = 'ModelSummary.txt'
     logger.info("saving {}".format(summary_file))
 
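
The hunks above replace the single up-front directory creation in ``save_probability_images`` and ``save_hdf`` with per-file directory creation, so that names carrying sub-directories (as produced by ``ImageFolderInference`` with a recursive glob) can be written out. A hedged sketch of that pattern, using a hypothetical helper::

    import os

    def _save_under(output_folder, name, writer):
        """Hypothetical helper mirroring the per-file pattern used above."""
        fullpath = os.path.join(output_folder, name)  # e.g. 'out/images/subjectA/img01.png'
        fulldir = os.path.dirname(fullpath)
        if not os.path.exists(fulldir):
            os.makedirs(fulldir)                      # create nested folders on demand
        writer(fullpath)

    # e.g. _save_under(os.path.join(output_folder, 'images'), name, img.save)
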
diff --git a/bob/ip/binseg/engine/predicter.py b/bob/ip/binseg/engine/predicter.py
index b6e8ad06da54b43a538cf4fc7805cc63e6966cee..ebd09ac5e84d4f9a81a20f72c3919f42071fb73d 100644
--- a/bob/ip/binseg/engine/predicter.py
+++ b/bob/ip/binseg/engine/predicter.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import os 
+import os
 import logging
 import time
 import datetime
@@ -24,7 +24,7 @@ def do_predict(
 
     """
     Run inference and calculate metrics
-    
+
     Parameters
     ---------
     model : :py:class:`torch.nn.Module`
@@ -37,12 +37,12 @@ def do_predict(
     logger = logging.getLogger("bob.ip.binseg.engine.inference")
     logger.info("Start evaluation")
     logger.info("Output folder: {}, Device: {}".format(output_folder, device))
-    results_subfolder = os.path.join(output_folder,'results') 
+    results_subfolder = os.path.join(output_folder,'results')
     os.makedirs(results_subfolder,exist_ok=True)
-    
+
     model.eval().to(device)
-    # Sigmoid for probabilities 
-    sigmoid = torch.nn.Sigmoid() 
+    # Sigmoid for probabilities
+    sigmoid = torch.nn.Sigmoid()
 
     # Setup timers
     start_total_time = time.time()
@@ -55,24 +55,24 @@ def do_predict(
             start_time = time.perf_counter()
 
             outputs = model(images)
-            
-            # necessary check for hed architecture that uses several outputs 
+
+            # necessary check for the HED architecture, which uses several outputs
             # for loss calculation instead of just the last concatfuse block
             if isinstance(outputs,list):
                 outputs = outputs[-1]
-            
+
             probabilities = sigmoid(outputs)
-            
+
             batch_time = time.perf_counter() - start_time
             times.append(batch_time)
             logger.info("Batch time: {:.5f} s".format(batch_time))
-            
+
             # Create probability images
             save_probability_images(probabilities, names, output_folder, logger)
             # Save hdf5
             save_hdf(probabilities, names, output_folder, logger)
 
- 
+
     # Report times
     total_inference_time = str(datetime.timedelta(seconds=int(sum(times))))
     average_batch_inference_time = np.mean(times)
@@ -82,7 +82,7 @@ def do_predict(
 
     times_file = "Times.txt"
     logger.info("saving {}".format(times_file))
- 
+
     with open (os.path.join(results_subfolder,times_file), "w+") as outfile:
         date = datetime.datetime.now()
         outfile.write("Date: {} \n".format(date.strftime("%Y-%m-%d %H:%M:%S")))
diff --git a/bob/ip/binseg/utils/plot.py b/bob/ip/binseg/utils/plot.py
index ceb268d06ee29c42c750e58fc37a1cfd438af2fb..bd9c2131d41e9b39fcf40b4484501655e45a1a73 100644
--- a/bob/ip/binseg/utils/plot.py
+++ b/bob/ip/binseg/utils/plot.py
@@ -3,7 +3,7 @@
 
 import numpy as np
 import os
-import csv 
+import csv
 import pandas as pd
 import PIL
 from PIL import Image,ImageFont, ImageDraw
@@ -13,62 +13,62 @@ import torch
 def precision_recall_f1iso(precision, recall, names, title=None):
     """
     Author: Andre Anjos (andre.anjos@idiap.ch).
-    
-    Creates a precision-recall plot of the given data.   
+
+    Creates a precision-recall plot of the given data.
     The plot will be annotated with F1-score iso-lines (in which the F1-score
-    maintains the same value)   
-    
+    maintains the same value)
+
     Parameters
-    ----------  
+    ----------
     precision : :py:class:`numpy.ndarray` or :py:class:`list`
         A list of 1D np arrays containing the Y coordinates of the plot, or
         the precision, or a 2D np array in which the rows correspond to each
-        of the system's precision coordinates.  
+        of the system's precision coordinates.
     recall : :py:class:`numpy.ndarray` or :py:class:`list`
         A list of 1D np arrays containing the X coordinates of the plot, or
         the recall, or a 2D np array in which the rows correspond to each
-        of the system's recall coordinates. 
+        of the system's recall coordinates.
     names : :py:class:`list`
         An iterable over the names of each of the systems along the rows of
-        ``precision`` and ``recall``      
+        ``precision`` and ``recall``
     title : :py:class:`str`, optional
-        A title for the plot. If not set, omits the title   
+        A title for the plot. If not set, omits the title
 
     Returns
-    ------- 
+    -------
     matplotlib.figure.Figure
-        A matplotlib figure you can save or display 
-    """ 
+        A matplotlib figure you can save or display
+    """
     import matplotlib
     matplotlib.use('agg')
-    import matplotlib.pyplot as plt 
+    import matplotlib.pyplot as plt
     from itertools import cycle
-    fig, ax1 = plt.subplots(1)  
+    fig, ax1 = plt.subplots(1)
     lines = ["-","--","-.",":"]
     linecycler = cycle(lines)
-    for p, r, n in zip(precision, recall, names):   
+    for p, r, n in zip(precision, recall, names):
         # Plots only from the point where recall reaches its maximum, otherwise, we
         # don't see a curve...
         i = r.argmax()
         pi = p[i:]
-        ri = r[i:]    
+        ri = r[i:]
         valid = (pi+ri) > 0
-        f1 = 2 * (pi[valid]*ri[valid]) / (pi[valid]+ri[valid])    
+        f1 = 2 * (pi[valid]*ri[valid]) / (pi[valid]+ri[valid])
         # optimal point along the curve
         argmax = f1.argmax()
         opi = pi[argmax]
         ori = ri[argmax]
         # Plot Recall/Precision as threshold changes
-        ax1.plot(ri[pi>0], pi[pi>0], next(linecycler), label='[F={:.4f}] {}'.format(f1.max(), n),) 
+        ax1.plot(ri[pi>0], pi[pi>0], next(linecycler), label='[F={:.4f}] {}'.format(f1.max(), n),)
         ax1.plot(ori,opi, marker='o', linestyle=None, markersize=3, color='black')
-    ax1.grid(linestyle='--', linewidth=1, color='gray', alpha=0.2)  
+    ax1.grid(linestyle='--', linewidth=1, color='gray', alpha=0.2)
     if len(names) > 1:
-        plt.legend(loc='lower left', framealpha=0.5)  
+        plt.legend(loc='lower left', framealpha=0.5)
     ax1.set_xlabel('Recall')
     ax1.set_ylabel('Precision')
     ax1.set_xlim([0.0, 1.0])
-    ax1.set_ylim([0.0, 1.0])    
-    if title is not None: ax1.set_title(title)  
+    ax1.set_ylim([0.0, 1.0])
+    if title is not None: ax1.set_title(title)
     # Annotates plot with F1-score iso-lines
     ax2 = ax1.twinx()
     f_scores = np.linspace(0.1, 0.9, num=9)
@@ -79,70 +79,70 @@ def precision_recall_f1iso(precision, recall, names, title=None):
         y = f_score * x / (2 * x - f_score)
         l, = plt.plot(x[y >= 0], y[y >= 0], color='green', alpha=0.1)
         tick_locs.append(y[-1])
-        tick_labels.append('%.1f' % f_score)  
+        tick_labels.append('%.1f' % f_score)
     ax2.tick_params(axis='y', which='both', pad=0, right=False, left=False)
     ax2.set_ylabel('iso-F', color='green', alpha=0.3)
     ax2.set_ylim([0.0, 1.0])
-    ax2.yaxis.set_label_coords(1.015, 0.97) 
-    ax2.set_yticks(tick_locs) #notice these are invisible   
+    ax2.yaxis.set_label_coords(1.015, 0.97)
+    ax2.set_yticks(tick_locs) #notice these are invisible
     for k in ax2.set_yticklabels(tick_labels):
         k.set_color('green')
         k.set_alpha(0.3)
-        k.set_size(8) 
+        k.set_size(8)
     # we should see some of axes 1 axes
     ax1.spines['right'].set_visible(False)
     ax1.spines['top'].set_visible(False)
     ax1.spines['left'].set_position(('data', -0.015))
-    ax1.spines['bottom'].set_position(('data', -0.015)) 
+    ax1.spines['bottom'].set_position(('data', -0.015))
     # we shouldn't see any of axes 2 axes
     ax2.spines['right'].set_visible(False)
     ax2.spines['top'].set_visible(False)
     ax2.spines['left'].set_visible(False)
-    ax2.spines['bottom'].set_visible(False) 
-    plt.tight_layout()  
-    return fig  
+    ax2.spines['bottom'].set_visible(False)
+    plt.tight_layout()
+    return fig
 
 def precision_recall_f1iso_confintval(precision, recall, pr_upper, pr_lower, re_upper, re_lower, names, title=None):
     """
     Author: Andre Anjos (andre.anjos@idiap.ch).
-    
-    Creates a precision-recall plot of the given data.   
+
+    Creates a precision-recall plot of the given data.
     The plot will be annotated with F1-score iso-lines (in which the F1-score
-    maintains the same value)   
-    
+    maintains the same value)
+
     Parameters
-    ----------  
+    ----------
     precision : :py:class:`numpy.ndarray` or :py:class:`list`
         A list of 1D np arrays containing the Y coordinates of the plot, or
         the precision, or a 2D np array in which the rows correspond to each
-        of the system's precision coordinates.  
+        of the system's precision coordinates.
     recall : :py:class:`numpy.ndarray` or :py:class:`list`
         A list of 1D np arrays containing the X coordinates of the plot, or
         the recall, or a 2D np array in which the rows correspond to each
-        of the system's recall coordinates. 
+        of the system's recall coordinates.
     names : :py:class:`list`
         An iterable over the names of each of the systems along the rows of
-        ``precision`` and ``recall``      
+        ``precision`` and ``recall``
     title : :py:class:`str`, optional
-        A title for the plot. If not set, omits the title   
+        A title for the plot. If not set, omits the title
 
     Returns
-    ------- 
+    -------
     matplotlib.figure.Figure
-        A matplotlib figure you can save or display 
-    """ 
+        A matplotlib figure you can save or display
+    """
     import matplotlib
     matplotlib.use('agg')
-    import matplotlib.pyplot as plt 
+    import matplotlib.pyplot as plt
     from itertools import cycle
-    fig, ax1 = plt.subplots(1)  
+    fig, ax1 = plt.subplots(1)
     lines = ["-","--","-.",":"]
     colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
               '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
               '#bcbd22', '#17becf']
     colorcycler = cycle(colors)
     linecycler = cycle(lines)
-    for p, r, pu, pl, ru, rl, n in zip(precision, recall, pr_upper, pr_lower, re_upper, re_lower, names):   
+    for p, r, pu, pl, ru, rl, n in zip(precision, recall, pr_upper, pr_lower, re_upper, re_lower, names):
         # Plots only from the point where recall reaches its maximum, otherwise, we
         # don't see a curve...
         i = r.argmax()
@@ -151,24 +151,24 @@ def precision_recall_f1iso_confintval(precision, recall, pr_upper, pr_lower, re_
         pui = pu[i:]
         pli = pl[i:]
         rui = ru[i:]
-        rli = rl[i:]    
+        rli = rl[i:]
         valid = (pi+ri) > 0
-        f1 = 2 * (pi[valid]*ri[valid]) / (pi[valid]+ri[valid])    
+        f1 = 2 * (pi[valid]*ri[valid]) / (pi[valid]+ri[valid])
         # optimal point along the curve
         argmax = f1.argmax()
         opi = pi[argmax]
         ori = ri[argmax]
         # Plot Recall/Precision as threshold changes
-        ax1.plot(ri[pi>0], pi[pi>0], next(linecycler), label='[F={:.4f}] {}'.format(f1.max(), n),) 
+        ax1.plot(ri[pi>0], pi[pi>0], next(linecycler), label='[F={:.4f}] {}'.format(f1.max(), n),)
         ax1.plot(ori,opi, marker='o', linestyle=None, markersize=3, color='black')
         # Plot confidence
         # Upper bound
-        #ax1.plot(r95ui[p95ui>0], p95ui[p95ui>0]) 
+        #ax1.plot(r95ui[p95ui>0], p95ui[p95ui>0])
         # Lower bound
         #ax1.plot(r95li[p95li>0], p95li[p95li>0])
         # create the limiting polygon
         vert_x = np.concatenate((rui[pui>0], rli[pli>0][::-1]))
-        vert_y = np.concatenate((pui[pui>0], pli[pli>0][::-1])) 
+        vert_y = np.concatenate((pui[pui>0], pli[pli>0][::-1]))
         # hacky workaround to plot 2nd human
         if np.isclose(np.mean(rui), rui[1], rtol=1e-05):
             print('found human')
@@ -177,14 +177,14 @@ def precision_recall_f1iso_confintval(precision, recall, pr_upper, pr_lower, re_
             p = plt.Polygon(np.column_stack((vert_x, vert_y)), facecolor=next(colorcycler), alpha=.2, edgecolor='none',lw=.2)
         ax1.add_artist(p)
 
-    ax1.grid(linestyle='--', linewidth=1, color='gray', alpha=0.2)  
+    ax1.grid(linestyle='--', linewidth=1, color='gray', alpha=0.2)
     if len(names) > 1:
-        plt.legend(loc='lower left', framealpha=0.5)  
+        plt.legend(loc='lower left', framealpha=0.5)
     ax1.set_xlabel('Recall')
     ax1.set_ylabel('Precision')
     ax1.set_xlim([0.0, 1.0])
-    ax1.set_ylim([0.0, 1.0])    
-    if title is not None: ax1.set_title(title)  
+    ax1.set_ylim([0.0, 1.0])
+    if title is not None: ax1.set_title(title)
     # Annotates plot with F1-score iso-lines
     ax2 = ax1.twinx()
     f_scores = np.linspace(0.1, 0.9, num=9)
@@ -195,45 +195,45 @@ def precision_recall_f1iso_confintval(precision, recall, pr_upper, pr_lower, re_
         y = f_score * x / (2 * x - f_score)
         l, = plt.plot(x[y >= 0], y[y >= 0], color='green', alpha=0.1)
         tick_locs.append(y[-1])
-        tick_labels.append('%.1f' % f_score)  
+        tick_labels.append('%.1f' % f_score)
     ax2.tick_params(axis='y', which='both', pad=0, right=False, left=False)
     ax2.set_ylabel('iso-F', color='green', alpha=0.3)
     ax2.set_ylim([0.0, 1.0])
-    ax2.yaxis.set_label_coords(1.015, 0.97) 
-    ax2.set_yticks(tick_locs) #notice these are invisible   
+    ax2.yaxis.set_label_coords(1.015, 0.97)
+    ax2.set_yticks(tick_locs) #notice these are invisible
     for k in ax2.set_yticklabels(tick_labels):
         k.set_color('green')
         k.set_alpha(0.3)
-        k.set_size(8) 
+        k.set_size(8)
     # we should see some of axes 1 axes
     ax1.spines['right'].set_visible(False)
     ax1.spines['top'].set_visible(False)
     ax1.spines['left'].set_position(('data', -0.015))
-    ax1.spines['bottom'].set_position(('data', -0.015)) 
+    ax1.spines['bottom'].set_position(('data', -0.015))
     # we shouldn't see any of axes 2 axes
     ax2.spines['right'].set_visible(False)
     ax2.spines['top'].set_visible(False)
     ax2.spines['left'].set_visible(False)
-    ax2.spines['bottom'].set_visible(False) 
-    plt.tight_layout()  
-    return fig  
+    ax2.spines['bottom'].set_visible(False)
+    plt.tight_layout()
+    return fig
 
 def loss_curve(df, title):
     """ Creates a loss curve given a Dataframe with column names:
 
     ``['avg. loss', 'median loss','lr','max memory']``
-    
+
     Parameters
     ----------
     df : :py:class:`pandas.DataFrame`
-    
+
     Returns
     -------
     matplotlib.figure.Figure
-    """   
+    """
     import matplotlib
     matplotlib.use('agg')
-    import matplotlib.pyplot as plt 
+    import matplotlib.pyplot as plt
     ax1 = df.plot(y="median loss", grid=True)
     ax1.set_title(title)
     ax1.set_ylabel('median loss')
@@ -241,7 +241,7 @@ def loss_curve(df, title):
     ax2 = df['lr'].plot(secondary_y=True,legend=True,grid=True,)
     ax2.set_ylabel('lr')
     ax1.set_xlabel('epoch')
-    plt.tight_layout()  
+    plt.tight_layout()
     fig = ax1.get_figure()
     return fig
 
@@ -249,12 +249,12 @@ def loss_curve(df, title):
 def read_metricscsv(file):
     """
     Read precision and recall from csv file
-    
+
     Parameters
     ----------
     file : str
         path to file
-    
+
     Returns
     -------
     :py:class:`numpy.ndarray`
@@ -283,7 +283,7 @@ def read_metricscsv(file):
 def plot_overview(outputfolders,title):
     """
     Plots comparison chart of all trained models
-    
+
     Parameters
     ----------
     outputfolder : list
@@ -303,7 +303,7 @@ def plot_overview(outputfolders,title):
     names = []
     params = []
     for folder in outputfolders:
-        # metrics 
+        # metrics
         metrics_path = os.path.join(folder,'results/Metrics.csv')
         pr, re, pr_upper, pr_lower, re_upper, re_lower = read_metricscsv(metrics_path)
         precisions.append(pr)
@@ -335,7 +335,7 @@ def metricsviz(dataset
                 ,overlayed=True):
     """ Visualizes true positives, false positives and false negatives
     Default colors TP: Gray, FP: Cyan, FN: Orange
-    
+
     Parameters
     ----------
     dataset : :py:class:`torch.utils.data.Dataset`
@@ -354,27 +354,27 @@ def metricsviz(dataset
         name  = sample[0]
         img = VF.to_pil_image(sample[1]) # PIL Image
         gt = sample[2].byte() # byte tensor
-        
-        # read metrics 
+
+        # read metrics
         metrics = pd.read_csv(os.path.join(output_path,'results','Metrics.csv'))
         optimal_threshold = metrics['threshold'][metrics['f1_score'].idxmax()]
-        
-        # read probability output 
+
+        # read probability output
         pred = Image.open(os.path.join(output_path,'images',name))
         pred = pred.convert(mode='L')
         pred = VF.to_tensor(pred)
         binary_pred = torch.gt(pred, optimal_threshold).byte()
-        
+
         # calc metrics
         # equals and not-equals
         equals = torch.eq(binary_pred, gt) # tensor
-        notequals = torch.ne(binary_pred, gt) # tensor      
-        # true positives 
+        notequals = torch.ne(binary_pred, gt) # tensor
+        # true positives
         tp_tensor = (gt * binary_pred ) # tensor
         tp_pil = VF.to_pil_image(tp_tensor.float())
         tp_pil_colored = PIL.ImageOps.colorize(tp_pil, (0,0,0), tp_color)
-        # false positives 
-        fp_tensor = torch.eq((binary_pred + tp_tensor), 1) 
+        # false positives
+        fp_tensor = torch.eq((binary_pred + tp_tensor), 1)
         fp_pil = VF.to_pil_image(fp_tensor.float())
         fp_pil_colored = PIL.ImageOps.colorize(fp_pil, (0,0,0), fp_color)
         # false negatives
@@ -385,7 +385,7 @@ def metricsviz(dataset
         # paste together
         tp_pil_colored.paste(fp_pil_colored,mask=fp_pil)
         tp_pil_colored.paste(fn_pil_colored,mask=fn_pil)
-        
+
         if overlayed:
             tp_pil_colored = PIL.Image.blend(img, tp_pil_colored, 0.4)
             img_metrics = pd.read_csv(os.path.join(output_path,'results',name+'.csv'))
@@ -396,15 +396,17 @@ def metricsviz(dataset
             fnt = ImageFont.truetype('FreeMono.ttf', fnt_size)
             draw.text((0, 0),"F1: {:.4f}".format(f1),(255,255,255),font=fnt)
 
-        # save to disk 
+        # save to disk
         overlayed_path = os.path.join(output_path,'tpfnfpviz')
-        if not os.path.exists(overlayed_path): os.makedirs(overlayed_path)
-        tp_pil_colored.save(os.path.join(overlayed_path,name))
+        fullpath = os.path.join(overlayed_path, name)
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir): os.makedirs(fulldir)
+        tp_pil_colored.save(fullpath)
 
 
 def overlay(dataset, output_path):
     """Overlays prediction probabilities vessel tree with original test image.
-    
+
     Parameters
     ----------
     dataset : :py:class:`torch.utils.data.Dataset`
@@ -416,8 +418,8 @@ def overlay(dataset, output_path):
         # get sample
         name  = sample[0]
         img = VF.to_pil_image(sample[1]) # PIL Image
-        
-        # read probability output 
+
+        # read probability output
         pred = Image.open(os.path.join(output_path,'images',name)).convert(mode='L')
         # color and overlay
         pred_green = PIL.ImageOps.colorize(pred, (0,0,0), (0,255,0))
@@ -430,14 +432,16 @@ def overlay(dataset, output_path):
         #draw.text((0, 0),"F1: {:.4f}".format(f1),(255,255,255),font=fnt)
         # save to disk
         overlayed_path = os.path.join(output_path,'overlayed')
-        if not os.path.exists(overlayed_path): os.makedirs(overlayed_path)
-        overlayed.save(os.path.join(overlayed_path,name))
+        fullpath = os.path.join(overlayed_path, name)
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir): os.makedirs(fulldir)
+        overlayed.save(fullpath)
 
 
 def savetransformedtest(dataset, output_path):
-    """Save the test images as they are fed into the neural network. 
+    """Save the test images as they are fed into the neural network.
     Makes it easier to create overlay animations (e.g. slide)
-    
+
     Parameters
     ----------
     dataset : :py:class:`torch.utils.data.Dataset`
@@ -449,8 +453,10 @@ def savetransformedtest(dataset, output_path):
         # get sample
         name  = sample[0]
         img = VF.to_pil_image(sample[1]) # PIL Image
-        
+
         # save to disk
         testimg_path = os.path.join(output_path,'transformedtestimages')
-        if not os.path.exists(testimg_path): os.makedirs(testimg_path)
-        img.save(os.path.join(testimg_path,name))
+        fullpath = os.path.join(testimg_path, name)
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir): os.makedirs(fulldir)
+        img.save(fullpath)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 0a33ea32cb27975a95d66c3ac2d36b2a0aa13a03..1a7c95acb6da05634579c283c8de6248c82a7677 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -39,6 +39,7 @@ requirements:
     - matplotlib
     - tqdm
     - tabulate
+    - bob.core
 
 test:
   imports:
@@ -75,4 +76,3 @@ about:
   home: https://www.idiap.ch/software/bob/
   license: GNU General Public License v3 (GPLv3)
   license_family: GPL
-  license_file: ../LICENSE
diff --git a/doc/api.rst b/doc/api.rst
index a6608a637c5a55b0e73a3d15c54364c4b8f37ffa..1eade48273e9a1117484ac0d14a7df6b510b5e3d 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -17,9 +17,11 @@ PyTorch ImageFolder Dataset
 ===========================
 .. automodule:: bob.ip.binseg.data.imagefolder
 
+.. automodule:: bob.ip.binseg.data.imagefolderinference
+
 Transforms
 ==========
-.. note:: 
+.. note::
     All transforms work with :py:class:`PIL.Image.Image` objects. We make heavy use of the
     `torchvision package`_
 
diff --git a/requirements.txt b/requirements.txt
index 719383e1b5ee6b907c96f6edf6e7da9de184b5a9..82b7843aa0e051bad5f22858752465620aca6772 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ pandas
 matplotlib
 tqdm
 tabulate
+bob.core
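
The new ``bob.core`` dependency provides the ``bob.core.convert`` call used by ``AutoLevel16to8``. A hedged, stand-alone sketch of that call (the array values are illustrative)::

    import numpy as np
    import bob.core

    data = np.array([[1000, 2000], [3000, 4000]], dtype="uint16")
    # Map the source range (here the array's own extrema) onto the full 8-bit
    # destination range, as AutoLevel16to8 does with img.getextrema().
    converted = bob.core.convert(data, "uint8", (0, 255), (int(data.min()), int(data.max())))
    assert converted.dtype == np.uint8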