diff --git a/bob/ip/binseg/configs/__init__.py b/bob/ip/binseg/configs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca5e07cb73f0bdddcb863ef497955964087e301
--- /dev/null
+++ b/bob/ip/binseg/configs/__init__.py
@@ -0,0 +1,3 @@
+# see https://docs.python.org/3/library/pkgutil.html
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/bob/ip/binseg/configs/datasets/__init__.py b/bob/ip/binseg/configs/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca5e07cb73f0bdddcb863ef497955964087e301
--- /dev/null
+++ b/bob/ip/binseg/configs/datasets/__init__.py
@@ -0,0 +1,3 @@
+# see https://docs.python.org/3/library/pkgutil.html
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/bob/ip/binseg/configs/models/__init__.py b/bob/ip/binseg/configs/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca5e07cb73f0bdddcb863ef497955964087e301
--- /dev/null
+++ b/bob/ip/binseg/configs/models/__init__.py
@@ -0,0 +1,3 @@
+# see https://docs.python.org/3/library/pkgutil.html
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/bob/ip/binseg/configs/models/driulayerwise.py b/bob/ip/binseg/configs/models/driulayerwise.py
new file mode 100644
index 0000000000000000000000000000000000000000..390145c5189e008a04fb15f7dda58a710d1b57a4
--- /dev/null
+++ b/bob/ip/binseg/configs/models/driulayerwise.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from torch.optim.lr_scheduler import MultiStepLR
+from bob.ip.binseg.modeling.driu import build_driu
+import torch.optim as optim
+from torch.nn import BCEWithLogitsLoss
+from bob.ip.binseg.utils.model_zoo import modelurls
+from bob.ip.binseg.modeling.losses import WeightedBCELogitsLoss
+from bob.ip.binseg.engine.adabound import AdaBound
+
+##### Config #####
+lr = 0.001
+betas = (0.9, 0.999)
+eps = 1e-08
+weight_decay = 0
+final_lr = 0.1
+gamma = 1e-3
+amsbound = False
+
+scheduler_milestones = [150]
+scheduler_gamma = 0.1
+
+# model
+model = build_driu()
+
+# pretrained backbone
+pretrained_backbone = modelurls['vgg16']
+
+# optimizer: layer-wise learning rates, with the pretrained backbone trained
+# at a lower rate than the randomly initialized head
+optimizer = AdaBound(
+    [
+        {"params": model.backbone.parameters(), "lr": 0.0001, "final_lr": 0.01},
+        {"params": model.head.parameters(), "lr": 0.001, "final_lr": 0.1},
+    ],
+    lr=lr,
+    betas=betas,
+    final_lr=final_lr,
+    gamma=gamma,
+    eps=eps,
+    weight_decay=weight_decay,
+    amsbound=amsbound,
+)
+# criterion
+criterion = WeightedBCELogitsLoss(reduction='mean')
+
+# scheduler
+scheduler = MultiStepLR(optimizer, milestones=scheduler_milestones, gamma=scheduler_gamma)
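
The layer-wise behaviour above relies on standard PyTorch optimizer parameter groups: settings given inside a group override the defaults passed to the constructor. A minimal, self-contained sketch of the same pattern, using torch.optim.SGD and a toy two-part model purely for illustration (AdaBound follows the same parameter-group API):

import torch
import torch.nn as nn
from collections import OrderedDict

# toy model with a named "backbone" and "head", mirroring how build_driu()
# assembles its nn.Sequential
model = nn.Sequential(OrderedDict([
    ("backbone", nn.Linear(8, 8)),
    ("head", nn.Linear(8, 1)),
]))

optimizer = torch.optim.SGD(
    [
        {"params": model.backbone.parameters(), "lr": 0.0001},  # pretrained part: small lr
        {"params": model.head.parameters(), "lr": 0.001},       # new head: larger lr
    ],
    lr=0.001,  # default, used only by groups that do not set their own lr
)

for group in optimizer.param_groups:
    print(group["lr"])  # 0.0001, then 0.001

Groups that omit a setting fall back to the constructor defaults, so only the values that actually differ per layer need to be spelled out.
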
diff --git a/bob/ip/binseg/data/binsegdataset.py b/bob/ip/binseg/data/binsegdataset.py
index 82725efc2ddcbc680df5f2f2afa93b969bda3149..27853d14cb4b3630933106cf781d16f62087cc90 100644
--- a/bob/ip/binseg/data/binsegdataset.py
+++ b/bob/ip/binseg/data/binsegdataset.py
@@ -11,7 +11,7 @@ class BinSegDataset(Dataset):
     It supports indexing such that dataset[i] can be used to get ith sample, e.g.: 
     img, gt, mask, name = db[0]
     
-    Attributes
+    Parameters
     ----------
     database  : binary segmentation `bob.db.database`
                
diff --git a/bob/ip/binseg/data/transforms.py b/bob/ip/binseg/data/transforms.py
index 4518668d57f07bab4e3e1028950881316f5fa5e3..aa20d5d4e4cf7154f59ee13c63b6f730e653b653 100644
--- a/bob/ip/binseg/data/transforms.py
+++ b/bob/ip/binseg/data/transforms.py
@@ -170,8 +170,8 @@ class ColorJitter(object):
     """ 
     Randomly change the brightness, contrast and saturation of an image.
     
-    Attributes
-    -----------
+    Parameters
+    ----------
 
         brightness : float
                         How much to jitter brightness. brightness_factor
@@ -196,12 +196,6 @@ class ColorJitter(object):
 
     @staticmethod
     def get_params(brightness, contrast, saturation, hue):
-        """Get a randomized transform to be applied on image.
-        Arguments are same as that of __init__.
-        Returns:
-            Transform which randomly adjusts brightness, contrast and
-            saturation in a random order.
-        """
         transforms = []
         if brightness > 0:
             brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
diff --git a/bob/ip/binseg/engine/inferencer.py b/bob/ip/binseg/engine/inferencer.py
index c8166a4729153335eafd7e15afbd1aa7385afe7b..b57a2de0351727117c2d105066bae22a0d6acf62 100644
--- a/bob/ip/binseg/engine/inferencer.py
+++ b/bob/ip/binseg/engine/inferencer.py
@@ -5,20 +5,20 @@ import os
 import logging
 import time
 import datetime
-from tqdm import tqdm
-import torch
 import numpy as np
-import pickle
+import torch
 import pandas as pd
+import torchvision.transforms.functional as VF
+from tqdm import tqdm
 
 from bob.ip.binseg.utils.metric import SmoothedValue, base_metrics
 from bob.ip.binseg.utils.plot import precision_recall_f1iso
 
-import torchvision.transforms.functional as VF
+
 
 def batch_metrics(predictions, ground_truths, masks, names, output_folder, logger):
     """
-    calculates metrics on the batch and saves it to disc
+    Calculates metrics on the batch and saves it to disc
 
     Parameters
     ----------
@@ -27,7 +27,7 @@ def batch_metrics(predictions, ground_truths, masks, names, output_folder, logge
     mask : :py:class:torch.Tensor
     names : list
     output_folder : str
-    logger : logger
+    logger : :py:class:logging.Logger
 
     Returns
     -------
@@ -86,24 +86,44 @@ def batch_metrics(predictions, ground_truths, masks, names, output_folder, logge
     return batch_metrics
 
 
-
 def save_probability_images(predictions, names, output_folder, logger):
+    """
+    Saves probability maps as TIF images
+
+    Parameters
+    ----------
+    predictions : :py:class:torch.Tensor
+    names : list
+    output_folder : str
+    logger : :py:class:logging.Logger
+    """
     images_subfolder = os.path.join(output_folder,'images') 
     if not os.path.exists(images_subfolder): os.makedirs(images_subfolder)
     for j in range(predictions.size()[0]):
         img = VF.to_pil_image(predictions.cpu().data[j])
-        filename = '{}_prob.gif'.format(names[j])
+        filename = '{}.tif'.format(names[j])
         logger.info("saving {}".format(filename))
         img.save(os.path.join(images_subfolder, filename))
 
 
-
 def do_inference(
     model,
     data_loader,
     device,
     output_folder = None
 ):
+
+    """
+    Run inference and calculate metrics
+    
+    Parameters
+    ----------
+    model : :py:class:torch.nn.Module
+    data_loader : :py:class:torch.utils.data.DataLoader
+    device : str
+                'cpu' or 'cuda'
+    output_folder : str
+    """
     logger = logging.getLogger("bob.ip.binseg.engine.inference")
     logger.info("Start evaluation")
     logger.info("Split: {}, Output folder: {}, Device: {}".format(data_loader.dataset.split, output_folder, device))
@@ -128,10 +148,12 @@ def do_inference(
             start_time = time.perf_counter()
 
             outputs = model(images)
+            
             # necessary check for hed architecture that uses several outputs 
             # for loss calculation instead of just the last concatfuse block
             if isinstance(outputs,list):
                 outputs = outputs[-1]
+            
             probabilities = sigmoid(outputs)
             
             batch_time = time.perf_counter() - start_time
@@ -140,10 +162,11 @@ def do_inference(
             
             b_metrics = batch_metrics(probabilities, ground_truths, masks, names,results_subfolder, logger)
             metrics.extend(b_metrics)
+            
             # Create probability images
             save_probability_images(probabilities, names, output_folder, logger)
 
-
+    # DataFrame 
     df_metrics = pd.DataFrame(metrics,columns= \
                            ["name",
                             "threshold",
@@ -187,7 +210,7 @@ def do_inference(
 
     times_file = "Times.txt".format(model.name)
     logger.info("saving {}".format(times_file))
-        
+ 
     with open (os.path.join(results_subfolder,times_file), "w+") as outfile:
         date = datetime.datetime.now()
         outfile.write("Date: {} \n".format(date.strftime("%Y-%m-%d %H:%M:%S")))
diff --git a/bob/ip/binseg/engine/trainer.py b/bob/ip/binseg/engine/trainer.py
index c84a0733f36cc82269e74d7ec2a8df3c8e7523b9..b5a60bb292c01fa62ecf9bfbc1a59d82e8cdfdba 100644
--- a/bob/ip/binseg/engine/trainer.py
+++ b/bob/ip/binseg/engine/trainer.py
@@ -1,17 +1,18 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+import os 
 import logging
 import time
 import datetime
-from tqdm import tqdm
 import torch
-import os 
 import pandas as pd
+from tqdm import tqdm
 
 from bob.ip.binseg.utils.metric import SmoothedValue
 from bob.ip.binseg.utils.plot import loss_curve
 
+
 def do_train(
     model,
     data_loader,
@@ -24,7 +25,24 @@ def do_train(
     arguments,
     output_folder
 ):
-    """ Trains the model """
+    """ 
+    Trains the model
+    
+    Parameters
+    ----------
+    model : :py:class:torch.nn.Module
+    data_loader : :py:class:torch.utils.data.DataLoader
+    optimizer : :py:class:torch.optim.Optimizer
+    criterion : :py:class:torch.nn.modules.loss._Loss
+    scheduler : :py:class:torch.optim.lr_scheduler._LRScheduler
+    checkpointer : :py:class:bob.ip.binseg.utils.checkpointer.DetectronCheckpointer
+    checkpoint_period : int
+    device : str
+                'cpu' or 'cuda'
+    arguments : dict
+    output_folder : str
+
+    """
     logger = logging.getLogger("bob.ip.binseg.engine.trainer")
     logger.info("Start training")
     start_epoch = arguments["epoch"]
@@ -42,15 +60,17 @@ def do_train(
             losses = SmoothedValue(len(data_loader))
             epoch = epoch + 1
             arguments["epoch"] = epoch
+            
+            # Epoch time
             start_epoch_time = time.time()
 
             for images, ground_truths, masks, _ in tqdm(data_loader):
 
                 images = images.to(device)
                 ground_truths = ground_truths.to(device)
-                #masks = masks.to(device) 
 
                 outputs = model(images)
+
                 loss = criterion(outputs, ground_truths)
                 optimizer.zero_grad()
                 loss.backward()
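
The running loss reported during an epoch comes from SmoothedValue (bob.ip.binseg.utils.metric), whose implementation is not part of this diff. Conceptually it behaves like a mean over a sliding window of the most recent values; a hypothetical stand-in, assuming that behaviour:

from collections import deque

class WindowAverage:
    """Hypothetical stand-in for SmoothedValue: mean over the last N values."""
    def __init__(self, window_size):
        self.values = deque(maxlen=window_size)

    def update(self, value):
        self.values.append(value)

    @property
    def avg(self):
        return sum(self.values) / len(self.values)

losses = WindowAverage(window_size=4)
for batch_loss in [0.9, 0.7, 0.6, 0.5, 0.4]:
    losses.update(batch_loss)
print(losses.avg)  # mean over the last 4 updates: (0.7 + 0.6 + 0.5 + 0.4) / 4
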
diff --git a/bob/ip/binseg/modeling/driu.py b/bob/ip/binseg/modeling/driu.py
index b1478f63dfd04708d62d212c72e9fb746946c787..fa367e61f992060a56b90950bb8bab6d8f84ec1b 100644
--- a/bob/ip/binseg/modeling/driu.py
+++ b/bob/ip/binseg/modeling/driu.py
@@ -8,6 +8,10 @@ from bob.ip.binseg.modeling.backbones.vgg import vgg16
 from bob.ip.binseg.modeling.make_layers import conv_with_kaiming_uniform,convtrans_with_kaiming_uniform, UpsampleCropBlock
 
 class ConcatFuseBlock(nn.Module):
+    """ 
+    Takes in four feature maps with 16 channels each, concatenates them 
+    and applies a 1x1 convolution with 1 output channel. 
+    """
     def __init__(self):
         super().__init__()
         self.conv = conv_with_kaiming_uniform(4*16,1,1,1,0)
@@ -20,16 +24,18 @@ class ConcatFuseBlock(nn.Module):
 class DRIU(nn.Module):
     """
     DRIU head module
-    Attributes
+    
+    Parameters
     ----------
-        in_channels_list (list[int]): number of channels for each feature map that is returned from backbone
+    in_channels_list : list
+                        number of channels for each feature map that is returned from backbone
     """
     def __init__(self, in_channels_list=None):
         super(DRIU, self).__init__()
         in_conv_1_2_16, in_upsample2, in_upsample_4, in_upsample_8 = in_channels_list
 
         self.conv1_2_16 = nn.Conv2d(in_conv_1_2_16, 16, 3, 1, 1)
-        # Upsample
+        # Upsample layers
         self.upsample2 = UpsampleCropBlock(in_upsample2, 16, 4, 2, 0)
         self.upsample4 = UpsampleCropBlock(in_upsample_4, 16, 8, 4, 0)
         self.upsample8 = UpsampleCropBlock(in_upsample_8, 16, 16, 8, 0)
@@ -39,8 +45,10 @@ class DRIU(nn.Module):
 
     def forward(self,x):
         """
-        Arguments:
-            x (list[Tensor]): tensor as returned from the backbone network.
+        Parameters
+        ----------
+        x : list
+                list of tensors as returned from the backbone network.
                 First element: height and width of input image. 
                 Remaining elements: feature maps for each feature level.
         """
@@ -53,6 +61,13 @@ class DRIU(nn.Module):
         return out
 
 def build_driu():
+    """ 
+    Combines the backbone and the head into a single model
+
+    Returns
+    -------
+    model : :py:class:torch.nn.Module
+    """
     backbone = vgg16(pretrained=False, return_features = [3, 8, 14, 22])
     driu_head = DRIU([64, 128, 256, 512])
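
Because build_driu() wires the VGG16 backbone and the DRIU head into a named nn.Sequential, configs can address model.backbone and model.head separately (as in driulayerwise.py above) and the whole model is callable on an image batch. A short usage sketch, assuming the bob.ip.binseg package is importable; the expected output shape follows from the 1-channel ConcatFuseBlock and the crop-to-input-size upsampling:

import torch
from bob.ip.binseg.modeling.driu import build_driu

model = build_driu()                 # randomly initialized, no pretrained weights
x = torch.randn(1, 3, 544, 544)      # one RGB, fundus-sized image
with torch.no_grad():
    out = model(x)
print(out.shape)                     # expected: torch.Size([1, 1, 544, 544])
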
 
diff --git a/bob/ip/binseg/modeling/hed.py b/bob/ip/binseg/modeling/hed.py
index 6a3e1d8c90b9e1a5608e64b9fd004bbec3a33705..6a8349fd441ad37edf80628a849d11e2f0af56cd 100644
--- a/bob/ip/binseg/modeling/hed.py
+++ b/bob/ip/binseg/modeling/hed.py
@@ -8,6 +8,10 @@ from bob.ip.binseg.modeling.backbones.vgg import vgg16
 from bob.ip.binseg.modeling.make_layers import conv_with_kaiming_uniform, convtrans_with_kaiming_uniform, UpsampleCropBlock
 
 class ConcatFuseBlock(nn.Module):
+    """ 
+    Takes in five feature maps with one channel each, concatenates them
+    and applies a 1x1 convolution with 1 output channel. 
+    """
     def __init__(self):
         super().__init__()
         self.conv = conv_with_kaiming_uniform(5,1,1,1,0)
@@ -20,11 +24,11 @@ class ConcatFuseBlock(nn.Module):
 class HED(nn.Module):
     """
     HED head module
-    Attributes
+    
+    Parameters
     ----------
-        in_channels_list (list[int]): number of channels for each feature map that
-        will be fed
-        
+    in_channels_list : list
+                        number of channels for each feature map that is returned from backbone
     """
     def __init__(self, in_channels_list=None):
         super(HED, self).__init__()
@@ -41,8 +45,12 @@ class HED(nn.Module):
 
     def forward(self,x):
         """
-        Arguments:
-            x (list[Tensor]): feature maps for each feature level.
+        Parameters
+        ----------
+        x : list
+                list of tensors as returned from the backbone network.
+                First element: height and width of input image. 
+                Remaining elements: feature maps for each feature level.
         """
         hw = x[0]
         conv1_2_16 = self.conv1_2_16(x[1])  
@@ -56,6 +64,13 @@ class HED(nn.Module):
         return out
 
 def build_hed():
+    """ 
+    Combines the backbone and the head into a single model
+
+    Returns
+    -------
+    model : :py:class:torch.nn.Module
+    """
     backbone = vgg16(pretrained=False, return_features = [3, 8, 14, 22, 29])
     hed_head = HED([64, 128, 256, 512, 512])
 
diff --git a/bob/ip/binseg/modeling/m2u.py b/bob/ip/binseg/modeling/m2u.py
index e393625220a2ed1cb7e6f14f9867f2f9263bfed6..13602eb3d23abb04905b2e75d32791802ac608e5 100644
--- a/bob/ip/binseg/modeling/m2u.py
+++ b/bob/ip/binseg/modeling/m2u.py
@@ -40,9 +40,11 @@ class LastDecoderBlock(nn.Module):
 class M2U(nn.Module):
     """
     M2U-Net head module
-    Attributes
+    
+    Parameters
     ----------
-        in_channels_list (list[int]): number of channels for each feature map that is returned from backbone
+    in_channels_list : list
+                        number of channels for each feature map that is returned from backbone
     """
     def __init__(self, in_channels_list=None,upsamplemode='bilinear',expand_ratio=0.15):
         super(M2U, self).__init__()
@@ -67,6 +69,14 @@ class M2U(nn.Module):
                 m.bias.data.zero_()
     
     def forward(self,x):
+        """
+        Parameters
+        ----------
+        x : list
+                list of tensors as returned from the backbone network.
+                First element: height and width of input image. 
+                Remaining elements: feature maps for each feature level.
+        """
         decode4 = self.decode4(x[5],x[4])    # 96, 32
         decode3 = self.decode3(decode4,x[3]) # 64, 24
         decode2 = self.decode2(decode3,x[2]) # 44, 16
@@ -75,6 +85,13 @@ class M2U(nn.Module):
         return decode1
 
 def build_m2unet():
+    """ 
+    Combines the backbone and the head into a single model
+
+    Returns
+    -------
+    model : :py:class:torch.nn.Module
+    """
     backbone = MobileNetV2(return_features = [1,3,6,13], m2u=True)
     m2u_head = M2U(in_channels_list=[16, 24, 32, 96])
 
diff --git a/bob/ip/binseg/modeling/resunet.py b/bob/ip/binseg/modeling/resunet.py
index 7ee949617b1d4ea1a58d60df375b665522999677..38f66cddf787b8e48f8d7f8aeb50001121c229eb 100644
--- a/bob/ip/binseg/modeling/resunet.py
+++ b/bob/ip/binseg/modeling/resunet.py
@@ -12,9 +12,11 @@ from bob.ip.binseg.modeling.backbones.resnet import resnet50
 class ResUNet(nn.Module):
     """
     UNet head module for ResNet backbones
-    Attributes
+    
+    Parameters
     ----------
-        in_channels_list (list[int]): number of channels for each feature map that is returned from backbone
+    in_channels_list : list
+                        number of channels for each feature map that is returned from backbone
     """
     def __init__(self, in_channels_list=None, pixel_shuffle=False):
         super(ResUNet, self).__init__()
@@ -36,8 +38,10 @@ class ResUNet(nn.Module):
 
     def forward(self,x):
         """
-        Arguments:
-            x (list[Tensor]): tensor as returned from the backbone network.
+        Parameters
+        ----------
+        x : list
+                list of tensors as returned from the backbone network.
                 First element: height and width of input image. 
                 Remaining elements: feature maps for each feature level.
         """
@@ -51,6 +55,13 @@ class ResUNet(nn.Module):
         return out
 
 def build_res50unet():
+    """ 
+    Combines the backbone and the head into a single model
+
+    Returns
+    -------
+    model : :py:class:torch.nn.Module
+    """
     backbone = resnet50(pretrained=False, return_features = [2, 4, 5, 6, 7])
     unet_head  = ResUNet([64, 256, 512, 1024, 2048],pixel_shuffle=False)
     model = nn.Sequential(OrderedDict([("backbone", backbone), ("head", unet_head)]))
diff --git a/bob/ip/binseg/modeling/unet.py b/bob/ip/binseg/modeling/unet.py
index 9be2b498aa37c610686aa2e1daaea3be34c27d1e..d0db666bfa12d479ada6aa0f52e601b085a3a484 100644
--- a/bob/ip/binseg/modeling/unet.py
+++ b/bob/ip/binseg/modeling/unet.py
@@ -12,9 +12,11 @@ from bob.ip.binseg.modeling.backbones.vgg import vgg16
 class UNet(nn.Module):
     """
     UNet head module
-    Attributes
+    
+    Parameters
     ----------
-        in_channels_list (list[int]): number of channels for each feature map that is returned from backbone
+    in_channels_list : list
+                        number of channels for each feature map that is returned from backbone
     """
     def __init__(self, in_channels_list=None, pixel_shuffle=False):
         super(UNet, self).__init__()
@@ -30,8 +32,10 @@ class UNet(nn.Module):
 
     def forward(self,x):
         """
-        Arguments:
-            x (list[Tensor]): tensor as returned from the backbone network.
+        Parameters
+        ----------
+        x : list
+                list of tensors as returned from the backbone network.
                 First element: height and width of input image. 
                 Remaining elements: feature maps for each feature level.
         """
@@ -44,6 +48,13 @@ class UNet(nn.Module):
         return out
 
 def build_unet():
+    """ 
+    Combines the backbone and the head into a single model
+
+    Returns
+    -------
+    model : :py:class:torch.nn.Module
+    """
     backbone = vgg16(pretrained=False, return_features = [3, 8, 14, 22, 29])
     unet_head = UNet([64, 128, 256, 512, 512], pixel_shuffle=False)
 
diff --git a/bob/ip/binseg/script/__init__.py b/bob/ip/binseg/script/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2ca5e07cb73f0bdddcb863ef497955964087e301 100644
--- a/bob/ip/binseg/script/__init__.py
+++ b/bob/ip/binseg/script/__init__.py
@@ -0,0 +1,3 @@
+# see https://docs.python.org/3/library/pkgutil.html
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
\ No newline at end of file
diff --git a/bob/ip/binseg/script/binseg.py b/bob/ip/binseg/script/binseg.py
index d57406f707c752a06af384c1cd009c383eb15659..8fc667b1bdd3d4c22f926116f82c61b4841f2579 100644
--- a/bob/ip/binseg/script/binseg.py
+++ b/bob/ip/binseg/script/binseg.py
@@ -75,6 +75,7 @@ def binseg():
     )
 @click.option(
     '--pretrained-backbone',
+    '-t',
     required=True,
     cls=ResourceOption
     )
@@ -122,6 +123,8 @@ def train(model
         ,checkpoint_period
         ,device
         ,**kwargs):
+    """ Train a model """
+    
     if not os.path.exists(output_path): os.makedirs(output_path)
     
     # PyTorch dataloader
@@ -198,7 +201,7 @@ def test(model
         ,batch_size
         ,dataset
         , **kwargs):
-
+    """ Run inference and evalaute the model performance """
 
     # PyTorch dataloader
     data_loader = DataLoader(
@@ -257,7 +260,7 @@ def testcheckpoints(model
         ,dataset
         , **kwargs):
 
-
+    """ Run inference and evaluate all checkpoints saved for a model"""
     # PyTorch dataloader
     data_loader = DataLoader(
         dataset = dataset
diff --git a/bob/ip/binseg/test/test_basemetrics.py b/bob/ip/binseg/test/test_basemetrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf478ac788d038dd9038ca5d5b5cf7aa1ac7a83a
--- /dev/null
+++ b/bob/ip/binseg/test/test_basemetrics.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import unittest
+import numpy as np
+from bob.ip.binseg.utils.metric import base_metrics
+import random
+
+class Tester(unittest.TestCase):
+    """
+    Unit test for base metrics
+    """
+    def setUp(self):
+        self.tp = random.randint(1, 100)
+        self.fp = random.randint(1, 100)
+        self.tn = random.randint(1, 100)
+        self.fn = random.randint(1, 100)
+    
+    def test_precision(self):
+        precision = base_metrics(self.tp, self.fp, self.tn, self.fn)[0]
+        self.assertEqual((self.tp)/(self.tp + self.fp),precision)
+
+    def test_recall(self):
+        recall = base_metrics(self.tp, self.fp, self.tn, self.fn)[1]
+        self.assertEqual((self.tp)/(self.tp + self.fn),recall)
+
+    def test_specificity(self):
+        specificity = base_metrics(self.tp, self.fp, self.tn, self.fn)[2]
+        self.assertEqual((self.tn)/(self.tn + self.fp),specificity)
+    
+    def test_accuracy(self):
+        accuracy = base_metrics(self.tp, self.fp, self.tn, self.fn)[3]
+        self.assertEqual((self.tp + self.tn)/(self.tp + self.tn + self.fp + self.fn), accuracy)
+
+    def test_jaccard(self):
+        jaccard = base_metrics(self.tp, self.fp, self.tn, self.fn)[4]
+        self.assertEqual(self.tp / (self.tp+self.fp+self.fn), jaccard)
+
+    def test_f1(self):
+        f1 = base_metrics(self.tp, self.fp, self.tn, self.fn)[5]
+        self.assertEqual((2.0 * self.tp ) / (2.0 * self.tp + self.fp + self.fn ),f1)
+        
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
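
For concreteness, a worked example of the quantities these tests encode, using made-up confusion counts (not taken from the repository):

tp, fp, tn, fn = 80, 20, 890, 10

precision   = tp / (tp + fp)                   # 0.80
recall      = tp / (tp + fn)                   # ~0.889
specificity = tn / (tn + fp)                   # ~0.978
accuracy    = (tp + tn) / (tp + tn + fp + fn)  # 0.97
jaccard     = tp / (tp + fp + fn)              # ~0.727
f1          = 2.0 * tp / (2.0 * tp + fp + fn)  # ~0.842
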
diff --git a/bob/ip/binseg/test/test_batchmetrics.py b/bob/ip/binseg/test/test_batchmetrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..93d573e809e3ee17f2af4a61cb71a36ab49849c1
--- /dev/null
+++ b/bob/ip/binseg/test/test_batchmetrics.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import unittest
+import numpy as np
+from bob.ip.binseg.engine.inferencer import batch_metrics
+import random
+import shutil, tempfile
+import logging
+import torch
+
+class Tester(unittest.TestCase):
+    """
+    Unit test for batch metrics
+    """
+    def setUp(self):
+        self.tp = random.randint(1, 100)
+        self.fp = random.randint(1, 100)
+        self.tn = random.randint(1, 100)
+        self.fn = random.randint(1, 100)
+        self.predictions = torch.rand(size=(2,1,420,420))
+        self.ground_truths = torch.randint(low=0, high=2, size=(2,1,420,420))
+        self.masks = None
+        self.names = ['Bob','Tim'] 
+        self.output_folder = tempfile.mkdtemp()
+        self.logger = logging.getLogger(__name__)
+
+    def tearDown(self):
+        # Remove the temporary folder after the test
+        shutil.rmtree(self.output_folder)
+    
+    def test_batch_metrics(self):
+        bm = batch_metrics(self.predictions, self.ground_truths, self.masks, self.names, self.output_folder, self.logger)
+        self.assertEqual(len(bm),2*100)
+        for metric in bm:
+            # check whether f1 score agree
+            self.assertAlmostEqual(metric[-1],2*(metric[-6]*metric[-5])/(metric[-6]+metric[-5]))
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/bob/ip/binseg/test/test_models.py b/bob/ip/binseg/test/test_models.py
index bf796565c69be5e92eaeaebc5603736a889b488e..a5f37d3fb3ef05c1241544bef53a79e1166a6b34 100644
--- a/bob/ip/binseg/test/test_models.py
+++ b/bob/ip/binseg/test/test_models.py
@@ -6,27 +6,42 @@ import unittest
 import numpy as np
 from bob.ip.binseg.modeling.driu import build_driu
 from bob.ip.binseg.modeling.hed import build_hed
+from bob.ip.binseg.modeling.unet import build_unet
+from bob.ip.binseg.modeling.resunet import build_res50unet
 
 class Tester(unittest.TestCase):
     """
     Unit test for model architectures
     """
-    x = torch.randn(1, 3, 544, 544)
-    hw = np.array(x.shape)[[2,3]]
+    def setUp(self):
+        self.x = torch.randn(1, 3, 544, 544)
+        self.hw = np.array(self.x.shape)[[2,3]]
     
     def test_driu(self):
         model = build_driu()
-        out = model(Tester.x)
+        out = model(self.x)
         out_hw = np.array(out.shape)[[2,3]]
-        self.assertEqual(Tester.hw.all(), out_hw.all())
+        self.assertTrue(np.array_equal(self.hw, out_hw))
 
 
     def test_hed(self):
         model = build_hed()
-        out = model(Tester.x)
+        out = model(self.x)
         # NOTE: HED outputs a list of length 4. We test only for the last concat-fuse layer
         out_hw = np.array(out[4].shape)[[2,3]]
-        self.assertEqual(Tester.hw.all(), out_hw.all())
+        self.assertTrue(np.array_equal(self.hw, out_hw))
+
+    def test_unet(self):
+        model = build_unet()
+        out = model(self.x)
+        out_hw = np.array(out.shape)[[2,3]]
+        self.assertTrue(np.array_equal(self.hw, out_hw))
+
+    def test_resunet(self):
+        model = build_res50unet()
+        out = model(self.x)
+        out_hw = np.array(out.shape)[[2,3]]
+        self.assertTrue(np.array_equal(self.hw, out_hw))
 
 
 if __name__ == '__main__':
diff --git a/precision_recall_comparison.pdf b/precision_recall_comparison.pdf
deleted file mode 100644
index 1568e9c1a33e6421a2fd77af3bb6d720f1af156d..0000000000000000000000000000000000000000
Binary files a/precision_recall_comparison.pdf and /dev/null differ
diff --git a/setup.py b/setup.py
index 8c8bb0bfadff16e0e6de4a83c428ab648b34ffdf..eb8429e7503e244f61d92e8f9d693ee38db4df2f 100644
--- a/setup.py
+++ b/setup.py
@@ -59,6 +59,7 @@ setup(
           'M2UNet = bob.ip.binseg.configs.models.m2unet',
           'UNet = bob.ip.binseg.configs.models.unet',
           'ResUNet = bob.ip.binseg.configs.models.resunet',
+          'ShapeResUNet = bob.ip.binseg.configs.models.shaperesunet',
           'DRIUADABOUND = bob.ip.binseg.configs.models.driuadabound',
           'DRIVETRAIN = bob.ip.binseg.configs.datasets.drivetrain',
           'DRIVECROPTRAIN = bob.ip.binseg.configs.datasets.drivecroptrain',