diff --git a/bob/ip/binseg/engine/inferencer.py b/bob/ip/binseg/engine/inferencer.py
index 91191ab0760d44d2ffebc1c8a5af1d9a6b3d2054..feb78b3ec79550b91519a938c3e4dd96002f5442 100644
--- a/bob/ip/binseg/engine/inferencer.py
+++ b/bob/ip/binseg/engine/inferencer.py
@@ -102,7 +102,7 @@ def batch_metrics(predictions, ground_truths, names, output_folder):
     return batch_metrics
 
 
-def save_probability_images(predictions, names, output_folder, logger):
+def save_probability_images(predictions, names, output_folder):
     """
     Saves probability maps as image in the same format as the test image
 
@@ -118,10 +118,6 @@ def save_probability_images(predictions, names, output_folder, logger):
 
     output_folder : str
         output path
-
-    logger : :py:class:`logging.Logger`
-        python logger
-
     """
 
     images_subfolder = os.path.join(output_folder, "images")
@@ -136,6 +132,33 @@ def save_probability_images(predictions, names, output_folder, logger):
         img.save(fullpath)
 
 
+def save_hdf(predictions, names, output_folder):
+    """
+    Saves pixel-wise probability maps as HDF5 files
+
+    Parameters
+    ----------
+    predictions : :py:class:`torch.Tensor`
+        tensor with pixel-wise probabilities
+    names : list
+        list of file names
+    output_folder : str
+        output path
+    """
+    hdf5_subfolder = os.path.join(output_folder, "hdf5")
+    if not os.path.exists(hdf5_subfolder):
+        os.makedirs(hdf5_subfolder)
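+    # iterate over the batch: move each prediction to the CPU, drop the
+    # channel dimension and save the raw probability array as HDF5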
+    for j in range(predictions.size()[0]):
+        img = predictions.cpu().data[j].squeeze(0).numpy()
+        filename = "{}.hdf5".format(names[j].split(".")[0])
+        fullpath = os.path.join(hdf5_subfolder, filename)
+        logger.info("saving {}".format(filename))
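+        # names may include sub-directories, so ensure the full target path exists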
+        fulldir = os.path.dirname(fullpath)
+        if not os.path.exists(fulldir):
+            os.makedirs(fulldir)
+        bob.io.base.save(img, fullpath)
+
+
 def do_inference(model, data_loader, device, output_folder=None):
     """
     Runs inference and calculate metrics
@@ -187,14 +210,14 @@ def do_inference(model, data_loader, device, output_folder=None):
             logger.info("Batch time: {:.5f} s".format(batch_time))
 
             b_metrics = batch_metrics(
-                probabilities, ground_truths, names, results_subfolder, logger
+                probabilities, ground_truths, names, results_subfolder
             )
             metrics.extend(b_metrics)
 
             # Create probability images
-            save_probability_images(probabilities, names, output_folder, logger)
+            save_probability_images(probabilities, names, output_folder)
             # save hdf5
-            save_hdf(probabilities, names, output_folder, logger)
+            save_hdf(probabilities, names, output_folder)
 
     # DataFrame
     df_metrics = pd.DataFrame(
diff --git a/bob/ip/binseg/test/test_batchmetrics.py b/bob/ip/binseg/test/test_batchmetrics.py
index ca00945d604edf35629719867999b0d8cdbf659f..045b36385cb1a5edff0e734fc1656414ff6560ef 100644
--- a/bob/ip/binseg/test/test_batchmetrics.py
+++ b/bob/ip/binseg/test/test_batchmetrics.py
@@ -35,7 +35,6 @@ class Tester(unittest.TestCase):
             self.ground_truths,
             self.names,
             self.output_folder,
-            self.logger,
         )
         self.assertEqual(len(bm), 2 * 100)
         for metric in bm:
diff --git a/bob/ip/binseg/utils/evaluate.py b/bob/ip/binseg/utils/evaluate.py
index 5c3929ef9d7df8df36305c66f37585f745af783f..a921f61f96b63d03964dcfe7c06a4c06ad567304 100644
--- a/bob/ip/binseg/utils/evaluate.py
+++ b/bob/ip/binseg/utils/evaluate.py
@@ -21,7 +21,7 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-def batch_metrics(predictions, ground_truths, names, output_folder, logger):
+def batch_metrics(predictions, ground_truths, names, output_folder):
     """
     Calculates metrics on the batch and saves it to disc
 
@@ -35,8 +35,6 @@ def batch_metrics(predictions, ground_truths, names, output_folder, logger):
         list of file names
     output_folder : str
         output path
-    logger : :py:class:`logging.Logger`
-        python logger
 
     Returns
     -------
@@ -141,7 +139,7 @@ def do_eval(
         probabilities = to_tensor(probabilities)
 
         b_metrics = batch_metrics(
-            probabilities, ground_truths, names, results_subfolder, logger
+            probabilities, ground_truths, names, results_subfolder
         )
         metrics.extend(b_metrics)