diff --git a/bob/ip/binseg/configs/datasets/covd_drive.py b/bob/ip/binseg/configs/datasets/covd_drive.py
index e776504794cdaac0096ca23bc0754fabd59da145..0b56c91c066d8806cd371bc1484acecd85244ad6 100644
--- a/bob/ip/binseg/configs/datasets/covd_drive.py
+++ b/bob/ip/binseg/configs/datasets/covd_drive.py
@@ -37,8 +37,8 @@ _iostar = SampleList2TorchDataset(_raw_iostar.subsets("vessel")["train"],
 
 from bob.ip.binseg.data.hrf import dataset as _raw_hrf
 _hrf_transforms = [Resize((363)), Pad((0, 90, 0, 91))] + _DA
-dataset = SampleList2TorchDataset(_raw_hrf.subsets("default")["train"],
+_hrf = SampleList2TorchDataset(_raw_hrf.subsets("default")["train"],
         transforms=_hrf_transforms)
 
 import torch.utils.data
-dataset = torch.utils.data.ConcatDataset([_stare, _chase, _hrf, _iostar])
+dataset = torch.utils.data.ConcatDataset([_stare, _chase, _iostar, _hrf])
diff --git a/bob/ip/binseg/configs/datasets/covd_stare.py b/bob/ip/binseg/configs/datasets/covd_stare.py
index 387f6fa45dd74e22ccb4b880909d2dc371d45557..d77e85a7b5bb6f553663cbbe610e5555d286b85c 100644
--- a/bob/ip/binseg/configs/datasets/covd_stare.py
+++ b/bob/ip/binseg/configs/datasets/covd_stare.py
@@ -40,12 +40,12 @@ _chase = SampleList2TorchDataset(_raw_chase.subsets("default")["train"],
 
 from bob.ip.binseg.data.iostar import dataset as _raw_iostar
 _iostar_transforms = [Pad((81, 0, 81, 0)), Resize(608)] + _DA
-dataset = SampleList2TorchDataset(_raw_iostar.subsets("vessel")["train"],
+_iostar = SampleList2TorchDataset(_raw_iostar.subsets("vessel")["train"],
         transforms=_iostar_transforms)
 
 from bob.ip.binseg.data.hrf import dataset as _raw_hrf
 _hrf_transforms = [Pad((0, 345, 0, 345)), Resize(608)] + _DA
-dataset = SampleList2TorchDataset(_raw_hrf.subsets("default")["train"],
+_hrf = SampleList2TorchDataset(_raw_hrf.subsets("default")["train"],
         transforms=_hrf_transforms)
 
 import torch.utils.data
diff --git a/bob/ip/binseg/configs/datasets/csv.py b/bob/ip/binseg/configs/datasets/csv.py
index 14c394de88a321c179cf51309a4884fccddaf000..a03fcbad072fcff39f0fb84fc90e3f532ce3273e 100644
--- a/bob/ip/binseg/configs/datasets/csv.py
+++ b/bob/ip/binseg/configs/datasets/csv.py
@@ -73,7 +73,7 @@ More information:
 # First, define how to access and load the raw data. Our package provides some
 # stock loaders we use for other datasets. You may have a look at the
 # documentation of that module for details.
-from bob.ip.binseg.data.loaders import (
+from bob.ip.binseg.data.loader import (
     load_pil_rgb,
     load_pil_1,
     data_path_keymaker,
@@ -129,4 +129,4 @@ _transforms = [
 # This class also inherits from pytorch Dataset and respect its required API.
 # See the documentation for details.
 from bob.ip.binseg.data.utils import SampleList2TorchDataset
-dataset = SampleList2TorchDataset(raw_dataset.subset("data"), _transforms)
+#dataset = SampleList2TorchDataset(raw_dataset.samples("data"), _transforms)
diff --git a/bob/ip/binseg/configs/datasets/folder.py b/bob/ip/binseg/configs/datasets/folder.py
deleted file mode 100644
index da52a931b3ac2bdc8de851e3241baf3efe50e691..0000000000000000000000000000000000000000
--- a/bob/ip/binseg/configs/datasets/folder.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""Example self-scanning folder-based dataset
-
-In case you have data that is organized on your filesystem, this configuration
-shows an example setup so you can feed such files **without** ground-truth to
-predict vessel probalities using one of our trained models.  There can be any
-number of images within the root folder of your dataset, with any kind of
-subfolder arrangements.  For example:
-
-.. code-block:: text
-
-   ├── image_1.png
-   └── subdir1
-       ├── image_subdir_1.jpg
-       ├── ...
-       └── image_subdir_k.jpg
-   ├── ...
-   └── image_n.png
-
-Use the path leading to the root of your dataset, and replace ``<path.csv>`` on
-the example code for this configuration, that you must copy locally to make
-changes:
-
-.. code-block:: sh
-
-   $ bob binseg config copy folder-dataset-example mydataset.py
-   # edit mydataset.py as explained here
-
-Fine-tune the transformations for your particular purpose.
-
-Keep in mind that specific models require that you feed images respecting
-certain restrictions (input dimensions, image centering, etc.).  Check the
-configuration that was used to train models and try to match it as well as
-possible.
-"""
-
-# add your transforms below - these are just examples
-from bob.ip.binseg.data.transforms import CenterCrop
-_transforms = [
-        #CenterCrop((544, 544)),
-    ]
-
-from bob.ip.binseg.data.folderdataset import FolderDataset
-#dataset = FolderDataset("<path.csv>", glob="*.*", transforms=_transforms)
diff --git a/doc/evaluation.rst b/doc/evaluation.rst
index 48f7a91efbbec8da0bec6723ddbadf47a4d4d358..28a0bf65f0cffb9f3144f5990a3549f12de976e1 100644
--- a/doc/evaluation.rst
+++ b/doc/evaluation.rst
@@ -45,17 +45,13 @@ Inference on a custom dataset
 =============================
 
 If you would like to test your own data against one of the pre-trained models,
-you need to instantiate one of:
-
-* :py:mod:`A CSV-based configuration <bob.ip.binseg.configs.datasets.csv>`
-* :py:mod:`A folder-based configuration <bob.ip.binseg.configs.datasets.folder>`
+you need to instantiate :py:mod:`a CSV-based configuration
+<bob.ip.binseg.configs.datasets.csv>`.
 
 Read the appropriate module documentation for details.
 
 .. code-block:: bash
 
-   $ bob binseg config copy folder-dataset-example mydataset.py
-   # or
    $ bob binseg config copy csv-dataset-example mydataset.py
    # edit mydataset.py to your liking
    $ bob binseg predict -vv <model> -w <path/to/model.pth> ./mydataset.py