From e6a4630b62e740319e1e107b2abc7e390dac4735 Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Mon, 23 Mar 2020 22:56:17 +0100
Subject: [PATCH] [configs] Documented all configuration files; Added script to
 list/describe/copy configuration files; Re-structured user guide

---
 bob/ip/binseg/configs/datasets/chasedb1.py    |  25 ++-
 .../binseg/configs/datasets/chasedb1test.py   |  22 ++
 bob/ip/binseg/configs/datasets/drionsdb.py    |  20 +-
 .../binseg/configs/datasets/drionsdbtest.py   |  17 ++
 .../binseg/configs/datasets/dristhigs1cup.py  |  20 ++
 .../configs/datasets/dristhigs1cuptest.py     |  21 ++
 .../binseg/configs/datasets/dristhigs1od.py   |  20 ++
 .../configs/datasets/dristhigs1odtest.py      |  20 ++
 bob/ip/binseg/configs/datasets/drive.py       |  12 ++
 bob/ip/binseg/configs/datasets/drivetest.py   |  12 ++
 bob/ip/binseg/configs/datasets/hrf1168.py     |  17 +-
 bob/ip/binseg/configs/datasets/hrftest.py     |  13 ++
 bob/ip/binseg/configs/datasets/iostarod.py    |  14 ++
 .../binseg/configs/datasets/iostarodtest.py   |  14 ++
 .../binseg/configs/datasets/iostarvessel.py   |  14 ++
 .../configs/datasets/iostarvesseltest.py      |  14 ++
 bob/ip/binseg/configs/datasets/refugecup.py   |  22 +-
 .../binseg/configs/datasets/refugecuptest.py  |  25 +++
 bob/ip/binseg/configs/datasets/refugeod.py    |  20 ++
 .../binseg/configs/datasets/refugeodtest.py   |  25 +++
 bob/ip/binseg/configs/datasets/rimoner3cup.py |  15 ++
 .../configs/datasets/rimoner3cuptest.py       |  15 ++
 bob/ip/binseg/configs/datasets/rimoner3od.py  |  15 ++
 .../binseg/configs/datasets/rimoner3odtest.py |  15 ++
 bob/ip/binseg/configs/datasets/stare.py       |  17 +-
 bob/ip/binseg/configs/datasets/staretest.py   |  15 ++
 bob/ip/binseg/configs/models/driu.py          |   9 +
 bob/ip/binseg/configs/models/driubn.py        |  10 +
 bob/ip/binseg/configs/models/driubnssl.py     |  11 +
 bob/ip/binseg/configs/models/driuod.py        |   9 +
 bob/ip/binseg/configs/models/driussl.py       |  10 +
 bob/ip/binseg/configs/models/hed.py           |  11 +
 bob/ip/binseg/configs/models/m2unet.py        |  13 ++
 bob/ip/binseg/configs/models/m2unetssl.py     |  16 ++
 bob/ip/binseg/configs/models/resunet.py       |  13 ++
 bob/ip/binseg/configs/models/unet.py          |  11 +
 bob/ip/binseg/script/binseg.py                |  28 ++-
 bob/ip/binseg/script/config.py                | 189 ++++++++++++++++++
 conda/meta.yaml                               |   9 +
 doc/api.rst                                   |   2 +
 doc/datasets.rst                              |   1 -
 doc/index.rst                                 |   7 +-
 doc/references.rst                            |  15 ++
 doc/training.rst                              |  12 +-
 doc/usage.rst                                 |  43 ++++
 setup.py                                      |  97 ++++-----
 46 files changed, 909 insertions(+), 66 deletions(-)
 create mode 100644 bob/ip/binseg/script/config.py
 create mode 100644 doc/usage.rst

diff --git a/bob/ip/binseg/configs/datasets/chasedb1.py b/bob/ip/binseg/configs/datasets/chasedb1.py
index 605fd0a7..f9206da1 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1.py
@@ -1,6 +1,29 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""CHASE-DB1 (training set) for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both of the
+eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
+The images were captured with a 30-degree field-of-view (FOV) camera. The
+dataset is characterized by nonuniform background illumination, poor contrast
+of blood vessels compared with the background, and wider arterioles that have
+a bright strip running down the centre, known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Original resolution (height x width): 960 x 999
+* Configuration resolution: 960 x 960 (after hand-specified crop)
+* Training samples: 20
+* Split reference: [CHASEDB1-2012]_
+"""
+
+
 from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +32,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Crop(0, 18, 960, 960),
+        Crop(0, 18, 960, 960),  # (upper, left, height, width)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/chasedb1test.py b/bob/ip/binseg/configs/datasets/chasedb1test.py
index 17be7aa1..46245430 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1test.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1test.py
@@ -1,6 +1,28 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""CHASE-DB1 (test set) for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both of the
+eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
+The images were captured with a 30-degree field-of-view (FOV) camera. The
+dataset is characterized by nonuniform background illumination, poor contrast
+of blood vessels compared with the background, and wider arterioles that have
+a bright strip running down the centre, known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Original resolution (height x width): 960 x 999
+* Configuration resolution: 960 x 960 (after hand-specified crop)
+* Test samples: 8
+* Split reference: [CHASEDB1-2012]_
+"""
+
 from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drionsdb.py b/bob/ip/binseg/configs/datasets/drionsdb.py
index 0a03dadf..7b1730db 100644
--- a/bob/ip/binseg/configs/datasets/drionsdb.py
+++ b/bob/ip/binseg/configs/datasets/drionsdb.py
@@ -1,6 +1,24 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIONS-DB (training set) for Optic Disc Segmentation
+
+The dataset originates from data collected from 55 patients with glaucoma
+(23.1%) and eye hypertension (76.9%), randomly selected from an eye fundus
+image base belonging to the Ophthalmology Service at Miguel Servet Hospital,
+Saragossa (Spain).  It contains 110 eye fundus images with a resolution of 600
+x 400. Two sets of ground-truth optic disc annotations are available. The first
+set is commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [DRIONSDB-2008]_
+* Original resolution (height x width): 400 x 600
+* Configuration resolution: 416 x 608 (after padding)
+* Training samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
+
 from bob.db.drionsdb import Database as DRIONS
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +27,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Pad((4, 8, 4, 8)),
+        Pad((4, 8, 4, 8)),  # (left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/drionsdbtest.py b/bob/ip/binseg/configs/datasets/drionsdbtest.py
index 75bcbb58..8b1a0c87 100644
--- a/bob/ip/binseg/configs/datasets/drionsdbtest.py
+++ b/bob/ip/binseg/configs/datasets/drionsdbtest.py
@@ -1,6 +1,23 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIONS-DB (test set) for Optic Disc Segmentation
+
+The dataset originates from data collected from 55 patients with glaucoma
+(23.1%) and eye hypertension (76.9%), randomly selected from an eye fundus
+image base belonging to the Ophthalmology Service at Miguel Servet Hospital,
+Saragossa (Spain).  It contains 110 eye fundus images with a resolution of 600
+x 400. Two sets of ground-truth optic disc annotations are available. The first
+set is commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [DRIONSDB-2008]_
+* Original resolution (height x width): 400 x 600
+* Configuration resolution: 416 x 608 (after padding)
+* Test samples: 50
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.drionsdb import Database as DRIONS
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1cup.py b/bob/ip/binseg/configs/datasets/dristhigs1cup.py
index a1da30ad..485a5e68 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1cup.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1cup.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (training set) for Cup Segmentation
+
+Drishti-GS is a dataset meant for the validation of optic disc (OD) and cup
+segmentation and notching detection.  Images in this dataset were collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Training samples: 50
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py b/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
index e35eabf0..511b5273 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
@@ -1,5 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
+
+"""DRISHTI-GS1 (test set) for Cup Segmentation
+
+Drishti-GS is a dataset meant for the validation of optic disc (OD) and cup
+segmentation and notching detection.  Images in this dataset were collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Test samples: 51
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1od.py b/bob/ip/binseg/configs/datasets/dristhigs1od.py
index 3421ebe6..5c46540a 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1od.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1od.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (training set) for Optic Disc Segmentation
+
+Drishti-GS is a dataset meant for the validation of optic disc (OD) and cup
+segmentation and notching detection.  Images in this dataset were collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Training samples: 50
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1odtest.py b/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
index 1fdc8a28..e5e5c36a 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (test set) for Optic Disc Segmentation
+
+Drishti-GS is a dataset meant for the validation of optic disc (OD) and cup
+segmentation and notching detection.  Images in this dataset were collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Test samples: 51
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drive.py b/bob/ip/binseg/configs/datasets/drive.py
index 04819dc0..179e2e25 100644
--- a/bob/ip/binseg/configs/datasets/drive.py
+++ b/bob/ip/binseg/configs/datasets/drive.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIVE (training set) for Vessel Segmentation
+
+The DRIVE database has been established to enable comparative studies on
+segmentation of blood vessels in retinal images.
+
+* Reference: [DRIVE-2004]_
+* Original resolution (height x width): 584 x 565
+* Configuration resolution: 544 x 544 (after center-crop)
+* Training samples: 20
+* Split reference: [DRIVE-2004]_
+"""
+
 from bob.db.drive import Database as DRIVE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drivetest.py b/bob/ip/binseg/configs/datasets/drivetest.py
index c6bff8ca..2f0aa772 100644
--- a/bob/ip/binseg/configs/datasets/drivetest.py
+++ b/bob/ip/binseg/configs/datasets/drivetest.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIVE (test set) for Vessel Segmentation
+
+The DRIVE database has been established to enable comparative studies on
+segmentation of blood vessels in retinal images.
+
+* Reference: [DRIVE-2004]_
+* Original resolution (height x width): 584 x 565
+* Configuration resolution: 544 x 544 (after center-crop)
+* Test samples: 20
+* Split reference: [DRIVE-2004]_
+"""
+
 from bob.db.drive import Database as DRIVE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/hrf1168.py b/bob/ip/binseg/configs/datasets/hrf1168.py
index 4467c02c..64ebef45 100644
--- a/bob/ip/binseg/configs/datasets/hrf1168.py
+++ b/bob/ip/binseg/configs/datasets/hrf1168.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""HRF (training set) for Vessel Segmentation
+
+The database includes 15 images each of healthy, diabetic retinopathy (DR), and
+glaucomatous eyes.  It contains 45 eye fundus images with a resolution of 3504
+x 2336. One set of ground-truth vessel annotations is available.
+
+* Reference: [HRF-2013]_
+* Original resolution (height x width): 2336 x 3504
+* Configuration resolution: 1168 x 1648 (after specific cropping and rescaling)
+* Training samples: 15
+* Split reference: [ORLANDO-2017]_
+"""
+
 from bob.db.hrf import Database as HRF
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,8 +22,8 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Crop(0, 108, 2336, 3296),
-        Resize((1168)),
+        Crop(0, 108, 2336, 3296),  # (upper, left, height, width)
+        Resize((1168)),  # applies to the smaller edge
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/hrftest.py b/bob/ip/binseg/configs/datasets/hrftest.py
index d7c32c2a..d106365a 100644
--- a/bob/ip/binseg/configs/datasets/hrftest.py
+++ b/bob/ip/binseg/configs/datasets/hrftest.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""HRF (test set) for Vessel Segmentation
+
+The database includes 15 images each of healthy, diabetic retinopathy (DR), and
+glaucomatous eyes.  It contains 45 eye fundus images with a resolution of 3504
+x 2336. One set of ground-truth vessel annotations is available.
+
+* Reference: [HRF-2013]_
+* Original resolution (height x width): 2336 x 3504
+* Configuration resolution: 1168 x 1648 (after specific cropping and rescaling)
+* Test samples: 30
+* Split reference: [ORLANDO-2017]_
+"""
+
 from bob.db.hrf import Database as HRF
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarod.py b/bob/ip/binseg/configs/datasets/iostarod.py
index e043f416..c905a05f 100644
--- a/bob/ip/binseg/configs/datasets/iostarod.py
+++ b/bob/ip/binseg/configs/datasets/iostarod.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (training set) for Optic Disc Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Training samples: 20
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarodtest.py b/bob/ip/binseg/configs/datasets/iostarodtest.py
index a4e9b4c8..d4650b3e 100644
--- a/bob/ip/binseg/configs/datasets/iostarodtest.py
+++ b/bob/ip/binseg/configs/datasets/iostarodtest.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (test set) for Optic Disc Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Test samples: 10
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarvessel.py b/bob/ip/binseg/configs/datasets/iostarvessel.py
index 5fa8ebb6..9cda4256 100644
--- a/bob/ip/binseg/configs/datasets/iostarvessel.py
+++ b/bob/ip/binseg/configs/datasets/iostarvessel.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (training set) for Vessel Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Training samples: 20
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarvesseltest.py b/bob/ip/binseg/configs/datasets/iostarvesseltest.py
index 18ec9f2e..54a7993e 100644
--- a/bob/ip/binseg/configs/datasets/iostarvesseltest.py
+++ b/bob/ip/binseg/configs/datasets/iostarvesseltest.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (test set) for Vessel Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Test samples: 10
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugecup.py b/bob/ip/binseg/configs/datasets/refugecup.py
index 1100f508..b90400a7 100644
--- a/bob/ip/binseg/configs/datasets/refugecup.py
+++ b/bob/ip/binseg/configs/datasets/refugecup.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (training set) for Cup Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Training samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice that the data cropping/resizing algorithms applied on training and
+   "validation" data are slightly different and need to be cross-checked.
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -10,7 +30,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 transforms = Compose(
     [
         Resize((1539)),
-        Pad((21, 46, 22, 47)),
+        Pad((21, 46, 22, 47)),  # (left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/refugecuptest.py b/bob/ip/binseg/configs/datasets/refugecuptest.py
index 5e600307..a24ec43c 100644
--- a/bob/ip/binseg/configs/datasets/refugecuptest.py
+++ b/bob/ip/binseg/configs/datasets/refugecuptest.py
@@ -1,6 +1,31 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (validation set) for Cup Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Validation samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice two aspects before using these configurations:
+
+   1. The data cropping/resizing algorithms applied on training and
+      "validation" data are slightly different and need to be cross-checked.
+   2. This is the **validation** set!  The real **test** set is not yet
+      integrated into the originating bob.db.refuge package: see
+      https://gitlab.idiap.ch/bob/bob.db.refuge/issues/1
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugeod.py b/bob/ip/binseg/configs/datasets/refugeod.py
index 4435640e..da226c8b 100644
--- a/bob/ip/binseg/configs/datasets/refugeod.py
+++ b/bob/ip/binseg/configs/datasets/refugeod.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (training set) for Optic Disc Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Training samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice that the data cropping/resizing algorithms applied on training and
+   "validation" data are slightly different and need to be cross-checked.
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugeodtest.py b/bob/ip/binseg/configs/datasets/refugeodtest.py
index b77d3e28..a14ff84b 100644
--- a/bob/ip/binseg/configs/datasets/refugeodtest.py
+++ b/bob/ip/binseg/configs/datasets/refugeodtest.py
@@ -1,6 +1,31 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (validation set) for Optic Disc Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Validation samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice two aspects before using these configurations:
+
+   1. The data cropping/resizing algorithms applied on training and
+      "validation" data are slightly different and need to be cross-checked.
+   2. This is the **validation** set!  The real **test** set is not yet
+      integrated into the originating bob.db.refuge package: see
+      https://gitlab.idiap.ch/bob/bob.db.refuge/issues/1
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3cup.py b/bob/ip/binseg/configs/datasets/rimoner3cup.py
index 0fad0285..361e35f4 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3cup.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3cup.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (training set) for Cup Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Training samples: 99
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3cuptest.py b/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
index 86465331..c897048f 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (test set) for Cup Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Test samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3od.py b/bob/ip/binseg/configs/datasets/rimoner3od.py
index a465342a..60e0a571 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3od.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3od.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (training set) for Optic Disc Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Training samples: 99
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3odtest.py b/bob/ip/binseg/configs/datasets/rimoner3odtest.py
index 6e4dd1a6..0a633fd6 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3odtest.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3odtest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (test set) for Optic Disc Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Test samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/stare.py b/bob/ip/binseg/configs/datasets/stare.py
index 0f93cc78..4e11db03 100644
--- a/bob/ip/binseg/configs/datasets/stare.py
+++ b/bob/ip/binseg/configs/datasets/stare.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 605 x 700 (height x width). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Original resolution (height x width): 605 x 700
+* Configuration resolution: 608 x 704 (after padding)
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +24,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Pad((2, 1, 2, 2)),
+        Pad((2, 1, 2, 2)),  # (left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/staretest.py b/bob/ip/binseg/configs/datasets/staretest.py
index ac03e2a7..e296ac7a 100644
--- a/bob/ip/binseg/configs/datasets/staretest.py
+++ b/bob/ip/binseg/configs/datasets/staretest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""STARE (test set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 605 x 700 (height x width). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Original resolution (height x width): 605 x 700
+* Configuration resolution: 608 x 704 (after padding)
+* Test samples: 10
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/models/driu.py b/bob/ip/binseg/configs/models/driu.py
index cbf4f41e..cdc9cb89 100644
--- a/bob/ip/binseg/configs/models/driu.py
+++ b/bob/ip/binseg/configs/models/driu.py
@@ -1,6 +1,15 @@
 #!/usr/bin/env python
 # coding=utf-8
 
+"""DRIU Network for Vessel Segmentation
+
+Deep Retinal Image Understanding (DRIU) is a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driu import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driubn.py b/bob/ip/binseg/configs/models/driubn.py
index 2e69cadc..4e3a4b3c 100644
--- a/bob/ip/binseg/configs/models/driubn.py
+++ b/bob/ip/binseg/configs/models/driubn.py
@@ -1,6 +1,16 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation with Batch Normalization
+
+Deep Retinal Image Understanding (DRIU) is a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This implementation includes batch
+normalization as a regularization mechanism.
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driubn import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driubnssl.py b/bob/ip/binseg/configs/models/driubnssl.py
index e04a47a1..d7a4d295 100644
--- a/bob/ip/binseg/configs/models/driubnssl.py
+++ b/bob/ip/binseg/configs/models/driubnssl.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation using SSL and Batch Normalization
+
+Deep Retinal Image Understanding (DRIU) is a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This version of our model includes
+a loss that is suitable for Semi-Supervised Learning (SSL).  This version also
+includes batch normalization as a regularization mechanism.
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driubn import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driuod.py b/bob/ip/binseg/configs/models/driuod.py
index 5fdbf582..9535c89a 100644
--- a/bob/ip/binseg/configs/models/driuod.py
+++ b/bob/ip/binseg/configs/models/driuod.py
@@ -1,6 +1,15 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Optic Disc Segmentation
+
+Deep Retinal Image Understanding (DRIU) is a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driuod import build_driuod
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driussl.py b/bob/ip/binseg/configs/models/driussl.py
index 38c96982..19d94f94 100644
--- a/bob/ip/binseg/configs/models/driussl.py
+++ b/bob/ip/binseg/configs/models/driussl.py
@@ -1,6 +1,16 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation using SSL
+
+Deep Retinal Image Understanding (DRIU) is a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This version of our model includes
+a loss that is suitable for Semi-Supervised Learning (SSL).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driu import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/hed.py b/bob/ip/binseg/configs/models/hed.py
index 0a6b3250..6a9d7e82 100644
--- a/bob/ip/binseg/configs/models/hed.py
+++ b/bob/ip/binseg/configs/models/hed.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+
+"""HED Network for Vessel Segmentation
+
+Holistically-nested edge detection (HED) turns pixel-wise edge classification
+into image-to-image prediction by means of a deep learning model that leverages
+fully convolutional neural networks and deeply-supervised nets.
+
+Reference: [XIE-2015]_
+"""
+
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.hed import build_hed
 from bob.ip.binseg.modeling.losses import HEDSoftJaccardBCELogitsLoss
diff --git a/bob/ip/binseg/configs/models/m2unet.py b/bob/ip/binseg/configs/models/m2unet.py
index a1626c91..2edc0372 100644
--- a/bob/ip/binseg/configs/models/m2unet.py
+++ b/bob/ip/binseg/configs/models/m2unet.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""MobileNetV2 U-Net Model for Vessel Segmentation
+
+The MobileNetV2 architecture is based on an inverted residual structure where
+the input and output of the residual block are thin bottleneck layers, as
+opposed to traditional residual models, which use expanded representations in
+the input.  MobileNetV2 uses lightweight depthwise convolutions to filter
+features in the intermediate expansion layer.  This model implements a
+MobileNetV2 U-Net, henceforth named M2U-Net, combining the strengths of U-Net
+for medical segmentation applications and the speed of MobileNetV2 networks.
+
+References: [SANDLER-2018]_, [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.m2u import build_m2unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/m2unetssl.py b/bob/ip/binseg/configs/models/m2unetssl.py
index a6ef11a5..402a84d6 100644
--- a/bob/ip/binseg/configs/models/m2unetssl.py
+++ b/bob/ip/binseg/configs/models/m2unetssl.py
@@ -1,6 +1,22 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+
+"""MobileNetV2 U-Net Model for Vessel Segmentation using SSL
+
+The MobileNetV2 architecture is based on an inverted residual structure where
+the input and output of the residual block are thin bottleneck layers, as
+opposed to traditional residual models, which use expanded representations in
+the input.  MobileNetV2 uses lightweight depthwise convolutions to filter
+features in the intermediate expansion layer.  This model implements a
+MobileNetV2 U-Net, henceforth named M2U-Net, combining the strengths of U-Net
+for medical segmentation applications and the speed of MobileNetV2 networks.
+This version of our model includes a loss that is suitable for
+Semi-Supervised Learning (SSL).
+
+References: [SANDLER-2018]_, [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.m2u import build_m2unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/resunet.py b/bob/ip/binseg/configs/models/resunet.py
index ca9bc8b4..ff7e26e5 100644
--- a/bob/ip/binseg/configs/models/resunet.py
+++ b/bob/ip/binseg/configs/models/resunet.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""Residual U-Net for Vessel Segmentation
+
+A semantic segmentation neural network which combines the strengths of residual
+learning and U-Net is proposed for road area extraction.  The network is built
+with residual units and has similar architecture to that of U-Net. The benefits
+of this model are two-fold: first, residual units ease training of deep
+networks. Second, the rich skip connections within the network could facilitate
+information propagation, allowing us to design networks with fewer parameters
+but with better performance.
+
+Reference: [ZHANG-2017]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.resunet import build_res50unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/unet.py b/bob/ip/binseg/configs/models/unet.py
index 59d96eac..ee1eddb7 100644
--- a/bob/ip/binseg/configs/models/unet.py
+++ b/bob/ip/binseg/configs/models/unet.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""U-Net for Vessel Segmentation
+
+U-Net is a convolutional neural network that was developed for biomedical image
+segmentation at the Computer Science Department of the University of Freiburg,
+Germany.  The network is based on the fully convolutional network (FCN) and its
+architecture was modified and extended to work with fewer training images and
+to yield more precise segmentations.
+
+Reference: [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.unet import build_unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/script/binseg.py b/bob/ip/binseg/script/binseg.py
index 5bb0ece6..88a995d1 100644
--- a/bob/ip/binseg/script/binseg.py
+++ b/bob/ip/binseg/script/binseg.py
@@ -39,11 +39,31 @@ logger = logging.getLogger(__name__)
 @with_plugins(pkg_resources.iter_entry_points("bob.ip.binseg.cli"))
 @click.group(cls=AliasedGroup)
 def binseg():
-    """Binary 2D Fundus Image Segmentation Benchmark commands."""
+    """Binary 2D Image Segmentation Benchmark commands."""
 
 
 # Train
-@binseg.command(entry_point_group="bob.ip.binseg.config", cls=ConfigCommand)
+@binseg.command(entry_point_group="bob.ip.binseg.config", cls=ConfigCommand,
+    epilog="""
+Examples:
+
+  1. Trains a DRIU model with the DRIVE dataset (vessel segmentation), using
+     the preset configuration resources installed with this package:
+
+\b
+     $ bob binseg train -vv driu drive -o /path/to/output-folder
+
+
+  2. Trains an M2U-Net model with the STARE dataset, saving results to a
+     custom output folder:
+
+\b
+     $ bob binseg train -vv m2unet stare -o /path/to/output-folder
+"""
+        )
 @click.option(
     "--output-path", "-o", required=True, default="output", cls=ResourceOption
 )
@@ -173,7 +193,7 @@ def train(
 )
 @verbosity_option(cls=ResourceOption)
 def test(model, output_path, device, batch_size, dataset, weight, **kwargs):
-    """ Run inference and evalaute the model performance """
+    """ Run inference and evaluate the model performance """
 
     # PyTorch dataloader
     data_loader = DataLoader(
@@ -420,7 +440,7 @@ def transformfolder(source_path, target_path, transforms, **kwargs):
 )
 @verbosity_option(cls=ResourceOption)
 def predict(model, output_path, device, batch_size, dataset, weight, **kwargs):
-    """ Run inference and evalaute the model performance """
+    """ Run inference and evaluate the model performance """
 
     # PyTorch dataloader
     data_loader = DataLoader(
diff --git a/bob/ip/binseg/script/config.py b/bob/ip/binseg/script/config.py
new file mode 100644
index 00000000..72ceb2b4
--- /dev/null
+++ b/bob/ip/binseg/script/config.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+import shutil
+import inspect
+
+import click
+import pkg_resources
+from click_plugins import with_plugins
+
+from bob.extension.scripts.click_helper import (
+    verbosity_option,
+    AliasedGroup,
+)
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+@click.group(cls=AliasedGroup)
+def config():
+    """Commands for listing, describing and copying configuration resources"""
+    pass
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Lists all configuration resources (type: bob.ip.binseg.config) installed:
+
+\b
+     $ bob binseg config list
+
+
+\b
+  2. Lists all configuration resources and their descriptions (notice this may
+     be slow as it needs to load all modules once):
+
+\b
+     $ bob binseg config list -v
+
+"""
+)
+@verbosity_option()
+def list(verbose):
+    """Lists configuration files installed"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    # all modules with configuration resources
+    modules = set(
+        k.module_name.rsplit(".", 1)[0] for k in entry_points.values()
+    )
+
+    # sort data entries by originating module
+    entry_points_by_module = {}
+    for k in modules:
+        entry_points_by_module[k] = {}
+        for name, ep in entry_points.items():
+            if ep.module_name.startswith(k):
+                entry_points_by_module[k][name] = ep
+
+    for config_type in sorted(entry_points_by_module):
+
+        # calculates the longest config name so we offset the printing
+        longest_name_length = max(
+            len(k) for k in entry_points_by_module[config_type].keys()
+        )
+
+        # set-up printing options
+        print_string = "  %%-%ds   %%s" % (longest_name_length,)
+        # 79 - 5 spaces (2 + 3, see string above) = 74
+        description_leftover = 74 - longest_name_length
+
+        print("module: %s" % (config_type,))
+        for name in sorted(entry_points_by_module[config_type]):
+            ep = entry_points[name]
+
+            if verbose >= 1:
+                module = ep.load()
+                doc = inspect.getdoc(module)
+                if doc is not None:
+                    summary = doc.split("\n\n")[0]
+                else:
+                    summary = "<DOCSTRING NOT AVAILABLE>"
+            else:
+                summary = ""
+
+            summary = (
+                (summary[: (description_leftover - 3)] + "...")
+                if len(summary) > (description_leftover - 3)
+                else summary
+            )
+
+            print(print_string % (name, summary))
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Describes the DRIVE (training) dataset configuration:
+
+\b
+     $ bob binseg config describe drive
+
+
+\b
+  2. Describes the DRIVE (training) dataset configuration and lists its
+     contents:
+
+\b
+     $ bob binseg config describe drive -v
+
+"""
+)
+@click.argument(
+    "name", required=True, nargs=-1,
+)
+@verbosity_option()
+def describe(name, verbose):
+    """Describes a specific configuration file"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    for k in name:
+        if k not in entry_points:
+            logger.error("Cannot find configuration resource '%s'", k)
+            continue
+        ep = entry_points[k]
+        print("Configuration: %s" % (ep.name,))
+        print("Python Module: %s" % (ep.module_name,))
+        print("")
+        mod = ep.load()
+
+        if verbose >= 1:
+            fname = inspect.getfile(mod)
+            print("Contents:")
+            with open(fname, "r") as f:
+                print(f.read())
+        else:  # only output documentation
+            print("Documentation:")
+            print(inspect.getdoc(mod))
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Makes a copy of one of the stock configuration files locally, so it can be
+     adapted:
+
+\b
+     $ bob binseg config copy drive -vvv newdataset.py
+
+
+"""
+)
+@click.argument(
+    "source", required=True, nargs=1,
+)
+@click.argument(
+    "destination", required=True, nargs=1,
+)
+@verbosity_option()
+def copy(source, destination, verbose):
+    """Copies a specific configuration resource so it can be modified locally"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    if source not in entry_points:
+        logger.error("Cannot find configuration resource '%s'", source)
+        return 1
+    ep = entry_points[source]
+    mod = ep.load()
+    src_name = inspect.getfile(mod)
+    logger.info("cp %s -> %s", src_name, destination)
+    shutil.copyfile(src_name, destination)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 57b6a20a..2a29081e 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -47,6 +47,15 @@ test:
   commands:
     # test commands ("script" entry-points) from your package here
     - bob binseg --help
+    - bob binseg config --help
+    - bob binseg config list --help
+    - bob binseg config list
+    - bob binseg config list -v
+    - bob binseg config describe --help
+    - bob binseg config describe drive
+    - bob binseg config describe drive -v
+    - bob binseg config copy --help
+    - bob binseg config copy drive /tmp/test.py
     - bob binseg compare --help
     - bob binseg evalpred --help
     - bob binseg gridtable --help
diff --git a/doc/api.rst b/doc/api.rst
index 5e836692..f9dbddb8 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -93,6 +93,8 @@ Scripts
    bob.ip.binseg.script.binseg
 
 
+.. _bob.ip.binseg.configs:
+
 Preset Configurations
 ---------------------
 
diff --git a/doc/datasets.rst b/doc/datasets.rst
index bd77ab1f..54b45744 100644
--- a/doc/datasets.rst
+++ b/doc/datasets.rst
@@ -13,7 +13,6 @@ to generate iterators for training and testing.
 
 
 .. list-table::
-   :header-rows: 1
 
    * - Dataset
      - Reference
diff --git a/doc/index.rst b/doc/index.rst
index 04c2d851..e8fe3faa 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -42,15 +42,12 @@ Users Guide
    :maxdepth: 2
 
    setup
-   datasets
-   training
-   evaluation
+   usage
    benchmarkresults
    covdresults
-   plotting
-   visualization
    acknowledgements
    references
+   datasets
    api
 
 
diff --git a/doc/references.rst b/doc/references.rst
index 97ad5358..6b942813 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -89,3 +89,18 @@
 .. [LIN-2018] *J. Lin*, **pytorch-mobilenet-v2: A PyTorch implementation of
    MobileNetV2**, 2018.  Last accessed: 21.03.2020.
    https://github.com/tonylins/pytorch-mobilenet-v2
+
+.. [XIE-2015] *S. Xie and Z. Tu*, **Holistically-Nested Edge Detection**, 2015
+   IEEE International Conference on Computer Vision (ICCV), Santiago, 2015, pp.
+   1395-1403.  https://doi.org/10.1109/ICCV.2015.164
+
+.. [RONNEBERGER-2015] *O. Ronneberger, P. Fischer, T. Brox*, **U-Net:
+   Convolutional Networks for Biomedical Image Segmentation**, 2015.
+   https://arxiv.org/abs/1505.04597
+
+.. [ZHANG-2017] *Z. Zhang, Q. Liu, Y. Wang*, **Road Extraction by Deep Residual
+   U-Net**, 2017. https://arxiv.org/abs/1711.10684
+
+.. [SANDLER-2018] *M. Sandler, A. Howard, M. Zhu, A. Zhmoginov, L.-C.h Chen*,
+   **MobileNetV2: Inverted Residuals and Linear Bottlenecks**, 2018.
+   https://arxiv.org/abs/1801.04381
diff --git a/doc/training.rst b/doc/training.rst
index e23d1bfc..30e483f4 100644
--- a/doc/training.rst
+++ b/doc/training.rst
@@ -1,14 +1,20 @@
 .. -*- coding: utf-8 -*-
-.. _bob.ip.binseg.training:
 
+.. _bob.ip.binseg.training:
 
 ==========
  Training
 ==========
 
+To train a new FCN, use the command-line interface (CLI) application ``bob
+binseg train``, available on your prompt.  To use this CLI, you must define
+the input dataset that will be used to train the FCN, as well as the type of
+model that will be trained.  You may issue ``bob binseg train --help`` for a
+help message containing more detailed instructions.
+
 To replicate our results, use our main application ``bob binseg train``
-followed by the model configuration, and dataset configuration files.  Use ``bob
-binseg train --help`` for more information.
+followed by the model and dataset configuration files, and/or
+command-line options.  Use ``bob binseg train --help`` for more information.
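+
+For example, assuming the preset configuration resources installed with this
+package, training an M2U-Net model on the DRIVE training set could be started
+as follows (the output folder name is only an illustration):
+
+.. code-block:: sh
+
+   $ bob binseg train -vv m2unet drive -o /path/to/output-folder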
 
 .. note::
 
diff --git a/doc/usage.rst b/doc/usage.rst
new file mode 100644
index 00000000..2c8882e4
--- /dev/null
+++ b/doc/usage.rst
@@ -0,0 +1,43 @@
+.. -*- coding: utf-8 -*-
+
+.. _bob.ip.binseg.usage:
+
+==================
+ Usage Guidelines
+==================
+
+This package supports a fully reproducible research experimentation cycle for
+semantic binary segmentation with support for the following activities:
+
+* Training: Images are fed to a Fully Convolutional Deep Neural Network (FCN),
+  that is trained to reconstruct annotations (pre-segmented binary maps),
+  automatically, via error back propagation.  The objective of this phase is to
+  produce an FCN model.
+* Inference: The FCN is used to generate vessel map predictions.
+* Evaluation: Vessel map predictions are used to evaluate FCN performance
+  against test data, generate ROC curves, or visualize prediction results
+  overlaid on the original raw images.
+
+Each application is implemented as a command-line utility that is configurable
+using :ref:`Bob's extensible configuration framework
+<bob.extension.framework>`.  In essence, each command-line option may be
+provided as a variable with the same name in a Python file.  Each file may
+combine any number of variables that are pertinent to an application.  We
+provide a number of :ref:`preset configuration files <bob.ip.binseg.configs>`
+that can be used in one or more of the activities described above.  Our
+command-line framework allows you to refer to these preset configuration files
+using special names (a.k.a. "resources"), which it locates and loads for you
+automatically.  Aside from preset configuration files, you may also create your
+own to extend existing baseline experiments.
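+
+As an illustration, a custom dataset configuration could start like the sketch
+below.  The file name ``newdataset.py`` and the transform values are only
+used as examples here; in practice, the easiest starting point is
+``bob binseg config copy drive newdataset.py``, which gives you a complete
+preset file to adapt:
+
+.. code-block:: python
+
+   # newdataset.py: a custom dataset configuration (illustrative sketch)
+   from bob.ip.binseg.data.transforms import *
+
+   # geometry normalization and data augmentation, as done in the presets
+   transforms = Compose(
+       [
+           Crop(0, 18, 960, 960),  # (upper, left, height, width)
+           RandomHFlip(),
+           RandomVFlip(),
+           RandomRotation(),
+       ]
+   )
+
+   # the copied preset also sets up the actual dataset object further down;
+   # keep and adapt those lines for your own data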
+
+
+.. toctree::
+   :maxdepth: 2
+
+   training
+   evaluation
+   plotting
+   visualization
+
+
+.. include:: links.rst
diff --git a/setup.py b/setup.py
index 51f4125a..0b3b97be 100644
--- a/setup.py
+++ b/setup.py
@@ -39,77 +39,80 @@ setup(
             "train = bob.ip.binseg.script.binseg:train",
             "test = bob.ip.binseg.script.binseg:test",
             "visualize = bob.ip.binseg.script.binseg:visualize",
+            "config = bob.ip.binseg.script.config:config",
         ],
         # bob train configurations
         "bob.ip.binseg.config": [
 
             # models
-            "DRIU = bob.ip.binseg.configs.models.driu",
-            "DRIUBN = bob.ip.binseg.configs.models.driubn",
-            "DRIUSSL = bob.ip.binseg.configs.models.driussl",
-            "DRIUBNSSL = bob.ip.binseg.configs.models.driubnssl",
-            "DRIUOD = bob.ip.binseg.configs.models.driuod",
-            "HED = bob.ip.binseg.configs.models.hed",
-            "M2UNet = bob.ip.binseg.configs.models.m2unet",
-            "M2UNetSSL = bob.ip.binseg.configs.models.m2unetssl",
-            "UNet = bob.ip.binseg.configs.models.unet",
-            "ResUNet = bob.ip.binseg.configs.models.resunet",
+            "driu = bob.ip.binseg.configs.models.driu",
+            "driu-bn = bob.ip.binseg.configs.models.driubn",
+            "driu-ssl = bob.ip.binseg.configs.models.driussl",
+            "driu-bn-ssl = bob.ip.binseg.configs.models.driubnssl",
+            "driu-od = bob.ip.binseg.configs.models.driuod",
+            "hed = bob.ip.binseg.configs.models.hed",
+            "m2unet = bob.ip.binseg.configs.models.m2unet",
+            "m2unet-ssl = bob.ip.binseg.configs.models.m2unetssl",
+            "unet = bob.ip.binseg.configs.models.unet",
+            "resunet = bob.ip.binseg.configs.models.resunet",
 
             # datasets
-            "IMAGEFOLDER = bob.ip.binseg.configs.datasets.imagefolder",
+            "imagefolder = bob.ip.binseg.configs.datasets.imagefolder",
 
             # drive dataset (numbers represent target resolution)
-            "DRIVE = bob.ip.binseg.configs.datasets.drive",
-            "DRIVETEST = bob.ip.binseg.configs.datasets.drivetest",
-            "COVD-DRIVE = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544",
-            "COVD-DRIVE_SSL = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544ssldrive",
+            "drive = bob.ip.binseg.configs.datasets.drive",
+            "covd-drive = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544",
+            "covd-drive-ssl = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544ssldrive",
+            "drive-test = bob.ip.binseg.configs.datasets.drivetest",
 
             # stare dataset (numbers represent target resolution)
-            "STARE = bob.ip.binseg.configs.datasets.stare",
-            "STARETEST = bob.ip.binseg.configs.datasets.staretest",
-            "COVD-STARE = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608",
-            "COVD-STARE_SSL = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608sslstare",
+            "stare = bob.ip.binseg.configs.datasets.stare",
+            "covd-stare = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608",
+            "covd-stare-ssl = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608sslstare",
+            "stare-test = bob.ip.binseg.configs.datasets.staretest",
 
             # iostar vessel (numbers represent target resolution)
-            "IOSTAROD = bob.ip.binseg.configs.datasets.iostarod",
-            "IOSTARODTEST = bob.ip.binseg.configs.datasets.iostarodtest",
-            "IOSTARVESSEL = bob.ip.binseg.configs.datasets.iostarvessel",
-            "IOSTARVESSELTEST = bob.ip.binseg.configs.datasets.iostarvesseltest",
-            "COVD-IOSTARVESSEL = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024",
-            "COVD-IOSTARVESSEL_SSL = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024ssliostar",
+            "iostar-vessel = bob.ip.binseg.configs.datasets.iostarvessel",
+            "covd-iostar-vessel = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024",
+            "covd-iostar-vessel-ssl = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024ssliostar",
+            "iostar-vessel-test = bob.ip.binseg.configs.datasets.iostarvesseltest",
+
+            # iostar optic disc
+            "iostarod = bob.ip.binseg.configs.datasets.iostarod",
+            "iostarodtest = bob.ip.binseg.configs.datasets.iostarodtest",
 
             # hrf (numbers represent target resolution)
-            "HRF = bob.ip.binseg.configs.datasets.hrf1168",
-            "HRFTEST = bob.ip.binseg.configs.datasets.hrftest",
-            "COVD-HRF = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168",
-            "COVD-HRF_SSL = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168sslhrf",
+            "hrf = bob.ip.binseg.configs.datasets.hrf1168",
+            "covd-hrf = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168",
+            "covd-hrf-ssl = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168sslhrf",
+            "hrftest-test = bob.ip.binseg.configs.datasets.hrftest",
 
             # chase-db1 (numbers represent target resolution)
-            "CHASEDB1 = bob.ip.binseg.configs.datasets.chasedb1",
-            "CHASEDB1TEST = bob.ip.binseg.configs.datasets.chasedb1test",
-            "COVD-CHASEDB1 = bob.ip.binseg.configs.datasets.drivestareiostarhrf960",
-            "COVD-CHASEDB1_SSL = bob.ip.binseg.configs.datasets.drivestareiostarhrf960sslchase",
+            "chasedb1 = bob.ip.binseg.configs.datasets.chasedb1",
+            "covd-chasedb1 = bob.ip.binseg.configs.datasets.drivestareiostarhrf960",
+            "covd-chasedb1-ssl = bob.ip.binseg.configs.datasets.drivestareiostarhrf960sslchase",
+            "chasedb1-test = bob.ip.binseg.configs.datasets.chasedb1test",
 
             # drionsdb
-            "DRIONSDB = bob.ip.binseg.configs.datasets.drionsdb",
-            "DRIONSDBTEST = bob.ip.binseg.configs.datasets.drionsdbtest",
+            "drionsdb = bob.ip.binseg.configs.datasets.drionsdb",
+            "drionsdb-test = bob.ip.binseg.configs.datasets.drionsdbtest",
 
             # drishtigs
-            "DRISHTIGS1OD = bob.ip.binseg.configs.datasets.dristhigs1od",
-            "DRISHTIGS1ODTEST = bob.ip.binseg.configs.datasets.dristhigs1odtest",
-            "DRISHTIGS1CUP = bob.ip.binseg.configs.datasets.dristhigs1cup",
-            "DRISHTIGS1CUPTEST = bob.ip.binseg.configs.datasets.dristhigs1cuptest",
+            "drishtigs1-od = bob.ip.binseg.configs.datasets.dristhigs1od",
+            "drishtigs1-od-test = bob.ip.binseg.configs.datasets.dristhigs1odtest",
+            "drishtigs1-cup = bob.ip.binseg.configs.datasets.dristhigs1cup",
+            "drishtigs1-cup-test = bob.ip.binseg.configs.datasets.dristhigs1cuptest",
             # refuge
-            "REFUGECUP = bob.ip.binseg.configs.datasets.refugecup",
-            "REFUGECUPTEST = bob.ip.binseg.configs.datasets.refugecuptest",
-            "REFUGEOD = bob.ip.binseg.configs.datasets.refugeod",
-            "REFUGEODTEST = bob.ip.binseg.configs.datasets.refugeodtest",
+            "refuge-cup = bob.ip.binseg.configs.datasets.refugecup",
+            "refuge-cup-test = bob.ip.binseg.configs.datasets.refugecuptest",
+            "refuge-od = bob.ip.binseg.configs.datasets.refugeod",
+            "refuge-od-test = bob.ip.binseg.configs.datasets.refugeodtest",
 
             # rim one r3
-            "RIMONER3CUP = bob.ip.binseg.configs.datasets.rimoner3cup",
-            "RIMONER3CUPTEST = bob.ip.binseg.configs.datasets.rimoner3cuptest",
-            "RIMONER3OD = bob.ip.binseg.configs.datasets.rimoner3od",
-            "RIMONER3ODTEST = bob.ip.binseg.configs.datasets.rimoner3odtest",
+            "rimoner3-cup = bob.ip.binseg.configs.datasets.rimoner3cup",
+            "rimoner3-cup-test = bob.ip.binseg.configs.datasets.rimoner3cuptest",
+            "rimoner3-od = bob.ip.binseg.configs.datasets.rimoner3od",
+            "rimoner3-od-test = bob.ip.binseg.configs.datasets.rimoner3odtest",
         ],
     },
     # check classifiers, add and remove as you see fit
-- 
GitLab