diff --git a/bob/ip/binseg/configs/datasets/chasedb1.py b/bob/ip/binseg/configs/datasets/chasedb1.py
index 605fd0a7a3d41bd99bc0bf9cd5646368990f8b17..f9206da1627c80e2ade275c4516a51d53365aed2 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1.py
@@ -1,6 +1,29 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""CHASE-DB1 (training set) for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both of the
+eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
+The images were captured at 30 degrees FOV camera. The dataset of images are
+characterized by having nonuniform background illumination, poor contrast of
+blood vessels as compared with the background and wider arteriolars that have a
+bright strip running down the centre known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Original resolution (height x width): 960 x 999
+* Configuration resolution: 960 x 960 (after hand-specified crop)
+* Training samples: 20
+* Split reference: [CHASEDB1-2012]_
+"""
+
+
 from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +32,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Crop(0, 18, 960, 960),
+        Crop(0, 18, 960, 960),  #(upper, left, height, width)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/chasedb1test.py b/bob/ip/binseg/configs/datasets/chasedb1test.py
index 17be7aa11eafe25178d55e1b44b8a87ab2e2d5c0..4624543071270b8bf717fad11be9d8132b0a19c3 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1test.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1test.py
@@ -1,6 +1,28 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""CHASE-DB1 (test set) for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both of the
+eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
+The images were captured at 30 degrees FOV camera. The dataset of images are
+characterized by having nonuniform background illumination, poor contrast of
+blood vessels as compared with the background and wider arteriolars that have a
+bright strip running down the centre known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Original resolution (height x width): 960 x 999
+* Configuration resolution: 960 x 960 (after hand-specified crop)
+* Test samples: 8
+* Split reference: [CHASEDB1-2012]_
+"""
+
 from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drionsdb.py b/bob/ip/binseg/configs/datasets/drionsdb.py
index 0a03dadfcb0cfab24da924546ece9d221651b31b..7b1730db3050b623c46da8726f621b504564cbb8 100644
--- a/bob/ip/binseg/configs/datasets/drionsdb.py
+++ b/bob/ip/binseg/configs/datasets/drionsdb.py
@@ -1,6 +1,24 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIONS-DB (training set) for Optic Disc Segmentation
+
+The dataset originates from data collected from 55 patients with glaucoma
+(23.1%) and eye hypertension (76.9%), and random selected from an eye fundus
+image base belonging to the Ophthalmology Service at Miguel Servet Hospital,
+Saragossa (Spain).  It contains 110 eye fundus images with a resolution of 600
+x 400. Two sets of ground-truth optic disc annotations are available. The first
+set is commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [DRIONSDB-2008]_
+* Original resolution (height x width): 400 x 600
+* Configuration resolution: 416 x 608 (after padding)
+* Training samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
+
 from bob.db.drionsdb import Database as DRIONS
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +27,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Pad((4, 8, 4, 8)),
+        Pad((4, 8, 4, 8)),  #(left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/drionsdbtest.py b/bob/ip/binseg/configs/datasets/drionsdbtest.py
index 75bcbb58abf2e56539ec907f3fae5d9ad11e0d81..8b1a0c87feeae25b30dfa13212fa4607d31a7353 100644
--- a/bob/ip/binseg/configs/datasets/drionsdbtest.py
+++ b/bob/ip/binseg/configs/datasets/drionsdbtest.py
@@ -1,6 +1,23 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIONS-DB (test set) for Optic Disc Segmentation
+
+The dataset originates from data collected from 55 patients with glaucoma
+(23.1%) and eye hypertension (76.9%), and random selected from an eye fundus
+image base belonging to the Ophthalmology Service at Miguel Servet Hospital,
+Saragossa (Spain).  It contains 110 eye fundus images with a resolution of 600
+x 400. Two sets of ground-truth optic disc annotations are available. The first
+set is commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [DRIONSDB-2008]_
+* Original resolution (height x width): 400 x 600
+* Configuration resolution: 416 x 608 (after padding)
+* Test samples: 50
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.drionsdb import Database as DRIONS
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1cup.py b/bob/ip/binseg/configs/datasets/dristhigs1cup.py
index a1da30ad0392662cbfdaadd0ace7b011d43a7af2..485a5e686b606a7f2d5adc9dea4e3cff2da4f7fe 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1cup.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1cup.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (training set) for Cup Segmentation
+
+Drishti-GS is a dataset meant for validation of segmenting OD, cup and
+detecting notching.  The images in the Drishti-GS dataset have been collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Training samples: 50
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py b/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
index e35eabf0ab2cbad1ae89b4cc4ef3685df3e3e981..511b52731208d332265e2a3e5432f04b697edde0 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1cuptest.py
@@ -1,5 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
+
+"""DRISHTI-GS1 (test set) for Cup Segmentation
+
+Drishti-GS is a dataset meant for validation of segmenting OD, cup and
+detecting notching.  The images in the Drishti-GS dataset have been collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Test samples: 51
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1od.py b/bob/ip/binseg/configs/datasets/dristhigs1od.py
index 3421ebe63ceb25a1e3cd5243971a41ad161e8cec..5c46540a32bde4da2c33932ea50d78f291cc2d01 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1od.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1od.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (training set) for Optic Disc Segmentation
+
+Drishti-GS is a dataset meant for validation of segmenting OD, cup and
+detecting notching.  The images in the Drishti-GS dataset have been collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Training samples: 50
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/dristhigs1odtest.py b/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
index 1fdc8a28e1b6e52838be96e6601047c5295b06b5..e5e5c36a3dd8ce9384a120b9f40c99cf0a66306e 100644
--- a/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
+++ b/bob/ip/binseg/configs/datasets/dristhigs1odtest.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRISHTI-GS1 (test set) for Optic Disc Segmentation
+
+Drishti-GS is a dataset meant for validation of segmenting OD, cup and
+detecting notching.  The images in the Drishti-GS dataset have been collected
+and annotated by Aravind Eye hospital, Madurai, India. This dataset is of a
+single population as all subjects whose eye images are part of this dataset are
+Indians.
+
+The dataset is divided into two: a training set and a testing set of images.
+Training images (50) are provided with groundtruths for OD and Cup segmentation
+and notching information.
+
+* Reference: [DRISHTIGS1-2014]_
+* Original resolution (height x width): varying (min: 1749 x 2045, max: 1845 x
+  2468)
+* Configuration resolution: 1760 x 2048 (after center cropping)
+* Test samples: 51
+* Split reference: [DRISHTIGS1-2014]_
+"""
+
 from bob.db.drishtigs1 import Database as DRISHTI
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drive.py b/bob/ip/binseg/configs/datasets/drive.py
index 04819dc0ef1b59038a9e752dbc78003e9af78c28..179e2e258aeba8d0dbbdeacb4a719252908ec4d6 100644
--- a/bob/ip/binseg/configs/datasets/drive.py
+++ b/bob/ip/binseg/configs/datasets/drive.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIVE (training set) for Vessel Segmentation
+
+The DRIVE database has been established to enable comparative studies on
+segmentation of blood vessels in retinal images.
+
+* Reference: [DRIVE-2004]_
+* Original resolution (height x width): 584 x 565
+* Configuration resolution: 544 x 544 (after center-crop)
+* Training samples: 20
+* Split reference: [DRIVE-2004]_
+"""
+
 from bob.db.drive import Database as DRIVE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/drivetest.py b/bob/ip/binseg/configs/datasets/drivetest.py
index c6bff8ca07bf2216a322987f04844af878f266fe..2f0aa772c8862f8b485c32e73c9a5ad965071663 100644
--- a/bob/ip/binseg/configs/datasets/drivetest.py
+++ b/bob/ip/binseg/configs/datasets/drivetest.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIVE (test set) for Vessel Segmentation
+
+The DRIVE database has been established to enable comparative studies on
+segmentation of blood vessels in retinal images.
+
+* Reference: [DRIVE-2004]_
+* Original resolution (height x width): 584 x 565
+* Configuration resolution: 544 x 544 (after center-crop)
+* Test samples: 20
+* Split reference: [DRIVE-2004]_
+"""
+
 from bob.db.drive import Database as DRIVE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/hrf1168.py b/bob/ip/binseg/configs/datasets/hrf1168.py
index 4467c02c4f0312a72afbea385c5246f466208ea1..64ebef45239073173a0890133634e2cbc9a5ff0f 100644
--- a/bob/ip/binseg/configs/datasets/hrf1168.py
+++ b/bob/ip/binseg/configs/datasets/hrf1168.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""HRF (training set) for Vessel Segmentation
+
+The database includes 15 images of each healthy, diabetic retinopathy (DR), and
+glaucomatous eyes.  It contains 45 eye fundus images with a resolution of 3504
+x 2336. One set of ground-truth vessel annotations is available.
+
+* Reference: [HRF-2013]_
+* Original resolution (height x width): 2336 x 3504
+* Configuration resolution: 1168 x 1648 (after specific cropping and rescaling)
+* Training samples: 15
+* Split reference: [ORLANDO-2017]_
+"""
+
 from bob.db.hrf import Database as HRF
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,8 +22,8 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Crop(0, 108, 2336, 3296),
-        Resize((1168)),
+        Crop(0, 108, 2336, 3296),  #(upper, left, height, width)
+        Resize((1168)),  # applies to the smaller edge
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/hrftest.py b/bob/ip/binseg/configs/datasets/hrftest.py
index d7c32c2a3b5913a2f6ec3a7a19b17d9ab9599770..d106365a05e1e245a3d3073d1c25ec9489b883a4 100644
--- a/bob/ip/binseg/configs/datasets/hrftest.py
+++ b/bob/ip/binseg/configs/datasets/hrftest.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""HRF (test set) for Vessel Segmentation
+
+The database includes 15 images of each healthy, diabetic retinopathy (DR), and
+glaucomatous eyes.  It contains 45 eye fundus images with a resolution of 3504
+x 2336. One set of ground-truth vessel annotations is available.
+
+* Reference: [HRF-2013]_
+* Original resolution (height x width): 2336 x 3504
+* Configuration resolution: 1168 x 1648 (after specific cropping and rescaling)
+* Test samples: 30
+* Split reference: [ORLANDO-2017]_
+"""
+
 from bob.db.hrf import Database as HRF
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarod.py b/bob/ip/binseg/configs/datasets/iostarod.py
index e043f4166108bfda501210a5770d5b49890406a4..c905a05f3bf667a8295ea5b4f52c27f5648ea59a 100644
--- a/bob/ip/binseg/configs/datasets/iostarod.py
+++ b/bob/ip/binseg/configs/datasets/iostarod.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (training set) for Optic Disc Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Training samples: 20
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarodtest.py b/bob/ip/binseg/configs/datasets/iostarodtest.py
index a4e9b4c8cb4b505a62e21000bdc37bcbf99b4a08..d4650b3ef1e93706fa923542514af851d90e802a 100644
--- a/bob/ip/binseg/configs/datasets/iostarodtest.py
+++ b/bob/ip/binseg/configs/datasets/iostarodtest.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (test set) for Optic Disc Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Test samples: 10
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarvessel.py b/bob/ip/binseg/configs/datasets/iostarvessel.py
index 5fa8ebb66b312462cb321b0c834f354fb0e48a36..9cda4256077ed73eb58bb58be7c17df8d13bfa8a 100644
--- a/bob/ip/binseg/configs/datasets/iostarvessel.py
+++ b/bob/ip/binseg/configs/datasets/iostarvessel.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (training set) for Vessel Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Training samples: 20
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/iostarvesseltest.py b/bob/ip/binseg/configs/datasets/iostarvesseltest.py
index 18ec9f2e1005b549eec7c921b7bbd41912f621c3..54a7993e3df0127f420eec5325c276a751b92d75 100644
--- a/bob/ip/binseg/configs/datasets/iostarvesseltest.py
+++ b/bob/ip/binseg/configs/datasets/iostarvesseltest.py
@@ -1,6 +1,20 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""IOSTAR (test set) for Vessel Segmentation
+
+The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
+1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
+experts working in the field of retinal image analysis. Additionally the
+dataset includes annotations for the optic disc and the artery/vein ratio.
+
+* Reference: [IOSTAR-2016]_
+* Original resolution (height x width): 1024 x 1024
+* Configuration resolution: 1024 x 1024
+* Test samples: 10
+* Split reference: [MEYER-2017]_
+"""
+
 from bob.db.iostar import Database as IOSTAR
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugecup.py b/bob/ip/binseg/configs/datasets/refugecup.py
index 1100f50897b91de4624e59bb24840566fb4115f9..b90400a75cebb9eed0f3980f10cc3cef4e0d4cf0 100644
--- a/bob/ip/binseg/configs/datasets/refugecup.py
+++ b/bob/ip/binseg/configs/datasets/refugecup.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (training set) for Cup Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Training samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice that the data cropping/resizing algorithm applied on training and
+   "validation" data are slightly different and need to be cross-checked.
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -10,7 +30,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 transforms = Compose(
     [
         Resize((1539)),
-        Pad((21, 46, 22, 47)),
+        Pad((21, 46, 22, 47)),  #(left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/refugecuptest.py b/bob/ip/binseg/configs/datasets/refugecuptest.py
index 5e6003079d226bb03ed8d4549741dca6f05ead6a..a24ec43c0998fdb863f916bdd15b5b6fee9c6c2e 100644
--- a/bob/ip/binseg/configs/datasets/refugecuptest.py
+++ b/bob/ip/binseg/configs/datasets/refugecuptest.py
@@ -1,6 +1,31 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (validation set) for Cup Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Validation samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice 2 aspects before using these configurations:
+
+   1. The data cropping/resizing algorithm applied on training and "validation"
+      data are slightly different and need to be cross-checked.
+   2. This is the **validation** set!  The real **test** set is still not
+      integrated to the originating bob.db.refuge package: See
+      https://gitlab.idiap.ch/bob/bob.db.refuge/issues/1
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugeod.py b/bob/ip/binseg/configs/datasets/refugeod.py
index 4435640ea5b741bfa550d5c0a0aaad4f4e22095e..da226c8bd636bccbc28ff1f2952f30e30dc074ce 100644
--- a/bob/ip/binseg/configs/datasets/refugeod.py
+++ b/bob/ip/binseg/configs/datasets/refugeod.py
@@ -1,6 +1,26 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (training set) for Optic Disc Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Training samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice that the data cropping/resizing algorithm applied on training and
+   "validation" data are slightly different and need to be cross-checked.
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/refugeodtest.py b/bob/ip/binseg/configs/datasets/refugeodtest.py
index b77d3e28208a0511a02149ecb8ca006b666c17e2..a14ff84bc2ab67106823b32e49dac6e55c8189bc 100644
--- a/bob/ip/binseg/configs/datasets/refugeodtest.py
+++ b/bob/ip/binseg/configs/datasets/refugeodtest.py
@@ -1,6 +1,31 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""REFUGE (validation set) for Optic Disc Segmentation
+
+The dataset consists of 1200 color fundus photographs, created for a MICCAI
+challenge. The goal of the challenge is to evaluate and compare automated
+algorithms for glaucoma detection and optic disc/cup segmentation on a common
+dataset of retinal fundus images.
+
+* Reference: [REFUGE-2018]_
+* Original resolution (height x width): 2056 x 2124
+* Configuration resolution: 1632 x 1632 (after center cropping)
+* Validation samples: 400
+* Split reference: [REFUGE-2018]_
+
+.. warning::
+
+   Notice 2 aspects before using these configurations:
+
+   1. The data cropping/resizing algorithm applied on training and "validation"
+      data are slightly different and need to be cross-checked.
+   2. This is the **validation** set!  The real **test** set is still not
+      integrated to the originating bob.db.refuge package: See
+      https://gitlab.idiap.ch/bob/bob.db.refuge/issues/1
+
+"""
+
 from bob.db.refuge import Database as REFUGE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3cup.py b/bob/ip/binseg/configs/datasets/rimoner3cup.py
index 0fad0285ee45ba9dc53a98348a5f8401daa80215..361e35f4a82686397de85aa7b72471789dab1c90 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3cup.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3cup.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (training set) for Cup Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Training samples: 99
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3cuptest.py b/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
index 8646533154ef6d09cb1669fefc578f1a4a97e710..c897048f11f1bf57824fd33171551191bb397625 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3cuptest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (test set) for Cup Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Test samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3od.py b/bob/ip/binseg/configs/datasets/rimoner3od.py
index a465342afa2470fd94b3f7aa735d67dd28126b84..60e0a571ce37e74bcd7f3c859cd6c6d8406c314c 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3od.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3od.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (training set) for Optic Disc Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Training samples: 99
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/rimoner3odtest.py b/bob/ip/binseg/configs/datasets/rimoner3odtest.py
index 6e4dd1a6c72ad8cfabf8d7e91f5a63ce6975a90e..0a633fd6de578f088961d7fdbeffe751011d459a 100644
--- a/bob/ip/binseg/configs/datasets/rimoner3odtest.py
+++ b/bob/ip/binseg/configs/datasets/rimoner3odtest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""RIM-ONE r3 (test set) for Optic Disc Segmentation
+
+The dataset contains 159 stereo eye fundus images with a resolution of 2144 x
+1424. The right part of the stereo image is disregarded. Two sets of
+ground-truths for optic disc and optic cup are available. The first set is
+commonly used for training and testing. The second set acts as a “human”
+baseline.
+
+* Reference: [RIMONER3-2015]_
+* Original resolution (height x width): 1424 x 1072
+* Configuration resolution: 1440 x 1088 (after padding)
+* Test samples: 60
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.rimoner3 import Database as RIMONER3
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/datasets/stare.py b/bob/ip/binseg/configs/datasets/stare.py
index 0f93cc788f307a1d4145b9a8247df0e85828f3b4..4e11db035e9e0261d1f87ccf933894a0ad8b279f 100644
--- a/bob/ip/binseg/configs/datasets/stare.py
+++ b/bob/ip/binseg/configs/datasets/stare.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 605 x 700 (height x width). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Original resolution (height x width): 605 x 700
+* Configuration resolution: 608 x 704 (after padding)
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
@@ -9,7 +24,7 @@ from bob.ip.binseg.data.binsegdataset import BinSegDataset
 
 transforms = Compose(
     [
-        Pad((2, 1, 2, 2)),
+        Pad((2, 1, 2, 2)),  #(left, top, right, bottom)
         RandomHFlip(),
         RandomVFlip(),
         RandomRotation(),
diff --git a/bob/ip/binseg/configs/datasets/staretest.py b/bob/ip/binseg/configs/datasets/staretest.py
index ac03e2a7e2ba1def5994e5bbf6325f381db2be01..e296ac7a5086517f2580686e845d111b146e9cf7 100644
--- a/bob/ip/binseg/configs/datasets/staretest.py
+++ b/bob/ip/binseg/configs/datasets/staretest.py
@@ -1,6 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""STARE (test set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 605 x 700 (height x width). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Original resolution (height x width): 605 x 700
+* Configuration resolution: 608 x 704 (after padding)
+* Test samples: 10
+* Split reference: [MANINIS-2016]_
+"""
+
 from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
 from bob.ip.binseg.data.binsegdataset import BinSegDataset
diff --git a/bob/ip/binseg/configs/models/driu.py b/bob/ip/binseg/configs/models/driu.py
index cbf4f41ed129193935d62a2647d654d8a25ab436..cdc9cb89fa3618615dbe46bba61d25f76245fd47 100644
--- a/bob/ip/binseg/configs/models/driu.py
+++ b/bob/ip/binseg/configs/models/driu.py
@@ -1,6 +1,15 @@
 #!/usr/bin/env python
 # coding=utf-8
 
+"""DRIU Network for Vessel Segmentation
+
+Deep Retinal Image Understanding (DRIU), a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driu import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driubn.py b/bob/ip/binseg/configs/models/driubn.py
index 2e69cadc6dcefba74840d36d0e68716a062fa2a9..4e3a4b3c9121b9db5ba64805da2c24d56febbf9c 100644
--- a/bob/ip/binseg/configs/models/driubn.py
+++ b/bob/ip/binseg/configs/models/driubn.py
@@ -1,6 +1,16 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation with Batch Normalization
+
+Deep Retinal Image Understanding (DRIU), a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This implementation includes batch
+normalization as a regularization mechanism.
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driubn import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driubnssl.py b/bob/ip/binseg/configs/models/driubnssl.py
index e04a47a1827e210ee62044d8a0f07227f04ffeae..d7a4d295eb1d7b38c91ad486e4db75096911d046 100644
--- a/bob/ip/binseg/configs/models/driubnssl.py
+++ b/bob/ip/binseg/configs/models/driubnssl.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation using SSL and Batch Normalization
+
+Deep Retinal Image Understanding (DRIU), a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This version of our model includes
+a loss that is suitable for Semi-Supervised Learning (SSL).  This version also
+includes batch normalization as a regularization mechanism.
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driubn import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driuod.py b/bob/ip/binseg/configs/models/driuod.py
index 5fdbf58220b9c24f03f33334e06cc729366f738b..9535c89ab5195b0d3f93971f16ad8cd9d336aab7 100644
--- a/bob/ip/binseg/configs/models/driuod.py
+++ b/bob/ip/binseg/configs/models/driuod.py
@@ -1,6 +1,15 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Optic Disc Segmentation
+
+Deep Retinal Image Understanding (DRIU), a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driuod import build_driuod
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/driussl.py b/bob/ip/binseg/configs/models/driussl.py
index 38c969827cf930e2aa0740b4d2d4a57c0fcbd8e5..19d94f942432956ec5491ba4d80ac952cec74c9e 100644
--- a/bob/ip/binseg/configs/models/driussl.py
+++ b/bob/ip/binseg/configs/models/driussl.py
@@ -1,6 +1,16 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""DRIU Network for Vessel Segmentation using SSL
+
+Deep Retinal Image Understanding (DRIU), a unified framework of retinal image
+analysis that provides both retinal vessel and optic disc segmentation using
+deep Convolutional Neural Networks (CNNs).  This version of our model includes
+a loss that is suitable for Semi-Supervised Learning (SSL).
+
+Reference: [MANINIS-2016]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.driu import build_driu
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/hed.py b/bob/ip/binseg/configs/models/hed.py
index 0a6b3250caff4446629980aa3a5825d73fcb17e0..6a9d7e82f211b2bd6357ad1cc19095765695af66 100644
--- a/bob/ip/binseg/configs/models/hed.py
+++ b/bob/ip/binseg/configs/models/hed.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+
+"""HED Network for Vessel Segmentation
+
+Holistically-nested edge detection (HED), turns pixel-wise edge classification
+into image-to-image prediction by means of a deep learning model that leverages
+fully convolutional neural networks and deeply-supervised nets.
+
+Reference: [XIE-2015]_
+"""
+
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.hed import build_hed
 from bob.ip.binseg.modeling.losses import HEDSoftJaccardBCELogitsLoss
diff --git a/bob/ip/binseg/configs/models/m2unet.py b/bob/ip/binseg/configs/models/m2unet.py
index a1626c9145812783d32f66c8f7c4a49ce20d54b6..2edc0372b5b34c59e5feb9baf8b63b97f547441d 100644
--- a/bob/ip/binseg/configs/models/m2unet.py
+++ b/bob/ip/binseg/configs/models/m2unet.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""MobileNetV2 U-Net Model for Vessel Segmentation
+
+The MobileNetV2 architecture is based on an inverted residual structure where
+the input and output of the residual block are thin bottleneck layers opposite
+to traditional residual models which use expanded representations in the
+input.  MobileNetV2 uses lightweight depthwise convolutions to filter features
+in the intermediate expansion layer.  This model implements a MobileNetV2
+U-Net model, henceforth named M2U-Net, combining the strengths of U-Net for
+medical segmentation applications and the speed of MobileNetV2 networks.
+
+References: [SANDLER-2018]_, [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.m2u import build_m2unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/m2unetssl.py b/bob/ip/binseg/configs/models/m2unetssl.py
index a6ef11a50fd6cb9db7c085ac46ec53bb3ca50ea0..402a84d6072a2b109aa8841794dfcb07c58278c7 100644
--- a/bob/ip/binseg/configs/models/m2unetssl.py
+++ b/bob/ip/binseg/configs/models/m2unetssl.py
@@ -1,6 +1,22 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+
+"""MobileNetV2 U-Net Model for Vessel Segmentation using SSL
+
+The MobileNetV2 architecture is based on an inverted residual structure where
+the input and output of the residual block are thin bottleneck layers opposite
+to traditional residual models which use expanded representations in the
+input.  MobileNetV2 uses lightweight depthwise convolutions to filter features
+in the intermediate expansion layer.  This model implements a MobileNetV2
+U-Net model, henceforth named M2U-Net, combining the strengths of U-Net for
+medical segmentation applications and the speed of MobileNetV2 networks.
+This version of our model includes a loss that is suitable for
+Semi-Supervised Learning (SSL).
+
+References: [SANDLER-2018]_, [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.m2u import build_m2unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/resunet.py b/bob/ip/binseg/configs/models/resunet.py
index ca9bc8b41b631b2d5881bcee370142c55aa1be11..ff7e26e599294b7f52da4bb25a71f5d08205c128 100644
--- a/bob/ip/binseg/configs/models/resunet.py
+++ b/bob/ip/binseg/configs/models/resunet.py
@@ -1,6 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""Residual U-Net for Vessel Segmentation
+
+A semantic segmentation neural network which combines the strengths of residual
+learning and U-Net is proposed for road area extraction.  The network is built
+with residual units and has similar architecture to that of U-Net. The
+benefits of this model are two-fold: first, residual units ease training of
+deep networks. Second, the rich skip connections within the network could
+facilitate information propagation, allowing us to design networks with fewer
+parameters but better performance.
+
+Reference: [ZHANG-2017]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.resunet import build_res50unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/configs/models/unet.py b/bob/ip/binseg/configs/models/unet.py
index 59d96eacbf2de0a2f4e2dab7bafaa8645a8848e6..ee1eddb71417c96feb8a2897b9e0079271bdf83e 100644
--- a/bob/ip/binseg/configs/models/unet.py
+++ b/bob/ip/binseg/configs/models/unet.py
@@ -1,6 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+"""U-Net for Vessel Segmentation
+
+U-Net is a convolutional neural network that was developed for biomedical image
+segmentation at the Computer Science Department of the University of Freiburg,
+Germany.  The network is based on the fully convolutional network (FCN) and its
+architecture was modified and extended to work with fewer training images and
+to yield more precise segmentations.
+
+Reference: [RONNEBERGER-2015]_
+"""
+
 from torch.optim.lr_scheduler import MultiStepLR
 from bob.ip.binseg.modeling.unet import build_unet
 from bob.ip.binseg.utils.model_zoo import modelurls
diff --git a/bob/ip/binseg/script/binseg.py b/bob/ip/binseg/script/binseg.py
index 5bb0ece67128d9020b4f63f126972e996abbfc85..88a995d14a3ab4e928b967212c5843e3595d51d4 100644
--- a/bob/ip/binseg/script/binseg.py
+++ b/bob/ip/binseg/script/binseg.py
@@ -39,11 +39,31 @@ logger = logging.getLogger(__name__)
 @with_plugins(pkg_resources.iter_entry_points("bob.ip.binseg.cli"))
 @click.group(cls=AliasedGroup)
 def binseg():
-    """Binary 2D Fundus Image Segmentation Benchmark commands."""
+    """Binary 2D Image Segmentation Benchmark commands."""
 
 
 # Train
-@binseg.command(entry_point_group="bob.ip.binseg.config", cls=ConfigCommand)
+@binseg.command(entry_point_group="bob.ip.binseg.config", cls=ConfigCommand,
+    epilog="""
+Examples:
+
+  1. Trains a U-Net model with the DRIVE dataset (vessel segmentation):
+
+\b
+     $ bob binseg train -vv unet drive
+
+
+  2. Trains a DRIU model with the STARE dataset, on the CPU:
+
+\b
+     $ bob binseg train -vv --device=cpu driu stare
+
+
+  3. Trains an M2U-Net model on the CHASE-DB1 dataset, using a GPU:
+
+     $ bob binseg train -vv --device=cuda:0 m2unet chasedb1
+"""
+        )
 @click.option(
     "--output-path", "-o", required=True, default="output", cls=ResourceOption
 )
@@ -173,7 +193,7 @@ def train(
 )
 @verbosity_option(cls=ResourceOption)
 def test(model, output_path, device, batch_size, dataset, weight, **kwargs):
-    """ Run inference and evalaute the model performance """
+    """ Run inference and evaluate the model performance """
 
     # PyTorch dataloader
     data_loader = DataLoader(
@@ -420,7 +440,7 @@ def transformfolder(source_path, target_path, transforms, **kwargs):
 )
 @verbosity_option(cls=ResourceOption)
 def predict(model, output_path, device, batch_size, dataset, weight, **kwargs):
-    """ Run inference and evalaute the model performance """
+    """ Run inference and evaluate the model performance """
 
     # PyTorch dataloader
     data_loader = DataLoader(
diff --git a/bob/ip/binseg/script/config.py b/bob/ip/binseg/script/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..72ceb2b4a8e8afeb1834d4523a16e0f508a3f653
--- /dev/null
+++ b/bob/ip/binseg/script/config.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+import shutil
+import inspect
+
+import click
+import pkg_resources
+from click_plugins import with_plugins
+
+from bob.extension.scripts.click_helper import (
+    verbosity_option,
+    AliasedGroup,
+)
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+@click.group(cls=AliasedGroup)
+def config():
+    """Commands for listing, describing and copying configuration resources"""
+    pass
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Lists all configuration resources (type: bob.ip.binseg.config) installed:
+
+\b
+     $ bob binseg config list
+
+
+\b
+  2. Lists all configuration resources and their descriptions (notice this may
+     be slow as it needs to load all modules once):
+
+\b
+     $ bob binseg config list -v
+
+"""
+)
+@verbosity_option()
+def list(verbose):
+    """Lists configuration files installed"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    # all modules with configuration resources
+    modules = set(
+        k.module_name.rsplit(".", 1)[0] for k in entry_points.values()
+    )
+
+    # sort data entries by originating module
+    entry_points_by_module = {}
+    for k in modules:
+        entry_points_by_module[k] = {}
+        for name, ep in entry_points.items():
+            if ep.module_name.startswith(k):
+                entry_points_by_module[k][name] = ep
+
+    for config_type in sorted(entry_points_by_module):
+
+        # calculates the longest config name so we offset the printing
+        longest_name_length = max(
+            len(k) for k in entry_points_by_module[config_type].keys()
+        )
+
+        # set-up printing options
+        print_string = "  %%-%ds   %%s" % (longest_name_length,)
+        # 79 - 4 spaces = 75 (see string above)
+        description_leftover = 75 - longest_name_length
+
+        print("module: %s" % (config_type,))
+        for name in sorted(entry_points_by_module[config_type]):
+            ep = entry_points[name]
+
+            if verbose >= 1:
+                module = ep.load()
+                doc = inspect.getdoc(module)
+                if doc is not None:
+                    summary = doc.split("\n\n")[0]
+                else:
+                    summary = "<DOCSTRING NOT AVAILABLE>"
+            else:
+                summary = ""
+
+            summary = (
+                (summary[: (description_leftover - 3)] + "...")
+                if len(summary) > (description_leftover - 3)
+                else summary
+            )
+
+            print(print_string % (name, summary))
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Describes the DRIVE (training) dataset configuration:
+
+\b
+     $ bob binseg config describe drive
+
+
+\b
+  2. Describes the DRIVE (training) dataset configuration and lists its
+     contents:
+
+\b
+     $ bob binseg config describe drive -v
+
+"""
+)
+@click.argument(
+    "name", required=True, nargs=-1,
+)
+@verbosity_option()
+def describe(name, verbose):
+    """Describes a specific configuration file"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    for k in name:
+        if k not in entry_points:
+            logger.error("Cannot find configuration resource '%s'", k)
+            continue
+        ep = entry_points[k]
+        print("Configuration: %s" % (ep.name,))
+        print("Python Module: %s" % (ep.module_name,))
+        print("")
+        mod = ep.load()
+
+        if verbose >= 1:
+            fname = inspect.getfile(mod)
+            print("Contents:")
+            with open(fname, "r") as f:
+                print(f.read())
+        else:  #only output documentation
+            print("Documentation:")
+            print(inspect.getdoc(mod))
+
+
+@config.command(
+    epilog="""
+\b
+Examples:
+
+\b
+  1. Makes a copy of one of the stock configuration files locally, so it can be
+     adapted:
+
+\b
+     $ bob binseg config copy drive -vvv newdataset.py
+
+
+"""
+)
+@click.argument(
+    "source", required=True, nargs=1,
+)
+@click.argument(
+    "destination", required=True, nargs=1,
+)
+@verbosity_option()
+def copy(source, destination, verbose):
+    """Copies a specific configuration resource so it can be modified locally"""
+
+    entry_points = pkg_resources.iter_entry_points("bob.ip.binseg.config")
+    entry_points = dict([(k.name, k) for k in entry_points])
+
+    if source not in entry_points:
+        logger.error("Cannot find configuration resource '%s'", source)
+        return 1
+    ep = entry_points[source]
+    mod = ep.load()
+    src_name = inspect.getfile(mod)
+    logger.info('cp %s -> %s' % (src_name, destination))
+    shutil.copyfile(src_name, destination)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 57b6a20a123908e56f736ab9e47dc7b4e01cd0c5..2a29081e489583768a501696aa9f6398c7c03a11 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -47,6 +47,15 @@ test:
   commands:
     # test commands ("script" entry-points) from your package here
     - bob binseg --help
+    - bob binseg config --help
+    - bob binseg config list --help
+    - bob binseg config list
+    - bob binseg config list -v
+    - bob binseg config describe --help
+    - bob binseg config describe drive
+    - bob binseg config describe drive -v
+    - bob binseg config copy --help
+    - bob binseg config copy drive /tmp/test.py
     - bob binseg compare --help
     - bob binseg evalpred --help
     - bob binseg gridtable --help
diff --git a/doc/api.rst b/doc/api.rst
index 5e8366929f33edfd293512f828c4f8d20e71eef7..f9dbddb80198b94a84587707959536a2cf91cc89 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -93,6 +93,8 @@ Scripts
    bob.ip.binseg.script.binseg
 
 
+.. _bob.ip.binseg.configs:
+
 Preset Configurations
 ---------------------
 
diff --git a/doc/datasets.rst b/doc/datasets.rst
index bd77ab1fa1c908a9ccfe3818f1f532b93591f6d7..54b45744c90439bfd5eff9495e271c6d6103f511 100644
--- a/doc/datasets.rst
+++ b/doc/datasets.rst
@@ -13,7 +13,6 @@ to generate iterators for training and testing.
 
 
 .. list-table::
-   :header-rows: 1
 
    * - Dataset
      - Reference
diff --git a/doc/index.rst b/doc/index.rst
index 04c2d851a85f5931283eae34a05d1df1cf5f2a12..e8fe3faa2e78d57d63114ea2a9367a48964c357b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -42,15 +42,12 @@ Users Guide
    :maxdepth: 2
 
    setup
-   datasets
-   training
-   evaluation
+   usage
    benchmarkresults
    covdresults
-   plotting
-   visualization
    acknowledgements
    references
+   datasets
    api
 
 
diff --git a/doc/references.rst b/doc/references.rst
index 97ad5358df6bdaea2cd787a29c93475f98e46100..6b942813843865e8d8e9cc87d15952c3a3ad3af4 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -89,3 +89,18 @@
 .. [LIN-2018] *J. Lin*, **pytorch-mobilenet-v2: A PyTorch implementation of
    MobileNetV2**, 2018.  Last accessed: 21.03.2020.
    https://github.com/tonylins/pytorch-mobilenet-v2
+
+.. [XIE-2015] *S. Xie and Z. Tu*, **Holistically-Nested Edge Detection**, 2015
+   IEEE International Conference on Computer Vision (ICCV), Santiago, 2015, pp.
+   1395-1403.  https://doi.org/10.1109/ICCV.2015.164
+
+.. [RONNEBERGER-2015] *O. Ronneberger, P. Fischer, T. Brox*, **U-Net:
+   Convolutional Networks for Biomedical Image Segmentation**, 2015.
+   https://arxiv.org/abs/1505.04597
+
+.. [ZHANG-2017] *Z. Zhang, Q. Liu, Y. Wang*, **Road Extraction by Deep Residual
+   U-Net**, 2017. https://arxiv.org/abs/1711.10684
+
+.. [SANDLER-2018] *M. Sandler, A. Howard, M. Zhu, A. Zhmoginov, L.-C.h Chen*,
+   **MobileNetV2: Inverted Residuals and Linear Bottlenecks**, 2018.
+   https://arxiv.org/abs/1801.04381
diff --git a/doc/training.rst b/doc/training.rst
index e23d1bfcad2c592900cb0e1fc5742b8db828f74e..30e483f44232425fa4b1aa024be9edecb96739a7 100644
--- a/doc/training.rst
+++ b/doc/training.rst
@@ -1,14 +1,20 @@
 .. -*- coding: utf-8 -*-
-.. _bob.ip.binseg.training:
 
+.. _bob.ip.binseg.training:
 
 ==========
  Training
 ==========
 
+To train a new FCN, use the command-line interface (CLI) application ``bob
+binseg train``, available on your prompt.  To use this CLI, you must define
+the input dataset that will be used to train the FCN, as well as the type of
+model that will be trained.  You may issue ``bob binseg train --help`` for a
+help message containing more detailed instructions.
+
 To replicate our results, use our main application ``bob binseg train``
-followed by the model configuration, and dataset configuration files.  Use ``bob
-binseg train --help`` for more information.
+followed by the model configuration, and dataset configuration files, and/or
+command-line options.  Use ``bob binseg train --help`` for more information.
 
 .. note::
 
diff --git a/doc/usage.rst b/doc/usage.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c8882e4505b555450eda8d7f4d6da9ac5059575
--- /dev/null
+++ b/doc/usage.rst
@@ -0,0 +1,43 @@
+.. -*- coding: utf-8 -*-
+
+.. _bob.ip.binseg.usage:
+
+==================
+ Usage Guidelines
+==================
+
+This package supports a fully reproducible research experimentation cycle for
+semantic binary segmentation with support for the following activities:
+
+* Training: Images are fed to a Fully Convolutional Deep Neural Network (FCN),
+  that is trained to reconstruct annotations (pre-segmented binary maps),
+  automatically, via error back propagation.  The objective of this phase is to
+  produce an FCN model.
+* Inference: The FCN is used to generate vessel map predictions.
+* Evaluation: Vessel map predictions are used to evaluate FCN performance
+  against test data, generate ROC curves or visualize prediction results
+  overlaid on the original raw images.
+
+Each application is implemented as a command-line utility, that is configurable
+using :ref:`Bob's extensible configuration framework
+<bob.extension.framework>`.  In essence, each command-line option may be
+provided as a variable with the same name in a Python file.  Each file may
+combine any number of variables that are pertinent to an application.  We
+provide a number of :ref:`preset configuration files <bob.ip.binseg.configs>`
+that can be used in one or more of the activities described above.  Our
+command-line framework allows you to refer to these preset configuration files
+using special names (a.k.a. "resources"), that procure and load these for you
+automatically.  Aside preset configuration files, you may also create your own
+to extend existing baseline experiments.
+
+
+.. toctree::
+   :maxdepth: 2
+
+   training
+   evaluation
+   plotting
+   visualization
+
+
+.. include:: links.rst
diff --git a/setup.py b/setup.py
index 51f4125a0323f041fff3ac5085166d9824ce65dc..0b3b97be06d045c55006747ee89e1cccc532af53 100644
--- a/setup.py
+++ b/setup.py
@@ -39,77 +39,80 @@ setup(
             "train = bob.ip.binseg.script.binseg:train",
             "test = bob.ip.binseg.script.binseg:test",
             "visualize = bob.ip.binseg.script.binseg:visualize",
+            "config = bob.ip.binseg.script.config:config",
         ],
         # bob train configurations
         "bob.ip.binseg.config": [
 
             # models
-            "DRIU = bob.ip.binseg.configs.models.driu",
-            "DRIUBN = bob.ip.binseg.configs.models.driubn",
-            "DRIUSSL = bob.ip.binseg.configs.models.driussl",
-            "DRIUBNSSL = bob.ip.binseg.configs.models.driubnssl",
-            "DRIUOD = bob.ip.binseg.configs.models.driuod",
-            "HED = bob.ip.binseg.configs.models.hed",
-            "M2UNet = bob.ip.binseg.configs.models.m2unet",
-            "M2UNetSSL = bob.ip.binseg.configs.models.m2unetssl",
-            "UNet = bob.ip.binseg.configs.models.unet",
-            "ResUNet = bob.ip.binseg.configs.models.resunet",
+            "driu = bob.ip.binseg.configs.models.driu",
+            "driu-bn = bob.ip.binseg.configs.models.driubn",
+            "driu-ssl = bob.ip.binseg.configs.models.driussl",
+            "driu-bn-ssl = bob.ip.binseg.configs.models.driubnssl",
+            "driu-od = bob.ip.binseg.configs.models.driuod",
+            "hed = bob.ip.binseg.configs.models.hed",
+            "m2unet = bob.ip.binseg.configs.models.m2unet",
+            "m2unet-ssl = bob.ip.binseg.configs.models.m2unetssl",
+            "unet = bob.ip.binseg.configs.models.unet",
+            "resunet = bob.ip.binseg.configs.models.resunet",
 
             # datasets
-            "IMAGEFOLDER = bob.ip.binseg.configs.datasets.imagefolder",
+            "imagefolder = bob.ip.binseg.configs.datasets.imagefolder",
 
             # drive dataset (numbers represent target resolution)
-            "DRIVE = bob.ip.binseg.configs.datasets.drive",
-            "DRIVETEST = bob.ip.binseg.configs.datasets.drivetest",
-            "COVD-DRIVE = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544",
-            "COVD-DRIVE_SSL = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544ssldrive",
+            "drive = bob.ip.binseg.configs.datasets.drive",
+            "covd-drive = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544",
+            "covd-drive-ssl = bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544ssldrive",
+            "drive-test = bob.ip.binseg.configs.datasets.drivetest",
 
             # stare dataset (numbers represent target resolution)
-            "STARE = bob.ip.binseg.configs.datasets.stare",
-            "STARETEST = bob.ip.binseg.configs.datasets.staretest",
-            "COVD-STARE = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608",
-            "COVD-STARE_SSL = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608sslstare",
+            "stare = bob.ip.binseg.configs.datasets.stare",
+            "covd-stare = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608",
+            "covd-stare-ssl = bob.ip.binseg.configs.datasets.drivechasedb1iostarhrf608sslstare",
+            "stare-test = bob.ip.binseg.configs.datasets.staretest",
 
             # iostar vessel (numbers represent target resolution)
-            "IOSTAROD = bob.ip.binseg.configs.datasets.iostarod",
-            "IOSTARODTEST = bob.ip.binseg.configs.datasets.iostarodtest",
-            "IOSTARVESSEL = bob.ip.binseg.configs.datasets.iostarvessel",
-            "IOSTARVESSELTEST = bob.ip.binseg.configs.datasets.iostarvesseltest",
-            "COVD-IOSTARVESSEL = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024",
-            "COVD-IOSTARVESSEL_SSL = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024ssliostar",
+            "iostar-vessel = bob.ip.binseg.configs.datasets.iostarvessel",
+            "covd-iostar-vessel = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024",
+            "covd-iostar-vessel-ssl = bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024ssliostar",
+            "iostar-vessel-test = bob.ip.binseg.configs.datasets.iostarvesseltest",
+
+            # iostar optic disc
+            "iostarod = bob.ip.binseg.configs.datasets.iostarod",
+            "iostarodtest = bob.ip.binseg.configs.datasets.iostarodtest",
 
             # hrf (numbers represent target resolution)
-            "HRF = bob.ip.binseg.configs.datasets.hrf1168",
-            "HRFTEST = bob.ip.binseg.configs.datasets.hrftest",
-            "COVD-HRF = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168",
-            "COVD-HRF_SSL = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168sslhrf",
+            "hrf = bob.ip.binseg.configs.datasets.hrf1168",
+            "covd-hrf = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168",
+            "covd-hrf-ssl = bob.ip.binseg.configs.datasets.drivestarechasedb1iostar1168sslhrf",
+            "hrftest-test = bob.ip.binseg.configs.datasets.hrftest",
 
             # chase-db1 (numbers represent target resolution)
-            "CHASEDB1 = bob.ip.binseg.configs.datasets.chasedb1",
-            "CHASEDB1TEST = bob.ip.binseg.configs.datasets.chasedb1test",
-            "COVD-CHASEDB1 = bob.ip.binseg.configs.datasets.drivestareiostarhrf960",
-            "COVD-CHASEDB1_SSL = bob.ip.binseg.configs.datasets.drivestareiostarhrf960sslchase",
+            "chasedb1 = bob.ip.binseg.configs.datasets.chasedb1",
+            "covd-chasedb1 = bob.ip.binseg.configs.datasets.drivestareiostarhrf960",
+            "covd-chasedb1-ssl = bob.ip.binseg.configs.datasets.drivestareiostarhrf960sslchase",
+            "chasedb1-test = bob.ip.binseg.configs.datasets.chasedb1test",
 
             # drionsdb
-            "DRIONSDB = bob.ip.binseg.configs.datasets.drionsdb",
-            "DRIONSDBTEST = bob.ip.binseg.configs.datasets.drionsdbtest",
+            "drionsdb = bob.ip.binseg.configs.datasets.drionsdb",
+            "drionsdb-test = bob.ip.binseg.configs.datasets.drionsdbtest",
 
             # drishtigs
-            "DRISHTIGS1OD = bob.ip.binseg.configs.datasets.dristhigs1od",
-            "DRISHTIGS1ODTEST = bob.ip.binseg.configs.datasets.dristhigs1odtest",
-            "DRISHTIGS1CUP = bob.ip.binseg.configs.datasets.dristhigs1cup",
-            "DRISHTIGS1CUPTEST = bob.ip.binseg.configs.datasets.dristhigs1cuptest",
+            "drishtigs1-od = bob.ip.binseg.configs.datasets.dristhigs1od",
+            "drishtigs1-od-test = bob.ip.binseg.configs.datasets.dristhigs1odtest",
+            "drishtigs1-cup = bob.ip.binseg.configs.datasets.dristhigs1cup",
+            "drishtigs1-cup-test = bob.ip.binseg.configs.datasets.dristhigs1cuptest",
             # refuge
-            "REFUGECUP = bob.ip.binseg.configs.datasets.refugecup",
-            "REFUGECUPTEST = bob.ip.binseg.configs.datasets.refugecuptest",
-            "REFUGEOD = bob.ip.binseg.configs.datasets.refugeod",
-            "REFUGEODTEST = bob.ip.binseg.configs.datasets.refugeodtest",
+            "refuge-cup = bob.ip.binseg.configs.datasets.refugecup",
+            "refuge-cup-test = bob.ip.binseg.configs.datasets.refugecuptest",
+            "refuge-od = bob.ip.binseg.configs.datasets.refugeod",
+            "refuge-od-test = bob.ip.binseg.configs.datasets.refugeodtest",
 
             # rim one r3
-            "RIMONER3CUP = bob.ip.binseg.configs.datasets.rimoner3cup",
-            "RIMONER3CUPTEST = bob.ip.binseg.configs.datasets.rimoner3cuptest",
-            "RIMONER3OD = bob.ip.binseg.configs.datasets.rimoner3od",
-            "RIMONER3ODTEST = bob.ip.binseg.configs.datasets.rimoner3odtest",
+            "rimoner3-cup = bob.ip.binseg.configs.datasets.rimoner3cup",
+            "rimoner3-cup-test = bob.ip.binseg.configs.datasets.rimoner3cuptest",
+            "rimoner3-od = bob.ip.binseg.configs.datasets.rimoner3od",
+            "rimoner3-od-test = bob.ip.binseg.configs.datasets.rimoner3odtest",
         ],
     },
     # check classifiers, add and remove as you see fit