From b699ecf5d3b3cbedf624b90267a975762806d2af Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Fri, 10 Apr 2020 10:57:58 +0200
Subject: [PATCH] Remove use of bob.db.chasedb1

---
 bob/ip/binseg/configs/datasets/chasedb1.py    |  21 ++--
 .../binseg/configs/datasets/chasedb11024.py   |  19 +--
 .../binseg/configs/datasets/chasedb11168.py   |  19 +--
 bob/ip/binseg/configs/datasets/chasedb1544.py |  19 +--
 bob/ip/binseg/configs/datasets/chasedb1608.py |  19 +--
 .../binseg/configs/datasets/chasedb1test.py   |  25 ++--
 .../datasets/drivestareiostarhrf960.py        |  19 +--
 .../drivestareiostarhrf960sslchase.py         |  80 ++++++------
 .../starechasedb1iostarhrf544ssldrive.py      |   2 +-
 bob/ip/binseg/data/chasedb1/__init__.py       |  57 +++++++++
 bob/ip/binseg/data/chasedb1/default.json      | 118 ++++++++++++++++++
 .../data/chasedb1/second-annotation.json      | 118 ++++++++++++++++++
 bob/ip/binseg/data/chasedb1/test.py           |  92 ++++++++++++++
 bob/ip/binseg/data/drive/test.py              |   6 +-
 bob/ip/binseg/data/stare/test.py              |   8 +-
 conda/meta.yaml                               |   1 -
 doc/api.rst                                   |   2 +-
 17 files changed, 482 insertions(+), 143 deletions(-)
 create mode 100644 bob/ip/binseg/data/chasedb1/__init__.py
 create mode 100644 bob/ip/binseg/data/chasedb1/default.json
 create mode 100644 bob/ip/binseg/data/chasedb1/second-annotation.json
 create mode 100644 bob/ip/binseg/data/chasedb1/test.py

diff --git a/bob/ip/binseg/configs/datasets/chasedb1.py b/bob/ip/binseg/configs/datasets/chasedb1.py
index f9206da1..2b09a757 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding=utf-8
 
 """CHASE-DB1 (training set) for Vessel Segmentation
 
@@ -19,18 +19,12 @@ bright strip running down the centre known as the central vessel reflex.
 * Reference: [CHASEDB1-2012]_
 * Original resolution (height x width): 960 x 999
 * Configuration resolution: 960 x 960 (after hand-specified crop)
-* Training samples: 20
+* Training samples: 8
 * Split reference: [CHASEDB1-2012]_
 """
 
-
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         Crop(0, 18, 960, 960),  #(upper, left, height, width)
         RandomHFlip(),
@@ -41,8 +35,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
+        transform=_transforms)
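
For reference, the ``dataset`` object exported by this configuration is a regular
PyTorch dataset, so it can be fed straight into a ``torch.utils.data.DataLoader``.
A minimal consumption sketch (the loader parameters are illustrative only, and the
raw CHASE-DB1 directory must be configured through the
``bob.ip.binseg.chasedb1.datadir`` rc variable introduced further down)::

    import torch.utils.data
    from bob.ip.binseg.configs.datasets.chasedb1 import dataset

    print(len(dataset))  # 8 training samples in the "default" protocol
    loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
    for keys, images, labels in loader:
        # each sample is a (key, image, label) triple produced by
        # DelayedSample2TorchDataset after the transforms above are applied
        break
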
diff --git a/bob/ip/binseg/configs/datasets/chasedb11024.py b/bob/ip/binseg/configs/datasets/chasedb11024.py
index 27b7ab37..be005fd3 100644
--- a/bob/ip/binseg/configs/datasets/chasedb11024.py
+++ b/bob/ip/binseg/configs/datasets/chasedb11024.py
@@ -1,13 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         RandomRotation(),
         Crop(0, 18, 960, 960),
@@ -19,8 +11,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/chasedb11168.py b/bob/ip/binseg/configs/datasets/chasedb11168.py
index b85726e4..92ffccfa 100644
--- a/bob/ip/binseg/configs/datasets/chasedb11168.py
+++ b/bob/ip/binseg/configs/datasets/chasedb11168.py
@@ -1,13 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         RandomRotation(),
         Crop(140, 18, 680, 960),
@@ -19,8 +11,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/chasedb1544.py b/bob/ip/binseg/configs/datasets/chasedb1544.py
index 8ea0a9c6..16639aef 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1544.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1544.py
@@ -1,13 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         Resize(544),
         Crop(0, 12, 544, 544),
@@ -19,8 +11,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/chasedb1608.py b/bob/ip/binseg/configs/datasets/chasedb1608.py
index 1800574b..adfd9777 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1608.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1608.py
@@ -1,13 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         RandomRotation(),
         CenterCrop((829, 960)),
@@ -19,8 +11,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/chasedb1test.py b/bob/ip/binseg/configs/datasets/chasedb1test.py
index 46245430..0504acb3 100644
--- a/bob/ip/binseg/configs/datasets/chasedb1test.py
+++ b/bob/ip/binseg/configs/datasets/chasedb1test.py
@@ -19,20 +19,19 @@ bright strip running down the centre known as the central vessel reflex.
 * Reference: [CHASEDB1-2012]_
 * Original resolution (height x width): 960 x 999
 * Configuration resolution: 960 x 960 (after hand-specified crop)
-* Test samples: 8
+* Test samples: 20
 * Split reference: [CHASEDB1-2012]_
 """
 
-from bob.db.chasedb1 import Database as CHASEDB1
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose([Crop(0, 18, 960, 960), ToTensor()])
-
-# bob.db.dataset init
-bobdb = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="test", transform=transforms)
+_transforms = Compose(
+    [
+        Crop(0, 18, 960, 960),  #(upper, left, height, width)
+        ToTensor(),
+    ]
+)
+
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
+dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["test"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/drivestareiostarhrf960.py b/bob/ip/binseg/configs/datasets/drivestareiostarhrf960.py
index 5ea90c4d..c455fad0 100644
--- a/bob/ip/binseg/configs/datasets/drivestareiostarhrf960.py
+++ b/bob/ip/binseg/configs/datasets/drivestareiostarhrf960.py
@@ -1,10 +1,13 @@
-from bob.ip.binseg.configs.datasets.drive960 import dataset as drive
-from bob.ip.binseg.configs.datasets.stare960 import dataset as stare
-from bob.ip.binseg.configs.datasets.hrf960 import dataset as hrf
-from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as iostar
-import torch
+#!/usr/bin/env python
+# coding=utf-8
 
-#### Config ####
+"""COVD-CHASEDB1 (training set) for Vessel Segmentation
+"""
 
-# PyTorch dataset
-dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, iostar])
+from bob.ip.binseg.configs.datasets.drive960 import dataset as _drive
+from bob.ip.binseg.configs.datasets.stare960 import dataset as _stare
+from bob.ip.binseg.configs.datasets.hrf960 import dataset as _hrf
+from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as _iostar
+
+import torch.utils.data
+dataset = torch.utils.data.ConcatDataset([_drive, _stare, _hrf, _iostar])
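
For reference, ``torch.utils.data.ConcatDataset`` simply chains the four training
sets back to back: the combined length is the sum of the individual lengths and
indices run through ``_drive`` first, then ``_stare``, ``_hrf`` and ``_iostar``.
A quick, purely illustrative sketch::

    import torch.utils.data
    from bob.ip.binseg.configs.datasets.drivestareiostarhrf960 import dataset

    assert isinstance(dataset, torch.utils.data.ConcatDataset)
    print(len(dataset))  # sum of the DRIVE, STARE, HRF and IOSTAR training sets
    first = dataset[0]   # comes from the drive960 configuration
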
diff --git a/bob/ip/binseg/configs/datasets/drivestareiostarhrf960sslchase.py b/bob/ip/binseg/configs/datasets/drivestareiostarhrf960sslchase.py
index 2aea4222..e01d70e8 100644
--- a/bob/ip/binseg/configs/datasets/drivestareiostarhrf960sslchase.py
+++ b/bob/ip/binseg/configs/datasets/drivestareiostarhrf960sslchase.py
@@ -1,42 +1,38 @@
-from bob.ip.binseg.configs.datasets.drive960 import dataset as drive
-from bob.ip.binseg.configs.datasets.stare960 import dataset as stare
-from bob.ip.binseg.configs.datasets.hrf960 import dataset as hrf
-from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as iostar
-from bob.db.chasedb1 import Database as CHASE
-from bob.ip.binseg.data.transforms import *
-import torch
-from bob.ip.binseg.data.binsegdataset import (
-    BinSegDataset,
-    SSLBinSegDataset,
-    UnLabeledBinSegDataset,
-)
-
-
-#### Config ####
-
-# PyTorch dataset
-labeled_dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, iostar])
-
-#### Unlabeled CHASE TRAIN ####
-unlabeled_transforms = Compose(
-    [
-        Crop(0, 18, 960, 960),
-        RandomHFlip(),
-        RandomVFlip(),
-        RandomRotation(),
-        ColorJitter(),
-        ToTensor(),
-    ]
-)
-
-# bob.db.dataset init
-chasebobdb = CHASE(protocol="default")
-
-# PyTorch dataset
-unlabeled_dataset = UnLabeledBinSegDataset(
-    chasebobdb, split="train", transform=unlabeled_transforms
-)
-
-# SSL Dataset
-
-dataset = SSLBinSegDataset(labeled_dataset, unlabeled_dataset)
+#!/usr/bin/env python
+# coding=utf-8
+
+"""COVD-CHASEDB1 + SSL (training set) for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both eyes of
+each child were recorded with a hand-held Nidek NM-200-D fundus camera. The
+images were captured at a 30-degree field of view (FOV). The images are
+characterized by nonuniform background illumination, poor contrast of the blood
+vessels compared with the background, and wider arterioles that have a bright
+strip running down the centre, known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Configuration resolution (height x width): 960 x 960
+
+The dataset available in this file is composed of DRIVE, STARE, IOSTAR vessel
+and HRF (with annotated samples), and CHASE-DB1 without labels.
+"""
+
+# Labelled bits
+import torch.utils.data
+from bob.ip.binseg.configs.datasets.drive960 import dataset as _drive
+from bob.ip.binseg.configs.datasets.stare960 import dataset as _stare
+from bob.ip.binseg.configs.datasets.hrf960 import dataset as _hrf
+from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as _iostar
+_labelled = torch.utils.data.ConcatDataset([_drive, _stare, _hrf, _iostar])
+
+# Use CHASE-DB1 without labels in this setup
+from bob.ip.binseg.configs.datasets.chasedb1 import dataset as _unlabelled
+
+from bob.ip.binseg.data.utils import SSLDataset
+dataset = SSLDataset(_labelled, _unlabelled)
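
The resulting ``dataset`` combines the labelled COVD-CHASEDB1 composite (DRIVE,
STARE, HRF and IOSTAR at 960x960) with the 8 unlabelled CHASE-DB1 training
images.  A minimal wiring sketch (how labelled and unlabelled samples are
combined per item is defined by ``bob.ip.binseg.data.utils.SSLDataset``, not by
this configuration; the loader parameters are illustrative)::

    import torch.utils.data
    from bob.ip.binseg.configs.datasets.drivestareiostarhrf960sslchase import dataset

    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)
    sample = next(iter(loader))  # consumed by the semi-supervised training engine
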
diff --git a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
index 6aac6bbb..63385586 100644
--- a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
+++ b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
@@ -22,7 +22,7 @@ from bob.ip.binseg.configs.datasets.hrf544 import dataset as _hrf
 _labelled = torch.utils.data.ConcatDataset([_stare, _chase, _iostar, _hrf])
 
 # Use DRIVE without labels in this setup
-from .drive import dataset as _unlabelled
+from bob.ip.binseg.configs.datasets.drive import dataset as _unlabelled
 
 from bob.ip.binseg.data.utils import SSLDataset
 dataset = SSLDataset(_labelled, _unlabelled)
diff --git a/bob/ip/binseg/data/chasedb1/__init__.py b/bob/ip/binseg/data/chasedb1/__init__.py
new file mode 100644
index 00000000..3ab9e993
--- /dev/null
+++ b/bob/ip/binseg/data/chasedb1/__init__.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+"""CHASE-DB1 dataset for Vessel Segmentation
+
+The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
+school children. This database is a part of the Child Heart and Health Study in
+England (CHASE), a cardiovascular health survey in 200 primary schools in
+London, Birmingham, and Leicester. The ocular imaging was carried out in
+46 schools and demonstrated associations between retinal vessel tortuosity and
+early risk factors for cardiovascular disease in over 1000 British primary
+school children of different ethnic origin. The retinal images of both eyes of
+each child were recorded with a hand-held Nidek NM-200-D fundus camera. The
+images were captured at a 30-degree field of view (FOV). The images are
+characterized by nonuniform background illumination, poor contrast of the blood
+vessels compared with the background, and wider arterioles that have a bright
+strip running down the centre, known as the central vessel reflex.
+
+* Reference: [CHASEDB1-2012]_
+* Original resolution (height x width): 960 x 999
+* Split reference: [CHASEDB1-2012]_
+* Protocol ``default``:
+
+  * Training samples: 8 (including labels from annotator "1stHO")
+  * Test samples: 20 (including labels from annotator "1stHO")
+
+* Protocol ``second-annotation``:
+
+  * Training samples: 8 (including labels from annotator "2ndHO")
+  * Test samples: 20 (including labels from annotator "2ndHO")
+
+"""
+
+import os
+import pkg_resources
+
+import bob.extension
+
+from ..jsondataset import JSONDataset
+from ..loader import load_pil_rgb, load_pil_1
+
+_protocols = [
+        pkg_resources.resource_filename(__name__, "default.json"),
+        pkg_resources.resource_filename(__name__, "second-annotation.json"),
+        ]
+
+_root_path = bob.extension.rc.get('bob.ip.binseg.chasedb1.datadir',
+        os.path.realpath(os.curdir))
+
+def _loader(s):
+    return dict(
+            data=load_pil_rgb(s["data"]),
+            label=load_pil_1(s["label"]),
+            )
+
+dataset = JSONDataset(protocols=_protocols, root_path=_root_path, loader=_loader)
+"""CHASE-DB1 dataset object"""
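
Raw CHASE-DB1 files are now located through the ``bob.ip.binseg.chasedb1.datadir``
rc variable (read via ``bob.extension.rc``) instead of going through
``bob.db.chasedb1``.  A short inspection sketch, assuming the variable points to
a local copy of the original CHASE-DB1 download (the path is a placeholder and
may be set, e.g., with ``bob config set bob.ip.binseg.chasedb1.datadir
/path/to/chasedb1``)::

    from bob.ip.binseg.data.chasedb1 import dataset

    subsets = dataset.subsets("default")
    print(sorted(subsets.keys()))                        # ['test', 'train']
    print(len(subsets["train"]), len(subsets["test"]))   # 8 20

    # samples are delayed: files are only read when ``.data`` is accessed
    sample = subsets["train"][0]
    print(sample.key)                   # starts with "Image_", per the tests below
    image = sample.data["data"]         # RGB PIL image, 999 x 960 pixels
    label = sample.data["label"]        # binary ("1") PIL image, "1stHO" annotation
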
diff --git a/bob/ip/binseg/data/chasedb1/default.json b/bob/ip/binseg/data/chasedb1/default.json
new file mode 100644
index 00000000..e7e6761b
--- /dev/null
+++ b/bob/ip/binseg/data/chasedb1/default.json
@@ -0,0 +1,118 @@
+{
+ "train": [
+  [
+   "Image_11L.jpg",
+   "Image_11L_1stHO.png"
+  ],
+  [
+   "Image_11R.jpg",
+   "Image_11R_1stHO.png"
+  ],
+  [
+   "Image_12L.jpg",
+   "Image_12L_1stHO.png"
+  ],
+  [
+   "Image_12R.jpg",
+   "Image_12R_1stHO.png"
+  ],
+  [
+   "Image_13L.jpg",
+   "Image_13L_1stHO.png"
+  ],
+  [
+   "Image_13R.jpg",
+   "Image_13R_1stHO.png"
+  ],
+  [
+   "Image_14L.jpg",
+   "Image_14L_1stHO.png"
+  ],
+  [
+   "Image_14R.jpg",
+   "Image_14R_1stHO.png"
+  ]
+ ],
+ "test": [
+  [
+   "Image_01L.jpg",
+   "Image_01L_1stHO.png"
+  ],
+  [
+   "Image_01R.jpg",
+   "Image_01R_1stHO.png"
+  ],
+  [
+   "Image_02L.jpg",
+   "Image_02L_1stHO.png"
+  ],
+  [
+   "Image_02R.jpg",
+   "Image_02R_1stHO.png"
+  ],
+  [
+   "Image_03L.jpg",
+   "Image_03L_1stHO.png"
+  ],
+  [
+   "Image_03R.jpg",
+   "Image_03R_1stHO.png"
+  ],
+  [
+   "Image_04L.jpg",
+   "Image_04L_1stHO.png"
+  ],
+  [
+   "Image_04R.jpg",
+   "Image_04R_1stHO.png"
+  ],
+  [
+   "Image_05L.jpg",
+   "Image_05L_1stHO.png"
+  ],
+  [
+   "Image_05R.jpg",
+   "Image_05R_1stHO.png"
+  ],
+  [
+   "Image_06L.jpg",
+   "Image_06L_1stHO.png"
+  ],
+  [
+   "Image_06R.jpg",
+   "Image_06R_1stHO.png"
+  ],
+  [
+   "Image_07L.jpg",
+   "Image_07L_1stHO.png"
+  ],
+  [
+   "Image_07R.jpg",
+   "Image_07R_1stHO.png"
+  ],
+  [
+   "Image_08L.jpg",
+   "Image_08L_1stHO.png"
+  ],
+  [
+   "Image_08R.jpg",
+   "Image_08R_1stHO.png"
+  ],
+  [
+   "Image_09L.jpg",
+   "Image_09L_1stHO.png"
+  ],
+  [
+   "Image_09R.jpg",
+   "Image_09R_1stHO.png"
+  ],
+  [
+   "Image_10L.jpg",
+   "Image_10L_1stHO.png"
+  ],
+  [
+   "Image_10R.jpg",
+   "Image_10R_1stHO.png"
+  ]
+ ]
+}
\ No newline at end of file
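
The split mirrors the original CHASE-DB1 file naming: subjects 11 to 14 (left
and right eyes) provide the 8 training pairs and subjects 01 to 10 the 20 test
pairs, each image matched to its "1stHO" annotation ("2ndHO" in
``second-annotation.json``).  A small sketch of how a protocol file with this
exact shape could be regenerated (the helper below is illustrative and not part
of the package)::

    import json

    def make_protocol(annotator):
        # pairs every fundus image with the annotation of the given observer
        def entries(subjects):
            return [
                ["Image_%02d%s.jpg" % (n, eye),
                 "Image_%02d%s_%s.png" % (n, eye, annotator)]
                for n in subjects for eye in ("L", "R")
            ]
        return {"train": entries(range(11, 15)), "test": entries(range(1, 11))}

    with open("default.json", "w") as f:
        json.dump(make_protocol("1stHO"), f, indent=1)
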
diff --git a/bob/ip/binseg/data/chasedb1/second-annotation.json b/bob/ip/binseg/data/chasedb1/second-annotation.json
new file mode 100644
index 00000000..9e26e371
--- /dev/null
+++ b/bob/ip/binseg/data/chasedb1/second-annotation.json
@@ -0,0 +1,118 @@
+{
+ "train": [
+  [
+   "Image_11L.jpg",
+   "Image_11L_2ndHO.png"
+  ],
+  [
+   "Image_11R.jpg",
+   "Image_11R_2ndHO.png"
+  ],
+  [
+   "Image_12L.jpg",
+   "Image_12L_2ndHO.png"
+  ],
+  [
+   "Image_12R.jpg",
+   "Image_12R_2ndHO.png"
+  ],
+  [
+   "Image_13L.jpg",
+   "Image_13L_2ndHO.png"
+  ],
+  [
+   "Image_13R.jpg",
+   "Image_13R_2ndHO.png"
+  ],
+  [
+   "Image_14L.jpg",
+   "Image_14L_2ndHO.png"
+  ],
+  [
+   "Image_14R.jpg",
+   "Image_14R_2ndHO.png"
+  ]
+ ],
+ "test": [
+  [
+   "Image_01L.jpg",
+   "Image_01L_2ndHO.png"
+  ],
+  [
+   "Image_01R.jpg",
+   "Image_01R_2ndHO.png"
+  ],
+  [
+   "Image_02L.jpg",
+   "Image_02L_2ndHO.png"
+  ],
+  [
+   "Image_02R.jpg",
+   "Image_02R_2ndHO.png"
+  ],
+  [
+   "Image_03L.jpg",
+   "Image_03L_2ndHO.png"
+  ],
+  [
+   "Image_03R.jpg",
+   "Image_03R_2ndHO.png"
+  ],
+  [
+   "Image_04L.jpg",
+   "Image_04L_2ndHO.png"
+  ],
+  [
+   "Image_04R.jpg",
+   "Image_04R_2ndHO.png"
+  ],
+  [
+   "Image_05L.jpg",
+   "Image_05L_2ndHO.png"
+  ],
+  [
+   "Image_05R.jpg",
+   "Image_05R_2ndHO.png"
+  ],
+  [
+   "Image_06L.jpg",
+   "Image_06L_2ndHO.png"
+  ],
+  [
+   "Image_06R.jpg",
+   "Image_06R_2ndHO.png"
+  ],
+  [
+   "Image_07L.jpg",
+   "Image_07L_2ndHO.png"
+  ],
+  [
+   "Image_07R.jpg",
+   "Image_07R_2ndHO.png"
+  ],
+  [
+   "Image_08L.jpg",
+   "Image_08L_2ndHO.png"
+  ],
+  [
+   "Image_08R.jpg",
+   "Image_08R_2ndHO.png"
+  ],
+  [
+   "Image_09L.jpg",
+   "Image_09L_2ndHO.png"
+  ],
+  [
+   "Image_09R.jpg",
+   "Image_09R_2ndHO.png"
+  ],
+  [
+   "Image_10L.jpg",
+   "Image_10L_2ndHO.png"
+  ],
+  [
+   "Image_10R.jpg",
+   "Image_10R_2ndHO.png"
+  ]
+ ]
+}
diff --git a/bob/ip/binseg/data/chasedb1/test.py b/bob/ip/binseg/data/chasedb1/test.py
new file mode 100644
index 00000000..9fcccddf
--- /dev/null
+++ b/bob/ip/binseg/data/chasedb1/test.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+
+"""Tests for CHASE-DB1"""
+
+import os
+import nose.tools
+
+from ..utils import rc_variable_set, DelayedSample2TorchDataset
+from ..transforms import Compose, Crop
+from . import dataset
+
+
+def test_protocol_consistency():
+
+    subset = dataset.subsets("default")
+    nose.tools.eq_(len(subset), 2)
+
+    assert "train" in subset
+    nose.tools.eq_(len(subset["train"]), 8)
+    for s in subset["train"]:
+        assert s.key.startswith("Image_")
+
+    assert "test" in subset
+    nose.tools.eq_(len(subset["test"]), 20)
+    for s in subset["test"]:
+        assert s.key.startswith("Image_")
+
+    subset = dataset.subsets("second-annotation")
+    nose.tools.eq_(len(subset), 2)
+
+    assert "train" in subset
+    nose.tools.eq_(len(subset["train"]), 8)
+    for s in subset["train"]:
+        assert s.key.startswith("Image_")
+
+    assert "test" in subset
+    nose.tools.eq_(len(subset["test"]), 20)
+    for s in subset["test"]:
+        assert s.key.startswith("Image_")
+
+
+@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
+def test_loading():
+
+    def _check_sample(s):
+        data = s.data
+        assert isinstance(data, dict)
+        nose.tools.eq_(len(data), 2)
+        assert "data" in data
+        nose.tools.eq_(data["data"].size, (999, 960))
+        nose.tools.eq_(data["data"].mode, "RGB")
+        assert "label" in data
+        nose.tools.eq_(data["label"].size, (999, 960))
+        nose.tools.eq_(data["label"].mode, "1")
+
+    subset = dataset.subsets("default")
+    for s in subset["train"]: _check_sample(s)
+    for s in subset["test"]: _check_sample(s)
+
+    subset = dataset.subsets("second-annotation")
+    for s in subset["test"]: _check_sample(s)
+
+
+@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
+def test_check():
+    nose.tools.eq_(dataset.check(), 0)
+
+
+@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
+def test_torch_dataset():
+
+    def _check_sample(s):
+        nose.tools.eq_(len(s), 3)
+        assert isinstance(s[0], str)
+        nose.tools.eq_(s[1].size, (960, 960))
+        nose.tools.eq_(s[1].mode, "RGB")
+        nose.tools.eq_(s[2].size, (960, 960))
+        nose.tools.eq_(s[2].mode, "1")
+
+    transforms = Compose([Crop(0, 18, 960, 960)])
+
+    subset = dataset.subsets("default")
+
+    torch_dataset = DelayedSample2TorchDataset(subset["train"], transforms)
+    nose.tools.eq_(len(torch_dataset), 8)
+    for s in torch_dataset: _check_sample(s)
+
+    torch_dataset = DelayedSample2TorchDataset(subset["test"], transforms)
+    nose.tools.eq_(len(torch_dataset), 20)
+    for s in torch_dataset: _check_sample(s)
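
These tests follow the existing DRIVE and STARE test modules: the protocol
consistency check runs without any data installed, while the loading, checking
and PyTorch-conversion tests are gated by the ``bob.ip.binseg.chasedb1.datadir``
rc variable.  A sketch for running just this module programmatically (assuming
nose is installed; the ``argv`` mirrors the ``nosetests`` command line)::

    import nose
    # equivalent to: nosetests -sv bob.ip.binseg.data.chasedb1.test
    nose.run(argv=["nosetests", "-sv", "bob.ip.binseg.data.chasedb1.test"])
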
diff --git a/bob/ip/binseg/data/drive/test.py b/bob/ip/binseg/data/drive/test.py
index 238f9e0f..2834ca41 100644
--- a/bob/ip/binseg/data/drive/test.py
+++ b/bob/ip/binseg/data/drive/test.py
@@ -36,7 +36,7 @@ def test_protocol_consitency():
         assert s.key.startswith(os.path.join("test", "images"))
 
 
-@rc_variable_set('bob.db.drive.datadir')
+@rc_variable_set('bob.ip.binseg.drive.datadir')
 def test_loading():
 
     def _check_sample(s):
@@ -61,12 +61,12 @@ def test_loading():
     for s in subset["test"]: _check_sample(s)
 
 
-@rc_variable_set('bob.db.drive.datadir')
+@rc_variable_set('bob.ip.binseg.drive.datadir')
 def test_check():
     nose.tools.eq_(dataset.check(), 0)
 
 
-@rc_variable_set('bob.db.drive.datadir')
+@rc_variable_set('bob.ip.binseg.drive.datadir')
 def test_torch_dataset():
 
     def _check_sample(s):
diff --git a/bob/ip/binseg/data/stare/test.py b/bob/ip/binseg/data/stare/test.py
index 33058d92..58f7aea6 100644
--- a/bob/ip/binseg/data/stare/test.py
+++ b/bob/ip/binseg/data/stare/test.py
@@ -2,7 +2,7 @@
 # coding=utf-8
 
 
-"""Tests for DRIVE"""
+"""Tests for STARE"""
 
 import os
 import nose.tools
@@ -41,7 +41,7 @@ def test_protocol_consitency():
         assert s.key.startswith(os.path.join("stare-images", "im0"))
 
 
-@rc_variable_set('bob.db.stare.datadir')
+@rc_variable_set('bob.ip.binseg.stare.datadir')
 def test_loading():
 
     def _check_sample(s):
@@ -63,12 +63,12 @@ def test_loading():
     for s in subset["test"]: _check_sample(s)
 
 
-@rc_variable_set('bob.db.drive.datadir')
+@rc_variable_set('bob.ip.binseg.stare.datadir')
 def test_check():
     nose.tools.eq_(dataset.check(), 0)
 
 
-@rc_variable_set('bob.db.drive.datadir')
+@rc_variable_set('bob.ip.binseg.stare.datadir')
 def test_torch_dataset():
 
     def _check_sample(s):
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 133f7c29..1b1a7615 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -79,7 +79,6 @@ test:
     - sphinx
     - sphinx_rtd_theme
     - sphinxcontrib-programoutput
-    - bob.db.chasedb1
     - bob.db.hrf
     - bob.db.drionsdb
     - bob.db.rimoner3
diff --git a/doc/api.rst b/doc/api.rst
index 1b875e8c..19afcc1e 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -15,7 +15,6 @@ Data Manipulation
 .. autosummary::
    :toctree: api/data
 
-   bob.ip.binseg.data
    bob.ip.binseg.data.binsegdataset
    bob.ip.binseg.data.folderdataset
    bob.ip.binseg.data.csvdataset
@@ -34,6 +33,7 @@ Datasets
 
    bob.ip.binseg.data.drive
    bob.ip.binseg.data.stare
+   bob.ip.binseg.data.chasedb1
 
 
 Engines
-- 
GitLab