From fed3a442875e4d7b408eef9de541558e4355b20e Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Thu, 9 Apr 2020 23:42:48 +0200
Subject: [PATCH] Remove use of bob.db.stare

---
 bob/ip/binseg/configs/datasets/amdrive.py     | 64 -------------
 bob/ip/binseg/configs/datasets/amdrivetest.py | 58 ------------
 .../drivechasedb1iostarhrf608sslstare.py      | 74 +++++++--------
 bob/ip/binseg/configs/datasets/stare.py       | 20 ++--
 bob/ip/binseg/configs/datasets/stare1024.py   | 29 ++++--
 bob/ip/binseg/configs/datasets/stare1168.py   | 30 +++---
 bob/ip/binseg/configs/datasets/stare544.py    | 31 ++++---
 bob/ip/binseg/configs/datasets/stare960.py    | 31 ++++---
 .../datasets/starechasedb1iostarhrf544.py     | 19 ++--
 .../starechasedb1iostarhrf544ssldrive.py      |  3 +-
 bob/ip/binseg/configs/datasets/staretest.py   | 22 ++---
 bob/ip/binseg/data/stare/__init__.py          | 42 +++++++++
 bob/ip/binseg/data/stare/default.json         | 86 +++++++++++++++++
 .../binseg/data/stare/second-annotation.json  | 86 +++++++++++++++++
 bob/ip/binseg/data/stare/test.py              | 92 +++++++++++++++++++
 conda/meta.yaml                               |  1 -
 16 files changed, 442 insertions(+), 246 deletions(-)
 delete mode 100644 bob/ip/binseg/configs/datasets/amdrive.py
 delete mode 100644 bob/ip/binseg/configs/datasets/amdrivetest.py
 create mode 100644 bob/ip/binseg/data/stare/__init__.py
 create mode 100644 bob/ip/binseg/data/stare/default.json
 create mode 100644 bob/ip/binseg/data/stare/second-annotation.json
 create mode 100644 bob/ip/binseg/data/stare/test.py

diff --git a/bob/ip/binseg/configs/datasets/amdrive.py b/bob/ip/binseg/configs/datasets/amdrive.py
deleted file mode 100644
index 27f8d5c0..00000000
--- a/bob/ip/binseg/configs/datasets/amdrive.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from bob.db.stare import Database as STARE
-from bob.db.chasedb1 import Database as CHASEDB1
-from bob.db.iostar import Database as IOSTAR
-from bob.db.hrf import Database as HRF
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-import torch
-
-# Target size: 544x544 (DRIVE)
-
-defaulttransforms = [
-    RandomHFlip(),
-    RandomVFlip(),
-    RandomRotation(),
-    ColorJitter(),
-    ToTensor(),
-]
-
-
-# CHASE_DB1
-transforms_chase = Compose([Resize(544), Crop(0, 12, 544, 544), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_chase = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-torch_chase = BinSegDataset(bobdb_chase, split="train", transform=transforms_chase)
-
-
-# IOSTAR VESSEL
-transforms_iostar = Compose([Resize(544), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_iostar = IOSTAR(protocol="default_vessel")
-
-# PyTorch dataset
-torch_iostar = BinSegDataset(bobdb_iostar, split="train", transform=transforms_iostar)
-
-# STARE
-transforms = Compose([Resize(471), Pad((0, 37, 0, 36)), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_stare = STARE(protocol="default")
-
-# PyTorch dataset
-torch_stare = BinSegDataset(bobdb_stare, split="train", transform=transforms)
-
-
-# HRF
-transforms_hrf = Compose([Resize((363)), Pad((0, 90, 0, 91)), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_hrf = HRF(protocol="default")
-
-# PyTorch dataset
-torch_hrf = BinSegDataset(bobdb_hrf, split="train", transform=transforms_hrf)
-
-
-# Merge
-dataset = torch.utils.data.ConcatDataset(
-    [torch_stare, torch_chase, torch_iostar, torch_hrf]
-)
diff --git a/bob/ip/binseg/configs/datasets/amdrivetest.py b/bob/ip/binseg/configs/datasets/amdrivetest.py
deleted file mode 100644
index 0b71274f..00000000
--- a/bob/ip/binseg/configs/datasets/amdrivetest.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from bob.db.stare import Database as STARE
-from bob.db.chasedb1 import Database as CHASEDB1
-from bob.db.iostar import Database as IOSTAR
-from bob.db.hrf import Database as HRF
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-import torch
-
-# Target size: 544x544 (DRIVE)
-
-defaulttransforms = [ToTensor()]
-
-
-# CHASE_DB1
-transforms_chase = Compose([Resize(544), Crop(0, 12, 544, 544), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_chase = CHASEDB1(protocol="default")
-
-# PyTorch dataset
-torch_chase = BinSegDataset(bobdb_chase, split="test", transform=transforms_chase)
-
-
-# IOSTAR VESSEL
-transforms_iostar = Compose([Resize(544), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_iostar = IOSTAR(protocol="default_vessel")
-
-# PyTorch dataset
-torch_iostar = BinSegDataset(bobdb_iostar, split="test", transform=transforms_iostar)
-
-# STARE
-transforms = Compose([Resize(471), Pad((0, 37, 0, 36)), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_stare = STARE(protocol="default")
-
-# PyTorch dataset
-torch_stare = BinSegDataset(bobdb_stare, split="test", transform=transforms)
-
-
-# HRF
-transforms_hrf = Compose([Resize((363)), Pad((0, 90, 0, 91)), *defaulttransforms])
-
-# bob.db.dataset init
-bobdb_hrf = HRF(protocol="default")
-
-# PyTorch dataset
-torch_hrf = BinSegDataset(bobdb_hrf, split="test", transform=transforms_hrf)
-
-
-# Merge
-dataset = torch.utils.data.ConcatDataset(
-    [torch_stare, torch_chase, torch_iostar, torch_hrf]
-)
diff --git a/bob/ip/binseg/configs/datasets/drivechasedb1iostarhrf608sslstare.py b/bob/ip/binseg/configs/datasets/drivechasedb1iostarhrf608sslstare.py
index 37c60f8a..c2d041a9 100644
--- a/bob/ip/binseg/configs/datasets/drivechasedb1iostarhrf608sslstare.py
+++ b/bob/ip/binseg/configs/datasets/drivechasedb1iostarhrf608sslstare.py
@@ -1,42 +1,32 @@
-from bob.ip.binseg.configs.datasets.drive608 import dataset as drive
-from bob.ip.binseg.configs.datasets.chasedb1608 import dataset as chase
-from bob.ip.binseg.configs.datasets.iostarvessel608 import dataset as iostar
-from bob.ip.binseg.configs.datasets.hrf608 import dataset as hrf
-from bob.db.stare import Database as STARE
-from bob.ip.binseg.data.transforms import *
-import torch
-from bob.ip.binseg.data.binsegdataset import (
-    BinSegDataset,
-    SSLBinSegDataset,
-    UnLabeledBinSegDataset,
-)
-
-
-#### Config ####
-
-# PyTorch dataset
-labeled_dataset = torch.utils.data.ConcatDataset([drive, chase, iostar, hrf])
-
-#### Unlabeled STARE TRAIN ####
-unlabeled_transforms = Compose(
-    [
-        Pad((2, 1, 2, 2)),
-        RandomHFlip(),
-        RandomVFlip(),
-        RandomRotation(),
-        ColorJitter(),
-        ToTensor(),
-    ]
-)
-
-# bob.db.dataset init
-starebobdb = STARE(protocol="default")
-
-# PyTorch dataset
-unlabeled_dataset = UnLabeledBinSegDataset(
-    starebobdb, split="train", transform=unlabeled_transforms
-)
-
-# SSL Dataset
-
-dataset = SSLBinSegDataset(labeled_dataset, unlabeled_dataset)
+#!/usr/bin/env python
+# coding=utf-8
+
+"""STARE (SSL training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Configuration resolution: 704 x 608 (after padding)
+
+The dataset available in this file is composed of DRIVE, CHASE-DB1, IOSTAR
+vessel and HRF (with annotated samples) and STARE without labels.
+"""
+
+# Labelled bits
+import torch.utils.data
+
+from bob.ip.binseg.configs.datasets.drive608 import dataset as _drive
+from bob.ip.binseg.configs.datasets.chasedb1608 import dataset as _chase
+from bob.ip.binseg.configs.datasets.iostarvessel608 import dataset as _iostar
+from bob.ip.binseg.configs.datasets.hrf608 import dataset as _hrf
+_labelled = torch.utils.data.ConcatDataset([_drive, _chase, _iostar, _hrf])
+
+# Use STARE without labels in this setup
+from .stare import dataset as _unlabelled
+
+from bob.ip.binseg.data.utils import SSLDataset
+dataset = SSLDataset(_labelled, _unlabelled)
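
For orientation, a minimal sketch of what the ConcatDataset call above does,
using hypothetical torch stand-ins (the four real sources need configured
data directories, so tiny tensor datasets substitute for them here):

    import torch
    import torch.utils.data

    # two stand-in map-style datasets with known sizes
    a = torch.utils.data.TensorDataset(torch.zeros(10, 1))
    b = torch.utils.data.TensorDataset(torch.ones(8, 1))

    # ConcatDataset chains its inputs end-to-end; indices run across sources
    merged = torch.utils.data.ConcatDataset([a, b])
    assert len(merged) == 18
    assert merged[12][0].item() == 1.0  # index 12 falls inside dataset "b"
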
diff --git a/bob/ip/binseg/configs/datasets/stare.py b/bob/ip/binseg/configs/datasets/stare.py
index 4e11db03..74adacbc 100644
--- a/bob/ip/binseg/configs/datasets/stare.py
+++ b/bob/ip/binseg/configs/datasets/stare.py
@@ -10,19 +10,14 @@ for training and testing. The second set by Valentina Kouznetsova acts as a
 “human” baseline.
 
 * Reference: [STARE-2000]_
-* Original resolution (height x width): 605 x 700
-* Configuration resolution: 608 x 704 (after padding)
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 704 x 608 (after padding)
 * Training samples: 10
 * Split reference: [MANINIS-2016]_
 """
 
-from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
-
-#### Config ####
-
-transforms = Compose(
+_transforms = Compose(
     [
         Pad((2, 1, 2, 2)),  #(left, top, right, bottom)
         RandomHFlip(),
@@ -33,8 +28,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["train"],
+        transform=_transforms)
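
As a sanity check on the geometry stated above, the same padding reproduced
with plain PIL (a sketch only; the project's Pad transform is assumed to
follow the (left, top, right, bottom) convention its inline comment
documents):

    from PIL import Image, ImageOps

    im = Image.new("RGB", (700, 605))                  # original STARE (w x h)
    padded = ImageOps.expand(im, border=(2, 1, 2, 2))  # left, top, right, bottom
    assert padded.size == (704, 608)                   # configuration resolution
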
diff --git a/bob/ip/binseg/configs/datasets/stare1024.py b/bob/ip/binseg/configs/datasets/stare1024.py
index a8931ff2..ca3b3957 100644
--- a/bob/ip/binseg/configs/datasets/stare1024.py
+++ b/bob/ip/binseg/configs/datasets/stare1024.py
@@ -1,13 +1,23 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-from bob.db.stare import Database as STARE
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
 
-#### Config ####
+* Reference: [STARE-2000]_
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 1024 x 1024
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
 
-transforms = Compose(
+from bob.ip.binseg.data.transforms import *
+_transforms = Compose(
     [
         RandomRotation(),
         Pad((0, 32, 0, 32)),
@@ -20,8 +30,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/stare1168.py b/bob/ip/binseg/configs/datasets/stare1168.py
index 516a9267..425ac1e5 100644
--- a/bob/ip/binseg/configs/datasets/stare1168.py
+++ b/bob/ip/binseg/configs/datasets/stare1168.py
@@ -1,13 +1,20 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+"""STARE (training set) for Vessel Segmentation
 
-from bob.db.stare import Database as STARE
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
 
-#### Config ####
+* Reference: [STARE-2000]_
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 1168 x 1168
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
 
-transforms = Compose(
+from bob.ip.binseg.data.transforms import *
+_transforms = Compose(
     [
         RandomRotation(),
         Crop(50, 0, 500, 705),
@@ -20,8 +27,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/stare544.py b/bob/ip/binseg/configs/datasets/stare544.py
index b972d1f3..da1b8230 100644
--- a/bob/ip/binseg/configs/datasets/stare544.py
+++ b/bob/ip/binseg/configs/datasets/stare544.py
@@ -1,13 +1,23 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding=utf-8
 
-from bob.db.stare import Database as STARE
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
 
-#### Config ####
+* Reference: [STARE-2000]_
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 544 x 544
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
 
-transforms = Compose(
+from bob.ip.binseg.data.transforms import *
+_transforms = Compose(
     [
         RandomRotation(),
         Resize(471),
@@ -19,8 +29,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/stare960.py b/bob/ip/binseg/configs/datasets/stare960.py
index 211a8448..db8b88b5 100644
--- a/bob/ip/binseg/configs/datasets/stare960.py
+++ b/bob/ip/binseg/configs/datasets/stare960.py
@@ -1,13 +1,23 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding=utf-8
 
-from bob.db.stare import Database as STARE
-from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
 
-#### Config ####
+* Reference: [STARE-2000]_
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 960 x 960
+* Training samples: 10
+* Split reference: [MANINIS-2016]_
+"""
 
-transforms = Compose(
+from bob.ip.binseg.data.transforms import *
+_transforms = Compose(
     [
         RandomRotation(),
         Pad((0, 32, 0, 32)),
@@ -20,8 +30,7 @@ transforms = Compose(
     ]
 )
 
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="train", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["train"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544.py b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544.py
index 200d7842..91720a23 100644
--- a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544.py
+++ b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544.py
@@ -1,10 +1,13 @@
-from bob.ip.binseg.configs.datasets.stare544 import dataset as stare
-from bob.ip.binseg.configs.datasets.chasedb1544 import dataset as chase
-from bob.ip.binseg.configs.datasets.iostarvessel544 import dataset as iostar
-from bob.ip.binseg.configs.datasets.hrf544 import dataset as hrf
-import torch
+#!/usr/bin/env python
+# coding=utf-8
 
-#### Config ####
+"""COVD-STARE (training set) for Vessel Segmentation
+"""
 
-# PyTorch dataset
-dataset = torch.utils.data.ConcatDataset([stare, chase, hrf, iostar])
+from bob.ip.binseg.configs.datasets.stare544 import dataset as _stare
+from bob.ip.binseg.configs.datasets.chasedb1544 import dataset as _chase
+from bob.ip.binseg.configs.datasets.iostarvessel544 import dataset as _iostar
+from bob.ip.binseg.configs.datasets.hrf544 import dataset as _hrf
+
+import torch.utils.data
+dataset = torch.utils.data.ConcatDataset([_stare, _chase, _hrf, _iostar])
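
Since the result is a regular map-style dataset, it feeds directly into a
DataLoader; a hedged sketch, assuming all four data directories are
configured, that every source yields 544 x 544 tensors as the 544-variant
configs intend, and that samples collate as (key, image, label) triples as
the tests in this patch suggest:

    import torch.utils.data
    from bob.ip.binseg.configs.datasets.starechasedb1iostarhrf544 import dataset

    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
    keys, images, labels = next(iter(loader))  # default collation
    print(images.shape)                        # e.g. torch.Size([4, 3, 544, 544])
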
diff --git a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
index 7e4eda99..6aac6bbb 100644
--- a/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
+++ b/bob/ip/binseg/configs/datasets/starechasedb1iostarhrf544ssldrive.py
@@ -7,8 +7,7 @@ The DRIVE database has been established to enable comparative studies on
 segmentation of blood vessels in retinal images.
 
 * Reference: [DRIVE-2004]_
-* This configuration resolution: 544 x 544 (center-crop)
-* Split reference: [DRIVE-2004]_
+* Configuration resolution: 544 x 544
 
 The dataset available in this file is composed of STARE, CHASE-DB1, IOSTAR
 vessel and HRF (with annotated samples) and DRIVE without labels.
diff --git a/bob/ip/binseg/configs/datasets/staretest.py b/bob/ip/binseg/configs/datasets/staretest.py
index e296ac7a..c7a69801 100644
--- a/bob/ip/binseg/configs/datasets/staretest.py
+++ b/bob/ip/binseg/configs/datasets/staretest.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding=utf-8
 
 """STARE (test set) for Vessel Segmentation
 
@@ -10,22 +10,16 @@ for training and testing. The second set by Valentina Kouznetsova acts as a
 “human” baseline.
 
 * Reference: [STARE-2000]_
-* Original resolution (height x width): 605 x 700
-* Configuration resolution: 608 x 704 (after padding)
+* Original resolution (width x height): 700 x 605
+* Configuration resolution: 704 x 608 (after padding)
 * Test samples: 10
 * Split reference: [MANINIS-2016]_
 """
 
-from bob.db.stare import Database as STARE
 from bob.ip.binseg.data.transforms import *
-from bob.ip.binseg.data.binsegdataset import BinSegDataset
+_transforms = Compose([Pad((2, 1, 2, 2)), ToTensor()])
 
-#### Config ####
-
-transforms = Compose([Pad((2, 1, 2, 2)), ToTensor()])
-
-# bob.db.dataset init
-bobdb = STARE(protocol="default")
-
-# PyTorch dataset
-dataset = BinSegDataset(bobdb, split="test", transform=transforms)
+from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
+from bob.ip.binseg.data.stare import dataset as stare
+dataset = DelayedSample2TorchDataset(stare.subsets("default")["test"],
+        transform=_transforms)
diff --git a/bob/ip/binseg/data/stare/__init__.py b/bob/ip/binseg/data/stare/__init__.py
new file mode 100644
index 00000000..62354bb9
--- /dev/null
+++ b/bob/ip/binseg/data/stare/__init__.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+import os
+import pkg_resources
+
+import bob.extension
+
+from ..jsondataset import JSONDataset
+from ..loader import load_pil_rgb, load_pil_1
+
+_protocols = [
+        pkg_resources.resource_filename(__name__, "default.json"),
+        pkg_resources.resource_filename(__name__, "second-annotation.json"),
+        ]
+
+_root_path = bob.extension.rc.get('bob.db.stare.datadir',
+        os.path.realpath(os.curdir))
+
+def _loader(s):
+    return dict(
+            data=load_pil_rgb(s["data"]),
+            label=load_pil_1(s["label"]),
+            )
+
+dataset = JSONDataset(protocols=_protocols, root_path=_root_path, loader=_loader)
+"""STARE (training set) for Vessel Segmentation
+
+A subset of the original STARE dataset contains 20 annotated eye fundus images
+with a resolution of 700 x 605 (width x height). Two sets of ground-truth
+vessel annotations are available. The first set by Adam Hoover is commonly used
+for training and testing. The second set by Valentina Kouznetsova acts as a
+“human” baseline.
+
+* Reference: [STARE-2000]_
+* Original resolution (width x height): 700 x 605
+* Training samples: 10
+* Test samples: 10
+* Samples include labels from 2 annotators (AH, the default, and VK, the
+  second annotator)
+* Split reference: [MANINIS-2016]_
+"""
diff --git a/bob/ip/binseg/data/stare/default.json b/bob/ip/binseg/data/stare/default.json
new file mode 100644
index 00000000..38a96594
--- /dev/null
+++ b/bob/ip/binseg/data/stare/default.json
@@ -0,0 +1,86 @@
+{
+ "train": [
+  [
+   "stare-images/im0001.ppm",
+   "labels-ah/im0001.ah.ppm"
+  ],
+  [
+   "stare-images/im0002.ppm",
+   "labels-ah/im0002.ah.ppm"
+  ],
+  [
+   "stare-images/im0003.ppm",
+   "labels-ah/im0003.ah.ppm"
+  ],
+  [
+   "stare-images/im0004.ppm",
+   "labels-ah/im0004.ah.ppm"
+  ],
+  [
+   "stare-images/im0005.ppm",
+   "labels-ah/im0005.ah.ppm"
+  ],
+  [
+   "stare-images/im0044.ppm",
+   "labels-ah/im0044.ah.ppm"
+  ],
+  [
+   "stare-images/im0077.ppm",
+   "labels-ah/im0077.ah.ppm"
+  ],
+  [
+   "stare-images/im0081.ppm",
+   "labels-ah/im0081.ah.ppm"
+  ],
+  [
+   "stare-images/im0082.ppm",
+   "labels-ah/im0082.ah.ppm"
+  ],
+  [
+   "stare-images/im0139.ppm",
+   "labels-ah/im0139.ah.ppm"
+  ]
+ ],
+ "test": [
+  [
+   "stare-images/im0162.ppm",
+   "labels-ah/im0162.ah.ppm"
+  ],
+  [
+   "stare-images/im0163.ppm",
+   "labels-ah/im0163.ah.ppm"
+  ],
+  [
+   "stare-images/im0235.ppm",
+   "labels-ah/im0235.ah.ppm"
+  ],
+  [
+   "stare-images/im0236.ppm",
+   "labels-ah/im0236.ah.ppm"
+  ],
+  [
+   "stare-images/im0239.ppm",
+   "labels-ah/im0239.ah.ppm"
+  ],
+  [
+   "stare-images/im0240.ppm",
+   "labels-ah/im0240.ah.ppm"
+  ],
+  [
+   "stare-images/im0255.ppm",
+   "labels-ah/im0255.ah.ppm"
+  ],
+  [
+   "stare-images/im0291.ppm",
+   "labels-ah/im0291.ah.ppm"
+  ],
+  [
+   "stare-images/im0319.ppm",
+   "labels-ah/im0319.ah.ppm"
+  ],
+  [
+   "stare-images/im0324.ppm",
+   "labels-ah/im0324.ah.ppm"
+  ]
+ ]
+}
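
For reference, the split file can also be read directly; a sketch that
resolves the relative paths against the same RC variable __init__.py uses:

    import json
    import os

    import pkg_resources
    import bob.extension

    with open(pkg_resources.resource_filename(
            "bob.ip.binseg.data.stare", "default.json")) as f:
        split = json.load(f)

    root = bob.extension.rc.get("bob.db.stare.datadir", os.curdir)
    image, label = split["train"][0]
    print(os.path.join(root, image))  # .../stare-images/im0001.ppm
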
diff --git a/bob/ip/binseg/data/stare/second-annotation.json b/bob/ip/binseg/data/stare/second-annotation.json
new file mode 100644
index 00000000..2d9d4e46
--- /dev/null
+++ b/bob/ip/binseg/data/stare/second-annotation.json
@@ -0,0 +1,86 @@
+{
+ "train": [
+  [
+   "stare-images/im0001.ppm",
+   "labels-vk/im0001.vk.ppm"
+  ],
+  [
+   "stare-images/im0002.ppm",
+   "labels-vk/im0002.vk.ppm"
+  ],
+  [
+   "stare-images/im0003.ppm",
+   "labels-vk/im0003.vk.ppm"
+  ],
+  [
+   "stare-images/im0004.ppm",
+   "labels-vk/im0004.vk.ppm"
+  ],
+  [
+   "stare-images/im0005.ppm",
+   "labels-vk/im0005.vk.ppm"
+  ],
+  [
+   "stare-images/im0044.ppm",
+   "labels-vk/im0044.vk.ppm"
+  ],
+  [
+   "stare-images/im0077.ppm",
+   "labels-vk/im0077.vk.ppm"
+  ],
+  [
+   "stare-images/im0081.ppm",
+   "labels-vk/im0081.vk.ppm"
+  ],
+  [
+   "stare-images/im0082.ppm",
+   "labels-vk/im0082.vk.ppm"
+  ],
+  [
+   "stare-images/im0139.ppm",
+   "labels-vk/im0139.vk.ppm"
+  ]
+ ],
+ "test": [
+  [
+   "stare-images/im0162.ppm",
+   "labels-vk/im0162.vk.ppm"
+  ],
+  [
+   "stare-images/im0163.ppm",
+   "labels-vk/im0163.vk.ppm"
+  ],
+  [
+   "stare-images/im0235.ppm",
+   "labels-vk/im0235.vk.ppm"
+  ],
+  [
+   "stare-images/im0236.ppm",
+   "labels-vk/im0236.vk.ppm"
+  ],
+  [
+   "stare-images/im0239.ppm",
+   "labels-vk/im0239.vk.ppm"
+  ],
+  [
+   "stare-images/im0240.ppm",
+   "labels-vk/im0240.vk.ppm"
+  ],
+  [
+   "stare-images/im0255.ppm",
+   "labels-vk/im0255.vk.ppm"
+  ],
+  [
+   "stare-images/im0291.ppm",
+   "labels-vk/im0291.vk.ppm"
+  ],
+  [
+   "stare-images/im0319.ppm",
+   "labels-vk/im0319.vk.ppm"
+  ],
+  [
+   "stare-images/im0324.ppm",
+   "labels-vk/im0324.vk.ppm"
+  ]
+ ]
+}
diff --git a/bob/ip/binseg/data/stare/test.py b/bob/ip/binseg/data/stare/test.py
new file mode 100644
index 00000000..33058d92
--- /dev/null
+++ b/bob/ip/binseg/data/stare/test.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+
+"""Tests for DRIVE"""
+
+import os
+import nose.tools
+
+from ..utils import rc_variable_set, DelayedSample2TorchDataset
+from ..transforms import Compose, Pad
+from . import dataset
+
+
+def test_protocol_consistency():
+
+    subset = dataset.subsets("default")
+    nose.tools.eq_(len(subset), 2)
+
+    assert "train" in subset
+    nose.tools.eq_(len(subset["train"]), 10)
+    for s in subset["train"]:
+        assert s.key.startswith(os.path.join("stare-images", "im0"))
+
+    assert "test" in subset
+    nose.tools.eq_(len(subset["test"]), 10)
+    for s in subset["test"]:
+        assert s.key.startswith(os.path.join("stare-images", "im0"))
+
+    subset = dataset.subsets("second-annotation")
+    nose.tools.eq_(len(subset), 2)
+
+    assert "train" in subset
+    nose.tools.eq_(len(subset["train"]), 10)
+    for s in subset["train"]:
+        assert s.key.startswith(os.path.join("stare-images", "im0"))
+
+    assert "test" in subset
+    nose.tools.eq_(len(subset["test"]), 10)
+    for s in subset["test"]:
+        assert s.key.startswith(os.path.join("stare-images", "im0"))
+
+
+@rc_variable_set('bob.db.stare.datadir')
+def test_loading():
+
+    def _check_sample(s):
+        data = s.data
+        assert isinstance(data, dict)
+        nose.tools.eq_(len(data), 2)
+        assert "data" in data
+        nose.tools.eq_(data["data"].size, (700, 605))
+        nose.tools.eq_(data["data"].mode, "RGB")
+        assert "label" in data
+        nose.tools.eq_(data["label"].size, (700, 605))
+        nose.tools.eq_(data["label"].mode, "1")
+
+    subset = dataset.subsets("default")
+    for s in subset["train"]: _check_sample(s)
+    for s in subset["test"]: _check_sample(s)
+
+    subset = dataset.subsets("second-annotation")
+    for s in subset["test"]: _check_sample(s)
+
+
+@rc_variable_set('bob.db.stare.datadir')
+def test_check():
+    nose.tools.eq_(dataset.check(), 0)
+
+
+@rc_variable_set('bob.db.stare.datadir')
+def test_torch_dataset():
+
+    def _check_sample(s):
+        nose.tools.eq_(len(s), 3)
+        assert isinstance(s[0], str)
+        nose.tools.eq_(s[1].size, (704, 608))
+        nose.tools.eq_(s[1].mode, "RGB")
+        nose.tools.eq_(s[2].size, (704, 608))
+        nose.tools.eq_(s[2].mode, "1")
+
+    transforms = Compose([Pad((2, 1, 2, 2))])
+
+    subset = dataset.subsets("default")
+
+    torch_dataset = DelayedSample2TorchDataset(subset["train"], transforms)
+    nose.tools.eq_(len(torch_dataset), 10)
+    for s in torch_dataset: _check_sample(s)
+
+    torch_dataset = DelayedSample2TorchDataset(subset["test"], transforms)
+    nose.tools.eq_(len(torch_dataset), 10)
+    for s in torch_dataset: _check_sample(s)
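
The new tests can be exercised in isolation with nose, which the suite
already uses; a sketch (the rc_variable_set guard presumably skips the
data-dependent tests when "bob.db.stare.datadir" is unset):

    import nose

    # collects and runs only bob/ip/binseg/data/stare/test.py
    nose.run(defaultTest="bob.ip.binseg.data.stare.test")
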
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 4f30610b..7b69b581 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -74,7 +74,6 @@ test:
     - sphinx
     - sphinx_rtd_theme
     - sphinxcontrib-programoutput
-    - bob.db.stare
     - bob.db.chasedb1
     - bob.db.hrf
     - bob.db.drionsdb
-- 
GitLab