Skip to content
Snippets Groups Projects
Commit b699ecf5 authored by André Anjos's avatar André Anjos :speech_balloon:
Browse files

Remove use of bob.db.chasedb1

parent 9420eb81
No related branches found
No related tags found
1 merge request!12Streamlining
Pipeline #38874 passed
Showing
with 482 additions and 143 deletions
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
"""CHASE-DB1 (training set) for Vessel Segmentation
......@@ -19,18 +19,12 @@ bright strip running down the centre known as the central vessel reflex.
* Reference: [CHASEDB1-2012]_
* Original resolution (height x width): 960 x 999
* Configuration resolution: 960 x 960 (after hand-specified crop)
* Training samples: 20
* Training samples: 8
* Split reference: [CHASEDB1-2012]_
"""
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
_transforms = Compose(
[
Crop(0, 18, 960, 960), #(upper, left, height, width)
RandomHFlip(),
......@@ -41,8 +35,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
_transforms = Compose(
[
RandomRotation(),
Crop(0, 18, 960, 960),
......@@ -19,8 +11,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
_transforms = Compose(
[
RandomRotation(),
Crop(140, 18, 680, 960),
......@@ -19,8 +11,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
_transforms = Compose(
[
Resize(544),
Crop(0, 12, 544, 544),
......@@ -19,8 +11,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
_transforms = Compose(
[
RandomRotation(),
CenterCrop((829, 960)),
......@@ -19,8 +11,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["train"],
transform=_transforms)
......@@ -19,20 +19,19 @@ bright strip running down the centre known as the central vessel reflex.
* Reference: [CHASEDB1-2012]_
* Original resolution (height x width): 960 x 999
* Configuration resolution: 960 x 960 (after hand-specified crop)
* Test samples: 8
* Test samples: 20
* Split reference: [CHASEDB1-2012]_
"""
from bob.db.chasedb1 import Database as CHASEDB1
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose([Crop(0, 18, 960, 960), ToTensor()])
# bob.db.dataset init
bobdb = CHASEDB1(protocol="default")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="test", transform=transforms)
_transforms = Compose(
[
Crop(0, 18, 960, 960), #(upper, left, height, width)
ToTensor(),
]
)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.chasedb1 import dataset as chasedb1
dataset = DelayedSample2TorchDataset(chasedb1.subsets("default")["test"],
transform=_transforms)
from bob.ip.binseg.configs.datasets.drive960 import dataset as drive
from bob.ip.binseg.configs.datasets.stare960 import dataset as stare
from bob.ip.binseg.configs.datasets.hrf960 import dataset as hrf
from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as iostar
import torch
#!/usr/bin/env python
# coding=utf-8
#### Config ####
"""COVD-CHASEDB1 (training set) for Vessel Segmentation
"""
# PyTorch dataset
dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, iostar])
from bob.ip.binseg.configs.datasets.drive960 import dataset as _drive
from bob.ip.binseg.configs.datasets.stare960 import dataset as _stare
from bob.ip.binseg.configs.datasets.hrf960 import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as _iostar
import torch.utils.data
dataset = torch.utils.data.ConcatDataset([_drive, _stare, _hrf, _iostar])
from bob.ip.binseg.configs.datasets.drive960 import dataset as drive
from bob.ip.binseg.configs.datasets.stare960 import dataset as stare
from bob.ip.binseg.configs.datasets.hrf960 import dataset as hrf
from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as iostar
from bob.db.chasedb1 import Database as CHASE
from bob.ip.binseg.data.transforms import *
import torch
from bob.ip.binseg.data.binsegdataset import (
BinSegDataset,
SSLBinSegDataset,
UnLabeledBinSegDataset,
)
#### Config ####
# PyTorch dataset
labeled_dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, iostar])
#### Unlabeled CHASE TRAIN ####
unlabeled_transforms = Compose(
[
Crop(0, 18, 960, 960),
RandomHFlip(),
RandomVFlip(),
RandomRotation(),
ColorJitter(),
ToTensor(),
]
)
# bob.db.dataset init
chasebobdb = CHASE(protocol="default")
# PyTorch dataset
unlabeled_dataset = UnLabeledBinSegDataset(
chasebobdb, split="train", transform=unlabeled_transforms
)
# SSL Dataset
dataset = SSLBinSegDataset(labeled_dataset, unlabeled_dataset)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CHASE-DB1 (SSL training set) for Vessel Segmentation
The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
school children. This database is a part of the Child Heart and Health Study in
England (CHASE), a cardiovascular health survey in 200 primary schools in
London, Birmingham, and Leicester. The ocular imaging was carried out in
46 schools and demonstrated associations between retinal vessel tortuosity and
early risk factors for cardiovascular disease in over 1000 British primary
school children of different ethnic origin. The retinal images of both of the
eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
The images were captured with a 30-degree FOV camera. The dataset of images is
characterized by nonuniform background illumination, poor contrast of
blood vessels as compared with the background and wider arteriolars that have a
bright strip running down the centre known as the central vessel reflex.
* Reference: [CHASEDB1-2012]_
* Configuration resolution (height x width): 960 x 960
The dataset available in this file is composed of STARE, CHASE-DB1, IOSTAR
vessel and HRF (with annotated samples) and CHASE-DB1 without labels.
"""
# Labelled bits
import torch.utils.data
from bob.ip.binseg.configs.datasets.drive960 import dataset as _drive
from bob.ip.binseg.configs.datasets.stare960 import dataset as _stare
from bob.ip.binseg.configs.datasets.hrf960 import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostarvessel960 import dataset as _iostar
_labelled = torch.utils.data.ConcatDataset([_drive, _stare, _hrf, _iostar])
# Use CHASE-DB1 without labels in this setup
from bob.ip.binseg.configs.datasets.chasedb1 import dataset as _unlabelled
from bob.ip.binseg.data.utils import SSLDataset
dataset = SSLDataset(_labelled, _unlabelled)
......@@ -22,7 +22,7 @@ from bob.ip.binseg.configs.datasets.hrf544 import dataset as _hrf
_labelled = torch.utils.data.ConcatDataset([_stare, _chase, _iostar, _hrf])
# Use DRIVE without labels in this setup
from .drive import dataset as _unlabelled
from bob.ip.binseg.configs.datasets.drive import dataset as _unlabelled
from bob.ip.binseg.data.utils import SSLDataset
dataset = SSLDataset(_labelled, _unlabelled)
#!/usr/bin/env python
# coding=utf-8
"""CHASE-DB1 dataset for Vessel Segmentation
The CHASE_DB1 is a retinal vessel reference dataset acquired from multiethnic
school children. This database is a part of the Child Heart and Health Study in
England (CHASE), a cardiovascular health survey in 200 primary schools in
London, Birmingham, and Leicester. The ocular imaging was carried out in
46 schools and demonstrated associations between retinal vessel tortuosity and
early risk factors for cardiovascular disease in over 1000 British primary
school children of different ethnic origin. The retinal images of both of the
eyes of each child were recorded with a hand-held Nidek NM-200-D fundus camera.
The images were captured with a 30-degree FOV camera. The dataset of images is
characterized by nonuniform background illumination, poor contrast of
blood vessels as compared with the background and wider arteriolars that have a
bright strip running down the centre known as the central vessel reflex.
* Reference: [CHASEDB1-2012]_
* Original resolution (height x width): 960 x 999
* Split reference: [CHASEDB1-2012]_
* Protocol ``default``:
* Training samples: 8 (including labels from annotator "1stHO")
* Test samples: 20 (including labels from annotator "1stHO")
* Protocol ``second-annotation``:
* Training samples: 8 (including labels from annotator "2ndHO")
* Test samples: 20 (including labels from annotator "2ndHO")
"""
import os
import pkg_resources
import bob.extension
from ..jsondataset import JSONDataset
from ..loader import load_pil_rgb, load_pil_1
# JSON protocol definitions shipped with this package; each file maps the
# subset names ("train"/"test") to lists of [image, label] relative paths
_protocols = [
    pkg_resources.resource_filename(__name__, "default.json"),
    pkg_resources.resource_filename(__name__, "second-annotation.json"),
]
# Root directory of the raw dataset installation; taken from the RC variable
# ``bob.ip.binseg.chasedb1.datadir``, falling back to the current directory
_root_path = bob.extension.rc.get('bob.ip.binseg.chasedb1.datadir',
    os.path.realpath(os.curdir))
def _loader(s):
    """Load one sample: the RGB fundus image and its binary label mask.

    ``s`` is a dictionary with ``data`` and ``label`` keys holding file
    paths, as produced by the JSON protocol definitions.
    """
    return {
        "data": load_pil_rgb(s["data"]),
        "label": load_pil_1(s["label"]),
    }
# Module-level singleton combining the shipped protocols, the configured
# root path and the image/label loader above
dataset = JSONDataset(protocols=_protocols, root_path=_root_path, loader=_loader)
"""CHASE-DB1 dataset object"""
{
"train": [
[
"Image_11L.jpg",
"Image_11L_1stHO.png"
],
[
"Image_11R.jpg",
"Image_11R_1stHO.png"
],
[
"Image_12L.jpg",
"Image_12L_1stHO.png"
],
[
"Image_12R.jpg",
"Image_12R_1stHO.png"
],
[
"Image_13L.jpg",
"Image_13L_1stHO.png"
],
[
"Image_13R.jpg",
"Image_13R_1stHO.png"
],
[
"Image_14L.jpg",
"Image_14L_1stHO.png"
],
[
"Image_14R.jpg",
"Image_14R_1stHO.png"
]
],
"test": [
[
"Image_01L.jpg",
"Image_01L_1stHO.png"
],
[
"Image_01R.jpg",
"Image_01R_1stHO.png"
],
[
"Image_02L.jpg",
"Image_02L_1stHO.png"
],
[
"Image_02R.jpg",
"Image_02R_1stHO.png"
],
[
"Image_03L.jpg",
"Image_03L_1stHO.png"
],
[
"Image_03R.jpg",
"Image_03R_1stHO.png"
],
[
"Image_04L.jpg",
"Image_04L_1stHO.png"
],
[
"Image_04R.jpg",
"Image_04R_1stHO.png"
],
[
"Image_05L.jpg",
"Image_05L_1stHO.png"
],
[
"Image_05R.jpg",
"Image_05R_1stHO.png"
],
[
"Image_06L.jpg",
"Image_06L_1stHO.png"
],
[
"Image_06R.jpg",
"Image_06R_1stHO.png"
],
[
"Image_07L.jpg",
"Image_07L_1stHO.png"
],
[
"Image_07R.jpg",
"Image_07R_1stHO.png"
],
[
"Image_08L.jpg",
"Image_08L_1stHO.png"
],
[
"Image_08R.jpg",
"Image_08R_1stHO.png"
],
[
"Image_09L.jpg",
"Image_09L_1stHO.png"
],
[
"Image_09R.jpg",
"Image_09R_1stHO.png"
],
[
"Image_10L.jpg",
"Image_10L_1stHO.png"
],
[
"Image_10R.jpg",
"Image_10R_1stHO.png"
]
]
}
\ No newline at end of file
{
"train": [
[
"Image_11L.jpg",
"Image_11L_2ndHO.png"
],
[
"Image_11R.jpg",
"Image_11R_2ndHO.png"
],
[
"Image_12L.jpg",
"Image_12L_2ndHO.png"
],
[
"Image_12R.jpg",
"Image_12R_2ndHO.png"
],
[
"Image_13L.jpg",
"Image_13L_2ndHO.png"
],
[
"Image_13R.jpg",
"Image_13R_2ndHO.png"
],
[
"Image_14L.jpg",
"Image_14L_2ndHO.png"
],
[
"Image_14R.jpg",
"Image_14R_2ndHO.png"
]
],
"test": [
[
"Image_01L.jpg",
"Image_01L_2ndHO.png"
],
[
"Image_01R.jpg",
"Image_01R_2ndHO.png"
],
[
"Image_02L.jpg",
"Image_02L_2ndHO.png"
],
[
"Image_02R.jpg",
"Image_02R_2ndHO.png"
],
[
"Image_03L.jpg",
"Image_03L_2ndHO.png"
],
[
"Image_03R.jpg",
"Image_03R_2ndHO.png"
],
[
"Image_04L.jpg",
"Image_04L_2ndHO.png"
],
[
"Image_04R.jpg",
"Image_04R_2ndHO.png"
],
[
"Image_05L.jpg",
"Image_05L_2ndHO.png"
],
[
"Image_05R.jpg",
"Image_05R_2ndHO.png"
],
[
"Image_06L.jpg",
"Image_06L_2ndHO.png"
],
[
"Image_06R.jpg",
"Image_06R_2ndHO.png"
],
[
"Image_07L.jpg",
"Image_07L_2ndHO.png"
],
[
"Image_07R.jpg",
"Image_07R_2ndHO.png"
],
[
"Image_08L.jpg",
"Image_08L_2ndHO.png"
],
[
"Image_08R.jpg",
"Image_08R_2ndHO.png"
],
[
"Image_09L.jpg",
"Image_09L_2ndHO.png"
],
[
"Image_09R.jpg",
"Image_09R_2ndHO.png"
],
[
"Image_10L.jpg",
"Image_10L_2ndHO.png"
],
[
"Image_10R.jpg",
"Image_10R_2ndHO.png"
]
]
}
#!/usr/bin/env python
# coding=utf-8
"""Tests for CHASE-DB1"""
import os
import nose.tools
from ..utils import rc_variable_set, DelayedSample2TorchDataset
from ..transforms import Compose, Crop
from . import dataset
def test_protocol_consitency():
    # NOTE(review): function name keeps the historical misspelling so the
    # test-collection name stays stable across the test suite.

    # Both protocols expose the same structure: a "train" subset with 8
    # samples and a "test" subset with 20, all keyed by "Image_*" stems.
    for protocol in ("default", "second-annotation"):
        subset = dataset.subsets(protocol)
        nose.tools.eq_(len(subset), 2)
        for split, expected in (("train", 8), ("test", 20)):
            assert split in subset
            nose.tools.eq_(len(subset[split]), expected)
            for sample in subset[split]:
                assert sample.key.startswith("Image_")
@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
def test_loading():
    """Loads actual data from disk and checks raw geometry and color modes."""

    def _check_sample(sample):
        # Each loaded sample is a 2-entry dict: the RGB image and its
        # binary ("1" mode) vessel annotation, both at 999x960 (w x h)
        payload = sample.data
        assert isinstance(payload, dict)
        nose.tools.eq_(len(payload), 2)
        for key, mode in (("data", "RGB"), ("label", "1")):
            assert key in payload
            nose.tools.eq_(payload[key].size, (999, 960))
            nose.tools.eq_(payload[key].mode, mode)

    # "default": check every sample; "second-annotation": test split only
    for protocol, splits in (
        ("default", ("train", "test")),
        ("second-annotation", ("test",)),
    ):
        subset = dataset.subsets(protocol)
        for split in splits:
            for sample in subset[split]:
                _check_sample(sample)
@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
def test_check():
    # dataset.check() returns the number of problems found while verifying
    # the installed files against the protocol definitions; 0 means clean
    nose.tools.eq_(dataset.check(), 0)
@rc_variable_set('bob.ip.binseg.chasedb1.datadir')
def test_torch_dataset():
    """Wraps subsets into torch datasets and checks cropped sample geometry."""

    def _check_sample(sample):
        # Torch-wrapped samples are (key, image, label) triples; after the
        # 960x960 crop both images are square
        nose.tools.eq_(len(sample), 3)
        assert isinstance(sample[0], str)
        for image, mode in ((sample[1], "RGB"), (sample[2], "1")):
            nose.tools.eq_(image.size, (960, 960))
            nose.tools.eq_(image.mode, mode)

    transforms = Compose([Crop(0, 18, 960, 960)])
    subset = dataset.subsets("default")
    for split, expected in (("train", 8), ("test", 20)):
        wrapped = DelayedSample2TorchDataset(subset[split], transforms)
        nose.tools.eq_(len(wrapped), expected)
        for sample in wrapped:
            _check_sample(sample)
......@@ -36,7 +36,7 @@ def test_protocol_consitency():
assert s.key.startswith(os.path.join("test", "images"))
@rc_variable_set('bob.db.drive.datadir')
@rc_variable_set('bob.ip.binseg.drive.datadir')
def test_loading():
def _check_sample(s):
......@@ -61,12 +61,12 @@ def test_loading():
for s in subset["test"]: _check_sample(s)
@rc_variable_set('bob.db.drive.datadir')
@rc_variable_set('bob.ip.binseg.drive.datadir')
def test_check():
nose.tools.eq_(dataset.check(), 0)
@rc_variable_set('bob.db.drive.datadir')
@rc_variable_set('bob.ip.binseg.drive.datadir')
def test_torch_dataset():
def _check_sample(s):
......
......@@ -2,7 +2,7 @@
# coding=utf-8
"""Tests for DRIVE"""
"""Tests for STARE"""
import os
import nose.tools
......@@ -41,7 +41,7 @@ def test_protocol_consitency():
assert s.key.startswith(os.path.join("stare-images", "im0"))
@rc_variable_set('bob.db.stare.datadir')
@rc_variable_set('bob.ip.binseg.stare.datadir')
def test_loading():
def _check_sample(s):
......@@ -63,12 +63,12 @@ def test_loading():
for s in subset["test"]: _check_sample(s)
@rc_variable_set('bob.db.drive.datadir')
@rc_variable_set('bob.ip.binseg.drive.datadir')
def test_check():
nose.tools.eq_(dataset.check(), 0)
@rc_variable_set('bob.db.drive.datadir')
@rc_variable_set('bob.ip.binseg.drive.datadir')
def test_torch_dataset():
def _check_sample(s):
......
......@@ -79,7 +79,6 @@ test:
- sphinx
- sphinx_rtd_theme
- sphinxcontrib-programoutput
- bob.db.chasedb1
- bob.db.hrf
- bob.db.drionsdb
- bob.db.rimoner3
......
......@@ -15,7 +15,6 @@ Data Manipulation
.. autosummary::
:toctree: api/data
bob.ip.binseg.data
bob.ip.binseg.data.binsegdataset
bob.ip.binseg.data.folderdataset
bob.ip.binseg.data.csvdataset
......@@ -34,6 +33,7 @@ Datasets
bob.ip.binseg.data.drive
bob.ip.binseg.data.stare
bob.ip.binseg.data.chasedb1
Engines
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment