Skip to content
Snippets Groups Projects
Commit 83216033 authored by André Anjos's avatar André Anjos :speech_balloon:
Browse files

Remove requirement on bob.db.iostar

parent 68ac4bdb
No related branches found
No related tags found
1 merge request!12Streamlining
Pipeline #38877 passed
Showing
with 862 additions and 146 deletions
from bob.ip.binseg.configs.datasets.drive1024 import dataset as drive
from bob.ip.binseg.configs.datasets.stare1024 import dataset as stare
from bob.ip.binseg.configs.datasets.hrf1024 import dataset as hrf
from bob.ip.binseg.configs.datasets.chasedb11024 import dataset as chase
import torch
#!/usr/bin/env python
# coding=utf-8
#### Config ####
"""COVD-IOSTAR (training set) for Vessel Segmentation
"""
# PyTorch dataset
dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, chase])
"""COVD-IOSTAR (training set) for vessel segmentation.

Concatenates the 1024 x 1024 versions of the DRIVE, STARE, HRF and
CHASE-DB1 training configurations into a single PyTorch dataset.
"""

from torch.utils.data import ConcatDataset

from bob.ip.binseg.configs.datasets.drive1024 import dataset as _drive
from bob.ip.binseg.configs.datasets.stare1024 import dataset as _stare
from bob.ip.binseg.configs.datasets.hrf1024 import dataset as _hrf
from bob.ip.binseg.configs.datasets.chasedb11024 import dataset as _chase

# combined training set, in the order DRIVE, STARE, HRF, CHASE-DB1
dataset = ConcatDataset((_drive, _stare, _hrf, _chase))
from bob.ip.binseg.configs.datasets.drive1024 import dataset as drive
from bob.ip.binseg.configs.datasets.stare1024 import dataset as stare
from bob.ip.binseg.configs.datasets.hrf1024 import dataset as hrf
from bob.ip.binseg.configs.datasets.chasedb11024 import dataset as chasedb
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
import torch
from bob.ip.binseg.data.binsegdataset import (
BinSegDataset,
SSLBinSegDataset,
UnLabeledBinSegDataset,
)
#!/usr/bin/env python
# coding=utf-8
"""COVD-IOSTAR + SSL (training set) for Vessel Segmentation
#### Config ####
* Configuration resolution: 1024 x 1024
# PyTorch dataset
labeled_dataset = torch.utils.data.ConcatDataset([drive, stare, hrf, chasedb])
The dataset available in this file is composed of DRIVE, STARE, CHASE-DB1, and
HRF (with annotated samples) and IOSTAR without labels.
"""
#### Unlabeled IOSTAR Train ####
unlabeled_transforms = Compose(
[RandomHFlip(), RandomVFlip(), RandomRotation(), ColorJitter(), ToTensor()]
)
# bob.db.dataset init
iostarbobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
unlabeled_dataset = UnLabeledBinSegDataset(
iostarbobdb, split="train", transform=unlabeled_transforms
)
# SSL Dataset
dataset = SSLBinSegDataset(labeled_dataset, unlabeled_dataset)
"""COVD + IOSTAR semi-supervised (SSL) training set for vessel segmentation.

Pairs the labelled COVD mixture (DRIVE, STARE, CHASE-DB1 and HRF at
1024 x 1024) with the unlabelled IOSTAR vessel images.
"""

from bob.ip.binseg.configs.datasets.drivestarechasedb1hrf1024 import (
    dataset as _labelled,
)
from bob.ip.binseg.configs.datasets.iostarvessel import dataset as _unlabelled
from bob.ip.binseg.data.utils import SSLDataset

# semi-supervised dataset combining labelled and unlabelled samples
dataset = SSLDataset(_labelled, _unlabelled)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
"""IOSTAR (training set) for Optic Disc Segmentation
......@@ -15,18 +15,18 @@ dataset includes annotations for the optic disc and the artery/vein ratio.
* Split reference: [MEYER-2017]_
"""
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
[RandomHFlip(), RandomVFlip(), RandomRotation(), ColorJitter(), ToTensor()]
_transforms = Compose(
[
RandomHFlip(),
RandomVFlip(),
RandomRotation(),
ColorJitter(),
ToTensor(),
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_od")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("optic-disc")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
"""IOSTAR (test set) for Optic Disc Segmentation
"""IOSTAR (training set) for Optic Disc Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
......@@ -15,16 +15,10 @@ dataset includes annotations for the optic disc and the artery/vein ratio.
* Split reference: [MEYER-2017]_
"""
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
_transforms = Compose([ToTensor()])
#### Config ####
transforms = Compose([ToTensor()])
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_od")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="test", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("optic-disc")["test"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
"""IOSTAR (training set) for Vessel Segmentation
......@@ -15,18 +15,18 @@ dataset includes annotations for the optic disc and the artery/vein ratio.
* Split reference: [MEYER-2017]_
"""
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
#### Config ####
transforms = Compose(
[RandomHFlip(), RandomVFlip(), RandomRotation(), ColorJitter(), ToTensor()]
_transforms = Compose(
[
RandomHFlip(),
RandomVFlip(),
RandomRotation(),
ColorJitter(),
ToTensor(),
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
"""IOSTAR (training set) for Vessel Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
experts working in the field of retinal image analysis. Additionally the
dataset includes annotations for the optic disc and the artery/vein ratio.
#### Config ####
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Configuration resolution: 1648 x 1168
* Training samples: 20
* Split reference: [MEYER-2017]_
"""
transforms = Compose(
from bob.ip.binseg.data.transforms import *
_transforms = Compose(
[
RandomRotation(),
Crop(144, 0, 768, 1024),
......@@ -20,8 +29,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
"""IOSTAR (training set) for Vessel Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
experts working in the field of retinal image analysis. Additionally the
dataset includes annotations for the optic disc and the artery/vein ratio.
#### Config ####
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Configuration resolution: 544 x 544
* Training samples: 20
* Split reference: [MEYER-2017]_
"""
transforms = Compose(
from bob.ip.binseg.data.transforms import *
_transforms = Compose(
[
Resize(544),
RandomHFlip(),
......@@ -18,8 +27,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
"""IOSTAR (training set) for Vessel Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
experts working in the field of retinal image analysis. Additionally the
dataset includes annotations for the optic disc and the artery/vein ratio.
#### Config ####
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Configuration resolution: 700 x 608
* Training samples: 20
* Split reference: [MEYER-2017]_
"""
transforms = Compose(
from bob.ip.binseg.data.transforms import *
_transforms = Compose(
[
Pad((81, 0, 81, 0)),
Resize(608),
......@@ -19,8 +28,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
"""IOSTAR (training set) for Vessel Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
experts working in the field of retinal image analysis. Additionally the
dataset includes annotations for the optic disc and the artery/vein ratio.
#### Config ####
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Configuration resolution: 960 x 960
* Training samples: 20
* Split reference: [MEYER-2017]_
"""
transforms = Compose(
from bob.ip.binseg.data.transforms import *
_transforms = Compose(
[
Resize(960),
RandomHFlip(),
......@@ -18,8 +27,7 @@ transforms = Compose(
]
)
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="train", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["train"],
transform=_transforms)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
"""IOSTAR (test set) for Vessel Segmentation
"""IOSTAR (training set) for Vessel Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
......@@ -11,20 +11,14 @@ dataset includes annotations for the optic disc and the artery/vein ratio.
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Configuration resolution: 1024 x 1024
* Training samples: 10
* Test samples: 10
* Split reference: [MEYER-2017]_
"""
from bob.db.iostar import Database as IOSTAR
from bob.ip.binseg.data.transforms import *
from bob.ip.binseg.data.binsegdataset import BinSegDataset
_transforms = Compose([ToTensor()])
#### Config ####
transforms = Compose([ToTensor()])
# bob.db.dataset init
bobdb = IOSTAR(protocol="default_vessel")
# PyTorch dataset
dataset = BinSegDataset(bobdb, split="test", transform=transforms)
from bob.ip.binseg.data.utils import DelayedSample2TorchDataset
from bob.ip.binseg.data.iostar import dataset as iostar
dataset = DelayedSample2TorchDataset(iostar.subsets("vessel")["test"],
transform=_transforms)
......@@ -38,6 +38,7 @@ def _loader(s):
return dict(
data=load_pil_rgb(s["data"]),
label=load_pil_1(s["label"]),
mask=load_pil_1(s["mask"]),
)
dataset = JSONDataset(protocols=_protocols, root_path=_root_path, loader=_loader)
......
......@@ -34,13 +34,16 @@ def test_loading():
def _check_sample(s):
data = s.data
assert isinstance(data, dict)
nose.tools.eq_(len(data), 2)
nose.tools.eq_(len(data), 3)
assert "data" in data
nose.tools.eq_(data["data"].size, (3504, 2336))
nose.tools.eq_(data["data"].mode, "RGB")
assert "label" in data
nose.tools.eq_(data["label"].size, (3504, 2336))
nose.tools.eq_(data["label"].mode, "1")
assert "mask" in data
nose.tools.eq_(data["mask"].size, (3504, 2336))
nose.tools.eq_(data["mask"].mode, "1")
subset = dataset.subsets("default")
for s in subset["train"]: _check_sample(s)
......@@ -56,12 +59,14 @@ def test_check():
def test_torch_dataset():
def _check_sample(s):
nose.tools.eq_(len(s), 3)
nose.tools.eq_(len(s), 4)
assert isinstance(s[0], str)
nose.tools.eq_(s[1].size, (1648, 1168))
nose.tools.eq_(s[1].mode, "RGB")
nose.tools.eq_(s[2].size, (1648, 1168))
nose.tools.eq_(s[2].mode, "1")
nose.tools.eq_(s[3].size, (1648, 1168))
nose.tools.eq_(s[3].mode, "1")
transforms = Compose([Crop(0, 108, 2336, 3296), Resize((1168))])
......
#!/usr/bin/env python
# coding=utf-8
"""IOSTAR (training set) for Vessel and Optic-Disc Segmentation
The IOSTAR vessel segmentation dataset includes 30 images with a resolution of
1024 × 1024 pixels. All the vessels in this dataset are annotated by a group of
experts working in the field of retinal image analysis. Additionally the
dataset includes annotations for the optic disc and the artery/vein ratio.
* Reference: [IOSTAR-2016]_
* Original resolution (height x width): 1024 x 1024
* Split reference: [MEYER-2017]_
* Protocol ``vessel``:
* Training samples: 20 (including labels and masks)
* Test samples: 10 (including labels and masks)
* Protocol ``optic-disc``:
* Training samples: 20 (including labels and masks)
* Test samples: 10 (including labels and masks)
"""
import os
import pkg_resources
import bob.extension
from ..jsondataset import JSONDataset
from ..loader import load_pil_rgb, load_pil_1
_protocols = [
pkg_resources.resource_filename(__name__, "vessel.json"),
pkg_resources.resource_filename(__name__, "optic-disc.json"),
]
_root_path = bob.extension.rc.get('bob.ip.binseg.iostar.datadir',
os.path.realpath(os.curdir))
def _loader(s):
    """Loads one IOSTAR sample from its path dictionary.

    Parameters
    ----------
    s : dict
        Maps ``"data"``, ``"label"`` and ``"mask"`` to file paths inside
        the dataset root directory.

    Returns
    -------
    dict
        ``"data"`` loaded as an RGB PIL image; ``"label"`` and ``"mask"``
        loaded as binary (mode ``"1"``) PIL images.
    """
    return dict(
        data=load_pil_rgb(s["data"]),
        label=load_pil_1(s["label"]),
        mask=load_pil_1(s["mask"]),
    )
dataset = JSONDataset(protocols=_protocols, root_path=_root_path, loader=_loader)
"""IOSTAR dataset object"""
{
"train": [
[
"images/01_dr.JPG",
"manual1/01_dr.tif",
"mask/01_dr_mask.tif"
],
[
"images/02_dr.JPG",
"manual1/02_dr.tif",
"mask/02_dr_mask.tif"
],
[
"images/03_dr.JPG",
"manual1/03_dr.tif",
"mask/03_dr_mask.tif"
],
[
"images/04_dr.JPG",
"manual1/04_dr.tif",
"mask/04_dr_mask.tif"
],
[
"images/05_dr.JPG",
"manual1/05_dr.tif",
"mask/05_dr_mask.tif"
],
[
"images/01_g.jpg",
"manual1/01_g.tif",
"mask/01_g_mask.tif"
],
[
"images/02_g.jpg",
"manual1/02_g.tif",
"mask/02_g_mask.tif"
],
[
"images/03_g.jpg",
"manual1/03_g.tif",
"mask/03_g_mask.tif"
],
[
"images/04_g.jpg",
"manual1/04_g.tif",
"mask/04_g_mask.tif"
],
[
"images/05_g.jpg",
"manual1/05_g.tif",
"mask/05_g_mask.tif"
],
[
"images/01_h.jpg",
"manual1/01_h.tif",
"mask/01_h_mask.tif"
],
[
"images/02_h.jpg",
"manual1/02_h.tif",
"mask/02_h_mask.tif"
],
[
"images/03_h.jpg",
"manual1/03_h.tif",
"mask/03_h_mask.tif"
],
[
"images/04_h.jpg",
"manual1/04_h.tif",
"mask/04_h_mask.tif"
],
[
"images/05_h.jpg",
"manual1/05_h.tif",
"mask/05_h_mask.tif"
]
],
"test": [
[
"images/06_dr.JPG",
"manual1/06_dr.tif",
"mask/06_dr_mask.tif"
],
[
"images/07_dr.JPG",
"manual1/07_dr.tif",
"mask/07_dr_mask.tif"
],
[
"images/08_dr.JPG",
"manual1/08_dr.tif",
"mask/08_dr_mask.tif"
],
[
"images/09_dr.JPG",
"manual1/09_dr.tif",
"mask/09_dr_mask.tif"
],
[
"images/10_dr.JPG",
"manual1/10_dr.tif",
"mask/10_dr_mask.tif"
],
[
"images/11_dr.JPG",
"manual1/11_dr.tif",
"mask/11_dr_mask.tif"
],
[
"images/12_dr.JPG",
"manual1/12_dr.tif",
"mask/12_dr_mask.tif"
],
[
"images/13_dr.JPG",
"manual1/13_dr.tif",
"mask/13_dr_mask.tif"
],
[
"images/14_dr.JPG",
"manual1/14_dr.tif",
"mask/14_dr_mask.tif"
],
[
"images/15_dr.JPG",
"manual1/15_dr.tif",
"mask/15_dr_mask.tif"
],
[
"images/06_g.jpg",
"manual1/06_g.tif",
"mask/06_g_mask.tif"
],
[
"images/07_g.jpg",
"manual1/07_g.tif",
"mask/07_g_mask.tif"
],
[
"images/08_g.jpg",
"manual1/08_g.tif",
"mask/08_g_mask.tif"
],
[
"images/09_g.jpg",
"manual1/09_g.tif",
"mask/09_g_mask.tif"
],
[
"images/10_g.jpg",
"manual1/10_g.tif",
"mask/10_g_mask.tif"
],
[
"images/11_g.jpg",
"manual1/11_g.tif",
"mask/11_g_mask.tif"
],
[
"images/12_g.jpg",
"manual1/12_g.tif",
"mask/12_g_mask.tif"
],
[
"images/13_g.jpg",
"manual1/13_g.tif",
"mask/13_g_mask.tif"
],
[
"images/14_g.jpg",
"manual1/14_g.tif",
"mask/14_g_mask.tif"
],
[
"images/15_g.jpg",
"manual1/15_g.tif",
"mask/15_g_mask.tif"
],
[
"images/06_h.jpg",
"manual1/06_h.tif",
"mask/06_h_mask.tif"
],
[
"images/07_h.jpg",
"manual1/07_h.tif",
"mask/07_h_mask.tif"
],
[
"images/08_h.jpg",
"manual1/08_h.tif",
"mask/08_h_mask.tif"
],
[
"images/09_h.jpg",
"manual1/09_h.tif",
"mask/09_h_mask.tif"
],
[
"images/10_h.jpg",
"manual1/10_h.tif",
"mask/10_h_mask.tif"
],
[
"images/11_h.jpg",
"manual1/11_h.tif",
"mask/11_h_mask.tif"
],
[
"images/12_h.jpg",
"manual1/12_h.tif",
"mask/12_h_mask.tif"
],
[
"images/13_h.jpg",
"manual1/13_h.tif",
"mask/13_h_mask.tif"
],
[
"images/14_h.jpg",
"manual1/14_h.tif",
"mask/14_h_mask.tif"
],
[
"images/15_h.jpg",
"manual1/15_h.tif",
"mask/15_h_mask.tif"
]
]
}
\ No newline at end of file
{
"train": [
[
"image/STAR 01_OSC.jpg",
"mask_OD/STAR 01_OSC_ODMask.tif",
"mask/STAR 01_OSC_Mask.tif"
],
[
"image/STAR 02_ODC.jpg",
"mask_OD/STAR 02_ODC_ODMask.tif",
"mask/STAR 02_ODC_Mask.tif"
],
[
"image/STAR 03_OSN.jpg",
"mask_OD/STAR 03_OSN_ODMask.tif",
"mask/STAR 03_OSN_Mask.tif"
],
[
"image/STAR 05_ODC.jpg",
"mask_OD/STAR 05_ODC_ODMask.tif",
"mask/STAR 05_ODC_Mask.tif"
],
[
"image/STAR 06_ODN.jpg",
"mask_OD/STAR 06_ODN_ODMask.tif",
"mask/STAR 06_ODN_Mask.tif"
],
[
"image/STAR 08_OSN.jpg",
"mask_OD/STAR 08_OSN_ODMask.tif",
"mask/STAR 08_OSN_Mask.tif"
],
[
"image/STAR 09_OSN.jpg",
"mask_OD/STAR 09_OSN_ODMask.tif",
"mask/STAR 09_OSN_Mask.tif"
],
[
"image/STAR 10_OSN.jpg",
"mask_OD/STAR 10_OSN_ODMask.tif",
"mask/STAR 10_OSN_Mask.tif"
],
[
"image/STAR 13_OSN.jpg",
"mask_OD/STAR 13_OSN_ODMask.tif",
"mask/STAR 13_OSN_Mask.tif"
],
[
"image/STAR 15_OSN.jpg",
"mask_OD/STAR 15_OSN_ODMask.tif",
"mask/STAR 15_OSN_Mask.tif"
],
[
"image/STAR 16_OSN.jpg",
"mask_OD/STAR 16_OSN_ODMask.tif",
"mask/STAR 16_OSN_Mask.tif"
],
[
"image/STAR 17_ODN.jpg",
"mask_OD/STAR 17_ODN_ODMask.tif",
"mask/STAR 17_ODN_Mask.tif"
],
[
"image/STAR 20_ODC.jpg",
"mask_OD/STAR 20_ODC_ODMask.tif",
"mask/STAR 20_ODC_Mask.tif"
],
[
"image/STAR 21_OSC.jpg",
"mask_OD/STAR 21_OSC_ODMask.tif",
"mask/STAR 21_OSC_Mask.tif"
],
[
"image/STAR 24_OSC.jpg",
"mask_OD/STAR 24_OSC_ODMask.tif",
"mask/STAR 24_OSC_Mask.tif"
],
[
"image/STAR 26_ODC.jpg",
"mask_OD/STAR 26_ODC_ODMask.tif",
"mask/STAR 26_ODC_Mask.tif"
],
[
"image/STAR 28_ODN.jpg",
"mask_OD/STAR 28_ODN_ODMask.tif",
"mask/STAR 28_ODN_Mask.tif"
],
[
"image/STAR 30_ODC.jpg",
"mask_OD/STAR 30_ODC_ODMask.tif",
"mask/STAR 30_ODC_Mask.tif"
],
[
"image/STAR 31_ODN.jpg",
"mask_OD/STAR 31_ODN_ODMask.tif",
"mask/STAR 31_ODN_Mask.tif"
],
[
"image/STAR 32_ODC.jpg",
"mask_OD/STAR 32_ODC_ODMask.tif",
"mask/STAR 32_ODC_Mask.tif"
]
],
"test": [
[
"image/STAR 34_ODC.jpg",
"mask_OD/STAR 34_ODC_ODMask.tif",
"mask/STAR 34_ODC_Mask.tif"
],
[
"image/STAR 36_OSC.jpg",
"mask_OD/STAR 36_OSC_ODMask.tif",
"mask/STAR 36_OSC_Mask.tif"
],
[
"image/STAR 37_ODN.jpg",
"mask_OD/STAR 37_ODN_ODMask.tif",
"mask/STAR 37_ODN_Mask.tif"
],
[
"image/STAR 38_ODC.jpg",
"mask_OD/STAR 38_ODC_ODMask.tif",
"mask/STAR 38_ODC_Mask.tif"
],
[
"image/STAR 39_ODC.jpg",
"mask_OD/STAR 39_ODC_ODMask.tif",
"mask/STAR 39_ODC_Mask.tif"
],
[
"image/STAR 40_OSC.jpg",
"mask_OD/STAR 40_OSC_ODMask.tif",
"mask/STAR 40_OSC_Mask.tif"
],
[
"image/STAR 43_OSC.jpg",
"mask_OD/STAR 43_OSC_ODMask.tif",
"mask/STAR 43_OSC_Mask.tif"
],
[
"image/STAR 44_OSN.jpg",
"mask_OD/STAR 44_OSN_ODMask.tif",
"mask/STAR 44_OSN_Mask.tif"
],
[
"image/STAR 45_ODC.jpg",
"mask_OD/STAR 45_ODC_ODMask.tif",
"mask/STAR 45_ODC_Mask.tif"
],
[
"image/STAR 48_OSN.jpg",
"mask_OD/STAR 48_OSN_ODMask.tif",
"mask/STAR 48_OSN_Mask.tif"
]
]
}
\ No newline at end of file
#!/usr/bin/env python
# coding=utf-8
"""Tests for IOSTAR"""
import os
import nose.tools
from ..utils import rc_variable_set, DelayedSample2TorchDataset
from ..transforms import Compose, Crop, Resize
from . import dataset
def test_protocol_consistency():
    """Checks both IOSTAR protocols expose the expected splits and sizes.

    Renamed from ``test_protocol_consitency`` (typo); nose still discovers
    the test through the ``test_`` prefix, so nothing else breaks.
    """

    # vessel segmentation protocol: 20 training + 10 test samples
    subset = dataset.subsets("vessel")
    nose.tools.eq_(len(subset), 2)

    assert "train" in subset
    nose.tools.eq_(len(subset["train"]), 20)
    for s in subset["train"]:
        assert s.key.startswith(os.path.join("image", "STAR "))

    assert "test" in subset
    nose.tools.eq_(len(subset["test"]), 10)
    for s in subset["test"]:
        assert s.key.startswith(os.path.join("image", "STAR "))

    # optic-disc protocol: same split sizes, different ground-truth
    subset = dataset.subsets("optic-disc")
    nose.tools.eq_(len(subset), 2)

    assert "train" in subset
    nose.tools.eq_(len(subset["train"]), 20)
    for s in subset["train"]:
        assert s.key.startswith(os.path.join("image", "STAR "))

    assert "test" in subset
    nose.tools.eq_(len(subset["test"]), 10)
    for s in subset["test"]:
        assert s.key.startswith(os.path.join("image", "STAR "))
@rc_variable_set('bob.ip.binseg.iostar.datadir')
def test_loading():
    """Checks every sample of both protocols loads with consistent images."""

    def _check_sample(s):
        # each sample carries exactly three entries: data, label and mask
        data = s.data
        assert isinstance(data, dict)
        nose.tools.eq_(len(data), 3)

        assert "data" in data
        nose.tools.eq_(data["data"].size, (1024, 1024))
        nose.tools.eq_(data["data"].mode, "RGB")

        assert "label" in data
        nose.tools.eq_(data["label"].size, (1024, 1024))
        nose.tools.eq_(data["label"].mode, "1")

        # fixed: the original repeated the "label" assertions here and never
        # actually verified the "mask" entry (the third of the 3 checked above)
        assert "mask" in data
        nose.tools.eq_(data["mask"].size, (1024, 1024))
        nose.tools.eq_(data["mask"].mode, "1")

    subset = dataset.subsets("vessel")
    for s in subset["train"]: _check_sample(s)
    for s in subset["test"]: _check_sample(s)
    subset = dataset.subsets("optic-disc")
    for s in subset["train"]: _check_sample(s)
    for s in subset["test"]: _check_sample(s)
@rc_variable_set('bob.ip.binseg.iostar.datadir')
def test_check():
    """Checks the installed dataset files all exist (0 == no missing files)."""
    nose.tools.eq_(dataset.check(), 0)
# fixed: the guard referenced "bob.ip.binseg.stare.datadir" (copy-paste from
# the STARE tests) although this module tests IOSTAR — the sibling tests above
# and the 1024x1024 sizes below confirm the intended variable
@rc_variable_set('bob.ip.binseg.iostar.datadir')
def test_torch_dataset():
    """Checks the PyTorch-wrapped subsets yield 4-tuples of expected images."""

    def _check_sample(s):
        # (key, image, ground-truth, mask)
        nose.tools.eq_(len(s), 4)
        assert isinstance(s[0], str)
        nose.tools.eq_(s[1].size, (1024, 1024))
        nose.tools.eq_(s[1].mode, "RGB")
        nose.tools.eq_(s[2].size, (1024, 1024))
        nose.tools.eq_(s[2].mode, "1")
        nose.tools.eq_(s[3].size, (1024, 1024))
        nose.tools.eq_(s[3].mode, "1")

    transforms = None

    subset = dataset.subsets("vessel")
    torch_dataset = DelayedSample2TorchDataset(subset["train"], transforms)
    nose.tools.eq_(len(torch_dataset), 20)
    for s in torch_dataset: _check_sample(s)
    torch_dataset = DelayedSample2TorchDataset(subset["test"], transforms)
    nose.tools.eq_(len(torch_dataset), 10)
    for s in torch_dataset: _check_sample(s)

    subset = dataset.subsets("optic-disc")
    torch_dataset = DelayedSample2TorchDataset(subset["train"], transforms)
    nose.tools.eq_(len(torch_dataset), 20)
    for s in torch_dataset: _check_sample(s)
    torch_dataset = DelayedSample2TorchDataset(subset["test"], transforms)
    nose.tools.eq_(len(torch_dataset), 10)
    for s in torch_dataset: _check_sample(s)
{
"train": [
[
"image/STAR 01_OSC.jpg",
"GT/STAR 01_OSC_GT.tif",
"mask/STAR 01_OSC_Mask.tif"
],
[
"image/STAR 02_ODC.jpg",
"GT/STAR 02_ODC_GT.tif",
"mask/STAR 02_ODC_Mask.tif"
],
[
"image/STAR 03_OSN.jpg",
"GT/STAR 03_OSN_GT.tif",
"mask/STAR 03_OSN_Mask.tif"
],
[
"image/STAR 05_ODC.jpg",
"GT/STAR 05_ODC_GT.tif",
"mask/STAR 05_ODC_Mask.tif"
],
[
"image/STAR 06_ODN.jpg",
"GT/STAR 06_ODN_GT.tif",
"mask/STAR 06_ODN_Mask.tif"
],
[
"image/STAR 08_OSN.jpg",
"GT/STAR 08_OSN_GT.tif",
"mask/STAR 08_OSN_Mask.tif"
],
[
"image/STAR 09_OSN.jpg",
"GT/STAR 09_OSN_GT.tif",
"mask/STAR 09_OSN_Mask.tif"
],
[
"image/STAR 10_OSN.jpg",
"GT/STAR 10_OSN_GT.tif",
"mask/STAR 10_OSN_Mask.tif"
],
[
"image/STAR 13_OSN.jpg",
"GT/STAR 13_OSN_GT.tif",
"mask/STAR 13_OSN_Mask.tif"
],
[
"image/STAR 15_OSN.jpg",
"GT/STAR 15_OSN_GT.tif",
"mask/STAR 15_OSN_Mask.tif"
],
[
"image/STAR 16_OSN.jpg",
"GT/STAR 16_OSN_GT.tif",
"mask/STAR 16_OSN_Mask.tif"
],
[
"image/STAR 17_ODN.jpg",
"GT/STAR 17_ODN_GT.tif",
"mask/STAR 17_ODN_Mask.tif"
],
[
"image/STAR 20_ODC.jpg",
"GT/STAR 20_ODC_GT.tif",
"mask/STAR 20_ODC_Mask.tif"
],
[
"image/STAR 21_OSC.jpg",
"GT/STAR 21_OSC_GT.tif",
"mask/STAR 21_OSC_Mask.tif"
],
[
"image/STAR 24_OSC.jpg",
"GT/STAR 24_OSC_GT.tif",
"mask/STAR 24_OSC_Mask.tif"
],
[
"image/STAR 26_ODC.jpg",
"GT/STAR 26_ODC_GT.tif",
"mask/STAR 26_ODC_Mask.tif"
],
[
"image/STAR 28_ODN.jpg",
"GT/STAR 28_ODN_GT.tif",
"mask/STAR 28_ODN_Mask.tif"
],
[
"image/STAR 30_ODC.jpg",
"GT/STAR 30_ODC_GT.tif",
"mask/STAR 30_ODC_Mask.tif"
],
[
"image/STAR 31_ODN.jpg",
"GT/STAR 31_ODN_GT.tif",
"mask/STAR 31_ODN_Mask.tif"
],
[
"image/STAR 32_ODC.jpg",
"GT/STAR 32_ODC_GT.tif",
"mask/STAR 32_ODC_Mask.tif"
]
],
"test": [
[
"image/STAR 34_ODC.jpg",
"GT/STAR 34_ODC_GT.tif",
"mask/STAR 34_ODC_Mask.tif"
],
[
"image/STAR 36_OSC.jpg",
"GT/STAR 36_OSC_GT.tif",
"mask/STAR 36_OSC_Mask.tif"
],
[
"image/STAR 37_ODN.jpg",
"GT/STAR 37_ODN_GT.tif",
"mask/STAR 37_ODN_Mask.tif"
],
[
"image/STAR 38_ODC.jpg",
"GT/STAR 38_ODC_GT.tif",
"mask/STAR 38_ODC_Mask.tif"
],
[
"image/STAR 39_ODC.jpg",
"GT/STAR 39_ODC_GT.tif",
"mask/STAR 39_ODC_Mask.tif"
],
[
"image/STAR 40_OSC.jpg",
"GT/STAR 40_OSC_GT.tif",
"mask/STAR 40_OSC_Mask.tif"
],
[
"image/STAR 43_OSC.jpg",
"GT/STAR 43_OSC_GT.tif",
"mask/STAR 43_OSC_Mask.tif"
],
[
"image/STAR 44_OSN.jpg",
"GT/STAR 44_OSN_GT.tif",
"mask/STAR 44_OSN_Mask.tif"
],
[
"image/STAR 45_ODC.jpg",
"GT/STAR 45_ODC_GT.tif",
"mask/STAR 45_ODC_Mask.tif"
],
[
"image/STAR 48_OSN.jpg",
"GT/STAR 48_OSN_GT.tif",
"mask/STAR 48_OSN_Mask.tif"
]
]
}
\ No newline at end of file
......@@ -17,7 +17,6 @@ import bob.io.base
from ..utils.metric import base_metrics
from ..utils.plot import precision_recall_f1iso_confintval
from ..utils.summary import summary
import logging
logger = logging.getLogger(__name__)
......
......@@ -219,5 +219,7 @@ def run(model, data_loader, device, output_folder, overlayed_folder,
# Save model summary
summary_path = os.path.join(output_folder, "model-info.txt")
logger.info(f"Saving model summary at {summary_path}...")
with open(summary_path, "w") as f: summary(model, f)
with open(summary_path, "wt") as f:
r, n = summary(model)
logger.info(f"Model has {n} parameters...")
f.write(r)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from bob.ip.binseg.modeling.driu import build_driu
from bob.ip.binseg.modeling.driuod import build_driuod
......@@ -17,27 +18,32 @@ class Tester(unittest.TestCase):
def test_summary_driu(self):
model = build_driu()
param = summary(model)
s, param = summary(model)
self.assertIsInstance(s, str)
self.assertIsInstance(param, int)
def test__summary_driuod(self):
model = build_driuod()
param = summary(model)
s, param = summary(model)
self.assertIsInstance(s, str)
self.assertIsInstance(param, int)
def test_summary_hed(self):
model = build_hed()
param = summary(model)
s, param = summary(model)
self.assertIsInstance(s, str)
self.assertIsInstance(param, int)
def test_summary_unet(self):
model = build_unet()
param = summary(model)
s, param = summary(model)
self.assertIsInstance(s, str)
self.assertIsInstance(param, int)
def test_summary_resunet(self):
model = build_res50unet()
param = summary(model)
s, param = summary(model)
self.assertIsInstance(s, str)
self.assertIsInstance(param, int)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment