Skip to content
Snippets Groups Projects
Commit 8f9bd614 authored by André Anjos's avatar André Anjos :speech_balloon:
Browse files

[pre-commit] make isort compatible with black; use 80 lines

parent 5613eda6
No related branches found
No related tags found
1 merge request: !30 "Adds pre-commit support"
Pipeline #52922 failed
Showing
with 119 additions and 39 deletions
[settings]
line_length=88
line_length=80
order_by_type=true
lines_between_types=1
......@@ -6,8 +6,9 @@ repos:
hooks:
- id: isort
args: [
--profile, "black",
--force-single-line-imports,
--line-length, "88",
--line-length, "80",
--order-by-type,
--lines-between-types, "1"
]
......@@ -15,6 +16,9 @@ repos:
rev: 21.7b0
hooks:
- id: black
args: [
--line-length, "80",
]
- repo: https://gitlab.com/pycqa/flake8
rev: 3.9.2
hooks:
......
......@@ -173,7 +173,11 @@ def make_dataset(subsets, transforms):
# also use it for validation during training
retval["__valid__"] = retval[key]
if ("__train__" in retval) and ("train" in retval) and ("__valid__" not in retval):
if (
("__train__" in retval)
and ("train" in retval)
and ("__valid__" not in retval)
):
# if the dataset does not have a validation set, we use the unaugmented
# training set as validation set
retval["__valid__"] = retval["train"]
......
......@@ -19,8 +19,12 @@ For details on those datasets, consult:
from torch.utils.data import ConcatDataset
from bob.ip.binseg.configs.datasets import augment_subset as _augment
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _baseline
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import second_annotator
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _baseline,
)
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
second_annotator,
)
from bob.ip.binseg.configs.datasets.chasedb1.mtest import dataset as _mtest
dataset = dict(**_baseline)
......
......@@ -15,15 +15,21 @@ from bob.ip.binseg.data.transforms import Resize
dataset = {
"train": _xt["train"],
"test": _xt["test"],
"drive (train)": _xt["drive (train)"].copy([CenterCrop((544, 544)), Resize(960)]),
"drive (test)": _xt["drive (test)"].copy([CenterCrop((544, 544)), Resize(960)]),
"drive (train)": _xt["drive (train)"].copy(
[CenterCrop((544, 544)), Resize(960)]
),
"drive (test)": _xt["drive (test)"].copy(
[CenterCrop((544, 544)), Resize(960)]
),
"stare (train)": _xt["stare (train)"].copy(
[Pad((0, 32, 0, 32)), Resize(960), CenterCrop(960)]
),
"stare (test)": _xt["stare (test)"].copy(
[Pad((0, 32, 0, 32)), Resize(960), CenterCrop(960)]
),
"hrf (train)": _xt["hrf (train)"].copy([Pad((0, 584, 0, 584)), Resize(960)]),
"hrf (train)": _xt["hrf (train)"].copy(
[Pad((0, 584, 0, 584)), Resize(960)]
),
"hrf (test)": _xt["hrf (test)"].copy([Pad((0, 584, 0, 584)), Resize(960)]),
"iostar (train)": _xt["iostar (train)"].copy([Resize(960)]),
"iostar (test)": _xt["iostar (test)"].copy([Resize(960)]),
......
......@@ -20,7 +20,9 @@ For details on datasets, consult:
"""
from bob.ip.binseg.configs.datasets.chasedb1.covd import dataset as _covd
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _baseline
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _baseline,
)
from bob.ip.binseg.data.utils import SSLDataset
# copy dictionary and replace only the augmented train dataset
......
......@@ -4,8 +4,12 @@
"""CHASE-DB1 cross-evaluation dataset
"""
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _chase
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import second_annotator
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _chase,
)
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
second_annotator,
)
from bob.ip.binseg.configs.datasets.drive.default import dataset as _drive
from bob.ip.binseg.configs.datasets.hrf.default import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostar.vessel import dataset as _iostar
......
......@@ -15,15 +15,21 @@ from bob.ip.binseg.data.transforms import Resize
dataset = {
"train": _xt["train"],
"test": _xt["test"],
"stare (train)": _xt["stare (train)"].copy([Resize(471), Pad((0, 37, 0, 36))]),
"stare (test)": _xt["stare (test)"].copy([Resize(471), Pad((0, 37, 0, 36))]),
"stare (train)": _xt["stare (train)"].copy(
[Resize(471), Pad((0, 37, 0, 36))]
),
"stare (test)": _xt["stare (test)"].copy(
[Resize(471), Pad((0, 37, 0, 36))]
),
"chasedb1 (train)": _xt["chasedb1 (train)"].copy(
[Resize(544), Crop(0, 12, 544, 544)]
),
"chasedb1 (test)": _xt["chasedb1 (test)"].copy(
[Resize(544), Crop(0, 12, 544, 544)]
),
"hrf (train)": _xt["hrf (train)"].copy([Resize((363)), Pad((0, 90, 0, 91))]),
"hrf (train)": _xt["hrf (train)"].copy(
[Resize((363)), Pad((0, 90, 0, 91))]
),
"hrf (test)": _xt["hrf (test)"].copy([Resize((363)), Pad((0, 90, 0, 91))]),
"iostar (train)": _xt["iostar (train)"].copy([Resize(544)]),
"iostar (test)": _xt["iostar (test)"].copy([Resize(544)]),
......
......@@ -4,7 +4,9 @@
"""DRIVE cross-evaluation dataset
"""
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _chase
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _chase,
)
from bob.ip.binseg.configs.datasets.drive.default import dataset as _drive
from bob.ip.binseg.configs.datasets.drive.default import second_annotator
from bob.ip.binseg.configs.datasets.hrf.default import dataset as _hrf
......
......@@ -4,7 +4,9 @@
"""HRF cross-evaluation dataset
"""
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _chase
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _chase,
)
from bob.ip.binseg.configs.datasets.drive.default import dataset as _drive
from bob.ip.binseg.configs.datasets.hrf.default import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostar.vessel import dataset as _iostar
......
......@@ -15,8 +15,12 @@ from bob.ip.binseg.data.transforms import Resize
dataset = {
"train": _xt["train"],
"test": _xt["test"],
"drive (train)": _xt["drive (train)"].copy([CenterCrop((540, 540)), Resize(1024)]),
"drive (test)": _xt["drive (test)"].copy([CenterCrop((540, 540)), Resize(1024)]),
"drive (train)": _xt["drive (train)"].copy(
[CenterCrop((540, 540)), Resize(1024)]
),
"drive (test)": _xt["drive (test)"].copy(
[CenterCrop((540, 540)), Resize(1024)]
),
"stare (train)": _xt["stare (train)"].copy(
[Pad((0, 32, 0, 32)), Resize(1024), CenterCrop(1024)]
),
......@@ -29,6 +33,8 @@ dataset = {
"chasedb1 (test)": _xt["chasedb1 (test)"].copy(
[Crop(0, 18, 960, 960), Resize(1024)]
),
"hrf (train)": _xt["hrf (train)"].copy([Pad((0, 584, 0, 584)), Resize(1024)]),
"hrf (train)": _xt["hrf (train)"].copy(
[Pad((0, 584, 0, 584)), Resize(1024)]
),
"hrf (test)": _xt["hrf (test)"].copy([Pad((0, 584, 0, 584)), Resize(1024)]),
}
......@@ -4,7 +4,9 @@
"""IOSTAR vessel cross-evaluation dataset
"""
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _chase
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _chase,
)
from bob.ip.binseg.configs.datasets.drive.default import dataset as _drive
from bob.ip.binseg.configs.datasets.hrf.default import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostar.vessel import dataset as _iostar
......
......@@ -27,8 +27,14 @@ dataset = {
"chasedb1 (test)": _xt["chasedb1 (test)"].copy(
[CenterCrop((829, 960)), Resize(608)]
),
"hrf (train)": _xt["hrf (train)"].copy([Pad((0, 345, 0, 345)), Resize(608)]),
"hrf (train)": _xt["hrf (train)"].copy(
[Pad((0, 345, 0, 345)), Resize(608)]
),
"hrf (test)": _xt["hrf (test)"].copy([Pad((0, 345, 0, 345)), Resize(608)]),
"iostar (train)": _xt["iostar (train)"].copy([Pad((81, 0, 81, 0)), Resize(608)]),
"iostar (test)": _xt["iostar (test)"].copy([Pad((81, 0, 81, 0)), Resize(608)]),
"iostar (train)": _xt["iostar (train)"].copy(
[Pad((81, 0, 81, 0)), Resize(608)]
),
"iostar (test)": _xt["iostar (test)"].copy(
[Pad((81, 0, 81, 0)), Resize(608)]
),
}
......@@ -4,7 +4,9 @@
"""STARE cross-evaluation dataset
"""
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import dataset as _chase
from bob.ip.binseg.configs.datasets.chasedb1.first_annotator import (
dataset as _chase,
)
from bob.ip.binseg.configs.datasets.drive.default import dataset as _drive
from bob.ip.binseg.configs.datasets.hrf.default import dataset as _hrf
from bob.ip.binseg.configs.datasets.iostar.vessel import dataset as _iostar
......
......@@ -47,8 +47,12 @@ def _raw_data_loader(sample):
data=load_pil_rgb(os.path.join(_root_path, sample["data"])),
label=Image.fromarray(
np.ma.mask_or(
np.asarray(load_pil_1(os.path.join(_root_path, sample["label_l"]))),
np.asarray(load_pil_1(os.path.join(_root_path, sample["label_r"]))),
np.asarray(
load_pil_1(os.path.join(_root_path, sample["label_l"]))
),
np.asarray(
load_pil_1(os.path.join(_root_path, sample["label_r"]))
),
)
),
)
......@@ -61,7 +65,9 @@ def _loader(context, sample):
dataset = JSONDataset(
protocols=_protocols, fieldnames=("data", "label_l", "label_r"), loader=_loader
protocols=_protocols,
fieldnames=("data", "label_l", "label_r"),
loader=_loader,
)
"""Japanese Society of Radiological Technology dataset object"""
......@@ -47,8 +47,12 @@ def _raw_data_loader(sample):
data=load_pil_rgb(os.path.join(_root_path, sample["data"])),
label=Image.fromarray(
np.ma.mask_or(
np.asarray(load_pil_1(os.path.join(_root_path, sample["label_l"]))),
np.asarray(load_pil_1(os.path.join(_root_path, sample["label_r"]))),
np.asarray(
load_pil_1(os.path.join(_root_path, sample["label_l"]))
),
np.asarray(
load_pil_1(os.path.join(_root_path, sample["label_r"]))
),
)
),
)
......@@ -61,7 +65,9 @@ def _loader(context, sample):
dataset = JSONDataset(
protocols=_protocols, fieldnames=("data", "label_l", "label_r"), loader=_loader
protocols=_protocols,
fieldnames=("data", "label_l", "label_r"),
loader=_loader,
)
"""Montgomery County dataset object"""
......@@ -314,6 +314,8 @@ class CSVDataset:
fileobj.seek(0)
return [
self._loader(dict(subset=subset, order=n), dict(zip(self.fieldnames, k)))
self._loader(
dict(subset=subset, order=n), dict(zip(self.fieldnames, k))
)
for n, k in enumerate(samples)
]
......@@ -33,10 +33,18 @@ from ..loader import load_pil_rgb
from ..loader import make_delayed
_protocols = {
"optic-disc-all": pkg_resources.resource_filename(__name__, "optic-disc.json"),
"optic-cup-all": pkg_resources.resource_filename(__name__, "optic-cup.json"),
"optic-disc-any": pkg_resources.resource_filename(__name__, "optic-disc.json"),
"optic-cup-any": pkg_resources.resource_filename(__name__, "optic-cup.json"),
"optic-disc-all": pkg_resources.resource_filename(
__name__, "optic-disc.json"
),
"optic-cup-all": pkg_resources.resource_filename(
__name__, "optic-cup.json"
),
"optic-disc-any": pkg_resources.resource_filename(
__name__, "optic-disc.json"
),
"optic-cup-any": pkg_resources.resource_filename(
__name__, "optic-cup.json"
),
}
_root_path = bob.extension.rc.get(
......@@ -47,7 +55,9 @@ _root_path = bob.extension.rc.get(
def _raw_data_loader_all(sample):
retval = dict(
data=load_pil_rgb(os.path.join(_root_path, sample["data"])),
label=load_pil_rgb(os.path.join(_root_path, sample["label"])).convert("L"),
label=load_pil_rgb(os.path.join(_root_path, sample["label"])).convert(
"L"
),
)
retval["label"] = retval["label"].point(lambda p: p > 254, mode="1")
return retval
......@@ -56,7 +66,9 @@ def _raw_data_loader_all(sample):
def _raw_data_loader_any(sample):
retval = dict(
data=load_pil_rgb(os.path.join(_root_path, sample["data"])),
label=load_pil_rgb(os.path.join(_root_path, sample["label"])).convert("L"),
label=load_pil_rgb(os.path.join(_root_path, sample["label"])).convert(
"L"
),
)
retval["label"] = retval["label"].point(lambda p: p > 0, mode="1")
return retval
......
......@@ -60,7 +60,9 @@ def _disc_loader(sample):
data = load_pil_rgb(os.path.join(_root_path, sample["data"]))
label = load_pil_1(os.path.join(_root_path, sample["label"]))
mask = load_pil_1(os.path.join(_root_path, sample["mask"]))
label = subtract_mode1_images(invert_mode1_image(label), invert_mode1_image(mask))
label = subtract_mode1_images(
invert_mode1_image(label), invert_mode1_image(mask)
)
return dict(data=data, label=label, mask=mask)
......
......@@ -13,7 +13,9 @@ from collections.abc import MutableSequence
def _copy_attributes(s, d):
"""Copies attributes from a dictionary to self"""
s.__dict__.update(
dict([k, v] for k, v in d.items() if k not in ("data", "load", "samples"))
dict(
[k, v] for k, v in d.items() if k not in ("data", "load", "samples")
)
)
......
0% — Loading, or loading failed. You may retry.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment