From 816e8b809c670de618bcf41f1f998b3d0caefe49 Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Mon, 31 Jul 2023 12:00:24 +0200
Subject: [PATCH] [data.montgomery_shenzhen_indian_tbx11k] Implement it

---
 pyproject.toml                              |  46 ++++----
 src/ptbench/data/mc_ch_in_11k/__init__.py   |   3 -
 src/ptbench/data/mc_ch_in_11k/default.py    | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_0.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_1.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_2.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_3.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_4.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_5.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_6.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_7.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_8.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11k/fold_9.py     | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/__init__.py |   3 -
 src/ptbench/data/mc_ch_in_11kv2/default.py  | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_0.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_1.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_2.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_3.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_4.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_5.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_6.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_7.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_8.py   | 101 ------------------
 src/ptbench/data/mc_ch_in_11kv2/fold_9.py   | 101 ------------------
 .../__init__.py                             |   0
 .../datamodule.py                           |  50 +++++++++
 .../v1_fold_0.py                            |   7 ++
 .../v1_fold_1.py                            |   7 ++
 .../v1_fold_2.py                            |   7 ++
 .../v1_fold_3.py                            |   7 ++
 .../v1_fold_4.py                            |   7 ++
 .../v1_fold_5.py                            |   7 ++
 .../v1_fold_6.py                            |   7 ++
 .../v1_fold_7.py                            |   7 ++
 .../v1_fold_8.py                            |   7 ++
 .../v1_fold_9.py                            |   7 ++
 .../v1_healthy_vs_atb.py                    |   7 ++
 .../v2_fold_0.py                            |   7 ++
 .../v2_fold_1.py                            |   7 ++
 .../v2_fold_2.py                            |   7 ++
 .../v2_fold_3.py                            |   7 ++
 .../v2_fold_4.py                            |   7 ++
 .../v2_fold_5.py                            |   7 ++
 .../v2_fold_6.py                            |   7 ++
 .../v2_fold_7.py                            |   7 ++
 .../v2_fold_8.py                            |   7 ++
 .../v2_fold_9.py                            |   7 ++
 .../v2_others_vs_atb.py                     |   7 ++
 49 files changed, 226 insertions(+), 2252 deletions(-)
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/__init__.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/default.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_0.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_1.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_2.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_3.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_4.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_5.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_6.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_7.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_8.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11k/fold_9.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/__init__.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/default.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_0.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_1.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_2.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_3.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_4.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_5.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_6.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_7.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_8.py
 delete mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_9.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/__init__.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py
 create mode 100644 src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py

diff --git a/pyproject.toml b/pyproject.toml
index 6549e5c2..d76e3979 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -170,30 +170,28 @@ montgomery-shenzhen-indian-f8 = "ptbench.data.montgomery_shenzhen_indian.fold_8"
 montgomery-shenzhen-indian-f9 = "ptbench.data.montgomery_shenzhen_indian.fold_9"
 
 # montgomery-shenzhen-indian-tbx11k aggregated dataset
-mc_ch_in_11k = "ptbench.data.mc_ch_in_11k.default"
-mc_ch_in_11k_f0 = "ptbench.data.mc_ch_in_11k.fold_0"
-mc_ch_in_11k_f1 = "ptbench.data.mc_ch_in_11k.fold_1"
-mc_ch_in_11k_f2 = "ptbench.data.mc_ch_in_11k.fold_2"
-mc_ch_in_11k_f3 = "ptbench.data.mc_ch_in_11k.fold_3"
-mc_ch_in_11k_f4 = "ptbench.data.mc_ch_in_11k.fold_4"
-mc_ch_in_11k_f5 = "ptbench.data.mc_ch_in_11k.fold_5"
-mc_ch_in_11k_f6 = "ptbench.data.mc_ch_in_11k.fold_6"
-mc_ch_in_11k_f7 = "ptbench.data.mc_ch_in_11k.fold_7"
-mc_ch_in_11k_f8 = "ptbench.data.mc_ch_in_11k.fold_8"
-mc_ch_in_11k_f9 = "ptbench.data.mc_ch_in_11k.fold_9"
-
-# montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
-mc_ch_in_11kv2 = "ptbench.data.mc_ch_in_11kv2.default"
-mc_ch_in_11kv2_f0 = "ptbench.data.mc_ch_in_11kv2.fold_0"
-mc_ch_in_11kv2_f1 = "ptbench.data.mc_ch_in_11kv2.fold_1"
-mc_ch_in_11kv2_f2 = "ptbench.data.mc_ch_in_11kv2.fold_2" -mc_ch_in_11kv2_f3 = "ptbench.data.mc_ch_in_11kv2.fold_3" -mc_ch_in_11kv2_f4 = "ptbench.data.mc_ch_in_11kv2.fold_4" -mc_ch_in_11kv2_f5 = "ptbench.data.mc_ch_in_11kv2.fold_5" -mc_ch_in_11kv2_f6 = "ptbench.data.mc_ch_in_11kv2.fold_6" -mc_ch_in_11kv2_f7 = "ptbench.data.mc_ch_in_11kv2.fold_7" -mc_ch_in_11kv2_f8 = "ptbench.data.mc_ch_in_11kv2.fold_8" -mc_ch_in_11kv2_f9 = "ptbench.data.mc_ch_in_11kv2.fold_9" +montgomery-shenzhen-indian-tbx11k-v1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_healthy_vs_atb" +montgomery-shenzhen-indian-tbx11k-v1-f0 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_0" +montgomery-shenzhen-indian-tbx11k-v1-f1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_1" +montgomery-shenzhen-indian-tbx11k-v1-f2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_2" +montgomery-shenzhen-indian-tbx11k-v1-f3 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_3" +montgomery-shenzhen-indian-tbx11k-v1-f4 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_4" +montgomery-shenzhen-indian-tbx11k-v1-f5 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_5" +montgomery-shenzhen-indian-tbx11k-v1-f6 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_6" +montgomery-shenzhen-indian-tbx11k-v1-f7 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_7" +montgomery-shenzhen-indian-tbx11k-v1-f8 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_8" +montgomery-shenzhen-indian-tbx11k-v1-f9 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_9" +montgomery-shenzhen-indian-tbx11k-v2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_others_vs_atb" +montgomery-shenzhen-indian-tbx11k-v2-f0 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_0" +montgomery-shenzhen-indian-tbx11k-v2-f1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_1" +montgomery-shenzhen-indian-tbx11k-v2-f2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_2" +montgomery-shenzhen-indian-tbx11k-v2-f3 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_3" +montgomery-shenzhen-indian-tbx11k-v2-f4 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_4" +montgomery-shenzhen-indian-tbx11k-v2-f5 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_5" +montgomery-shenzhen-indian-tbx11k-v2-f6 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_6" +montgomery-shenzhen-indian-tbx11k-v2-f7 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_7" +montgomery-shenzhen-indian-tbx11k-v2-f8 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_8" +montgomery-shenzhen-indian-tbx11k-v2-f9 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_9" # tbpoc dataset (and cross-validation folds) tbpoc_f0 = "ptbench.data.tbpoc.fold_0" diff --git a/src/ptbench/data/mc_ch_in_11k/__init__.py b/src/ptbench/data/mc_ch_in_11k/__init__.py deleted file mode 100644 index 662d5c13..00000000 --- a/src/ptbench/data/mc_ch_in_11k/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later diff --git a/src/ptbench/data/mc_ch_in_11k/default.py b/src/ptbench/data/mc_ch_in_11k/default.py deleted file mode 100644 index 454521a7..00000000 --- a/src/ptbench/data/mc_ch_in_11k/default.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the 
default -TBX11K-simplified datasets.""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.default import datamodule as indian_datamodule -from ..montgomery.default import datamodule as mc_datamodule -from ..shenzhen.default import datamodule as ch_datamodule -from ..tbx11k_simplified.default import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_0.py b/src/ptbench/data/mc_ch_in_11k/fold_0.py deleted file mode 100644 index 1adce163..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_0.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 0)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_0 import datamodule as indian_datamodule -from ..montgomery.fold_0 import datamodule as mc_datamodule -from ..shenzhen.fold_0 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_0 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_1.py b/src/ptbench/data/mc_ch_in_11k/fold_1.py deleted file mode 100644 index 584a65c4..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_1.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 1)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_1 import datamodule as indian_datamodule -from ..montgomery.fold_1 import datamodule as mc_datamodule -from ..shenzhen.fold_1 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_1 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_2.py b/src/ptbench/data/mc_ch_in_11k/fold_2.py deleted file mode 100644 index 05c0234e..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_2.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 2)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_2 import datamodule as indian_datamodule -from ..montgomery.fold_2 import datamodule as mc_datamodule -from ..shenzhen.fold_2 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_2 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_3.py b/src/ptbench/data/mc_ch_in_11k/fold_3.py deleted file mode 100644 index 80a544be..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_3.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 3)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_3 import datamodule as indian_datamodule -from ..montgomery.fold_3 import datamodule as mc_datamodule -from ..shenzhen.fold_3 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_3 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_4.py b/src/ptbench/data/mc_ch_in_11k/fold_4.py deleted file mode 100644 index 5860e1dd..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_4.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 4)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_4 import datamodule as indian_datamodule -from ..montgomery.fold_4 import datamodule as mc_datamodule -from ..shenzhen.fold_4 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_4 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_5.py b/src/ptbench/data/mc_ch_in_11k/fold_5.py deleted file mode 100644 index 93fac65b..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_5.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 5)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_5 import datamodule as indian_datamodule -from ..montgomery.fold_5 import datamodule as mc_datamodule -from ..shenzhen.fold_5 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_5 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_6.py b/src/ptbench/data/mc_ch_in_11k/fold_6.py deleted file mode 100644 index 44a79a42..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_6.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 6)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_6 import datamodule as indian_datamodule -from ..montgomery.fold_6 import datamodule as mc_datamodule -from ..shenzhen.fold_6 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_6 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_7.py b/src/ptbench/data/mc_ch_in_11k/fold_7.py deleted file mode 100644 index d955a02e..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_7.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 7)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_7 import datamodule as indian_datamodule -from ..montgomery.fold_7 import datamodule as mc_datamodule -from ..shenzhen.fold_7 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_7 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_8.py b/src/ptbench/data/mc_ch_in_11k/fold_8.py deleted file mode 100644 index 77753c11..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_8.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 8)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_8 import datamodule as indian_datamodule -from ..montgomery.fold_8 import datamodule as mc_datamodule -from ..shenzhen.fold_8 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_8 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_9.py b/src/ptbench/data/mc_ch_in_11k/fold_9.py deleted file mode 100644 index 45e88b27..00000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_9.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 9)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_9 import datamodule as indian_datamodule -from ..montgomery.fold_9 import datamodule as mc_datamodule -from ..shenzhen.fold_9 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_9 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/__init__.py b/src/ptbench/data/mc_ch_in_11kv2/__init__.py deleted file mode 100644 index 662d5c13..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later diff --git a/src/ptbench/data/mc_ch_in_11kv2/default.py b/src/ptbench/data/mc_ch_in_11kv2/default.py deleted file mode 100644 index 1eea460b..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/default.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets.""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.default import datamodule as indian_datamodule -from ..montgomery.default import datamodule as mc_datamodule -from ..shenzhen.default import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.default import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py deleted file mode 100644 index 738164a7..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 0)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_0 import datamodule as indian_datamodule -from ..montgomery.fold_0 import datamodule as mc_datamodule -from ..shenzhen.fold_0 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_0 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py deleted file mode 100644 index bfc2dbfc..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 1)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_1 import datamodule as indian_datamodule -from ..montgomery.fold_1 import datamodule as mc_datamodule -from ..shenzhen.fold_1 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_1 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py deleted file mode 100644 index 06c7c1ab..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 2)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_2 import datamodule as indian_datamodule -from ..montgomery.fold_2 import datamodule as mc_datamodule -from ..shenzhen.fold_2 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_2 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py deleted file mode 100644 index 11309b1c..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 3)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_3 import datamodule as indian_datamodule -from ..montgomery.fold_3 import datamodule as mc_datamodule -from ..shenzhen.fold_3 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_3 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py deleted file mode 100644 index 0f53ed63..00000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 4)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_4 import datamodule as indian_datamodule
-from ..montgomery.fold_4 import datamodule as mc_datamodule
-from ..shenzhen.fold_4 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_4 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py
deleted file mode 100644
index df7dc0e8..00000000
--- a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5)"""
-
-from clapper.logging import setup
-from torch.utils.data.dataset import ConcatDataset
-
-from .. import return_subsets
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_5 import datamodule as indian_datamodule
-from ..montgomery.fold_5 import datamodule as mc_datamodule
-from ..shenzhen.fold_5 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_5 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
deleted file mode 100644
index ef246cd8..00000000
--- a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6)"""
-
-from clapper.logging import setup
-from torch.utils.data.dataset import ConcatDataset
-
-from .. import return_subsets
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_6 import datamodule as indian_datamodule
-from ..montgomery.fold_6 import datamodule as mc_datamodule
-from ..shenzhen.fold_6 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_6 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
deleted file mode 100644
index 41d3ba3e..00000000
--- a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7)"""
-
-from clapper.logging import setup
-from torch.utils.data.dataset import ConcatDataset
-
-from .. import return_subsets
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_7 import datamodule as indian_datamodule
-from ..montgomery.fold_7 import datamodule as mc_datamodule
-from ..shenzhen.fold_7 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_7 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
deleted file mode 100644
index a6842d47..00000000
--- a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8)"""
-
-from clapper.logging import setup
-from torch.utils.data.dataset import ConcatDataset
-
-from .. import return_subsets
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_8 import datamodule as indian_datamodule
-from ..montgomery.fold_8 import datamodule as mc_datamodule
-from ..shenzhen.fold_8 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_8 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
deleted file mode 100644
index ba69b788..00000000
--- a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9)"""
-
-from clapper.logging import setup
-from torch.utils.data.dataset import ConcatDataset
-
-from .. import return_subsets
-from ..base_datamodule import BaseDataModule, get_dataset_from_module
-from ..indian.fold_9 import datamodule as indian_datamodule
-from ..montgomery.fold_9 import datamodule as mc_datamodule
-from ..shenzhen.fold_9 import datamodule as ch_datamodule
-from ..tbx11k_simplified_v2.fold_9 import datamodule as tbx11kv2_datamodule
-
-logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
-
-
-class DefaultModule(BaseDataModule):
-    def __init__(
-        self,
-        train_batch_size=1,
-        predict_batch_size=1,
-        drop_incomplete_batch=False,
-        multiproc_kwargs=None,
-    ):
-        self.train_batch_size = train_batch_size
-        self.predict_batch_size = predict_batch_size
-        self.drop_incomplete_batch = drop_incomplete_batch
-        self.multiproc_kwargs = multiproc_kwargs
-
-        super().__init__(
-            train_batch_size=train_batch_size,
-            predict_batch_size=predict_batch_size,
-            drop_incomplete_batch=drop_incomplete_batch,
-            multiproc_kwargs=multiproc_kwargs,
-        )
-
-    def setup(self, stage: str):
-        # Instantiate other datamodules and get their datasets
-
-        module_args = {
-            "train_batch_size": self.train_batch_size,
-            "predict_batch_size": self.predict_batch_size,
-            "drop_incomplete_batch": self.drop_incomplete_batch,
-            "multiproc_kwargs": self.multiproc_kwargs,
-        }
-
-        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
-        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
-        indian = get_dataset_from_module(
-            indian_datamodule, stage, **module_args
-        )
-        tbx11kv2 = get_dataset_from_module(
-            tbx11kv2_datamodule, stage, **module_args
-        )
-
-        # Combine datasets
-        self.dataset = {}
-        self.dataset["__train__"] = ConcatDataset(
-            [
-                mc["__train__"],
-                ch["__train__"],
-                indian["__train__"],
-                tbx11kv2["__train__"],
-            ]
-        )
-        self.dataset["train"] = ConcatDataset(
-            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-        )
-        self.dataset["__valid__"] = ConcatDataset(
-            [
-                mc["__valid__"],
-                ch["__valid__"],
-                indian["__valid__"],
-                tbx11kv2["__valid__"],
-            ]
-        )
-        self.dataset["validation"] = ConcatDataset(
-            [
-                mc["validation"],
-                ch["validation"],
-                indian["validation"],
-                tbx11kv2["validation"],
-            ]
-        )
-        self.dataset["test"] = ConcatDataset(
-            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-        )
-
-        (
-            self.train_dataset,
-            self.validation_dataset,
-            self.extra_validation_datasets,
-            self.predict_dataset,
-        ) = return_subsets(self.dataset)
-
-
-datamodule = DefaultModule
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/__init__.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py
new file mode 100644
index 00000000..02599187
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py
@@ -0,0 +1,50 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from ..datamodule import ConcatDataModule
+from ..indian.datamodule import RawDataLoader as IndianLoader
+from ..indian.datamodule import make_split as make_indian_split
+from ..montgomery.datamodule import RawDataLoader as MontgomeryLoader
+from ..montgomery.datamodule import make_split as make_montgomery_split
+from ..shenzhen.datamodule import RawDataLoader as ShenzhenLoader
+from ..shenzhen.datamodule import make_split as make_shenzhen_split
+from ..tbx11k.datamodule import RawDataLoader as TBX11kLoader
+from ..tbx11k.datamodule import make_split as make_tbx11k_split
+
+
+class DataModule(ConcatDataModule):
+    """Aggregated datamodule composed of the Montgomery, Shenzhen, Indian and TBX11K datasets."""
+
+    def __init__(self, split_filename: str, tbx11k_split_filename: str):
+        montgomery_loader = MontgomeryLoader()
+        montgomery_split = make_montgomery_split(split_filename)
+        shenzhen_loader = ShenzhenLoader()
+        shenzhen_split = make_shenzhen_split(split_filename)
+        indian_loader = IndianLoader()
+        indian_split = make_indian_split(split_filename)
+        tbx11k_loader = TBX11kLoader()
+        tbx11k_split = make_tbx11k_split(tbx11k_split_filename)
+
+        super().__init__(
+            splits={
+                "train": [
+                    (montgomery_split["train"], montgomery_loader),
+                    (shenzhen_split["train"], shenzhen_loader),
+                    (indian_split["train"], indian_loader),
+                    (tbx11k_split["train"], tbx11k_loader),
+                ],
+                "validation": [
+                    (montgomery_split["validation"], montgomery_loader),
+                    (shenzhen_split["validation"], shenzhen_loader),
+                    (indian_split["validation"], indian_loader),
+                    (tbx11k_split["validation"], tbx11k_loader),
+                ],
+                "test": [
+                    (montgomery_split["test"], montgomery_loader),
+                    (shenzhen_split["test"], shenzhen_loader),
+                    (indian_split["test"], indian_loader),
+                    (tbx11k_split["test"], tbx11k_loader),
+                ],
+            }
+        )
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py
new file mode 100644
index 00000000..42620697
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-0.json", "v1-fold-0.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py
new file mode 100644
index 00000000..c7f11aad
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-1.json", "v1-fold-1.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py
new file mode 100644
index 00000000..8c94b1e9
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-2.json", "v1-fold-2.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py
new file mode 100644
index 00000000..c90704d1
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-3.json", "v1-fold-3.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py
new file mode 100644
index 00000000..5d243746
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-4.json", "v1-fold-4.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py
new file mode 100644
index 00000000..65aa7840
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-5.json", "v1-fold-5.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py
new file mode 100644
index 00000000..bc175ac2
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-6.json", "v1-fold-6.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py
new file mode 100644
index 00000000..3b7d7f71
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-7.json", "v1-fold-7.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py
new file mode 100644
index 00000000..20ac8e5c
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-8.json", "v1-fold-8.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py
new file mode 100644
index 00000000..413f141d
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-9.json", "v1-fold-9.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py
new file mode 100644
index 00000000..2f5e2226
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("default.json", "v1-healthy-vs-atb.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py
new file mode 100644
index 00000000..634aff84
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-0.json", "v2-fold-0.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py
new file mode 100644
index 00000000..10de943e
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-1.json", "v2-fold-1.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py
new file mode 100644
index 00000000..12062ae6
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-2.json", "v2-fold-2.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py
new file mode 100644
index 00000000..453c2b8c
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-3.json", "v2-fold-3.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py
new file mode 100644
index 00000000..d31da770
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-4.json", "v2-fold-4.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py
new file mode 100644
index 00000000..1c50c2e7
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-5.json", "v2-fold-5.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py
new file mode 100644
index 00000000..0f15a0fa
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-6.json", "v2-fold-6.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py
new file mode 100644
index 00000000..10829456
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-7.json", "v2-fold-7.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py
new file mode 100644
index 00000000..a90e1111
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-8.json", "v2-fold-8.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py
new file mode 100644
index 00000000..a6cb7d51
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-9.json", "v2-fold-9.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py
new file mode 100644
index 00000000..bef1efce
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("default.json", "v2-others-vs-atb.json")
--
GitLab
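
Usage note (not part of the patch): a minimal sketch of how the new
configuration modules are meant to be exercised. The constructor arguments
mirror the per-fold config files added above; the trailing comment about
Lightning-style hooks is an assumption based on ConcatDataModule's apparent
role, not something this patch defines.

    # Minimal sketch, assuming ptbench is installed and importable.
    # Each config module pairs a generic fold split (shared by the
    # Montgomery, Shenzhen and Indian datamodules) with a TBX11K-specific
    # split file (v1 or v2 variant).
    from ptbench.data.montgomery_shenzhen_indian_tbx11k.datamodule import (
        DataModule,
    )

    dm = DataModule("fold-0.json", "v1-fold-0.json")

    # Equivalent to importing the ready-made instance from a config module:
    from ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_0 import (
        datamodule,
    )

    # Assumption: ConcatDataModule follows the LightningDataModule protocol,
    # so a trainer would drive it through hooks such as setup() and
    # train_dataloader().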