diff --git a/pyproject.toml b/pyproject.toml
index 6549e5c273ef10f2510c235a0f311fc2180a53bb..d76e39795609ea0c8e2aa2a8f2186655288601d6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -170,30 +170,28 @@ montgomery-shenzhen-indian-f8 = "ptbench.data.montgomery_shenzhen_indian.fold_8"
 montgomery-shenzhen-indian-f9 = "ptbench.data.montgomery_shenzhen_indian.fold_9"

 # montgomery-shenzhen-indian-tbx11k aggregated dataset
-mc_ch_in_11k = "ptbench.data.mc_ch_in_11k.default"
-mc_ch_in_11k_f0 = "ptbench.data.mc_ch_in_11k.fold_0"
-mc_ch_in_11k_f1 = "ptbench.data.mc_ch_in_11k.fold_1"
-mc_ch_in_11k_f2 = "ptbench.data.mc_ch_in_11k.fold_2"
-mc_ch_in_11k_f3 = "ptbench.data.mc_ch_in_11k.fold_3"
-mc_ch_in_11k_f4 = "ptbench.data.mc_ch_in_11k.fold_4"
-mc_ch_in_11k_f5 = "ptbench.data.mc_ch_in_11k.fold_5"
-mc_ch_in_11k_f6 = "ptbench.data.mc_ch_in_11k.fold_6"
-mc_ch_in_11k_f7 = "ptbench.data.mc_ch_in_11k.fold_7"
-mc_ch_in_11k_f8 = "ptbench.data.mc_ch_in_11k.fold_8"
-mc_ch_in_11k_f9 = "ptbench.data.mc_ch_in_11k.fold_9"
-
-# montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
-mc_ch_in_11kv2 = "ptbench.data.mc_ch_in_11kv2.default"
-mc_ch_in_11kv2_f0 = "ptbench.data.mc_ch_in_11kv2.fold_0"
-mc_ch_in_11kv2_f1 = "ptbench.data.mc_ch_in_11kv2.fold_1"
-mc_ch_in_11kv2_f2 = "ptbench.data.mc_ch_in_11kv2.fold_2"
-mc_ch_in_11kv2_f3 = "ptbench.data.mc_ch_in_11kv2.fold_3"
-mc_ch_in_11kv2_f4 = "ptbench.data.mc_ch_in_11kv2.fold_4"
-mc_ch_in_11kv2_f5 = "ptbench.data.mc_ch_in_11kv2.fold_5"
-mc_ch_in_11kv2_f6 = "ptbench.data.mc_ch_in_11kv2.fold_6"
-mc_ch_in_11kv2_f7 = "ptbench.data.mc_ch_in_11kv2.fold_7"
-mc_ch_in_11kv2_f8 = "ptbench.data.mc_ch_in_11kv2.fold_8"
-mc_ch_in_11kv2_f9 = "ptbench.data.mc_ch_in_11kv2.fold_9"
+montgomery-shenzhen-indian-tbx11k-v1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_healthy_vs_atb"
+montgomery-shenzhen-indian-tbx11k-v1-f0 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_0"
+montgomery-shenzhen-indian-tbx11k-v1-f1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_1"
+montgomery-shenzhen-indian-tbx11k-v1-f2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_2"
+montgomery-shenzhen-indian-tbx11k-v1-f3 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_3"
+montgomery-shenzhen-indian-tbx11k-v1-f4 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_4"
+montgomery-shenzhen-indian-tbx11k-v1-f5 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_5"
+montgomery-shenzhen-indian-tbx11k-v1-f6 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_6"
+montgomery-shenzhen-indian-tbx11k-v1-f7 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_7"
+montgomery-shenzhen-indian-tbx11k-v1-f8 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_8"
+montgomery-shenzhen-indian-tbx11k-v1-f9 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_9"
+montgomery-shenzhen-indian-tbx11k-v2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_others_vs_atb"
+montgomery-shenzhen-indian-tbx11k-v2-f0 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_0"
+montgomery-shenzhen-indian-tbx11k-v2-f1 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_1"
+montgomery-shenzhen-indian-tbx11k-v2-f2 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_2"
+montgomery-shenzhen-indian-tbx11k-v2-f3 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_3"
+montgomery-shenzhen-indian-tbx11k-v2-f4 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_4"
+montgomery-shenzhen-indian-tbx11k-v2-f5 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_5"
+montgomery-shenzhen-indian-tbx11k-v2-f6 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_6" +montgomery-shenzhen-indian-tbx11k-v2-f7 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_7" +montgomery-shenzhen-indian-tbx11k-v2-f8 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_8" +montgomery-shenzhen-indian-tbx11k-v2-f9 = "ptbench.data.montgomery_shenzhen_indian_tbx11k.v2_fold_9" # tbpoc dataset (and cross-validation folds) tbpoc_f0 = "ptbench.data.tbpoc.fold_0" diff --git a/src/ptbench/data/mc_ch_in_11k/__init__.py b/src/ptbench/data/mc_ch_in_11k/__init__.py deleted file mode 100644 index 662d5c1326651b4d9f48d47bc4b503df23d17216..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later diff --git a/src/ptbench/data/mc_ch_in_11k/default.py b/src/ptbench/data/mc_ch_in_11k/default.py deleted file mode 100644 index 454521a7ebaaf1072fa5f2ffaaacb4775ff9a9ea..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/default.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets.""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.default import datamodule as indian_datamodule -from ..montgomery.default import datamodule as mc_datamodule -from ..shenzhen.default import datamodule as ch_datamodule -from ..tbx11k_simplified.default import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - 
self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_0.py b/src/ptbench/data/mc_ch_in_11k/fold_0.py deleted file mode 100644 index 1adce163d7a4f734924a7a237666d75a116c3039..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_0.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 0)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_0 import datamodule as indian_datamodule -from ..montgomery.fold_0 import datamodule as mc_datamodule -from ..shenzhen.fold_0 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_0 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git 
a/src/ptbench/data/mc_ch_in_11k/fold_1.py b/src/ptbench/data/mc_ch_in_11k/fold_1.py deleted file mode 100644 index 584a65c4b1a6579f666784a16d83f2c69d1ab079..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_1.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 1)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_1 import datamodule as indian_datamodule -from ..montgomery.fold_1 import datamodule as mc_datamodule -from ..shenzhen.fold_1 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_1 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_2.py b/src/ptbench/data/mc_ch_in_11k/fold_2.py deleted file mode 100644 index 05c0234e93c54bd0b3706c2e526895d884ba5386..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_2.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default 
-TBX11K-simplified datasets (cross validation fold 2)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_2 import datamodule as indian_datamodule -from ..montgomery.fold_2 import datamodule as mc_datamodule -from ..shenzhen.fold_2 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_2 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_3.py b/src/ptbench/data/mc_ch_in_11k/fold_3.py deleted file mode 100644 index 80a544be0507f15055e5eccf5d000e514b069352..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_3.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 3)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_3 import datamodule as indian_datamodule -from ..montgomery.fold_3 import datamodule as mc_datamodule -from ..shenzhen.fold_3 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_3 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_4.py b/src/ptbench/data/mc_ch_in_11k/fold_4.py deleted file mode 100644 index 5860e1ddbb886116f2d22fece689bc587651c6fa..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_4.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 4)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_4 import datamodule as indian_datamodule -from ..montgomery.fold_4 import datamodule as mc_datamodule -from ..shenzhen.fold_4 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_4 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_5.py b/src/ptbench/data/mc_ch_in_11k/fold_5.py deleted file mode 100644 index 93fac65bb9b2f261f7d8abffea32d91b81711555..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_5.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 5)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_5 import datamodule as indian_datamodule -from ..montgomery.fold_5 import datamodule as mc_datamodule -from ..shenzhen.fold_5 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_5 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_6.py b/src/ptbench/data/mc_ch_in_11k/fold_6.py deleted file mode 100644 index 44a79a42d619b26907bb44c85339d4aa5cec9bf6..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_6.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 6)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_6 import datamodule as indian_datamodule -from ..montgomery.fold_6 import datamodule as mc_datamodule -from ..shenzhen.fold_6 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_6 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_7.py b/src/ptbench/data/mc_ch_in_11k/fold_7.py deleted file mode 100644 index d955a02e610af58d58a9380c11a60eddfcae7571..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_7.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 7)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_7 import datamodule as indian_datamodule -from ..montgomery.fold_7 import datamodule as mc_datamodule -from ..shenzhen.fold_7 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_7 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_8.py b/src/ptbench/data/mc_ch_in_11k/fold_8.py deleted file mode 100644 index 77753c1114c47335982bfb0904922d5d55394c8a..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_8.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 8)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_8 import datamodule as indian_datamodule -from ..montgomery.fold_8 import datamodule as mc_datamodule -from ..shenzhen.fold_8 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_8 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11k/fold_9.py b/src/ptbench/data/mc_ch_in_11k/fold_9.py deleted file mode 100644 index 45e88b2780acaf30fbdd524f9df6c0736924e4d7..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11k/fold_9.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 9)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_9 import datamodule as indian_datamodule -from ..montgomery.fold_9 import datamodule as mc_datamodule -from ..shenzhen.fold_9 import datamodule as ch_datamodule -from ..tbx11k_simplified.fold_9 import datamodule as tbx11k_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11k = get_dataset_from_module( - tbx11k_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11k["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11k["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11k["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11k["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11k["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/__init__.py b/src/ptbench/data/mc_ch_in_11kv2/__init__.py deleted file mode 100644 index 662d5c1326651b4d9f48d47bc4b503df23d17216..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later diff --git a/src/ptbench/data/mc_ch_in_11kv2/default.py b/src/ptbench/data/mc_ch_in_11kv2/default.py deleted file mode 100644 index 1eea460b5503e79c755d52d2e5f08fd28d7ed3f7..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/default.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets.""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.default import datamodule as indian_datamodule -from ..montgomery.default import datamodule as mc_datamodule -from ..shenzhen.default import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.default import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py deleted file mode 100644 index 738164a78835fa7bed8add6a9e5e0fe101abad52..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 0)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_0 import datamodule as indian_datamodule -from ..montgomery.fold_0 import datamodule as mc_datamodule -from ..shenzhen.fold_0 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_0 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py deleted file mode 100644 index bfc2dbfc53a33325549f4cb41513201a005bb1f9..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 1)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_1 import datamodule as indian_datamodule -from ..montgomery.fold_1 import datamodule as mc_datamodule -from ..shenzhen.fold_1 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_1 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py deleted file mode 100644 index 06c7c1ab9b9ca28a6cdbf5e95108a669349a32a6..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 2)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_2 import datamodule as indian_datamodule -from ..montgomery.fold_2 import datamodule as mc_datamodule -from ..shenzhen.fold_2 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_2 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py deleted file mode 100644 index 11309b1c94eb182026882f934835ed9d62917b27..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 3)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_3 import datamodule as indian_datamodule -from ..montgomery.fold_3 import datamodule as mc_datamodule -from ..shenzhen.fold_3 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_3 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py deleted file mode 100644 index 0f53ed635e2690776ca704bc50f812cc09c33256..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 4)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_4 import datamodule as indian_datamodule -from ..montgomery.fold_4 import datamodule as mc_datamodule -from ..shenzhen.fold_4 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_4 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py deleted file mode 100644 index df7dc0e8fe8fd9a6ce122a20161afdb927aa62eb..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 5)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_5 import datamodule as indian_datamodule -from ..montgomery.fold_5 import datamodule as mc_datamodule -from ..shenzhen.fold_5 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_5 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py deleted file mode 100644 index ef246cd82afe20f65d1d214646155fe9416be479..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 6)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_6 import datamodule as indian_datamodule -from ..montgomery.fold_6 import datamodule as mc_datamodule -from ..shenzhen.fold_6 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_6 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py deleted file mode 100644 index 41d3ba3ebf059320e36195d85447fe57bf1dffbb..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 7)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_7 import datamodule as indian_datamodule -from ..montgomery.fold_7 import datamodule as mc_datamodule -from ..shenzhen.fold_7 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_7 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py deleted file mode 100644 index a6842d47c932586d6910e05df039921fc82c5843..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 8)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_8 import datamodule as indian_datamodule -from ..montgomery.fold_8 import datamodule as mc_datamodule -from ..shenzhen.fold_8 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_8 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py deleted file mode 100644 index ba69b78892058b141a761e6cb4070683a0098fd8..0000000000000000000000000000000000000000 --- a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default -TBX11K-simplified datasets (cross validation fold 9)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_9 import datamodule as indian_datamodule -from ..montgomery.fold_9 import datamodule as mc_datamodule -from ..shenzhen.fold_9 import datamodule as ch_datamodule -from ..tbx11k_simplified_v2.fold_9 import datamodule as tbx11kv2_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - tbx11kv2 = get_dataset_from_module( - tbx11kv2_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [ - mc["__train__"], - ch["__train__"], - indian["__train__"], - tbx11kv2["__train__"], - ] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [ - mc["__valid__"], - ch["__valid__"], - indian["__valid__"], - tbx11kv2["__valid__"], - ] - ) - self.dataset["validation"] = ConcatDataset( - [ - mc["validation"], - ch["validation"], - indian["validation"], - tbx11kv2["validation"], - ] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/__init__.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py new file mode 100644 index 0000000000000000000000000000000000000000..025991879e9a9937dd1635620700d2b7bba93db4 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/datamodule.py @@ -0,0 +1,50 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from ..datamodule import ConcatDataModule +from ..indian.datamodule import RawDataLoader as IndianLoader +from ..indian.datamodule import make_split as make_indian_split +from ..montgomery.datamodule import RawDataLoader as MontgomeryLoader +from ..montgomery.datamodule import make_split as make_montgomery_split +from ..shenzhen.datamodule import RawDataLoader as 
ShenzhenLoader
+from ..shenzhen.datamodule import make_split as make_shenzhen_split
+from ..tbx11k.datamodule import RawDataLoader as TBX11kLoader
+from ..tbx11k.datamodule import make_split as make_tbx11k_split
+
+
+class DataModule(ConcatDataModule):
+    """Aggregated datamodule composed of Montgomery, Shenzhen, Indian and TBX11K datasets."""
+
+    def __init__(self, split_filename: str, tbx11k_split_filename: str):
+        montgomery_loader = MontgomeryLoader()
+        montgomery_split = make_montgomery_split(split_filename)
+        shenzhen_loader = ShenzhenLoader()
+        shenzhen_split = make_shenzhen_split(split_filename)
+        indian_loader = IndianLoader()
+        indian_split = make_indian_split(split_filename)
+        tbx11k_loader = TBX11kLoader()
+        tbx11k_split = make_tbx11k_split(tbx11k_split_filename)
+
+        super().__init__(
+            splits={
+                "train": [
+                    (montgomery_split["train"], montgomery_loader),
+                    (shenzhen_split["train"], shenzhen_loader),
+                    (indian_split["train"], indian_loader),
+                    (tbx11k_split["train"], tbx11k_loader),
+                ],
+                "validation": [
+                    (montgomery_split["validation"], montgomery_loader),
+                    (shenzhen_split["validation"], shenzhen_loader),
+                    (indian_split["validation"], indian_loader),
+                    (tbx11k_split["validation"], tbx11k_loader),
+                ],
+                "test": [
+                    (montgomery_split["test"], montgomery_loader),
+                    (shenzhen_split["test"], shenzhen_loader),
+                    (indian_split["test"], indian_loader),
+                    (tbx11k_split["test"], tbx11k_loader),
+                ],
+            }
+        )
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py
new file mode 100644
index 0000000000000000000000000000000000000000..426206972c50fbc067f31e9acfede8ada5022fec
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_0.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-0.json", "v1-fold-0.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7f11aad9fe142fc7f08415e7bf6ff441ebbd354
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_1.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-1.json", "v1-fold-1.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c94b1e9f9fe8469912283f0bdec42334d33b5ec
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_2.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-2.json", "v1-fold-2.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..c90704d139a635582bd03fa66660d2bd62b2c485
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_3.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-3.json", "v1-fold-3.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d24374631e7b0c1df1dfce0f3fec41e8854b4b1
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_4.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-4.json", "v1-fold-4.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py
new file mode 100644
index 0000000000000000000000000000000000000000..65aa784052c5597c62f8269f2fbe2676f15492da
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_5.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-5.json", "v1-fold-5.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc175ac215411addb6f4db1ef2bb767272aa8d59
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_6.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-6.json", "v1-fold-6.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7d7f7163e255e3a69d70b8ee073294067858f2
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_7.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-7.json", "v1-fold-7.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ac8e5cbd6a79be099acb453ca068f3bb349bec
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_8.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-8.json", "v1-fold-8.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py
new file mode 100644
index 0000000000000000000000000000000000000000..413f141dbccc5b7a6d9ddc48355989e4ab2820dd
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_fold_9.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-9.json", "v1-fold-9.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f5e2226e341a5af0395402ed707155844ab46c9
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v1_healthy_vs_atb.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("default.json", "v1-healthy-vs-atb.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py
new file mode 100644
index 0000000000000000000000000000000000000000..634aff84a04ec2841866f4bee5af534ee0bb6d30
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_0.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-0.json", "v2-fold-0.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..10de943e18e794b3c947db09312ebe9b5e9e9f53
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_1.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-1.json", "v2-fold-1.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..12062ae6cb9d1c45c5b55d99d956be0935424d72
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_2.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-2.json", "v2-fold-2.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..453c2b8cd407e49f3037f62e0b15732b26dedd4a
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_3.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-3.json", "v2-fold-3.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py
new file mode 100644
index 0000000000000000000000000000000000000000..d31da7702dfac53225a34a370bcf09618da3679f
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_4.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-4.json", "v2-fold-4.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c50c2e75cfe1c9b22abb1e99c424c3d19d4320f
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_5.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-5.json", "v2-fold-5.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f15a0fa965252cf9f9965288dc46892712c2ec5
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_6.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-6.json", "v2-fold-6.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py
new file mode 100644
index 0000000000000000000000000000000000000000..1082945619d45251743bc50d055d327cac7304d5
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_7.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-7.json", "v2-fold-7.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py
new file mode 100644
index 0000000000000000000000000000000000000000..a90e1111b25bab6ee14990bfa98de18fd945c422
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_8.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-8.json", "v2-fold-8.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6cb7d51a9aec209f5665f1ea9947ad1c02793d9
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_fold_9.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("fold-9.json", "v2-fold-9.json")
diff --git a/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py
new file mode 100644
index 0000000000000000000000000000000000000000..bef1efcedd59570380048fbd1e0f9d458cd7d88d
--- /dev/null
+++ b/src/ptbench/data/montgomery_shenzhen_indian_tbx11k/v2_others_vs_atb.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from .datamodule import DataModule
+
+datamodule = DataModule("default.json", "v2-others-vs-atb.json")
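
Usage note: each configuration module added above exposes a ready-built `datamodule` object (an instance of the `ConcatDataModule`-based `DataModule` defined in `datamodule.py`), so a specific aggregated split can be imported directly. A minimal sketch, assuming this change is installed; the surrounding training code is out of scope here:

    # illustrative only -- picks one of the new aggregated configurations
    from ptbench.data.montgomery_shenzhen_indian_tbx11k.v1_fold_0 import datamodule

    # `datamodule` combines the Montgomery, Shenzhen and Indian "fold-0.json"
    # splits with the TBX11K "v1-fold-0.json" split, as wired above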