From 9b6d73867318e40ef634e86074adda40c25dcdf7 Mon Sep 17 00:00:00 2001 From: Andre Anjos <andre.dos.anjos@gmail.com> Date: Fri, 28 Jul 2023 08:31:08 +0200 Subject: [PATCH] [montgomery_shenzhen_indian] Refactor aggregated dataset --- pyproject.toml | 42 +++++----- src/ptbench/data/mc_ch_in/__init__.py | 3 - src/ptbench/data/mc_ch_in/default.py | 81 ------------------ src/ptbench/data/mc_ch_in/fold_0.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_1.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_2.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_3.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_4.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_5.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_6.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_7.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_8.py | 82 ------------------- src/ptbench/data/mc_ch_in/fold_9.py | 82 ------------------- .../montgomery_shenzhen_indian/__init__.py | 0 .../montgomery_shenzhen_indian/datamodule.py | 43 ++++++++++ .../montgomery_shenzhen_indian/default.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_0.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_1.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_2.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_3.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_4.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_5.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_6.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_7.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_8.py | 7 ++ .../data/montgomery_shenzhen_indian/fold_9.py | 7 ++ 26 files changed, 141 insertions(+), 925 deletions(-) delete mode 100644 src/ptbench/data/mc_ch_in/__init__.py delete mode 100644 src/ptbench/data/mc_ch_in/default.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_0.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_1.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_2.py 
delete mode 100644 src/ptbench/data/mc_ch_in/fold_3.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_4.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_5.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_6.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_7.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_8.py delete mode 100644 src/ptbench/data/mc_ch_in/fold_9.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/__init__.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/datamodule.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/default.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_0.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_1.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_2.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_3.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_4.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_5.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_6.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_7.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_8.py create mode 100644 src/ptbench/data/montgomery_shenzhen_indian/fold_9.py diff --git a/pyproject.toml b/pyproject.toml index a4c23c48..0b98f409 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -145,29 +145,29 @@ tbx11k_simplified_v2_f9 = "ptbench.data.tbx11k_simplified_v2.fold_9" # montgomery-shenzhen aggregated dataset montgomery-shenzhen = "ptbench.data.montgomery_shenzhen.default" -montgomery-shenzhen_f0 = "ptbench.data.montgomery_shenzhen.fold_0" -montgomery-shenzhen_f1 = "ptbench.data.montgomery_shenzhen.fold_1" -montgomery-shenzhen_f2 = "ptbench.data.montgomery_shenzhen.fold_2" -montgomery-shenzhen_f3 = "ptbench.data.montgomery_shenzhen.fold_3" -montgomery-shenzhen_f4 = "ptbench.data.montgomery_shenzhen.fold_4" 
-montgomery-shenzhen_f5 = "ptbench.data.montgomery_shenzhen.fold_5" -montgomery-shenzhen_f6 = "ptbench.data.montgomery_shenzhen.fold_6" -montgomery-shenzhen_f7 = "ptbench.data.montgomery_shenzhen.fold_7" -montgomery-shenzhen_f8 = "ptbench.data.montgomery_shenzhen.fold_8" -montgomery-shenzhen_f9 = "ptbench.data.montgomery_shenzhen.fold_9" +montgomery-shenzhen-f0 = "ptbench.data.montgomery_shenzhen.fold_0" +montgomery-shenzhen-f1 = "ptbench.data.montgomery_shenzhen.fold_1" +montgomery-shenzhen-f2 = "ptbench.data.montgomery_shenzhen.fold_2" +montgomery-shenzhen-f3 = "ptbench.data.montgomery_shenzhen.fold_3" +montgomery-shenzhen-f4 = "ptbench.data.montgomery_shenzhen.fold_4" +montgomery-shenzhen-f5 = "ptbench.data.montgomery_shenzhen.fold_5" +montgomery-shenzhen-f6 = "ptbench.data.montgomery_shenzhen.fold_6" +montgomery-shenzhen-f7 = "ptbench.data.montgomery_shenzhen.fold_7" +montgomery-shenzhen-f8 = "ptbench.data.montgomery_shenzhen.fold_8" +montgomery-shenzhen-f9 = "ptbench.data.montgomery_shenzhen.fold_9" # montgomery-shenzhen-indian aggregated dataset -mc_ch_in = "ptbench.data.mc_ch_in.default" -mc_ch_in_f0 = "ptbench.data.mc_ch_in.fold_0" -mc_ch_in_f1 = "ptbench.data.mc_ch_in.fold_1" -mc_ch_in_f2 = "ptbench.data.mc_ch_in.fold_2" -mc_ch_in_f3 = "ptbench.data.mc_ch_in.fold_3" -mc_ch_in_f4 = "ptbench.data.mc_ch_in.fold_4" -mc_ch_in_f5 = "ptbench.data.mc_ch_in.fold_5" -mc_ch_in_f6 = "ptbench.data.mc_ch_in.fold_6" -mc_ch_in_f7 = "ptbench.data.mc_ch_in.fold_7" -mc_ch_in_f8 = "ptbench.data.mc_ch_in.fold_8" -mc_ch_in_f9 = "ptbench.data.mc_ch_in.fold_9" +montgomery-shenzhen-indian = "ptbench.data.montgomery_shenzhen_indian.default" +montgomery-shenzhen-indian-f0 = "ptbench.data.montgomery_shenzhen_indian.fold_0" +montgomery-shenzhen-indian-f1 = "ptbench.data.montgomery_shenzhen_indian.fold_1" +montgomery-shenzhen-indian-f2 = "ptbench.data.montgomery_shenzhen_indian.fold_2" +montgomery-shenzhen-indian-f3 = "ptbench.data.montgomery_shenzhen_indian.fold_3" 
+montgomery-shenzhen-indian-f4 = "ptbench.data.montgomery_shenzhen_indian.fold_4" +montgomery-shenzhen-indian-f5 = "ptbench.data.montgomery_shenzhen_indian.fold_5" +montgomery-shenzhen-indian-f6 = "ptbench.data.montgomery_shenzhen_indian.fold_6" +montgomery-shenzhen-indian-f7 = "ptbench.data.montgomery_shenzhen_indian.fold_7" +montgomery-shenzhen-indian-f8 = "ptbench.data.montgomery_shenzhen_indian.fold_8" +montgomery-shenzhen-indian-f9 = "ptbench.data.montgomery_shenzhen_indian.fold_9" # montgomery-shenzhen-indian-tbx11k aggregated dataset mc_ch_in_11k = "ptbench.data.mc_ch_in_11k.default" diff --git a/src/ptbench/data/mc_ch_in/__init__.py b/src/ptbench/data/mc_ch_in/__init__.py deleted file mode 100644 index 662d5c13..00000000 --- a/src/ptbench/data/mc_ch_in/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later diff --git a/src/ptbench/data/mc_ch_in/default.py b/src/ptbench/data/mc_ch_in/default.py deleted file mode 100644 index 485173aa..00000000 --- a/src/ptbench/data/mc_ch_in/default.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets.""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.default import datamodule as indian_datamodule -from ..montgomery.default import datamodule as mc_datamodule -from ..shenzhen.default import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_0.py b/src/ptbench/data/mc_ch_in/fold_0.py deleted file mode 100644 index db749a38..00000000 --- a/src/ptbench/data/mc_ch_in/fold_0.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 0)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_0 import datamodule as indian_datamodule -from ..montgomery.fold_0 import datamodule as mc_datamodule -from ..shenzhen.fold_0 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = 
get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_1.py b/src/ptbench/data/mc_ch_in/fold_1.py deleted file mode 100644 index 033f5bc5..00000000 --- a/src/ptbench/data/mc_ch_in/fold_1.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 1)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_1 import datamodule as indian_datamodule -from ..montgomery.fold_1 import datamodule as mc_datamodule -from ..shenzhen.fold_1 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_2.py b/src/ptbench/data/mc_ch_in/fold_2.py deleted file mode 100644 index bcd7654c..00000000 --- a/src/ptbench/data/mc_ch_in/fold_2.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 2)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_2 import datamodule as indian_datamodule -from ..montgomery.fold_2 import datamodule as mc_datamodule -from ..shenzhen.fold_2 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = 
get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_3.py b/src/ptbench/data/mc_ch_in/fold_3.py deleted file mode 100644 index e09e6080..00000000 --- a/src/ptbench/data/mc_ch_in/fold_3.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 3)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_3 import datamodule as indian_datamodule -from ..montgomery.fold_3 import datamodule as mc_datamodule -from ..shenzhen.fold_3 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_4.py b/src/ptbench/data/mc_ch_in/fold_4.py deleted file mode 100644 index cd472c6b..00000000 --- a/src/ptbench/data/mc_ch_in/fold_4.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 4)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_4 import datamodule as indian_datamodule -from ..montgomery.fold_4 import datamodule as mc_datamodule -from ..shenzhen.fold_4 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = 
get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_5.py b/src/ptbench/data/mc_ch_in/fold_5.py deleted file mode 100644 index 9afdb1f9..00000000 --- a/src/ptbench/data/mc_ch_in/fold_5.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 5)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_5 import datamodule as indian_datamodule -from ..montgomery.fold_5 import datamodule as mc_datamodule -from ..shenzhen.fold_5 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_6.py b/src/ptbench/data/mc_ch_in/fold_6.py deleted file mode 100644 index 9246629e..00000000 --- a/src/ptbench/data/mc_ch_in/fold_6.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 6)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_6 import datamodule as indian_datamodule -from ..montgomery.fold_6 import datamodule as mc_datamodule -from ..shenzhen.fold_6 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = 
get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_7.py b/src/ptbench/data/mc_ch_in/fold_7.py deleted file mode 100644 index 7f628c9b..00000000 --- a/src/ptbench/data/mc_ch_in/fold_7.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 7)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_7 import datamodule as indian_datamodule -from ..montgomery.fold_7 import datamodule as mc_datamodule -from ..shenzhen.fold_7 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_8.py b/src/ptbench/data/mc_ch_in/fold_8.py deleted file mode 100644 index 5bc985cb..00000000 --- a/src/ptbench/data/mc_ch_in/fold_8.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 8)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_8 import datamodule as indian_datamodule -from ..montgomery.fold_8 import datamodule as mc_datamodule -from ..shenzhen.fold_8 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = 
get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in/fold_9.py b/src/ptbench/data/mc_ch_in/fold_9.py deleted file mode 100644 index 641fdc67..00000000 --- a/src/ptbench/data/mc_ch_in/fold_9.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> -# -# SPDX-License-Identifier: GPL-3.0-or-later - -"""Aggregated dataset composed of Montgomery, Shenzhen and Indian datasets -(cross validation fold 9)""" - -from clapper.logging import setup -from torch.utils.data.dataset import ConcatDataset - -from .. 
import return_subsets -from ..base_datamodule import BaseDataModule, get_dataset_from_module -from ..indian.fold_9 import datamodule as indian_datamodule -from ..montgomery.fold_9 import datamodule as mc_datamodule -from ..shenzhen.fold_9 import datamodule as ch_datamodule - -logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") - - -class DefaultModule(BaseDataModule): - def __init__( - self, - train_batch_size=1, - predict_batch_size=1, - drop_incomplete_batch=False, - multiproc_kwargs=None, - ): - self.train_batch_size = train_batch_size - self.predict_batch_size = predict_batch_size - self.drop_incomplete_batch = drop_incomplete_batch - self.multiproc_kwargs = multiproc_kwargs - - super().__init__( - train_batch_size=train_batch_size, - predict_batch_size=predict_batch_size, - drop_incomplete_batch=drop_incomplete_batch, - multiproc_kwargs=multiproc_kwargs, - ) - - def setup(self, stage: str): - # Instantiate other datamodules and get their datasets - - module_args = { - "train_batch_size": self.train_batch_size, - "predict_batch_size": self.predict_batch_size, - "drop_incomplete_batch": self.drop_incomplete_batch, - "multiproc_kwargs": self.multiproc_kwargs, - } - - mc = get_dataset_from_module(mc_datamodule, stage, **module_args) - ch = get_dataset_from_module(ch_datamodule, stage, **module_args) - indian = get_dataset_from_module( - indian_datamodule, stage, **module_args - ) - - # Combine datasets - self.dataset = {} - self.dataset["__train__"] = ConcatDataset( - [mc["__train__"], ch["__train__"], indian["__train__"]] - ) - self.dataset["train"] = ConcatDataset( - [mc["train"], ch["train"], indian["train"]] - ) - self.dataset["__valid__"] = ConcatDataset( - [mc["__valid__"], ch["__valid__"], indian["__valid__"]] - ) - self.dataset["validation"] = ConcatDataset( - [mc["validation"], ch["validation"], indian["validation"]] - ) - self.dataset["test"] = ConcatDataset( - [mc["test"], ch["test"], indian["test"]] - ) - - ( - 
self.train_dataset, - self.validation_dataset, - self.extra_validation_datasets, - self.predict_dataset, - ) = return_subsets(self.dataset) - - -datamodule = DefaultModule diff --git a/src/ptbench/data/montgomery_shenzhen_indian/__init__.py b/src/ptbench/data/montgomery_shenzhen_indian/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/ptbench/data/montgomery_shenzhen_indian/datamodule.py b/src/ptbench/data/montgomery_shenzhen_indian/datamodule.py new file mode 100644 index 00000000..ea2d2a4f --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/datamodule.py @@ -0,0 +1,43 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from ..datamodule import ConcatDataModule +from ..indian.datamodule import RawDataLoader as IndianLoader +from ..indian.datamodule import make_split as make_indian_split +from ..montgomery.datamodule import RawDataLoader as MontgomeryLoader +from ..montgomery.datamodule import make_split as make_montgomery_split +from ..shenzhen.datamodule import RawDataLoader as ShenzhenLoader +from ..shenzhen.datamodule import make_split as make_shenzhen_split + + +class DataModule(ConcatDataModule): + """Aggregated datamodule composed of Montgomery, Shenzhen and Indian datasets.""" + + def __init__(self, split_filename: str): + montgomery_loader = MontgomeryLoader() + montgomery_split = make_montgomery_split(split_filename) + shenzhen_loader = ShenzhenLoader() + shenzhen_split = make_shenzhen_split(split_filename) + indian_loader = IndianLoader() + indian_split = make_indian_split(split_filename) + + super().__init__( + splits={ + "train": [ + (montgomery_split["train"], montgomery_loader), + (shenzhen_split["train"], shenzhen_loader), + (indian_split["train"], indian_loader), + ], + "validation": [ + (montgomery_split["validation"], montgomery_loader), + (shenzhen_split["validation"], shenzhen_loader), + (indian_split["validation"], indian_loader), + ], + "test": [ 
(montgomery_split["test"], montgomery_loader), + (shenzhen_split["test"], shenzhen_loader), + (indian_split["test"], indian_loader), + ], + } + ) diff --git a/src/ptbench/data/montgomery_shenzhen_indian/default.py b/src/ptbench/data/montgomery_shenzhen_indian/default.py new file mode 100644 index 00000000..2b8a8fb2 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/default.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("default.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_0.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_0.py new file mode 100644 index 00000000..3d114d07 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_0.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_0.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_1.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_1.py new file mode 100644 index 00000000..cd3a8cb6 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_1.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_1.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_2.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_2.py new file mode 100644 index 00000000..44eeda80 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_2.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import 
DataModule + +datamodule = DataModule("fold_2.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_3.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_3.py new file mode 100644 index 00000000..f24fb314 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_3.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_3.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_4.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_4.py new file mode 100644 index 00000000..58456d38 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_4.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_4.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_5.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_5.py new file mode 100644 index 00000000..92796746 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_5.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_5.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_6.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_6.py new file mode 100644 index 00000000..9566b7cf --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_6.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_6.json") diff --git 
a/src/ptbench/data/montgomery_shenzhen_indian/fold_7.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_7.py new file mode 100644 index 00000000..25cbfe1b --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_7.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_7.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_8.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_8.py new file mode 100644 index 00000000..fb5332ce --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_8.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_8.json") diff --git a/src/ptbench/data/montgomery_shenzhen_indian/fold_9.py b/src/ptbench/data/montgomery_shenzhen_indian/fold_9.py new file mode 100644 index 00000000..d1626586 --- /dev/null +++ b/src/ptbench/data/montgomery_shenzhen_indian/fold_9.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +from .datamodule import DataModule + +datamodule = DataModule("fold_9.json") -- GitLab