diff --git a/pyproject.toml b/pyproject.toml
index 7c9f19488a8aa34e72d1faf2be868a255710654c..e6208923a93d82f69e13230ac9e4582474f0b504 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -365,28 +365,28 @@ mc_ch_in_11k_rs_f7 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_7"
 mc_ch_in_11k_rs_f8 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_8"
 mc_ch_in_11k_rs_f9 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_9"
 # montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
-mc_ch_in_11kv2 = "ptbench.configs.datasets.mc_ch_in_11kv2.default"
-mc_ch_in_11kv2_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.rgb"
-mc_ch_in_11kv2_f0 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_0"
-mc_ch_in_11kv2_f1 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_1"
-mc_ch_in_11kv2_f2 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_2"
-mc_ch_in_11kv2_f3 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_3"
-mc_ch_in_11kv2_f4 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_4"
-mc_ch_in_11kv2_f5 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_5"
-mc_ch_in_11kv2_f6 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_6"
-mc_ch_in_11kv2_f7 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_7"
-mc_ch_in_11kv2_f8 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_8"
-mc_ch_in_11kv2_f9 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_9"
-mc_ch_in_11kv2_f0_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_0_rgb"
-mc_ch_in_11kv2_f1_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_1_rgb"
-mc_ch_in_11kv2_f2_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_2_rgb"
-mc_ch_in_11kv2_f3_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_3_rgb"
-mc_ch_in_11kv2_f4_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_4_rgb"
-mc_ch_in_11kv2_f5_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_5_rgb"
-mc_ch_in_11kv2_f6_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_6_rgb"
-mc_ch_in_11kv2_f7_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_7_rgb"
-mc_ch_in_11kv2_f8_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_8_rgb"
-mc_ch_in_11kv2_f9_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_9_rgb"
+mc_ch_in_11kv2 = "ptbench.data.mc_ch_in_11kv2.default"
+mc_ch_in_11kv2_rgb = "ptbench.data.mc_ch_in_11kv2.rgb"
+mc_ch_in_11kv2_f0 = "ptbench.data.mc_ch_in_11kv2.fold_0"
+mc_ch_in_11kv2_f1 = "ptbench.data.mc_ch_in_11kv2.fold_1"
+mc_ch_in_11kv2_f2 = "ptbench.data.mc_ch_in_11kv2.fold_2"
+mc_ch_in_11kv2_f3 = "ptbench.data.mc_ch_in_11kv2.fold_3"
+mc_ch_in_11kv2_f4 = "ptbench.data.mc_ch_in_11kv2.fold_4"
+mc_ch_in_11kv2_f5 = "ptbench.data.mc_ch_in_11kv2.fold_5"
+mc_ch_in_11kv2_f6 = "ptbench.data.mc_ch_in_11kv2.fold_6"
+mc_ch_in_11kv2_f7 = "ptbench.data.mc_ch_in_11kv2.fold_7"
+mc_ch_in_11kv2_f8 = "ptbench.data.mc_ch_in_11kv2.fold_8"
+mc_ch_in_11kv2_f9 = "ptbench.data.mc_ch_in_11kv2.fold_9"
+mc_ch_in_11kv2_f0_rgb = "ptbench.data.mc_ch_in_11kv2.fold_0_rgb"
+mc_ch_in_11kv2_f1_rgb = "ptbench.data.mc_ch_in_11kv2.fold_1_rgb"
+mc_ch_in_11kv2_f2_rgb = "ptbench.data.mc_ch_in_11kv2.fold_2_rgb"
+mc_ch_in_11kv2_f3_rgb = "ptbench.data.mc_ch_in_11kv2.fold_3_rgb"
+mc_ch_in_11kv2_f4_rgb = "ptbench.data.mc_ch_in_11kv2.fold_4_rgb"
+mc_ch_in_11kv2_f5_rgb = "ptbench.data.mc_ch_in_11kv2.fold_5_rgb"
+mc_ch_in_11kv2_f6_rgb = "ptbench.data.mc_ch_in_11kv2.fold_6_rgb"
+mc_ch_in_11kv2_f7_rgb = "ptbench.data.mc_ch_in_11kv2.fold_7_rgb"
+mc_ch_in_11kv2_f8_rgb = "ptbench.data.mc_ch_in_11kv2.fold_8_rgb"
+mc_ch_in_11kv2_f9_rgb = "ptbench.data.mc_ch_in_11kv2.fold_9_rgb"
 # extended montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
 # (with radiological signs)
 mc_ch_in_11kv2_rs = "ptbench.configs.datasets.mc_ch_in_11kv2_RS.default"
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py
deleted file mode 100644
index c36f7f60c8d1111baa7f3559c419e134b7ece62f..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from torch.utils.data.dataset import ConcatDataset
-
-
-def _maker(protocol):
-    if protocol == "default":
-        from ..indian import default as indian
-        from ..montgomery import default as mc
-        from ..shenzhen import default as ch
-        from ..tbx11k_simplified_v2 import default as tbx11kv2
-    elif protocol == "rgb":
-        from ..indian import rgb as indian
-        from ..montgomery import rgb as mc
-        from ..shenzhen import rgb as ch
-        from ..tbx11k_simplified_v2 import rgb as tbx11kv2
-    elif protocol == "fold_0":
-        from ..indian import fold_0 as indian
-        from ..montgomery import fold_0 as mc
-        from ..shenzhen import fold_0 as ch
-        from ..tbx11k_simplified_v2 import fold_0 as tbx11kv2
-    elif protocol == "fold_1":
-        from ..indian import fold_1 as indian
-        from ..montgomery import fold_1 as mc
-        from ..shenzhen import fold_1 as ch
-        from ..tbx11k_simplified_v2 import fold_1 as tbx11kv2
-    elif protocol == "fold_2":
-        from ..indian import fold_2 as indian
-        from ..montgomery import fold_2 as mc
-        from ..shenzhen import fold_2 as ch
-        from ..tbx11k_simplified_v2 import fold_2 as tbx11kv2
-    elif protocol == "fold_3":
-        from ..indian import fold_3 as indian
-        from ..montgomery import fold_3 as mc
-        from ..shenzhen import fold_3 as ch
-        from ..tbx11k_simplified_v2 import fold_3 as tbx11kv2
-    elif protocol == "fold_4":
-        from ..indian import fold_4 as indian
-        from ..montgomery import fold_4 as mc
-        from ..shenzhen import fold_4 as ch
-        from ..tbx11k_simplified_v2 import fold_4 as tbx11kv2
-    elif protocol == "fold_5":
-        from ..indian import fold_5 as indian
-        from ..montgomery import fold_5 as mc
-        from ..shenzhen import fold_5 as ch
-        from ..tbx11k_simplified_v2 import fold_5 as tbx11kv2
-    elif protocol == "fold_6":
-        from ..indian import fold_6 as indian
-        from ..montgomery import fold_6 as mc
-        from ..shenzhen import fold_6 as ch
-        from ..tbx11k_simplified_v2 import fold_6 as tbx11kv2
-    elif protocol == "fold_7":
-        from ..indian import fold_7 as indian
-        from ..montgomery import fold_7 as mc
-        from ..shenzhen import fold_7 as ch
-        from ..tbx11k_simplified_v2 import fold_7 as tbx11kv2
-    elif protocol == "fold_8":
-        from ..indian import fold_8 as indian
-        from ..montgomery import fold_8 as mc
-        from ..shenzhen import fold_8 as ch
-        from ..tbx11k_simplified_v2 import fold_8 as tbx11kv2
-    elif protocol == "fold_9":
-        from ..indian import fold_9 as indian
-        from ..montgomery import fold_9 as mc
-        from ..shenzhen import fold_9 as ch
-        from ..tbx11k_simplified_v2 import fold_9 as tbx11kv2
-    elif protocol == "fold_0_rgb":
-        from ..indian import fold_0_rgb as indian
-        from ..montgomery import fold_0_rgb as mc
-        from ..shenzhen import fold_0_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_0_rgb as tbx11kv2
-    elif protocol == "fold_1_rgb":
-        from ..indian import fold_1_rgb as indian
-        from ..montgomery import fold_1_rgb as mc
-        from ..shenzhen import fold_1_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_1_rgb as tbx11kv2
-    elif protocol == "fold_2_rgb":
-        from ..indian import fold_2_rgb as indian
-        from ..montgomery import fold_2_rgb as mc
-        from ..shenzhen import fold_2_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_2_rgb as tbx11kv2
-    elif protocol == "fold_3_rgb":
-        from ..indian import fold_3_rgb as indian
-        from ..montgomery import fold_3_rgb as mc
-        from ..shenzhen import fold_3_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_3_rgb as tbx11kv2
-    elif protocol == "fold_4_rgb":
-        from ..indian import fold_4_rgb as indian
-        from ..montgomery import fold_4_rgb as mc
-        from ..shenzhen import fold_4_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_4_rgb as tbx11kv2
-    elif protocol == "fold_5_rgb":
-        from ..indian import fold_5_rgb as indian
-        from ..montgomery import fold_5_rgb as mc
-        from ..shenzhen import fold_5_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_5_rgb as tbx11kv2
-    elif protocol == "fold_6_rgb":
-        from ..indian import fold_6_rgb as indian
-        from ..montgomery import fold_6_rgb as mc
-        from ..shenzhen import fold_6_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_6_rgb as tbx11kv2
-    elif protocol == "fold_7_rgb":
-        from ..indian import fold_7_rgb as indian
-        from ..montgomery import fold_7_rgb as mc
-        from ..shenzhen import fold_7_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_7_rgb as tbx11kv2
-    elif protocol == "fold_8_rgb":
-        from ..indian import fold_8_rgb as indian
-        from ..montgomery import fold_8_rgb as mc
-        from ..shenzhen import fold_8_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_8_rgb as tbx11kv2
-    elif protocol == "fold_9_rgb":
-        from ..indian import fold_9_rgb as indian
-        from ..montgomery import fold_9_rgb as mc
-        from ..shenzhen import fold_9_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_9_rgb as tbx11kv2
-
-    mc = mc.dataset
-    ch = ch.dataset
-    indian = indian.dataset
-    tbx11kv2 = tbx11kv2.dataset
-
-    dataset = {}
-    dataset["__train__"] = ConcatDataset(
-        [
-            mc["__train__"],
-            ch["__train__"],
-            indian["__train__"],
-            tbx11kv2["__train__"],
-        ]
-    )
-    dataset["train"] = ConcatDataset(
-        [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-    )
-    dataset["__valid__"] = ConcatDataset(
-        [
-            mc["__valid__"],
-            ch["__valid__"],
-            indian["__valid__"],
-            tbx11kv2["__valid__"],
-        ]
-    )
-    dataset["validation"] = ConcatDataset(
-        [
-            mc["validation"],
-            ch["validation"],
-            indian["validation"],
-            tbx11kv2["validation"],
-        ]
-    )
-    dataset["test"] = ConcatDataset(
-        [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-    )
-
-    return dataset
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py
deleted file mode 100644
index 7d4f16bda48b05e7e9302ffc9c689d8393b3e495..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets."""
-
-from . import _maker
-
-dataset = _maker("default")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py
deleted file mode 100644
index 757a0eb98214ba020d76095363d424b9209540e7..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0)"""
-
-from . import _maker
-
-dataset = _maker("fold_0")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py
deleted file mode 100644
index 48e05ff3f71f13976190d04cfaf59c5c36996bac..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_0_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py
deleted file mode 100644
index 5657958934b926879bd26503c9b383e775bc724d..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1)"""
-
-from . import _maker
-
-dataset = _maker("fold_1")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py
deleted file mode 100644
index c782d68de247c876ddd6826100cbb7908342b928..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_1_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py
deleted file mode 100644
index 10a597bcb8e0485db63f0d7500b15b3e78877066..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2)"""
-
-from . import _maker
-
-dataset = _maker("fold_2")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py
deleted file mode 100644
index d624f3af53abcf053c7bf17a9822a86cb53e2923..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_2_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py
deleted file mode 100644
index 39bee4fec99e81eecc22a365183283bcd2ec3d98..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3)"""
-
-from . import _maker
-
-dataset = _maker("fold_3")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py
deleted file mode 100644
index 7b26e4257e61013843e3a62c3bc419003e23b645..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_3_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py
deleted file mode 100644
index 5fb56292fd97636f452cde06c87bb34c89f01b1c..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4)"""
-
-from . import _maker
-
-dataset = _maker("fold_4")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py
deleted file mode 100644
index fbc4f0cfd9edc602fbe5665aca0465b29c5183b5..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_4_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py
deleted file mode 100644
index 679bb9b3cbbdede06cd87834239609720f439296..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5)"""
-
-from . import _maker
-
-dataset = _maker("fold_5")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py
deleted file mode 100644
index 747d510ecd1c7bd2f32ab7b139a53603d5bbee88..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_5_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py
deleted file mode 100644
index cd8e4cd571b8c796bad3221584870888c5186d3d..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6)"""
-
-from . import _maker
-
-dataset = _maker("fold_6")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py
deleted file mode 100644
index 86f112c3aae0c1c1dd48002347f78ce565797d47..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_6_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py
deleted file mode 100644
index 98241531d3e15720f07ef9174687c47db7d737f1..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7)"""
-
-from . import _maker
-
-dataset = _maker("fold_7")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py
deleted file mode 100644
index 981fe19180e0d8d4e1b21653f52a92a567723a63..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_7_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py
deleted file mode 100644
index dab1a234a3842ab450706d86060651d4383ddbfc..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8)"""
-
-from . import _maker
-
-dataset = _maker("fold_8")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py
deleted file mode 100644
index 798b8de64761ef0d87f491ef08b43426f55898f2..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_8_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py
deleted file mode 100644
index 097724b9446c4c2f0bef8ee6f838c1c11ff627a5..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9)"""
-
-from . import _maker
-
-dataset = _maker("fold_9")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py
deleted file mode 100644
index c6c564a40b957b562a37bb30b5809f7cf680e896..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_9_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py
deleted file mode 100644
index f47796a89c31a5a31c0f972d81b5d97c7f8742b4..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (RGB)"""
-
-from . import _maker
-
-dataset = _maker("rgb")
diff --git a/src/ptbench/data/mc_ch_in_11kv2/__init__.py b/src/ptbench/data/mc_ch_in_11kv2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..662d5c1326651b4d9f48d47bc4b503df23d17216
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/__init__.py
@@ -0,0 +1,3 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/src/ptbench/data/mc_ch_in_11kv2/default.py b/src/ptbench/data/mc_ch_in_11kv2/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..1eea460b5503e79c755d52d2e5f08fd28d7ed3f7
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/default.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.default import datamodule as indian_datamodule
+from ..montgomery.default import datamodule as mc_datamodule
+from ..shenzhen.default import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.default import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py
new file mode 100644
index 0000000000000000000000000000000000000000..738164a78835fa7bed8add6a9e5e0fe101abad52
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 0)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_0 import datamodule as indian_datamodule
+from ..montgomery.fold_0 import datamodule as mc_datamodule
+from ..shenzhen.fold_0 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_0 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py new file mode 100644 index 0000000000000000000000000000000000000000..c852a9a92ee5faf6a2accf83c3faa4169e829447 --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 0, RGB)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_0_rgb import datamodule as indian_datamodule +from ..montgomery.fold_0_rgb import datamodule as mc_datamodule +from ..shenzhen.fold_0_rgb import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_0_rgb import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py new file mode 100644 index 0000000000000000000000000000000000000000..bfc2dbfc53a33325549f4cb41513201a005bb1f9 --- /dev/null +++ 
b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 1)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_1 import datamodule as indian_datamodule +from ..montgomery.fold_1 import datamodule as mc_datamodule +from ..shenzhen.fold_1 import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_1 import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0c9174ffb8a7307fed77996fe0bbad655ad34c --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 1, RGB)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_1_rgb import datamodule as indian_datamodule +from ..montgomery.fold_1_rgb import datamodule as mc_datamodule +from ..shenzhen.fold_1_rgb import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_1_rgb import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py new file mode 100644 index 0000000000000000000000000000000000000000..06c7c1ab9b9ca28a6cdbf5e95108a669349a32a6 --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 2)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_2 import datamodule as indian_datamodule +from ..montgomery.fold_2 import datamodule as mc_datamodule +from ..shenzhen.fold_2 import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_2 import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py new file mode 100644 index 0000000000000000000000000000000000000000..5ecaa2a7e5a8ae7972a06eb1fa68c39cb575bba3 --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 2, RGB)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_2_rgb import datamodule as indian_datamodule +from ..montgomery.fold_2_rgb import datamodule as mc_datamodule +from ..shenzhen.fold_2_rgb import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_2_rgb import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py new file mode 100644 index 0000000000000000000000000000000000000000..11309b1c94eb182026882f934835ed9d62917b27 --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 3)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_3 import datamodule as indian_datamodule +from ..montgomery.fold_3 import datamodule as mc_datamodule +from ..shenzhen.fold_3 import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_3 import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py new file mode 100644 index 0000000000000000000000000000000000000000..267d128e5dee412b933b350bde8ff8d60e6f771e --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 3, RGB)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_3_rgb import datamodule as indian_datamodule +from ..montgomery.fold_3_rgb import datamodule as mc_datamodule +from ..shenzhen.fold_3_rgb import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_3_rgb import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py new file mode 100644 index 0000000000000000000000000000000000000000..0f53ed635e2690776ca704bc50f812cc09c33256 --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 4)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_4 import datamodule as indian_datamodule +from ..montgomery.fold_4 import datamodule as mc_datamodule +from ..shenzhen.fold_4 import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_4 import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py new file mode 100644 index 0000000000000000000000000000000000000000..9bdc8c23043961b780f8b53d4349ce9cd4c7e3fd --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 4, RGB)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
import return_subsets +from ..base_datamodule import BaseDataModule, get_dataset_from_module +from ..indian.fold_4_rgb import datamodule as indian_datamodule +from ..montgomery.fold_4_rgb import datamodule as mc_datamodule +from ..shenzhen.fold_4_rgb import datamodule as ch_datamodule +from ..tbx11k_simplified_v2.fold_4_rgb import datamodule as tbx11kv2_datamodule + +logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s") + + +class DefaultModule(BaseDataModule): + def __init__( + self, + train_batch_size=1, + predict_batch_size=1, + drop_incomplete_batch=False, + multiproc_kwargs=None, + ): + self.train_batch_size = train_batch_size + self.predict_batch_size = predict_batch_size + self.drop_incomplete_batch = drop_incomplete_batch + self.multiproc_kwargs = multiproc_kwargs + + super().__init__( + train_batch_size=train_batch_size, + predict_batch_size=predict_batch_size, + drop_incomplete_batch=drop_incomplete_batch, + multiproc_kwargs=multiproc_kwargs, + ) + + def setup(self, stage: str): + # Instantiate other datamodules and get their datasets + + module_args = { + "train_batch_size": self.train_batch_size, + "predict_batch_size": self.predict_batch_size, + "drop_incomplete_batch": self.drop_incomplete_batch, + "multiproc_kwargs": self.multiproc_kwargs, + } + + mc = get_dataset_from_module(mc_datamodule, stage, **module_args) + ch = get_dataset_from_module(ch_datamodule, stage, **module_args) + indian = get_dataset_from_module( + indian_datamodule, stage, **module_args + ) + tbx11kv2 = get_dataset_from_module( + tbx11kv2_datamodule, stage, **module_args + ) + + # Combine datasets + self.dataset = {} + self.dataset["__train__"] = ConcatDataset( + [ + mc["__train__"], + ch["__train__"], + indian["__train__"], + tbx11kv2["__train__"], + ] + ) + self.dataset["train"] = ConcatDataset( + [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]] + ) + self.dataset["__valid__"] = ConcatDataset( + [ + mc["__valid__"], + ch["__valid__"], + indian["__valid__"], + tbx11kv2["__valid__"], + ] + ) + self.dataset["validation"] = ConcatDataset( + [ + mc["validation"], + ch["validation"], + indian["validation"], + tbx11kv2["validation"], + ] + ) + self.dataset["test"] = ConcatDataset( + [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]] + ) + + ( + self.train_dataset, + self.validation_dataset, + self.extra_validation_datasets, + self.predict_dataset, + ) = return_subsets(self.dataset) + + +datamodule = DefaultModule diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py new file mode 100644 index 0000000000000000000000000000000000000000..df7dc0e8fe8fd9a6ce122a20161afdb927aa62eb --- /dev/null +++ b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py @@ -0,0 +1,101 @@ +# Copyright © 2022 Idiap Research Institute <contact@idiap.ch> +# +# SPDX-License-Identifier: GPL-3.0-or-later + +"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default +TBX11K-simplified datasets (cross validation fold 5)""" + +from clapper.logging import setup +from torch.utils.data.dataset import ConcatDataset + +from .. 
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5 import datamodule as indian_datamodule
+from ..montgomery.fold_5 import datamodule as mc_datamodule
+from ..shenzhen.fold_5 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_5 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce3ffd479d9264bd17188e89cfc8716f59caf25a
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 5, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_5_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_5_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_5_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef246cd82afe20f65d1d214646155fe9416be479
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 6)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6 import datamodule as indian_datamodule
+from ..montgomery.fold_6 import datamodule as mc_datamodule
+from ..shenzhen.fold_6 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_6 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc892b7964c8565d70577b514be426be85073645
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 6, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_6_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_6_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_6_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
new file mode 100644
index 0000000000000000000000000000000000000000..41d3ba3ebf059320e36195d85447fe57bf1dffbb
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 7)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7 import datamodule as indian_datamodule
+from ..montgomery.fold_7 import datamodule as mc_datamodule
+from ..shenzhen.fold_7 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_7 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fe1f28289a5a09f5fd321de5d8cd65c57fef4e0
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 7, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_7_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_7_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_7_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6842d47c932586d6910e05df039921fc82c5843
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 8)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8 import datamodule as indian_datamodule
+from ..montgomery.fold_8 import datamodule as mc_datamodule
+from ..shenzhen.fold_8 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_8 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a330b2e6d12847bc6406d87e5d48ad626f802c77
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 8, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_8_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_8_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_8_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba69b78892058b141a761e6cb4070683a0098fd8
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 9)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9 import datamodule as indian_datamodule
+from ..montgomery.fold_9 import datamodule as mc_datamodule
+from ..shenzhen.fold_9 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_9 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..93296414df6d566de6a48556074b185c045cac08
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (cross validation fold 9, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_9_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_9_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_9_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/rgb.py b/src/ptbench/data/mc_ch_in_11kv2/rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..c99b8fe902afb572dd3dae0dd2766b8fd9d961cb
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the
+default TBX11K-simplified-v2 datasets (RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.rgb import datamodule as indian_datamodule
+from ..montgomery.rgb import datamodule as mc_datamodule
+from ..shenzhen.rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
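
For context, the sketch below shows how one of these generated datamodules could be exercised on its own. It is a minimal sketch, assuming the BaseDataModule constructor signature used throughout this patch and Lightning's usual "fit" stage string; the batch sizes and the num_workers entry are illustrative values, not ptbench defaults.

# Minimal usage sketch (assumptions noted above; note that `datamodule` is
# the DefaultModule *class*, as assigned at the bottom of each module).
from ptbench.data.mc_ch_in_11kv2.rgb import datamodule

dm = datamodule(
    train_batch_size=8,
    predict_batch_size=8,
    drop_incomplete_batch=False,
    multiproc_kwargs={"num_workers": 4},  # illustrative worker count
)
dm.setup("fit")  # concatenates the Montgomery, Shenzhen, Indian and
                 # TBX11K-simplified-v2 subsets under each split key
print(len(dm.dataset["train"]))  # size of the aggregated train split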