diff --git a/pyproject.toml b/pyproject.toml
index d5663572ad8ccf2b84b3514e80e617c4686c2902..3e8f41d99dadb706ba68c6f227a82dc2d8ad89c3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -329,28 +329,28 @@ mc_ch_in_rs_f7 = "ptbench.configs.datasets.mc_ch_in_RS.fold_7"
 mc_ch_in_rs_f8 = "ptbench.configs.datasets.mc_ch_in_RS.fold_8"
 mc_ch_in_rs_f9 = "ptbench.configs.datasets.mc_ch_in_RS.fold_9"
 # montgomery-shenzhen-indian-tbx11k aggregated dataset
-mc_ch_in_11k = "ptbench.configs.datasets.mc_ch_in_11k.default"
-mc_ch_in_11k_rgb = "ptbench.configs.datasets.mc_ch_in_11k.rgb"
-mc_ch_in_11k_f0 = "ptbench.configs.datasets.mc_ch_in_11k.fold_0"
-mc_ch_in_11k_f1 = "ptbench.configs.datasets.mc_ch_in_11k.fold_1"
-mc_ch_in_11k_f2 = "ptbench.configs.datasets.mc_ch_in_11k.fold_2"
-mc_ch_in_11k_f3 = "ptbench.configs.datasets.mc_ch_in_11k.fold_3"
-mc_ch_in_11k_f4 = "ptbench.configs.datasets.mc_ch_in_11k.fold_4"
-mc_ch_in_11k_f5 = "ptbench.configs.datasets.mc_ch_in_11k.fold_5"
-mc_ch_in_11k_f6 = "ptbench.configs.datasets.mc_ch_in_11k.fold_6"
-mc_ch_in_11k_f7 = "ptbench.configs.datasets.mc_ch_in_11k.fold_7"
-mc_ch_in_11k_f8 = "ptbench.configs.datasets.mc_ch_in_11k.fold_8"
-mc_ch_in_11k_f9 = "ptbench.configs.datasets.mc_ch_in_11k.fold_9"
-mc_ch_in_11k_f0_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_0_rgb"
-mc_ch_in_11k_f1_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_1_rgb"
-mc_ch_in_11k_f2_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_2_rgb"
-mc_ch_in_11k_f3_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_3_rgb"
-mc_ch_in_11k_f4_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_4_rgb"
-mc_ch_in_11k_f5_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_5_rgb"
-mc_ch_in_11k_f6_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_6_rgb"
-mc_ch_in_11k_f7_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_7_rgb"
-mc_ch_in_11k_f8_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_8_rgb"
-mc_ch_in_11k_f9_rgb = "ptbench.configs.datasets.mc_ch_in_11k.fold_9_rgb"
+mc_ch_in_11k = "ptbench.data.mc_ch_in_11k.default"
+mc_ch_in_11k_rgb = "ptbench.data.mc_ch_in_11k.rgb"
+mc_ch_in_11k_f0 = "ptbench.data.mc_ch_in_11k.fold_0"
+mc_ch_in_11k_f1 = "ptbench.data.mc_ch_in_11k.fold_1"
+mc_ch_in_11k_f2 = "ptbench.data.mc_ch_in_11k.fold_2"
+mc_ch_in_11k_f3 = "ptbench.data.mc_ch_in_11k.fold_3"
+mc_ch_in_11k_f4 = "ptbench.data.mc_ch_in_11k.fold_4"
+mc_ch_in_11k_f5 = "ptbench.data.mc_ch_in_11k.fold_5"
+mc_ch_in_11k_f6 = "ptbench.data.mc_ch_in_11k.fold_6"
+mc_ch_in_11k_f7 = "ptbench.data.mc_ch_in_11k.fold_7"
+mc_ch_in_11k_f8 = "ptbench.data.mc_ch_in_11k.fold_8"
+mc_ch_in_11k_f9 = "ptbench.data.mc_ch_in_11k.fold_9"
+mc_ch_in_11k_f0_rgb = "ptbench.data.mc_ch_in_11k.fold_0_rgb"
+mc_ch_in_11k_f1_rgb = "ptbench.data.mc_ch_in_11k.fold_1_rgb"
+mc_ch_in_11k_f2_rgb = "ptbench.data.mc_ch_in_11k.fold_2_rgb"
+mc_ch_in_11k_f3_rgb = "ptbench.data.mc_ch_in_11k.fold_3_rgb"
+mc_ch_in_11k_f4_rgb = "ptbench.data.mc_ch_in_11k.fold_4_rgb"
+mc_ch_in_11k_f5_rgb = "ptbench.data.mc_ch_in_11k.fold_5_rgb"
+mc_ch_in_11k_f6_rgb = "ptbench.data.mc_ch_in_11k.fold_6_rgb"
+mc_ch_in_11k_f7_rgb = "ptbench.data.mc_ch_in_11k.fold_7_rgb"
+mc_ch_in_11k_f8_rgb = "ptbench.data.mc_ch_in_11k.fold_8_rgb"
+mc_ch_in_11k_f9_rgb = "ptbench.data.mc_ch_in_11k.fold_9_rgb"
 # extended montgomery-shenzhen-indian-tbx11k aggregated dataset
 # (with radiological signs)
 mc_ch_in_11k_rs = "ptbench.configs.datasets.mc_ch_in_11k_RS.default"
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/__init__.py b/src/ptbench/configs/datasets/mc_ch_in_11k/__init__.py
deleted file mode 100644
index e8970bd744280c9fb07f6f09d5b4b844a9f57993..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/__init__.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from torch.utils.data.dataset import ConcatDataset
-
-
-def _maker(protocol):
-    if protocol == "default":
-        from ..indian import default as indian
-        from ..montgomery import default as mc
-        from ..shenzhen import default as ch
-        from ..tbx11k_simplified import default as tbx11k
-    elif protocol == "rgb":
-        from ..indian import rgb as indian
-        from ..montgomery import rgb as mc
-        from ..shenzhen import rgb as ch
-        from ..tbx11k_simplified import rgb as tbx11k
-    elif protocol == "fold_0":
-        from ..indian import fold_0 as indian
-        from ..montgomery import fold_0 as mc
-        from ..shenzhen import fold_0 as ch
-        from ..tbx11k_simplified import fold_0 as tbx11k
-    elif protocol == "fold_1":
-        from ..indian import fold_1 as indian
-        from ..montgomery import fold_1 as mc
-        from ..shenzhen import fold_1 as ch
-        from ..tbx11k_simplified import fold_1 as tbx11k
-    elif protocol == "fold_2":
-        from ..indian import fold_2 as indian
-        from ..montgomery import fold_2 as mc
-        from ..shenzhen import fold_2 as ch
-        from ..tbx11k_simplified import fold_2 as tbx11k
-    elif protocol == "fold_3":
-        from ..indian import fold_3 as indian
-        from ..montgomery import fold_3 as mc
-        from ..shenzhen import fold_3 as ch
-        from ..tbx11k_simplified import fold_3 as tbx11k
-    elif protocol == "fold_4":
-        from ..indian import fold_4 as indian
-        from ..montgomery import fold_4 as mc
-        from ..shenzhen import fold_4 as ch
-        from ..tbx11k_simplified import fold_4 as tbx11k
-    elif protocol == "fold_5":
-        from ..indian import fold_5 as indian
-        from ..montgomery import fold_5 as mc
-        from ..shenzhen import fold_5 as ch
-        from ..tbx11k_simplified import fold_5 as tbx11k
-    elif protocol == "fold_6":
-        from ..indian import fold_6 as indian
-        from ..montgomery import fold_6 as mc
-        from ..shenzhen import fold_6 as ch
-        from ..tbx11k_simplified import fold_6 as tbx11k
-    elif protocol == "fold_7":
-        from ..indian import fold_7 as indian
-        from ..montgomery import fold_7 as mc
-        from ..shenzhen import fold_7 as ch
-        from ..tbx11k_simplified import fold_7 as tbx11k
-    elif protocol == "fold_8":
-        from ..indian import fold_8 as indian
-        from ..montgomery import fold_8 as mc
-        from ..shenzhen import fold_8 as ch
-        from ..tbx11k_simplified import fold_8 as tbx11k
-    elif protocol == "fold_9":
-        from ..indian import fold_9 as indian
-        from ..montgomery import fold_9 as mc
-        from ..shenzhen import fold_9 as ch
-        from ..tbx11k_simplified import fold_9 as tbx11k
-    elif protocol == "fold_0_rgb":
-        from ..indian import fold_0_rgb as indian
-        from ..montgomery import fold_0_rgb as mc
-        from ..shenzhen import fold_0_rgb as ch
-        from ..tbx11k_simplified import fold_0_rgb as tbx11k
-    elif protocol == "fold_1_rgb":
-        from ..indian import fold_1_rgb as indian
-        from ..montgomery import fold_1_rgb as mc
-        from ..shenzhen import fold_1_rgb as ch
-        from ..tbx11k_simplified import fold_1_rgb as tbx11k
-    elif protocol == "fold_2_rgb":
-        from ..indian import fold_2_rgb as indian
-        from ..montgomery import fold_2_rgb as mc
-        from ..shenzhen import fold_2_rgb as ch
-        from ..tbx11k_simplified import fold_2_rgb as tbx11k
-    elif protocol == "fold_3_rgb":
-        from ..indian import fold_3_rgb as indian
-        from ..montgomery import fold_3_rgb as mc
-        from ..shenzhen import fold_3_rgb as ch
-        from ..tbx11k_simplified import fold_3_rgb as tbx11k
-    elif protocol == "fold_4_rgb":
-        from ..indian import fold_4_rgb as indian
-        from ..montgomery import fold_4_rgb as mc
-        from ..shenzhen import fold_4_rgb as ch
-        from ..tbx11k_simplified import fold_4_rgb as tbx11k
-    elif protocol == "fold_5_rgb":
-        from ..indian import fold_5_rgb as indian
-        from ..montgomery import fold_5_rgb as mc
-        from ..shenzhen import fold_5_rgb as ch
-        from ..tbx11k_simplified import fold_5_rgb as tbx11k
-    elif protocol == "fold_6_rgb":
-        from ..indian import fold_6_rgb as indian
-        from ..montgomery import fold_6_rgb as mc
-        from ..shenzhen import fold_6_rgb as ch
-        from ..tbx11k_simplified import fold_6_rgb as tbx11k
-    elif protocol == "fold_7_rgb":
-        from ..indian import fold_7_rgb as indian
-        from ..montgomery import fold_7_rgb as mc
-        from ..shenzhen import fold_7_rgb as ch
-        from ..tbx11k_simplified import fold_7_rgb as tbx11k
-    elif protocol == "fold_8_rgb":
-        from ..indian import fold_8_rgb as indian
-        from ..montgomery import fold_8_rgb as mc
-        from ..shenzhen import fold_8_rgb as ch
-        from ..tbx11k_simplified import fold_8_rgb as tbx11k
-    elif protocol == "fold_9_rgb":
-        from ..indian import fold_9_rgb as indian
-        from ..montgomery import fold_9_rgb as mc
-        from ..shenzhen import fold_9_rgb as ch
-        from ..tbx11k_simplified import fold_9_rgb as tbx11k
-
-    mc = mc.dataset
-    ch = ch.dataset
-    indian = indian.dataset
-    tbx11k = tbx11k.dataset
-
-    dataset = {}
-    dataset["__train__"] = ConcatDataset(
-        [
-            mc["__train__"],
-            ch["__train__"],
-            indian["__train__"],
-            tbx11k["__train__"],
-        ]
-    )
-    dataset["train"] = ConcatDataset(
-        [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
-    )
-    dataset["__valid__"] = ConcatDataset(
-        [
-            mc["__valid__"],
-            ch["__valid__"],
-            indian["__valid__"],
-            tbx11k["__valid__"],
-        ]
-    )
-    dataset["validation"] = ConcatDataset(
-        [
-            mc["validation"],
-            ch["validation"],
-            indian["validation"],
-            tbx11k["validation"],
-        ]
-    )
-    dataset["test"] = ConcatDataset(
-        [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
-    )
-
-    return dataset
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/default.py b/src/ptbench/configs/datasets/mc_ch_in_11k/default.py
deleted file mode 100644
index 7d4f16bda48b05e7e9302ffc9c689d8393b3e495..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/default.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets."""
-
-from . import _maker
-
-dataset = _maker("default")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0.py
deleted file mode 100644
index 757a0eb98214ba020d76095363d424b9209540e7..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0)"""
-
-from . import _maker
-
-dataset = _maker("fold_0")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0_rgb.py
deleted file mode 100644
index 48e05ff3f71f13976190d04cfaf59c5c36996bac..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_0_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_0_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1.py
deleted file mode 100644
index 5657958934b926879bd26503c9b383e775bc724d..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1)"""
-
-from . import _maker
-
-dataset = _maker("fold_1")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1_rgb.py
deleted file mode 100644
index c782d68de247c876ddd6826100cbb7908342b928..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_1_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_1_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2.py
deleted file mode 100644
index 10a597bcb8e0485db63f0d7500b15b3e78877066..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2)"""
-
-from . import _maker
-
-dataset = _maker("fold_2")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2_rgb.py
deleted file mode 100644
index d624f3af53abcf053c7bf17a9822a86cb53e2923..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_2_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_2_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3.py
deleted file mode 100644
index 39bee4fec99e81eecc22a365183283bcd2ec3d98..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3)"""
-
-from . import _maker
-
-dataset = _maker("fold_3")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3_rgb.py
deleted file mode 100644
index 7b26e4257e61013843e3a62c3bc419003e23b645..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_3_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_3_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4.py
deleted file mode 100644
index 5fb56292fd97636f452cde06c87bb34c89f01b1c..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4)"""
-
-from . import _maker
-
-dataset = _maker("fold_4")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4_rgb.py
deleted file mode 100644
index fbc4f0cfd9edc602fbe5665aca0465b29c5183b5..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_4_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_4_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5.py
deleted file mode 100644
index 679bb9b3cbbdede06cd87834239609720f439296..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5)"""
-
-from . import _maker
-
-dataset = _maker("fold_5")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5_rgb.py
deleted file mode 100644
index 747d510ecd1c7bd2f32ab7b139a53603d5bbee88..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_5_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_5_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6.py
deleted file mode 100644
index cd8e4cd571b8c796bad3221584870888c5186d3d..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6)"""
-
-from . import _maker
-
-dataset = _maker("fold_6")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6_rgb.py
deleted file mode 100644
index 86f112c3aae0c1c1dd48002347f78ce565797d47..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_6_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_6_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7.py
deleted file mode 100644
index 98241531d3e15720f07ef9174687c47db7d737f1..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7)"""
-
-from . import _maker
-
-dataset = _maker("fold_7")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7_rgb.py
deleted file mode 100644
index 981fe19180e0d8d4e1b21653f52a92a567723a63..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_7_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_7_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8.py
deleted file mode 100644
index dab1a234a3842ab450706d86060651d4383ddbfc..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8)"""
-
-from . import _maker
-
-dataset = _maker("fold_8")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8_rgb.py
deleted file mode 100644
index 798b8de64761ef0d87f491ef08b43426f55898f2..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_8_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_8_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9.py
deleted file mode 100644
index 097724b9446c4c2f0bef8ee6f838c1c11ff627a5..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9)"""
-
-from . import _maker
-
-dataset = _maker("fold_9")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9_rgb.py
deleted file mode 100644
index c6c564a40b957b562a37bb30b5809f7cf680e896..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/fold_9_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_9_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11k/rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11k/rgb.py
deleted file mode 100644
index f47796a89c31a5a31c0f972d81b5d97c7f8742b4..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11k/rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (RGB)"""
-
-from . import _maker
-
-dataset = _maker("rgb")
diff --git a/src/ptbench/data/mc_ch_in_11k/__init__.py b/src/ptbench/data/mc_ch_in_11k/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..662d5c1326651b4d9f48d47bc4b503df23d17216
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/__init__.py
@@ -0,0 +1,3 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/src/ptbench/data/mc_ch_in_11k/default.py b/src/ptbench/data/mc_ch_in_11k/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..454521a7ebaaf1072fa5f2ffaaacb4775ff9a9ea
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/default.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.default import datamodule as indian_datamodule
+from ..montgomery.default import datamodule as mc_datamodule
+from ..shenzhen.default import datamodule as ch_datamodule
+from ..tbx11k_simplified.default import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_0.py b/src/ptbench/data/mc_ch_in_11k/fold_0.py
new file mode 100644
index 0000000000000000000000000000000000000000..1adce163d7a4f734924a7a237666d75a116c3039
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_0.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 0)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_0 import datamodule as indian_datamodule
+from ..montgomery.fold_0 import datamodule as mc_datamodule
+from ..shenzhen.fold_0 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_0 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_0_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_0_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a03d33958cb790d62e4ac83eb13844be3ca25bf
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_0_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 0, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_0_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_0_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_0_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_0_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_1.py b/src/ptbench/data/mc_ch_in_11k/fold_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..584a65c4b1a6579f666784a16d83f2c69d1ab079
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_1.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 1)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_1 import datamodule as indian_datamodule
+from ..montgomery.fold_1 import datamodule as mc_datamodule
+from ..shenzhen.fold_1 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_1 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_1_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_1_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..32a94a5d68567ab4361686ac11118330e0b911b0
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_1_rgb.py
@@ -0,0 +1,81 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_1_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_1_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_1_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"], indian["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"], indian["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"], indian["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_2.py b/src/ptbench/data/mc_ch_in_11k/fold_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..05c0234e93c54bd0b3706c2e526895d884ba5386
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_2.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 2)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_2 import datamodule as indian_datamodule
+from ..montgomery.fold_2 import datamodule as mc_datamodule
+from ..shenzhen.fold_2 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_2 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_2_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_2_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4bd35f00113478bcb61cc566af0aa8eb86c0027
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_2_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 2, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_2_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_2_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_2_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_2_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_3.py b/src/ptbench/data/mc_ch_in_11k/fold_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..80a544be0507f15055e5eccf5d000e514b069352
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_3.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 3)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_3 import datamodule as indian_datamodule
+from ..montgomery.fold_3 import datamodule as mc_datamodule
+from ..shenzhen.fold_3 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_3 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_3_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_3_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..28cce948da793f97b3fa654cf41df72eee7d6392
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_3_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 3, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_3_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_3_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_3_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_3_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_4.py b/src/ptbench/data/mc_ch_in_11k/fold_4.py
new file mode 100644
index 0000000000000000000000000000000000000000..5860e1ddbb886116f2d22fece689bc587651c6fa
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_4.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 4)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_4 import datamodule as indian_datamodule
+from ..montgomery.fold_4 import datamodule as mc_datamodule
+from ..shenzhen.fold_4 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_4 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_4_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_4_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbd6065ee32d93774ea56a538a3bfa160e23b049
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_4_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 4, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_4_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_4_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_4_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_4_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):  # aggregates the four source datamodules into one dataset
+    def __init__(
+        self,
+        train_batch_size=1,  # batch size for training loaders
+        predict_batch_size=1,  # batch size for prediction loaders
+        drop_incomplete_batch=False,  # presumably drops a trailing short batch -- confirm in BaseDataModule
+        multiproc_kwargs=None,  # multiprocessing options forwarded to data loading
+    ):
+        self.train_batch_size = train_batch_size  # kept so setup() can forward identical settings
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(  # BaseDataModule receives the same configuration
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):  # build the concatenated splits for the requested stage
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {  # identical settings for every sub-datamodule
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)  # Montgomery
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)  # Shenzhen
+        indian = get_dataset_from_module(  # Indian
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(  # TBX11K-simplified
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}  # split name -> ConcatDataset over the four sources
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (  # unpack the dict into the four dataset attributes
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule  # exported alias; note this is the class itself, not an instance
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_5.py b/src/ptbench/data/mc_ch_in_11k/fold_5.py
new file mode 100644
index 0000000000000000000000000000000000000000..93fac65bb9b2f261f7d8abffea32d91b81711555
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_5.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 5)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5 import datamodule as indian_datamodule
+from ..montgomery.fold_5 import datamodule as mc_datamodule
+from ..shenzhen.fold_5 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_5 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_5_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_5_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cf1a1fe4b6926d6848f3582fc5681e6057e3948
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_5_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 5, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_5_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_5_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_5_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_6.py b/src/ptbench/data/mc_ch_in_11k/fold_6.py
new file mode 100644
index 0000000000000000000000000000000000000000..44a79a42d619b26907bb44c85339d4aa5cec9bf6
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_6.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 6)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6 import datamodule as indian_datamodule
+from ..montgomery.fold_6 import datamodule as mc_datamodule
+from ..shenzhen.fold_6 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_6 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_6_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_6_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..de6abc58b0a4871627ed2973f0b999a0d620d2f9
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_6_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 6, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_6_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_6_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_6_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_7.py b/src/ptbench/data/mc_ch_in_11k/fold_7.py
new file mode 100644
index 0000000000000000000000000000000000000000..d955a02e610af58d58a9380c11a60eddfcae7571
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_7.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 7)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7 import datamodule as indian_datamodule
+from ..montgomery.fold_7 import datamodule as mc_datamodule
+from ..shenzhen.fold_7 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_7 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_7_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_7_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..f163fe7ffaa1a82014b2fedb03dd89a626e541b9
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_7_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 7, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_7_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_7_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_7_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_8.py b/src/ptbench/data/mc_ch_in_11k/fold_8.py
new file mode 100644
index 0000000000000000000000000000000000000000..77753c1114c47335982bfb0904922d5d55394c8a
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_8.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 8)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8 import datamodule as indian_datamodule
+from ..montgomery.fold_8 import datamodule as mc_datamodule
+from ..shenzhen.fold_8 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_8 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_8_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_8_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..503854beeac9e900c67b1f9bb230032832d25cf1
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_8_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 8, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_8_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_8_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_8_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_9.py b/src/ptbench/data/mc_ch_in_11k/fold_9.py
new file mode 100644
index 0000000000000000000000000000000000000000..45e88b2780acaf30fbdd524f9df6c0736924e4d7
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_9.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 9)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9 import datamodule as indian_datamodule
+from ..montgomery.fold_9 import datamodule as mc_datamodule
+from ..shenzhen.fold_9 import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_9 import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/fold_9_rgb.py b/src/ptbench/data/mc_ch_in_11k/fold_9_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..39bcadec8792861461d8a8f65908a14ba793c6ee
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/fold_9_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 9, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_9_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_9_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.fold_9_rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11k/rgb.py b/src/ptbench/data/mc_ch_in_11k/rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8482e9802b73bcd175ac7fc55a40a561055d8f0
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11k/rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.rgb import datamodule as indian_datamodule
+from ..montgomery.rgb import datamodule as mc_datamodule
+from ..shenzhen.rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified.rgb import datamodule as tbx11k_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11k = get_dataset_from_module(
+            tbx11k_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11k["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11k["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11k["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11k["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11k["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule