From eeadd6b6a66a54a82b1fd785a00df18637bd801c Mon Sep 17 00:00:00 2001
From: dcarron <daniel.carron@idiap.ch>
Date: Tue, 6 Jun 2023 09:57:34 +0200
Subject: [PATCH] Move mc_ch_in_11kv2 configs to data
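
Port the montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
configurations from the legacy config tree
(ptbench.configs.datasets.mc_ch_in_11kv2) to the datamodule-based
layout under ptbench.data.mc_ch_in_11kv2. Each protocol (default, rgb,
and cross-validation folds 0-9 in grayscale and RGB variants) now
ships as a module exposing a `datamodule` class built on
BaseDataModule, which concatenates the Montgomery, Shenzhen, Indian
and TBX11K-simplified-v2 datamodules via ConcatDataset. The
pyproject.toml entry points are updated to the new module paths.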

---
 pyproject.toml                                |  44 ++---
 .../datasets/mc_ch_in_11kv2/__init__.py       | 157 ------------------
 .../datasets/mc_ch_in_11kv2/default.py        |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_0.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_0_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_1.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_1_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_2.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_2_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_3.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_3_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_4.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_4_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_5.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_5_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_6.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_6_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_7.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_7_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_8.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_8_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/fold_9.py |  10 --
 .../datasets/mc_ch_in_11kv2/fold_9_rgb.py     |  10 --
 .../configs/datasets/mc_ch_in_11kv2/rgb.py    |  10 --
 src/ptbench/data/mc_ch_in_11kv2/__init__.py   |   3 +
 src/ptbench/data/mc_ch_in_11kv2/default.py    | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_0.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_1.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_2.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_3.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_4.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_5.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_6.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_7.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_8.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_9.py     | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py | 101 +++++++++++
 src/ptbench/data/mc_ch_in_11kv2/rgb.py        | 101 +++++++++++
 47 files changed, 2247 insertions(+), 399 deletions(-)
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py
 delete mode 100644 src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/__init__.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/default.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_0.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_1.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_2.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_3.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_4.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_5.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_6.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_7.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_8.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_9.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py
 create mode 100644 src/ptbench/data/mc_ch_in_11kv2/rgb.py

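Note: the new per-protocol modules below are identical except for the
upstream fold/RGB variants they import. A minimal usage sketch of one
relocated config, based only on the API visible in this patch (the
"fit" stage value is an assumption borrowed from the Lightning
convention, not confirmed by this diff):

    # sketch only: instantiate the relocated aggregated datamodule
    from ptbench.data.mc_ch_in_11kv2.default import datamodule

    dm = datamodule(train_batch_size=8)  # DefaultModule, a BaseDataModule
    dm.setup("fit")  # assumed stage name; builds the four upstream datasets
    train = dm.dataset["__train__"]  # ConcatDataset over mc/ch/indian/tbx11kv2
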
diff --git a/pyproject.toml b/pyproject.toml
index 7c9f1948..e6208923 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -365,28 +365,28 @@ mc_ch_in_11k_rs_f7 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_7"
 mc_ch_in_11k_rs_f8 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_8"
 mc_ch_in_11k_rs_f9 = "ptbench.configs.datasets.mc_ch_in_11k_RS.fold_9"
 # montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
-mc_ch_in_11kv2 = "ptbench.configs.datasets.mc_ch_in_11kv2.default"
-mc_ch_in_11kv2_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.rgb"
-mc_ch_in_11kv2_f0 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_0"
-mc_ch_in_11kv2_f1 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_1"
-mc_ch_in_11kv2_f2 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_2"
-mc_ch_in_11kv2_f3 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_3"
-mc_ch_in_11kv2_f4 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_4"
-mc_ch_in_11kv2_f5 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_5"
-mc_ch_in_11kv2_f6 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_6"
-mc_ch_in_11kv2_f7 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_7"
-mc_ch_in_11kv2_f8 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_8"
-mc_ch_in_11kv2_f9 = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_9"
-mc_ch_in_11kv2_f0_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_0_rgb"
-mc_ch_in_11kv2_f1_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_1_rgb"
-mc_ch_in_11kv2_f2_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_2_rgb"
-mc_ch_in_11kv2_f3_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_3_rgb"
-mc_ch_in_11kv2_f4_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_4_rgb"
-mc_ch_in_11kv2_f5_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_5_rgb"
-mc_ch_in_11kv2_f6_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_6_rgb"
-mc_ch_in_11kv2_f7_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_7_rgb"
-mc_ch_in_11kv2_f8_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_8_rgb"
-mc_ch_in_11kv2_f9_rgb = "ptbench.configs.datasets.mc_ch_in_11kv2.fold_9_rgb"
+mc_ch_in_11kv2 = "ptbench.data.mc_ch_in_11kv2.default"
+mc_ch_in_11kv2_rgb = "ptbench.data.mc_ch_in_11kv2.rgb"
+mc_ch_in_11kv2_f0 = "ptbench.data.mc_ch_in_11kv2.fold_0"
+mc_ch_in_11kv2_f1 = "ptbench.data.mc_ch_in_11kv2.fold_1"
+mc_ch_in_11kv2_f2 = "ptbench.data.mc_ch_in_11kv2.fold_2"
+mc_ch_in_11kv2_f3 = "ptbench.data.mc_ch_in_11kv2.fold_3"
+mc_ch_in_11kv2_f4 = "ptbench.data.mc_ch_in_11kv2.fold_4"
+mc_ch_in_11kv2_f5 = "ptbench.data.mc_ch_in_11kv2.fold_5"
+mc_ch_in_11kv2_f6 = "ptbench.data.mc_ch_in_11kv2.fold_6"
+mc_ch_in_11kv2_f7 = "ptbench.data.mc_ch_in_11kv2.fold_7"
+mc_ch_in_11kv2_f8 = "ptbench.data.mc_ch_in_11kv2.fold_8"
+mc_ch_in_11kv2_f9 = "ptbench.data.mc_ch_in_11kv2.fold_9"
+mc_ch_in_11kv2_f0_rgb = "ptbench.data.mc_ch_in_11kv2.fold_0_rgb"
+mc_ch_in_11kv2_f1_rgb = "ptbench.data.mc_ch_in_11kv2.fold_1_rgb"
+mc_ch_in_11kv2_f2_rgb = "ptbench.data.mc_ch_in_11kv2.fold_2_rgb"
+mc_ch_in_11kv2_f3_rgb = "ptbench.data.mc_ch_in_11kv2.fold_3_rgb"
+mc_ch_in_11kv2_f4_rgb = "ptbench.data.mc_ch_in_11kv2.fold_4_rgb"
+mc_ch_in_11kv2_f5_rgb = "ptbench.data.mc_ch_in_11kv2.fold_5_rgb"
+mc_ch_in_11kv2_f6_rgb = "ptbench.data.mc_ch_in_11kv2.fold_6_rgb"
+mc_ch_in_11kv2_f7_rgb = "ptbench.data.mc_ch_in_11kv2.fold_7_rgb"
+mc_ch_in_11kv2_f8_rgb = "ptbench.data.mc_ch_in_11kv2.fold_8_rgb"
+mc_ch_in_11kv2_f9_rgb = "ptbench.data.mc_ch_in_11kv2.fold_9_rgb"
 # extended montgomery-shenzhen-indian-tbx11kv2 aggregated dataset
 # (with radiological signs)
 mc_ch_in_11kv2_rs = "ptbench.configs.datasets.mc_ch_in_11kv2_RS.default"
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py
deleted file mode 100644
index c36f7f60..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/__init__.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from torch.utils.data.dataset import ConcatDataset
-
-
-def _maker(protocol):
-    if protocol == "default":
-        from ..indian import default as indian
-        from ..montgomery import default as mc
-        from ..shenzhen import default as ch
-        from ..tbx11k_simplified_v2 import default as tbx11kv2
-    elif protocol == "rgb":
-        from ..indian import rgb as indian
-        from ..montgomery import rgb as mc
-        from ..shenzhen import rgb as ch
-        from ..tbx11k_simplified_v2 import rgb as tbx11kv2
-    elif protocol == "fold_0":
-        from ..indian import fold_0 as indian
-        from ..montgomery import fold_0 as mc
-        from ..shenzhen import fold_0 as ch
-        from ..tbx11k_simplified_v2 import fold_0 as tbx11kv2
-    elif protocol == "fold_1":
-        from ..indian import fold_1 as indian
-        from ..montgomery import fold_1 as mc
-        from ..shenzhen import fold_1 as ch
-        from ..tbx11k_simplified_v2 import fold_1 as tbx11kv2
-    elif protocol == "fold_2":
-        from ..indian import fold_2 as indian
-        from ..montgomery import fold_2 as mc
-        from ..shenzhen import fold_2 as ch
-        from ..tbx11k_simplified_v2 import fold_2 as tbx11kv2
-    elif protocol == "fold_3":
-        from ..indian import fold_3 as indian
-        from ..montgomery import fold_3 as mc
-        from ..shenzhen import fold_3 as ch
-        from ..tbx11k_simplified_v2 import fold_3 as tbx11kv2
-    elif protocol == "fold_4":
-        from ..indian import fold_4 as indian
-        from ..montgomery import fold_4 as mc
-        from ..shenzhen import fold_4 as ch
-        from ..tbx11k_simplified_v2 import fold_4 as tbx11kv2
-    elif protocol == "fold_5":
-        from ..indian import fold_5 as indian
-        from ..montgomery import fold_5 as mc
-        from ..shenzhen import fold_5 as ch
-        from ..tbx11k_simplified_v2 import fold_5 as tbx11kv2
-    elif protocol == "fold_6":
-        from ..indian import fold_6 as indian
-        from ..montgomery import fold_6 as mc
-        from ..shenzhen import fold_6 as ch
-        from ..tbx11k_simplified_v2 import fold_6 as tbx11kv2
-    elif protocol == "fold_7":
-        from ..indian import fold_7 as indian
-        from ..montgomery import fold_7 as mc
-        from ..shenzhen import fold_7 as ch
-        from ..tbx11k_simplified_v2 import fold_7 as tbx11kv2
-    elif protocol == "fold_8":
-        from ..indian import fold_8 as indian
-        from ..montgomery import fold_8 as mc
-        from ..shenzhen import fold_8 as ch
-        from ..tbx11k_simplified_v2 import fold_8 as tbx11kv2
-    elif protocol == "fold_9":
-        from ..indian import fold_9 as indian
-        from ..montgomery import fold_9 as mc
-        from ..shenzhen import fold_9 as ch
-        from ..tbx11k_simplified_v2 import fold_9 as tbx11kv2
-    elif protocol == "fold_0_rgb":
-        from ..indian import fold_0_rgb as indian
-        from ..montgomery import fold_0_rgb as mc
-        from ..shenzhen import fold_0_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_0_rgb as tbx11kv2
-    elif protocol == "fold_1_rgb":
-        from ..indian import fold_1_rgb as indian
-        from ..montgomery import fold_1_rgb as mc
-        from ..shenzhen import fold_1_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_1_rgb as tbx11kv2
-    elif protocol == "fold_2_rgb":
-        from ..indian import fold_2_rgb as indian
-        from ..montgomery import fold_2_rgb as mc
-        from ..shenzhen import fold_2_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_2_rgb as tbx11kv2
-    elif protocol == "fold_3_rgb":
-        from ..indian import fold_3_rgb as indian
-        from ..montgomery import fold_3_rgb as mc
-        from ..shenzhen import fold_3_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_3_rgb as tbx11kv2
-    elif protocol == "fold_4_rgb":
-        from ..indian import fold_4_rgb as indian
-        from ..montgomery import fold_4_rgb as mc
-        from ..shenzhen import fold_4_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_4_rgb as tbx11kv2
-    elif protocol == "fold_5_rgb":
-        from ..indian import fold_5_rgb as indian
-        from ..montgomery import fold_5_rgb as mc
-        from ..shenzhen import fold_5_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_5_rgb as tbx11kv2
-    elif protocol == "fold_6_rgb":
-        from ..indian import fold_6_rgb as indian
-        from ..montgomery import fold_6_rgb as mc
-        from ..shenzhen import fold_6_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_6_rgb as tbx11kv2
-    elif protocol == "fold_7_rgb":
-        from ..indian import fold_7_rgb as indian
-        from ..montgomery import fold_7_rgb as mc
-        from ..shenzhen import fold_7_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_7_rgb as tbx11kv2
-    elif protocol == "fold_8_rgb":
-        from ..indian import fold_8_rgb as indian
-        from ..montgomery import fold_8_rgb as mc
-        from ..shenzhen import fold_8_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_8_rgb as tbx11kv2
-    elif protocol == "fold_9_rgb":
-        from ..indian import fold_9_rgb as indian
-        from ..montgomery import fold_9_rgb as mc
-        from ..shenzhen import fold_9_rgb as ch
-        from ..tbx11k_simplified_v2 import fold_9_rgb as tbx11kv2
-
-    mc = mc.dataset
-    ch = ch.dataset
-    indian = indian.dataset
-    tbx11kv2 = tbx11kv2.dataset
-
-    dataset = {}
-    dataset["__train__"] = ConcatDataset(
-        [
-            mc["__train__"],
-            ch["__train__"],
-            indian["__train__"],
-            tbx11kv2["__train__"],
-        ]
-    )
-    dataset["train"] = ConcatDataset(
-        [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
-    )
-    dataset["__valid__"] = ConcatDataset(
-        [
-            mc["__valid__"],
-            ch["__valid__"],
-            indian["__valid__"],
-            tbx11kv2["__valid__"],
-        ]
-    )
-    dataset["validation"] = ConcatDataset(
-        [
-            mc["validation"],
-            ch["validation"],
-            indian["validation"],
-            tbx11kv2["validation"],
-        ]
-    )
-    dataset["test"] = ConcatDataset(
-        [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
-    )
-
-    return dataset
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py
deleted file mode 100644
index 7d4f16bd..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/default.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets."""
-
-from . import _maker
-
-dataset = _maker("default")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py
deleted file mode 100644
index 757a0eb9..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0)"""
-
-from . import _maker
-
-dataset = _maker("fold_0")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py
deleted file mode 100644
index 48e05ff3..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_0_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 0, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_0_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py
deleted file mode 100644
index 56579589..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1)"""
-
-from . import _maker
-
-dataset = _maker("fold_1")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py
deleted file mode 100644
index c782d68d..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_1_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 1, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_1_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py
deleted file mode 100644
index 10a597bc..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2)"""
-
-from . import _maker
-
-dataset = _maker("fold_2")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py
deleted file mode 100644
index d624f3af..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_2_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 2, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_2_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py
deleted file mode 100644
index 39bee4fe..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3)"""
-
-from . import _maker
-
-dataset = _maker("fold_3")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py
deleted file mode 100644
index 7b26e425..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_3_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 3, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_3_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py
deleted file mode 100644
index 5fb56292..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4)"""
-
-from . import _maker
-
-dataset = _maker("fold_4")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py
deleted file mode 100644
index fbc4f0cf..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_4_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 4, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_4_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py
deleted file mode 100644
index 679bb9b3..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5)"""
-
-from . import _maker
-
-dataset = _maker("fold_5")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py
deleted file mode 100644
index 747d510e..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_5_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 5, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_5_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py
deleted file mode 100644
index cd8e4cd5..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6)"""
-
-from . import _maker
-
-dataset = _maker("fold_6")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py
deleted file mode 100644
index 86f112c3..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_6_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 6, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_6_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py
deleted file mode 100644
index 98241531..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7)"""
-
-from . import _maker
-
-dataset = _maker("fold_7")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py
deleted file mode 100644
index 981fe191..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_7_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 7, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_7_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py
deleted file mode 100644
index dab1a234..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8)"""
-
-from . import _maker
-
-dataset = _maker("fold_8")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py
deleted file mode 100644
index 798b8de6..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_8_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 8, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_8_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py
deleted file mode 100644
index 097724b9..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9)"""
-
-from . import _maker
-
-dataset = _maker("fold_9")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py
deleted file mode 100644
index c6c564a4..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/fold_9_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (cross validation fold 9, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_9_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py b/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py
deleted file mode 100644
index f47796a8..00000000
--- a/src/ptbench/configs/datasets/mc_ch_in_11kv2/rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
-TBX11K-simplified datasets (RGB)"""
-
-from . import _maker
-
-dataset = _maker("rgb")
diff --git a/src/ptbench/data/mc_ch_in_11kv2/__init__.py b/src/ptbench/data/mc_ch_in_11kv2/__init__.py
new file mode 100644
index 00000000..662d5c13
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/__init__.py
@@ -0,0 +1,3 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/src/ptbench/data/mc_ch_in_11kv2/default.py b/src/ptbench/data/mc_ch_in_11kv2/default.py
new file mode 100644
index 00000000..1eea460b
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/default.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.default import datamodule as indian_datamodule
+from ..montgomery.default import datamodule as mc_datamodule
+from ..shenzhen.default import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.default import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py
new file mode 100644
index 00000000..738164a7
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_0.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 0)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_0 import datamodule as indian_datamodule
+from ..montgomery.fold_0 import datamodule as mc_datamodule
+from ..shenzhen.fold_0 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_0 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py
new file mode 100644
index 00000000..c852a9a9
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_0_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 0, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_0_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_0_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_0_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_0_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py
new file mode 100644
index 00000000..bfc2dbfc
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_1.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 1)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_1 import datamodule as indian_datamodule
+from ..montgomery.fold_1 import datamodule as mc_datamodule
+from ..shenzhen.fold_1 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_1 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py
new file mode 100644
index 00000000..8f0c9174
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_1_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 1, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_1_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_1_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_1_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_1_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py
new file mode 100644
index 00000000..06c7c1ab
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_2.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 2)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_2 import datamodule as indian_datamodule
+from ..montgomery.fold_2 import datamodule as mc_datamodule
+from ..shenzhen.fold_2 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_2 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py
new file mode 100644
index 00000000..5ecaa2a7
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_2_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 2, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_2_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_2_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_2_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_2_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py
new file mode 100644
index 00000000..11309b1c
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_3.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 3)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_3 import datamodule as indian_datamodule
+from ..montgomery.fold_3 import datamodule as mc_datamodule
+from ..shenzhen.fold_3 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_3 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py
new file mode 100644
index 00000000..267d128e
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_3_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 3, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_3_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_3_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_3_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_3_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py
new file mode 100644
index 00000000..0f53ed63
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_4.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 4)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_4 import datamodule as indian_datamodule
+from ..montgomery.fold_4 import datamodule as mc_datamodule
+from ..shenzhen.fold_4 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_4 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py
new file mode 100644
index 00000000..9bdc8c23
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_4_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 4, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_4_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_4_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_4_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_4_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py
new file mode 100644
index 00000000..df7dc0e8
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_5.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 5)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5 import datamodule as indian_datamodule
+from ..montgomery.fold_5 import datamodule as mc_datamodule
+from ..shenzhen.fold_5 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_5 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py
new file mode 100644
index 00000000..ce3ffd47
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_5_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 5, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_5_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_5_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_5_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_5_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
new file mode 100644
index 00000000..ef246cd8
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_6.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 6)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6 import datamodule as indian_datamodule
+from ..montgomery.fold_6 import datamodule as mc_datamodule
+from ..shenzhen.fold_6 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_6 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py
new file mode 100644
index 00000000..bc892b79
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_6_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 6, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_6_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_6_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_6_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_6_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
new file mode 100644
index 00000000..41d3ba3e
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_7.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 7)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7 import datamodule as indian_datamodule
+from ..montgomery.fold_7 import datamodule as mc_datamodule
+from ..shenzhen.fold_7 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_7 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py
new file mode 100644
index 00000000..0fe1f282
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_7_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 7, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_7_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_7_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_7_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_7_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
new file mode 100644
index 00000000..a6842d47
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_8.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 8)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8 import datamodule as indian_datamodule
+from ..montgomery.fold_8 import datamodule as mc_datamodule
+from ..shenzhen.fold_8 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_8 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py
new file mode 100644
index 00000000..a330b2e6
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_8_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 8, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_8_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_8_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_8_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_8_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
new file mode 100644
index 00000000..ba69b788
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_9.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 9)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9 import datamodule as indian_datamodule
+from ..montgomery.fold_9 import datamodule as mc_datamodule
+from ..shenzhen.fold_9 import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_9 import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py b/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py
new file mode 100644
index 00000000..93296414
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/fold_9_rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (cross validation fold 9, RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.fold_9_rgb import datamodule as indian_datamodule
+from ..montgomery.fold_9_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_9_rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.fold_9_rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch_in_11kv2/rgb.py b/src/ptbench/data/mc_ch_in_11kv2/rgb.py
new file mode 100644
index 00000000..c99b8fe9
--- /dev/null
+++ b/src/ptbench/data/mc_ch_in_11kv2/rgb.py
@@ -0,0 +1,101 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery, Shenzhen, Indian and the default
+TBX11K-simplified datasets (RGB)"""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule, get_dataset_from_module
+from ..indian.rgb import datamodule as indian_datamodule
+from ..montgomery.rgb import datamodule as mc_datamodule
+from ..shenzhen.rgb import datamodule as ch_datamodule
+from ..tbx11k_simplified_v2.rgb import datamodule as tbx11kv2_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+
+        module_args = {
+            "train_batch_size": self.train_batch_size,
+            "predict_batch_size": self.predict_batch_size,
+            "drop_incomplete_batch": self.drop_incomplete_batch,
+            "multiproc_kwargs": self.multiproc_kwargs,
+        }
+
+        mc = get_dataset_from_module(mc_datamodule, stage, **module_args)
+        ch = get_dataset_from_module(ch_datamodule, stage, **module_args)
+        indian = get_dataset_from_module(
+            indian_datamodule, stage, **module_args
+        )
+        tbx11kv2 = get_dataset_from_module(
+            tbx11kv2_datamodule, stage, **module_args
+        )
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [
+                mc["__train__"],
+                ch["__train__"],
+                indian["__train__"],
+                tbx11kv2["__train__"],
+            ]
+        )
+        self.dataset["train"] = ConcatDataset(
+            [mc["train"], ch["train"], indian["train"], tbx11kv2["train"]]
+        )
+        self.dataset["__valid__"] = ConcatDataset(
+            [
+                mc["__valid__"],
+                ch["__valid__"],
+                indian["__valid__"],
+                tbx11kv2["__valid__"],
+            ]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [
+                mc["validation"],
+                ch["validation"],
+                indian["validation"],
+                tbx11kv2["validation"],
+            ]
+        )
+        self.dataset["test"] = ConcatDataset(
+            [mc["test"], ch["test"], indian["test"], tbx11kv2["test"]]
+        )
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
-- 
GitLab