diff --git a/pyproject.toml b/pyproject.toml
index 8e237195349bda4087ffe8845e96897fa6f21958..1c57f5c502ceac58925b8a986245cd17ea564cdf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -257,28 +257,28 @@ tbx11k_simplified_v2_rs_f7 = "ptbench.configs.datasets.tbx11k_simplified_v2_RS.f
 tbx11k_simplified_v2_rs_f8 = "ptbench.configs.datasets.tbx11k_simplified_v2_RS.fold_8"
 tbx11k_simplified_v2_rs_f9 = "ptbench.configs.datasets.tbx11k_simplified_v2_RS.fold_9"
 # montgomery-shenzhen aggregated dataset
-mc_ch = "ptbench.configs.datasets.mc_ch.default"
-mc_ch_rgb = "ptbench.configs.datasets.mc_ch.rgb"
-mc_ch_f0 = "ptbench.configs.datasets.mc_ch.fold_0"
-mc_ch_f1 = "ptbench.configs.datasets.mc_ch.fold_1"
-mc_ch_f2 = "ptbench.configs.datasets.mc_ch.fold_2"
-mc_ch_f3 = "ptbench.configs.datasets.mc_ch.fold_3"
-mc_ch_f4 = "ptbench.configs.datasets.mc_ch.fold_4"
-mc_ch_f5 = "ptbench.configs.datasets.mc_ch.fold_5"
-mc_ch_f6 = "ptbench.configs.datasets.mc_ch.fold_6"
-mc_ch_f7 = "ptbench.configs.datasets.mc_ch.fold_7"
-mc_ch_f8 = "ptbench.configs.datasets.mc_ch.fold_8"
-mc_ch_f9 = "ptbench.configs.datasets.mc_ch.fold_9"
-mc_ch_f0_rgb = "ptbench.configs.datasets.mc_ch.fold_0_rgb"
-mc_ch_f1_rgb = "ptbench.configs.datasets.mc_ch.fold_1_rgb"
-mc_ch_f2_rgb = "ptbench.configs.datasets.mc_ch.fold_2_rgb"
-mc_ch_f3_rgb = "ptbench.configs.datasets.mc_ch.fold_3_rgb"
-mc_ch_f4_rgb = "ptbench.configs.datasets.mc_ch.fold_4_rgb"
-mc_ch_f5_rgb = "ptbench.configs.datasets.mc_ch.fold_5_rgb"
-mc_ch_f6_rgb = "ptbench.configs.datasets.mc_ch.fold_6_rgb"
-mc_ch_f7_rgb = "ptbench.configs.datasets.mc_ch.fold_7_rgb"
-mc_ch_f8_rgb = "ptbench.configs.datasets.mc_ch.fold_8_rgb"
-mc_ch_f9_rgb = "ptbench.configs.datasets.mc_ch.fold_9_rgb"
+mc_ch = "ptbench.data.mc_ch.default"
+mc_ch_rgb = "ptbench.data.mc_ch.rgb"
+mc_ch_f0 = "ptbench.data.mc_ch.fold_0"
+mc_ch_f1 = "ptbench.data.mc_ch.fold_1"
+mc_ch_f2 = "ptbench.data.mc_ch.fold_2"
+mc_ch_f3 = "ptbench.data.mc_ch.fold_3"
+mc_ch_f4 = "ptbench.data.mc_ch.fold_4"
+mc_ch_f5 = "ptbench.data.mc_ch.fold_5"
+mc_ch_f6 = "ptbench.data.mc_ch.fold_6"
+mc_ch_f7 = "ptbench.data.mc_ch.fold_7"
+mc_ch_f8 = "ptbench.data.mc_ch.fold_8"
+mc_ch_f9 = "ptbench.data.mc_ch.fold_9"
+mc_ch_f0_rgb = "ptbench.data.mc_ch.fold_0_rgb"
+mc_ch_f1_rgb = "ptbench.data.mc_ch.fold_1_rgb"
+mc_ch_f2_rgb = "ptbench.data.mc_ch.fold_2_rgb"
+mc_ch_f3_rgb = "ptbench.data.mc_ch.fold_3_rgb"
+mc_ch_f4_rgb = "ptbench.data.mc_ch.fold_4_rgb"
+mc_ch_f5_rgb = "ptbench.data.mc_ch.fold_5_rgb"
+mc_ch_f6_rgb = "ptbench.data.mc_ch.fold_6_rgb"
+mc_ch_f7_rgb = "ptbench.data.mc_ch.fold_7_rgb"
+mc_ch_f8_rgb = "ptbench.data.mc_ch.fold_8_rgb"
+mc_ch_f9_rgb = "ptbench.data.mc_ch.fold_9_rgb"
 # extended montgomery-shenzhen aggregated dataset
 # (with radiological signs)
 mc_ch_rs = "ptbench.configs.datasets.mc_ch_RS.default"
diff --git a/src/ptbench/configs/datasets/mc_ch/__init__.py b/src/ptbench/configs/datasets/mc_ch/__init__.py
deleted file mode 100644
index c5c6ed62e60b510ea0bf7239deb12fc895df9111..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/__init__.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from torch.utils.data.dataset import ConcatDataset
-
-
-def _maker(protocol):
-    if protocol == "default":
-        from ..montgomery import default as mc
-        from ..shenzhen import default as ch
-    elif protocol == "rgb":
-        from ..montgomery import rgb as mc
-        from ..shenzhen import rgb as ch
-    elif protocol == "fold_0":
-        from ..montgomery import fold_0 as mc
-        from ..shenzhen import fold_0 as ch
-    elif protocol == "fold_1":
-        from ..montgomery import fold_1 as mc
-        from ..shenzhen import fold_1 as ch
-    elif protocol == "fold_2":
-        from ..montgomery import fold_2 as mc
-        from ..shenzhen import fold_2 as ch
-    elif protocol == "fold_3":
-        from ..montgomery import fold_3 as mc
-        from ..shenzhen import fold_3 as ch
-    elif protocol == "fold_4":
-        from ..montgomery import fold_4 as mc
-        from ..shenzhen import fold_4 as ch
-    elif protocol == "fold_5":
-        from ..montgomery import fold_5 as mc
-        from ..shenzhen import fold_5 as ch
-    elif protocol == "fold_6":
-        from ..montgomery import fold_6 as mc
-        from ..shenzhen import fold_6 as ch
-    elif protocol == "fold_7":
-        from ..montgomery import fold_7 as mc
-        from ..shenzhen import fold_7 as ch
-    elif protocol == "fold_8":
-        from ..montgomery import fold_8 as mc
-        from ..shenzhen import fold_8 as ch
-    elif protocol == "fold_9":
-        from ..montgomery import fold_9 as mc
-        from ..shenzhen import fold_9 as ch
-    elif protocol == "fold_0_rgb":
-        from ..montgomery import fold_0_rgb as mc
-        from ..shenzhen import fold_0_rgb as ch
-    elif protocol == "fold_1_rgb":
-        from ..montgomery import fold_1_rgb as mc
-        from ..shenzhen import fold_1_rgb as ch
-    elif protocol == "fold_2_rgb":
-        from ..montgomery import fold_2_rgb as mc
-        from ..shenzhen import fold_2_rgb as ch
-    elif protocol == "fold_3_rgb":
-        from ..montgomery import fold_3_rgb as mc
-        from ..shenzhen import fold_3_rgb as ch
-    elif protocol == "fold_4_rgb":
-        from ..montgomery import fold_4_rgb as mc
-        from ..shenzhen import fold_4_rgb as ch
-    elif protocol == "fold_5_rgb":
-        from ..montgomery import fold_5_rgb as mc
-        from ..shenzhen import fold_5_rgb as ch
-    elif protocol == "fold_6_rgb":
-        from ..montgomery import fold_6_rgb as mc
-        from ..shenzhen import fold_6_rgb as ch
-    elif protocol == "fold_7_rgb":
-        from ..montgomery import fold_7_rgb as mc
-        from ..shenzhen import fold_7_rgb as ch
-    elif protocol == "fold_8_rgb":
-        from ..montgomery import fold_8_rgb as mc
-        from ..shenzhen import fold_8_rgb as ch
-    elif protocol == "fold_9_rgb":
-        from ..montgomery import fold_9_rgb as mc
-        from ..shenzhen import fold_9_rgb as ch
-
-    mc = mc.dataset
-    ch = ch.dataset
-
-    dataset = {}
-    dataset["__train__"] = ConcatDataset([mc["__train__"], ch["__train__"]])
-    dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
-    dataset["__valid__"] = ConcatDataset([mc["__valid__"], ch["__valid__"]])
-    dataset["validation"] = ConcatDataset([mc["validation"], ch["validation"]])
-    dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
-
-    return dataset
diff --git a/src/ptbench/configs/datasets/mc_ch/default.py b/src/ptbench/configs/datasets/mc_ch/default.py
deleted file mode 100644
index d3fd59885933615c3a4fc5ba4c459f2d36a035d9..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/default.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
-
-from . import _maker
-
-dataset = _maker("default")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_0.py b/src/ptbench/configs/datasets/mc_ch/fold_0.py
deleted file mode 100644
index e2d5d3f3fda869e0949c629774aa420592e53c6a..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_0.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 0)"""
-
-from . import _maker
-
-dataset = _maker("fold_0")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_0_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_0_rgb.py
deleted file mode 100644
index 4ccdd5379f5bfcd01897cdc569be23d04d4b0e45..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_0_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 0, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_0_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_1.py b/src/ptbench/configs/datasets/mc_ch/fold_1.py
deleted file mode 100644
index fa08164bdf109333791946466ea02f1947d35881..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_1.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 1)"""
-
-from . import _maker
-
-dataset = _maker("fold_1")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_1_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_1_rgb.py
deleted file mode 100644
index 8243397c6f38bc2ab39f904d3afe4b890083fd56..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_1_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 1, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_1_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_2.py b/src/ptbench/configs/datasets/mc_ch/fold_2.py
deleted file mode 100644
index 43c243014500cd8a3db13f8f26ab0fc1cf09d8d1..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_2.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 2)"""
-
-from . import _maker
-
-dataset = _maker("fold_2")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_2_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_2_rgb.py
deleted file mode 100644
index 1687b78e05054c8330cde14c2614223ef2f26f87..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_2_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 2, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_2_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_3.py b/src/ptbench/configs/datasets/mc_ch/fold_3.py
deleted file mode 100644
index df33cf812add6f3ccb554fdf2a2017dcfb43929d..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 3)"""
-
-from . import _maker
-
-dataset = _maker("fold_3")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_3_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_3_rgb.py
deleted file mode 100644
index 4378c8b84cfbb17a00d37f500f573a687ec400ed..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_3_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 3, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_3_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_4.py b/src/ptbench/configs/datasets/mc_ch/fold_4.py
deleted file mode 100644
index b74104a3c3fc6216f0880e5abf19be6ae9f391ff..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_4.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 4)"""
-
-from . import _maker
-
-dataset = _maker("fold_4")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_4_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_4_rgb.py
deleted file mode 100644
index cba3284d4a51f15b1546781894fd37e55f62bf7a..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_4_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 4, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_4_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_5.py b/src/ptbench/configs/datasets/mc_ch/fold_5.py
deleted file mode 100644
index 2bd11c0b058e3b6feabd9bbd824e10de66bc4c15..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_5.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 5)"""
-
-from . import _maker
-
-dataset = _maker("fold_5")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_5_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_5_rgb.py
deleted file mode 100644
index b9a7ca14fc8399a894dc441c813132ba4691ed43..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_5_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 5, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_5_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_6.py b/src/ptbench/configs/datasets/mc_ch/fold_6.py
deleted file mode 100644
index af7e9b2edf62b7eb94e04e30b5f848bc68b8f28b..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 6)"""
-
-from . import _maker
-
-dataset = _maker("fold_6")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_6_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_6_rgb.py
deleted file mode 100644
index b27ddf2708d3eac46039685c931471c5c432af88..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_6_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 6, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_6_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_7.py b/src/ptbench/configs/datasets/mc_ch/fold_7.py
deleted file mode 100644
index dc0f8c899f7f029e380fd696a23e850278badad0..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_7.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 7)"""
-
-from . import _maker
-
-dataset = _maker("fold_7")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_7_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_7_rgb.py
deleted file mode 100644
index e9ad682d56ea68759e450b6068786e7940d87b4f..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_7_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 7, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_7_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_8.py b/src/ptbench/configs/datasets/mc_ch/fold_8.py
deleted file mode 100644
index b7c4a02a9cb1688d272f430f46f928e2749e392f..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_8.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 8)"""
-
-from . import _maker
-
-dataset = _maker("fold_8")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_8_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_8_rgb.py
deleted file mode 100644
index b7764a3357ee81766f3ea3664f349f4e9e36183c..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_8_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 8, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_8_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_9.py b/src/ptbench/configs/datasets/mc_ch/fold_9.py
deleted file mode 100644
index 8481ae14a188a82945197776c12f4c89846736d1..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_9.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 9)"""
-
-from . import _maker
-
-dataset = _maker("fold_9")
diff --git a/src/ptbench/configs/datasets/mc_ch/fold_9_rgb.py b/src/ptbench/configs/datasets/mc_ch/fold_9_rgb.py
deleted file mode 100644
index e4f565f58656e3fe164925942ef4a8d2669ffae6..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/fold_9_rgb.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
-validation fold 9, RGB)"""
-
-from . import _maker
-
-dataset = _maker("fold_9_rgb")
diff --git a/src/ptbench/configs/datasets/mc_ch/rgb.py b/src/ptbench/configs/datasets/mc_ch/rgb.py
deleted file mode 100644
index 272e8dce6a54d58aa9b99863bc313ad54e406dcf..0000000000000000000000000000000000000000
--- a/src/ptbench/configs/datasets/mc_ch/rgb.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Aggregated dataset composed of Montgomery and Shenzhen (RGB) datasets."""
-
-from . import _maker
-
-dataset = _maker("rgb")
diff --git a/src/ptbench/data/mc_ch/__init__.py b/src/ptbench/data/mc_ch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..662d5c1326651b4d9f48d47bc4b503df23d17216
--- /dev/null
+++ b/src/ptbench/data/mc_ch/__init__.py
@@ -0,0 +1,3 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/src/ptbench/data/mc_ch/default.py b/src/ptbench/data/mc_ch/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..0af1d68682ec2b91ae8febf1adcebf22704f0982
--- /dev/null
+++ b/src/ptbench/data/mc_ch/default.py
@@ -0,0 +1,89 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.default import datamodule as mc_datamodule
+from ..shenzhen.default import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
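+    """Aggregated datamodule composed of the Montgomery and Shenzhen
+    (default protocol) datamodules, concatenated subset by subset."""
+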
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
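+# Module-level alias; dataset modules in this package expose their
+# datamodule under this name (see the entry points in pyproject.toml)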
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_0.py b/src/ptbench/data/mc_ch/fold_0.py
new file mode 100644
index 0000000000000000000000000000000000000000..e151c0e0eb0703b8a8b3bc83f68d8f020053cc14
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_0.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 0)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_0 import datamodule as mc_datamodule
+from ..shenzhen.fold_0 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_0_rgb.py b/src/ptbench/data/mc_ch/fold_0_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..56502b24651879a8106ddd6228eee70e1c4de205
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_0_rgb.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 0, RGB)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_0_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_0_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_1.py b/src/ptbench/data/mc_ch/fold_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..732513da40421d271b86a087731a56f6e1bafbd5
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_1.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 1)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_1 import datamodule as mc_datamodule
+from ..shenzhen.fold_1 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_1_rgb.py b/src/ptbench/data/mc_ch/fold_1_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cfbcb366f71146cc7f20c5d27ea1998cd209bc1
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_1_rgb.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 1, RGB)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_1_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_1_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_2.py b/src/ptbench/data/mc_ch/fold_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d4ac5b58bb7d2500932061be0e9a252139e6274
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_2.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 2)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_2 import datamodule as mc_datamodule
+from ..shenzhen.fold_2 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_2_rgb.py b/src/ptbench/data/mc_ch/fold_2_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..eec98dca20441bf30fb6ae8250303dd970b68148
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_2_rgb.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 2, RGB)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_2_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_2_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_3.py b/src/ptbench/data/mc_ch/fold_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..b97b5e944b94797c518a56b8f0021b5b890766ec
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_3.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 3)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_3 import datamodule as mc_datamodule
+from ..shenzhen.fold_3 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_3_rgb.py b/src/ptbench/data/mc_ch/fold_3_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..e380dc33b3653632a3c8f7f5324ab2b0bfb737e2
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_3_rgb.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 3, RGB)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_3_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_3_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_4.py b/src/ptbench/data/mc_ch/fold_4.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd906b53064644499f45a07f082e2f91acb76fc
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_4.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 4)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_4 import datamodule as mc_datamodule
+from ..shenzhen.fold_4 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_4_rgb.py b/src/ptbench/data/mc_ch/fold_4_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ba0ecfebd957bf6778fd0c34731974ddec50434
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_4_rgb.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 4, RGB)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_4_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_4_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_5.py b/src/ptbench/data/mc_ch/fold_5.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f20a33b633df0aba8b65e5849709634b96cce6e
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_5.py
@@ -0,0 +1,85 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets (cross
+validation fold 5)."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_5 import datamodule as mc_datamodule
+from ..shenzhen.fold_5 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate other datamodules and get their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_5_rgb.py b/src/ptbench/data/mc_ch/fold_5_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..61159ed6e37be4e898ac37d3d2b1cf4f71b5347a
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_5_rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_5_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_5_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_6.py b/src/ptbench/data/mc_ch/fold_6.py
new file mode 100644
index 0000000000000000000000000000000000000000..7413791054f2d14948f3ee4f5f12817e172872a8
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_6.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_6 import datamodule as mc_datamodule
+from ..shenzhen.fold_6 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_6_rgb.py b/src/ptbench/data/mc_ch/fold_6_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..79abe09b973fe27a54af0cd1db6d855f1e93d2f4
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_6_rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_6_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_6_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_7.py b/src/ptbench/data/mc_ch/fold_7.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94621e61d846e596ff8422c8abce39c896ee028
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_7.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_7 import datamodule as mc_datamodule
+from ..shenzhen.fold_7 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_7_rgb.py b/src/ptbench/data/mc_ch/fold_7_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..90b866e1d0b506e3d5d03a76472815fb6165294f
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_7_rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_7_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_7_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_8.py b/src/ptbench/data/mc_ch/fold_8.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa52bc818fd9dc4daff55c6691ada122813fbfd8
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_8.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_8 import datamodule as mc_datamodule
+from ..shenzhen.fold_8 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_8_rgb.py b/src/ptbench/data/mc_ch/fold_8_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..3df1838d21c7c8c3cdb4fb686edad87f053e564f
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_8_rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_8_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_8_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_9.py b/src/ptbench/data/mc_ch/fold_9.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bb4a5a3c9b70a35d4cd7665bc5221ae3948419e
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_9.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_9 import datamodule as mc_datamodule
+from ..shenzhen.fold_9 import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/fold_9_rgb.py b/src/ptbench/data/mc_ch/fold_9_rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a07ffce4a0b6d567e85eff06162a845a3f4c6bab
--- /dev/null
+++ b/src/ptbench/data/mc_ch/fold_9_rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.fold_9_rgb import datamodule as mc_datamodule
+from ..shenzhen.fold_9_rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule
diff --git a/src/ptbench/data/mc_ch/rgb.py b/src/ptbench/data/mc_ch/rgb.py
new file mode 100644
index 0000000000000000000000000000000000000000..05407eae2ee826f5706770bbec3feb2e6aa9426f
--- /dev/null
+++ b/src/ptbench/data/mc_ch/rgb.py
@@ -0,0 +1,84 @@
+# Copyright © 2022 Idiap Research Institute <contact@idiap.ch>
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Aggregated dataset composed of Montgomery and Shenzhen datasets."""
+
+from clapper.logging import setup
+from torch.utils.data.dataset import ConcatDataset
+
+from .. import return_subsets
+from ..base_datamodule import BaseDataModule
+from ..montgomery.rgb import datamodule as mc_datamodule
+from ..shenzhen.rgb import datamodule as ch_datamodule
+
+logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
+
+
+class DefaultModule(BaseDataModule):
+    def __init__(
+        self,
+        train_batch_size=1,
+        predict_batch_size=1,
+        drop_incomplete_batch=False,
+        multiproc_kwargs=None,
+    ):
+        self.train_batch_size = train_batch_size
+        self.predict_batch_size = predict_batch_size
+        self.drop_incomplete_batch = drop_incomplete_batch
+        self.multiproc_kwargs = multiproc_kwargs
+
+        super().__init__(
+            train_batch_size=train_batch_size,
+            predict_batch_size=predict_batch_size,
+            drop_incomplete_batch=drop_incomplete_batch,
+            multiproc_kwargs=multiproc_kwargs,
+        )
+
+    def setup(self, stage: str):
+        # Instantiate the constituent datamodules and collect their datasets
+        mc_module = mc_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        mc_module.prepare_data()
+        mc_module.setup(stage=stage)
+        mc = mc_module.dataset
+
+        ch_module = ch_datamodule(
+            train_batch_size=self.train_batch_size,
+            predict_batch_size=self.predict_batch_size,
+            drop_incomplete_batch=self.drop_incomplete_batch,
+            multiproc_kwargs=self.multiproc_kwargs,
+        )
+
+        ch_module.prepare_data()
+        ch_module.setup(stage=stage)
+        ch = ch_module.dataset
+
+        # Combine datasets
+        self.dataset = {}
+        self.dataset["__train__"] = ConcatDataset(
+            [mc["__train__"], ch["__train__"]]
+        )
+        self.dataset["train"] = ConcatDataset([mc["train"], ch["train"]])
+        self.dataset["__valid__"] = ConcatDataset(
+            [mc["__valid__"], ch["__valid__"]]
+        )
+        self.dataset["validation"] = ConcatDataset(
+            [mc["validation"], ch["validation"]]
+        )
+        self.dataset["test"] = ConcatDataset([mc["test"], ch["test"]])
+
+        (
+            self.train_dataset,
+            self.validation_dataset,
+            self.extra_validation_datasets,
+            self.predict_dataset,
+        ) = return_subsets(self.dataset)
+
+
+datamodule = DefaultModule