diff --git a/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/__init__.py b/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_50_batl_3_layer_finetune.pth b/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_50_batl_3_layer_finetune.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b2dfc3c9b5f079f2d0b1af2ae54c358d37a21f08
Binary files /dev/null and b/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_50_batl_3_layer_finetune.pth differ
diff --git a/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_70_celeba_pretrain.pth b/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_70_celeba_pretrain.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9f4496ec508fb0df643ef62a2ad784ba900c4ce9
Binary files /dev/null and b/bob/paper/mcae/icb2019/config/autoencoder/1_ae/models/experiment_0/model_70_celeba_pretrain.pth differ
diff --git a/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_batl_3_layer_ae_finetune.pth b/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_batl_3_layer_ae_finetune.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fd62ebba3d73dc24d9c30c1674045fc2f4861870
Binary files /dev/null and b/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_batl_3_layer_ae_finetune.pth differ
diff --git a/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_celeba_ae_pretrain.pth b/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_celeba_ae_pretrain.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4935113c588e4eeda908fb4716cddd922676c6c0
Binary files /dev/null and b/bob/paper/mcae/icb2019/config/mlp/models/mlp_for_1_ae_embeddings/mlp_model_celeba_ae_pretrain.pth differ
diff --git a/setup.py b/setup.py
index f98f32cf6120d13947fe78f6d60e1bbd1c224cc9..dc0810b621268acd744106ac58a23e1722535722 100644
--- a/setup.py
+++ b/setup.py
@@ -127,6 +127,11 @@ setup(
             'mc-pad-bw-nir-d-9-ae-celeba-pretrain = bob.paper.mcae.icb2019.config.autoencoder.9_ae.mc_pad_config_ae_celeba_pretrain',
             # multi-channel BW-NIR-D PAD using **9** autoencoders in the extractor, and an MLP algorithm with 10 ReLU units. AE pre-trained using RGB patches from CelebA, and fine-tuned using MC data from BATL:
             'mc-pad-bw-nir-d-9-ae-batl-3-layers-finetune = bob.paper.mcae.icb2019.config.autoencoder.9_ae.mc_pad_config_ae_batl_3_layers_finetune',
+
+            # multi-channel BW-NIR-D PAD using **1** autoencoder in the extractor, and an MLP algorithm with 10 ReLU units. AE pre-trained using RGB patches from CelebA:
+            'mc-pad-bw-nir-d-1-ae-celeba-pretrain = bob.paper.mcae.icb2019.config.autoencoder.1_ae.mc_pad_config_ae_celeba_pretrain',
+            # multi-channel BW-NIR-D PAD using **1** autoencoder in the extractor, and an MLP algorithm with 10 ReLU units. AE pre-trained using RGB patches from CelebA, and fine-tuned using MC data from BATL:
+            'mc-pad-bw-nir-d-1-ae-batl-3-layers-finetune = bob.paper.mcae.icb2019.config.autoencoder.1_ae.mc_pad_config_ae_batl_3_layers_finetune',
         ],
     },