diff --git a/bob/bio/demographics/datasets.py b/bob/bio/demographics/datasets.py
index 2171d0600d15ade8f8f24fea934ba8c315b09251..317ba16288936eff653d78dc159174c1d809ff45 100644
--- a/bob/bio/demographics/datasets.py
+++ b/bob/bio/demographics/datasets.py
@@ -78,12 +78,44 @@ class MorphTorchDataset(DemoraphicTorchDataset):
         self, protocol, database_path, database_extension=".h5", transform=None
     ):
 
-        bob_dataset = MorphDatabase(
+        self.bob_dataset = MorphDatabase(
             protocol=protocol,
             dataset_original_directory=database_path,
             dataset_original_extension=database_extension,
         )
-        super().__init__(bob_dataset, transform=transform)
+
+        # The Morph protocol has an intersection between zprobes and treferences;
+        # exclude these subjects so each identity appears only once
+        self.excluding_list = [
+            "190276",
+            "332158",
+            "111942",
+            "308129",
+            "334074",
+            "350814",
+            "131677",
+            "168724",
+            "276055",
+            "275589",
+            "286810",
+        ]
+
+        self.bucket = [s for sset in self.bob_dataset.zprobes() for s in sset]
+        self.bucket += [
+            s
+            for sset in self.bob_dataset.treferences()
+            for s in sset
+            if sset.subject_id not in self.excluding_list
+        ]
+
+        # Defining keys and labels. Deduplicate subject ids so that the label
+        # indices stay contiguous in [0, n_subjects)
+        keys = sorted(set(b.subject_id for b in self.bucket))
+
+        self.labels = dict(zip(keys, range(len(keys))))
+
+        self.demographic_keys = self.load_demographics()
+        self.transform = transform
 
     def load_demographics(self):
 
@@ -91,7 +123,11 @@ class MorphTorchDataset(DemoraphicTorchDataset):
 
         metadata_keys = set(
             [f"{sset.rac}-{sset.sex}" for sset in self.bob_dataset.zprobes()]
-            + [f"{sset.rac}-{sset.sex}" for sset in self.bob_dataset.treferences()]
+            + [
+                f"{sset.rac}-{sset.sex}"
+                for sset in self.bob_dataset.treferences()
+                if sset.subject_id not in self.excluding_list
+            ]
         )
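+        # Map each "race-sex" string (e.g., "W-M") to an integer class index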
         metadata_keys = dict(zip(metadata_keys, range(len(metadata_keys))))
         return metadata_keys
diff --git a/bob/bio/demographics/experiments/__init__.py b/bob/bio/demographics/experiments/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py b/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py
new file mode 100644
index 0000000000000000000000000000000000000000..d65a5f99efc4dfd4912a6bb7b961e4f3478cc1d6
--- /dev/null
+++ b/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py
@@ -0,0 +1,78 @@
+import click
+from bob.bio.face.embeddings.pytorch import iresnet_template
+from bob.bio.demographics.fair_transformers import RunnableTransformer
+from functools import partial
+import os
+from bob.extension.scripts.click_helper import ResourceOption
+from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics
+from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
+from bob.pipelines.distributed import VALID_DASK_CLIENT_STRINGS
+from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
+
+
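+# Example invocation (paths are illustrative):
+#   python meds.py iresnet100 /path/to/model.pth /path/to/experiment-output/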
+@click.command()
+@click.argument("BACKBONE")
+@click.argument("CHECKPOINT-PATH")
+@click.argument("EXPERIMENT-PATH")
+@click.option(
+    "--dask-client",
+    "-l",
+    entry_point_group="dask.client",
+    string_exceptions=VALID_DASK_CLIENT_STRINGS,
+    default="single-threaded",
+    help="Dask client for the execution of the pipeline.",
+    cls=ResourceOption,
+)
+def meds_experiment(backbone, checkpoint_path, experiment_path, dask_client, **kwargs):
+
+    annotation_type = "eyes-center"
+    fixed_positions = None
+    memory_demanding = False
+
+    backbone = AVAILABLE_BACKBONES[backbone]["structure"]
+
+    # Loading the pipeline
+    pipeline = iresnet_template(
+        embedding=RunnableTransformer(
+            partial(backbone, pretrained=checkpoint_path),
+            memory_demanding=memory_demanding,
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+    face_crop_path = os.path.join(experiment_path, "../face-crop/")
+
+    # Checkpointing the pipeline
+    pipeline = checkpoint_vanilla_biometrics(pipeline, base_dir=experiment_path)
+
+    pipeline.transformer[0].features_dir = face_crop_path
+    dask_partition_size = 100
+
+    #### DATABASE
+
+    from bob.bio.face.database import MEDSDatabase
+
+    protocol = "verification_fold1"
+    database = MEDSDatabase(protocol=protocol)
+
+    groups = ["dev", "eval"]
+    execute_vanilla_biometrics(
+        pipeline,
+        database,
+        dask_client,
+        groups,
+        experiment_path,
+        write_metadata_scores=True,
+        checkpoint=False,
+        dask_partition_size=dask_partition_size,
+        dask_n_workers=10,
+        allow_scoring_with_all_biometric_references=True,
+    )
+
+
+if __name__ == "__main__":
+    meds_experiment()
diff --git a/bob/bio/demographics/experiments/evaluation/ortogonality/morph.py b/bob/bio/demographics/experiments/evaluation/ortogonality/morph.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ed5fce47572afaac01162f3d27fd14427b925a4
--- /dev/null
+++ b/bob/bio/demographics/experiments/evaluation/ortogonality/morph.py
@@ -0,0 +1,81 @@
+import click
+from bob.bio.face.embeddings.pytorch import iresnet_template
+from bob.bio.demographics.fair_transformers import RunnableTransformer
+from functools import partial
+import os
+from bob.extension.scripts.click_helper import ResourceOption
+from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics
+from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
+from bob.pipelines.distributed import VALID_DASK_CLIENT_STRINGS
+
+from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
+
+
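+# Example invocation (paths are illustrative):
+#   python morph.py iresnet100 /path/to/model.pth /path/to/experiment-output/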
+@click.command()
+@click.argument("BACKBONE")
+@click.argument("CHECKPOINT-PATH")
+@click.argument("EXPERIMENT-PATH")
+@click.option(
+    "--dask-client",
+    "-l",
+    entry_point_group="dask.client",
+    string_exceptions=VALID_DASK_CLIENT_STRINGS,
+    default="single-threaded",
+    help="Dask client for the execution of the pipeline.",
+    cls=ResourceOption,
+)
+def morph_experiment(backbone, checkpoint_path, experiment_path, dask_client, **kwargs):
+
+    annotation_type = "eyes-center"
+    fixed_positions = None
+    memory_demanding = False
+
+    backbone = AVAILABLE_BACKBONES[backbone]["structure"]
+
+    # Loading the pipeline
+    pipeline = iresnet_template(
+        embedding=RunnableTransformer(
+            partial(backbone, pretrained=checkpoint_path),
+            memory_demanding=memory_demanding,
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+    face_crop_path = os.path.join(experiment_path, "../face-crop/")
+
+    # Checkpointing the pipeline
+    pipeline = checkpoint_vanilla_biometrics(pipeline, base_dir=experiment_path)
+
+    pipeline.transformer[0].features_dir = face_crop_path
+    dask_partition_size = 100
+
+    #### DATABASE
+
+    from bob.bio.face.database import MorphDatabase
+
+    protocol = "verification_fold1"
+    database = MorphDatabase(protocol=protocol)
+
+    groups = ["dev", "eval"]
+    execute_vanilla_biometrics(
+        pipeline,
+        database,
+        dask_client,
+        groups,
+        experiment_path,
+        write_metadata_scores=True,
+        checkpoint=False,
+        dask_partition_size=dask_partition_size,
+        dask_n_workers=10,
+        allow_scoring_with_all_biometric_references=True,
+    )
+
+
+if __name__ == "__main__":
+    morph_experiment()
diff --git a/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py b/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py
deleted file mode 100644
index 12ae523a690626c1aace7afabee4d7e2a3ffb676..0000000000000000000000000000000000000000
--- a/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from bob.bio.face.embeddings.pytorch import PyTorchModel, iresnet_template
-from bob.learn.pytorch.architectures.iresnet import iresnet100
-from bob.bio.demographics.fair_transformers import RunnableTransformer
-from functools import partial
-
-annotation_type = "eyes-center"
-fixed_positions = None
-memory_demanding = False
-
-# checkpoint_path = "/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality_hypothesis/meds/iresnet100.pth"
-checkpoint_path = "/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality_hypothesis/meds_identity-10.0_orthogonality-1.0/iresnet100.pth"
-
-pipeline = iresnet_template(
-    embedding=RunnableTransformer(
-        partial(iresnet100, pretrained=checkpoint_path),
-        memory_demanding=memory_demanding,
-    ),
-    annotation_type=annotation_type,
-    fixed_positions=fixed_positions,
-)
-
-
-#### DATABASE
-
-from bob.bio.face.database import MEDSDatabase
-
-protocol = "verification_fold1"
-database = MEDSDatabase(protocol=protocol)
-
-# output = (
-# "/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies/vanilla-callback"
-# )
diff --git a/bob/bio/demographics/experiments/train/__init__.py b/bob/bio/demographics/experiments/train/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/demographics/experiments/train/mine/__init__.py b/bob/bio/demographics/experiments/train/mine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/demographics/experiments/train/mine/meds.py b/bob/bio/demographics/experiments/train/mine/meds.py
new file mode 100644
index 0000000000000000000000000000000000000000..afa6441b4b4c8c538e7354c6af0e3c56472dd52e
--- /dev/null
+++ b/bob/bio/demographics/experiments/train/mine/meds.py
@@ -0,0 +1,93 @@
+from bob.bio.demographics.datasets import MedsTorchDataset
+
+# https://pytorch.org/docs/stable/data.html
+from torch.utils.data import DataLoader
+from bob.extension import rc
+import os
+
+import bob.io.image
+
+import torchvision.transforms as transforms
+import click
+
+from bob.bio.demographics.regularizers.trainers import mine_trainer
+
+
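+# Example invocation (paths are illustrative):
+#   python meds.py /path/to/output-dir --identity-factor 1.0 --mine-factor 1.0 \
+#       --backbone iresnet100 --max-epochs 1000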
+@click.command()
+@click.argument("OUTPUT_DIR")
+@click.option("--identity-factor", default=1.0, help="Identity factor")
+@click.option("--mine-factor", default=1.0, help="MINE factor")
+@click.option("--max-epochs", default=600, help="Max number of epochs")
+@click.option(
+    "--demographic-epochs",
+    default=100,
+    help="Number of epochs to train the demographic classifier",
+)
+@click.option(
+    "--identity-epochs",
+    default=200,
+    help="Number of epochs to train the identity classifier",
+)
+@click.option("--batch-size", default=64, help="Batch size")
+@click.option("--backbone", default="iresnet100", help="Backbone")
+def mine_meds(
+    output_dir,
+    identity_factor,
+    mine_factor,
+    max_epochs,
+    demographic_epochs,
+    identity_epochs,
+    batch_size,
+    backbone,
+):
+
+    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
+
+    database_path = os.path.join(
+        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
+    )
+
+    transform = transforms.Compose(
+        [
+            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
+            # transforms.ToPILImage(mode="RGB"),
+            # transforms.RandomHorizontalFlip(p=0.5),
+            # transforms.RandomRotation(degrees=(-3, 3)),
+            # transforms.RandomAutocontrast(p=0.1),
+            transforms.ToTensor(),
+            lambda x: (x - 127.5) / 128.0,
+        ]
+    )
+
+    dataset = MedsTorchDataset(
+        protocol="verification_fold1", database_path=database_path, transform=transform,
+    )
+
+    train_dataloader = DataLoader(
+        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
+    )
+
+    backbone_model = AVAILABLE_BACKBONES[backbone]()
+
+    mine_trainer(
+        output_dir,
+        identity_factor,
+        mine_factor,
+        max_epochs,
+        demographic_epochs,
+        identity_epochs,
+        batch_size,
+        train_dataloader,
+        backbone_model,
+        transform,
+    )
+
+
+if __name__ == "__main__":
+    mine_meds()
diff --git a/bob/bio/demographics/experiments/train/ortogonality/__init__.py b/bob/bio/demographics/experiments/train/ortogonality/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/demographics/experiments/train/ortogonality/meds.py b/bob/bio/demographics/experiments/train/ortogonality/meds.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5d14dbe67f8b1609166aa53995d49283928a5a5
--- /dev/null
+++ b/bob/bio/demographics/experiments/train/ortogonality/meds.py
@@ -0,0 +1,93 @@
+from bob.bio.demographics.datasets import MedsTorchDataset
+
+# https://pytorch.org/docs/stable/data.html
+from torch.utils.data import DataLoader
+from bob.extension import rc
+import os
+
+import bob.io.image
+
+import torchvision.transforms as transforms
+import click
+
+from bob.bio.demographics.regularizers.trainers import ortogonality_trainer
+
+
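+# Example invocation (paths are illustrative):
+#   python meds.py /path/to/output-dir --identity-factor 1.0 --orthogonality-factor 1.0 \
+#       --backbone iresnet100 --max-epochs 1000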
+@click.command()
+@click.argument("OUTPUT_DIR")
+@click.option("--identity-factor", default=1.0, help="Identity factor")
+@click.option("--orthogonality-factor", default=1.0, help="Ortogonality factor")
+@click.option("--max-epochs", default=600, help="Max number of epochs")
+@click.option(
+    "--demographic-epochs",
+    default=100,
+    help="Number of epochs to train the demographic classifier",
+)
+@click.option(
+    "--identity-epochs",
+    default=200,
+    help="Number of epochs to train the identity classifier",
+)
+@click.option("--batch-size", default=64, help="Batch size")
+@click.option("--backbone", default="iresnet100", help="Backbone")
+def ortogonality_meds(
+    output_dir,
+    identity_factor,
+    orthogonality_factor,
+    max_epochs,
+    demographic_epochs,
+    identity_epochs,
+    batch_size,
+    backbone,
+):
+
+    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
+
+    database_path = os.path.join(
+        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
+    )
+
+    transform = transforms.Compose(
+        [
+            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
+            # transforms.ToPILImage(mode="RGB"),
+            # transforms.RandomHorizontalFlip(p=0.5),
+            # transforms.RandomRotation(degrees=(-3, 3)),
+            # transforms.RandomAutocontrast(p=0.1),
+            transforms.ToTensor(),
+            lambda x: (x - 127.5) / 128.0,
+        ]
+    )
+
+    dataset = MedsTorchDataset(
+        protocol="verification_fold1", database_path=database_path, transform=transform,
+    )
+
+    train_dataloader = DataLoader(
+        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
+    )
+
+    backbone_model = AVAILABLE_BACKBONES[backbone]()
+
+    ortogonality_trainer(
+        output_dir,
+        identity_factor,
+        orthogonality_factor,
+        max_epochs,
+        demographic_epochs,
+        identity_epochs,
+        batch_size,
+        train_dataloader,
+        backbone_model,
+        transform,
+    )
+
+
+if __name__ == "__main__":
+    ortogonality_meds()
diff --git a/bob/bio/demographics/experiments/train/ortogonality/morph.py b/bob/bio/demographics/experiments/train/ortogonality/morph.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e9c97e2de8e8433bed8490670bdf31683332afd
--- /dev/null
+++ b/bob/bio/demographics/experiments/train/ortogonality/morph.py
@@ -0,0 +1,93 @@
+from bob.bio.demographics.datasets import MorphTorchDataset
+
+# https://pytorch.org/docs/stable/data.html
+from torch.utils.data import DataLoader
+from bob.extension import rc
+import os
+
+import bob.io.image
+
+import torchvision.transforms as transforms
+import click
+
+from bob.bio.demographics.regularizers.trainers import ortogonality_trainer
+
+
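+# Example invocation (paths are illustrative):
+#   python morph.py /path/to/output-dir --identity-factor 1.0 --orthogonality-factor 1.0 \
+#       --backbone iresnet100 --max-epochs 1000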
+@click.command()
+@click.argument("OUTPUT_DIR")
+@click.option("--identity-factor", default=1.0, help="Identity factor")
+@click.option("--orthogonality-factor", default=1.0, help="Ortogonality factor")
+@click.option("--max-epochs", default=600, help="Max number of epochs")
+@click.option(
+    "--demographic-epochs",
+    default=100,
+    help="Number of epochs to train the demographic classifier",
+)
+@click.option(
+    "--identity-epochs",
+    default=200,
+    help="Number of epochs to train the identity classifier",
+)
+@click.option("--batch-size", default=64, help="Batch size")
+@click.option("--backbone", default="iresnet100", help="Backbone")
+def ortogonality_morph(
+    output_dir,
+    identity_factor,
+    orthogonality_factor,
+    max_epochs,
+    demographic_epochs,
+    identity_epochs,
+    batch_size,
+    backbone,
+):
+
+    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
+
+    database_path = os.path.join(
+        rc.get("bob.bio.demographics.directory"), "morph", "samplewrapper"
+    )
+
+    transform = transforms.Compose(
+        [
+            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
+            # transforms.ToPILImage(mode="RGB"),
+            # transforms.RandomHorizontalFlip(p=0.5),
+            # transforms.RandomRotation(degrees=(-3, 3)),
+            # transforms.RandomAutocontrast(p=0.1),
+            transforms.ToTensor(),
+            lambda x: (x - 127.5) / 128.0,
+        ]
+    )
+
+    dataset = MorphTorchDataset(
+        protocol="verification_fold1", database_path=database_path, transform=transform,
+    )
+
+    train_dataloader = DataLoader(
+        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
+    )
+
+    backbone_model = AVAILABLE_BACKBONES[backbone]()
+
+    ortogonality_trainer(
+        output_dir,
+        identity_factor,
+        orthogonality_factor,
+        max_epochs,
+        demographic_epochs,
+        identity_epochs,
+        batch_size,
+        train_dataloader,
+        backbone_model,
+        transform,
+    )
+
+
+if __name__ == "__main__":
+    ortogonality_morph()
diff --git a/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py b/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py
deleted file mode 100644
index cf76727c1d694d54c750ebac0f8a3b66455b241f..0000000000000000000000000000000000000000
--- a/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py
+++ /dev/null
@@ -1,181 +0,0 @@
-from bob.bio.demographics.datasets import MedsTorchDataset, MorphTorchDataset
-
-# https://pytorch.org/docs/stable/data.html
-from torch.utils.data import DataLoader
-import pytest
-from bob.extension import rc
-import os
-from bob.learn.pytorch.architectures.iresnet import iresnet34, iresnet100
-from bob.learn.pytorch.head import ArcFace, Regular
-from bob.bio.demographics.regularizers.independence import (
-    DemographicRegularHead,
-    OrthogonalityModel,
-)
-import pytorch_lightning as pl
-import torch
-from functools import partial
-
-from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.loggers import TensorBoardLogger
-import torchvision.transforms as transforms
-import click
-import yaml
-
-
-# demographic_epochs = 50
-# identity_epochs = 200
-
-
-@click.command()
-@click.argument("OUTPUT_DIR")
-@click.option("--identity-factor", default=1.0, help="Identity factor")
-@click.option("--orthogonality-factor", default=1.0, help="Ortogonality factor")
-@click.option("--max-epochs", default=600, help="Max number of epochs")
-@click.option(
-    "--demographic-epochs",
-    default=100,
-    help="Number of epochs to train the demographic classifier",
-)
-@click.option(
-    "--identity-epochs",
-    default=200,
-    help="Number of epochs to train the identity classifier",
-)
-@click.option("--batch-size", default=64, help="Batch size")
-def ortogonality_meds(
-    output_dir,
-    identity_factor,
-    orthogonality_factor,
-    max_epochs,
-    demographic_epochs,
-    identity_epochs,
-    batch_size,
-):
-
-    os.makedirs(output_dir, exist_ok=True)
-    with open(f"{output_dir}/config.yml", "w") as file:
-        dict_file = dict()
-        dict_file["demographic_epochs"] = demographic_epochs
-        dict_file["identity_epochs"] = identity_epochs
-        dict_file["max_epochs"] = max_epochs
-        dict_file["batch_size"] = batch_size
-        yaml.dump(dict_file, file)
-
-    backbone_checkpoint_path = f"{output_dir}/iresnet100.pth"
-    checkpoint_dir = f"{output_dir}/last.ckpt"
-
-    database_path = os.path.join(
-        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
-    )
-
-    import bob.io.image
-
-    transform = transforms.Compose(
-        [
-            lambda x: bob.io.image.to_matplotlib(x),
-            transforms.ToPILImage(mode="RGB"),
-            transforms.RandomHorizontalFlip(p=0.5),
-            transforms.RandomRotation(degrees=(-3, 3)),
-            # transforms.RandomAutocontrast(p=0.1),
-            transforms.ToTensor(),
-            lambda x: (x - 127.5) / 128.0,
-        ]
-    )
-
-    dataset = MedsTorchDataset(
-        protocol="verification_fold1", database_path=database_path, transform=transform,
-    )
-
-    # train_dataloader = DataLoader(
-    #    dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
-    # )
-    train_dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
-
-    # backbone = iresnet34(
-    # pretrained="/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet34-5b0d0e90.pth"
-    # )
-    # Add this argument
-    backbone = iresnet100(
-        pretrained="/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet100-73e07ba7.pth"
-    )
-
-    # list(dataloader.dataset.labels.values())
-    #####################
-    ## IDENTITY
-    num_class = len(list(train_dataloader.dataset.labels.values()))
-    identity_head = ArcFace(
-        feat_dim=backbone.features.num_features, num_class=num_class
-    )
-
-    ######################
-    ## DEMOGRAPHIC
-    num_class = len(list(train_dataloader.dataset.demographic_keys.values()))
-    demographic_head = DemographicRegularHead(
-        feat_dim=backbone.features.num_features, num_class=num_class
-    )
-
-    ################
-    ## Trainer
-    optimizer = partial(torch.optim.SGD, lr=0.001, momentum=0.9)
-
-    # demographic_epochs = 50
-    # identity_epochs = 200
-    # ortogonality_epochs = 400
-
-    # Preparing lightining model
-    model = OrthogonalityModel(
-        backbone=backbone,
-        identity_head=identity_head,
-        demographic_head=demographic_head,
-        loss_fn=torch.nn.CrossEntropyLoss(),
-        optimizer_fn=optimizer,
-        identity_factor=identity_factor,
-        orthogonality_factor=orthogonality_factor,
-        backbone_checkpoint_path=backbone_checkpoint_path,
-        demographic_epochs=demographic_epochs,
-        identity_epochs=identity_epochs,
-    )
-
-    """
-    from bob.bio.face.pytorch.callbacks import VanillaBiometricsCallback
-
-
-    vanilla_callback = VanillaBiometricsCallback(
-        config="/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies/src/bob.bio.demographics/bob/bio/demographics/fair_transformers/transformers.py",
-        output_path="./vanilla-callback",
-    )
-    """
-    model_checkpoint = ModelCheckpoint(
-        output_dir, every_n_train_steps=100, save_last=True
-    )
-    logger = TensorBoardLogger(os.path.join(output_dir, "tb_logs"))
-
-    # Be careful with
-    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
-    resume_from_checkpoint = checkpoint_dir if os.path.exists(checkpoint_dir) else None
-
-    # TODO: using this code to learn too
-    # so, be nice with my comments
-    # callbacks=[model_checkpoint, vanilla_callback],
-    callbacks = [model_checkpoint]
-    trainer = pl.Trainer(
-        callbacks=callbacks,
-        logger=logger,
-        max_epochs=max_epochs,
-        gpus=-1 if torch.cuda.is_available() else None,
-        resume_from_checkpoint=resume_from_checkpoint,
-        # resume_from_checkpoint=resume_from_checkpoint, #https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#resume-from-checkpoint
-        # debug flags
-        # limit_train_batches=10,  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#limit-train-batches
-        # limit_val_batches=1,
-        amp_level="00",  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#amp-level
-        log_every_n_steps=5,
-    )
-
-    trainer.fit(
-        model=model, train_dataloaders=train_dataloader,
-    )
-
-
-if __name__ == "__main__":
-    ortogonality_meds()
diff --git a/bob/bio/demographics/regularizers/__init__.py b/bob/bio/demographics/regularizers/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..eca908cc0bf1a6b723564de153cdd92bf41a4388 100644
--- a/bob/bio/demographics/regularizers/__init__.py
+++ b/bob/bio/demographics/regularizers/__init__.py
@@ -0,0 +1,31 @@
+from bob.learn.pytorch.architectures.iresnet import iresnet34, iresnet100, iresnet50
+from functools import partial
+
+# Registry of supported backbones: "structure" builds the architecture and
+# "prior" loads the pretrained weights from the checkpoint paths below
+AVAILABLE_BACKBONES = dict()
+AVAILABLE_BACKBONES["iresnet100"] = {
+    "structure": iresnet100,
+    "prior": partial(
+        iresnet100,
+        "/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet100-73e07ba7.pth",
+    ),
+}
+
+
+AVAILABLE_BACKBONES["iresnet50"] = {
+    "structure": iresnet50,
+    "prior": partial(
+        iresnet50,
+        "/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet50-7f187506.pth",
+    ),
+}
+
+
+AVAILABLE_BACKBONES["iresnet34"] = {
+    "structure": iresnet34,
+    "prior": partial(
+        iresnet34,
+        "/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet34-5b0d0e90.pth",
+    ),
+}
+
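+# Usage sketch (mirrors how the experiment scripts consume this registry):
+#   AVAILABLE_BACKBONES["iresnet100"]["structure"](pretrained=checkpoint_path)
+# builds the architecture from a given checkpoint, while
+#   AVAILABLE_BACKBONES["iresnet100"]["prior"]()
+# loads the default pretrained weights listed above.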
diff --git a/bob/bio/demographics/regularizers/independence.py b/bob/bio/demographics/regularizers/independence.py
index d8a84d358a96e4d0d84cd5b392f9ccfba73828cd..6653f711e0309116f94695018513eadbae2bdc90 100644
--- a/bob/bio/demographics/regularizers/independence.py
+++ b/bob/bio/demographics/regularizers/independence.py
@@ -4,8 +4,9 @@ import pytorch_lightning as pl
 import torch
 import torch.nn.functional as F
 import numpy as np
-
+import torch.nn as nn
 import copy
+import math
 
 
 class DemographicRegularHead(Module):
@@ -42,7 +43,7 @@ def switch(model, flag):
 class OrthogonalityModel(BackboneHeadModel):
     """
     Here we hypothesize that the sensitive attribute is orthogonal 
-    to the identity
+    to the identity attribute
     """
 
     def __init__(
@@ -110,9 +111,6 @@ class OrthogonalityModel(BackboneHeadModel):
 
             # Switching of the backbone
             if not self.demographic_switch:
                 self.demographic_switch = True
 
-                self.backbone = switch(self.backbone, False)
 
                 self.demographic_head = switch(
                     self.demographic_head, True
@@ -244,3 +242,325 @@ class OrthogonalityModel(BackboneHeadModel):
     def configure_optimizers(self):
         # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
         return self.optimizer_fn(params=self.parameters())
+
+
+class Mine(nn.Module):
+    """
+    Implements the MINE loss from the paper:
+
+    Mutual Information Neural Estimation (https://arxiv.org/pdf/1801.04062.pdf)
+
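+    ``forward`` returns the negative Donsker-Varadhan bound,
+    ``-(E_P[T(x, z)] - log E_Q[exp T(x, z)])``, or the f-divergence variant
+    ``-(E_P[T(x, z)] - E_Q[exp(T(x, z) - 1)])`` when ``is_mine_f=True``, so
+    minimizing this loss maximizes the mutual information estimate.
+
+    Minimal usage sketch (dimensions are illustrative)::
+
+        t_net = T(x_dim=512, z_dim=512)
+        mine = Mine(t_net)
+        loss = mine(x_batch, z_batch)  # backpropagate to tighten the bound
+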
+    """
+
+    def __init__(self, T, alpha=0.01, is_mine_f=False):
+        super().__init__()
+        self.is_mine_f = is_mine_f
+        self.T = T
+
+    def forward(self, x, z, z_marg=None):
+        if z_marg is None:
+            z_marg = z[torch.randperm(x.shape[0])]
+
+        t = self.T(x, z).mean()
+        t_marg = self.T(x, z_marg)
+
+        if self.is_mine_f:
+            second_term = torch.exp(t_marg - 1).mean()
+        else:
+            second_term = torch.logsumexp(t_marg, 0) - math.log(t_marg.shape[0])
+
+        return -t + second_term
+
+    def mi(self, x, z, z_marg=None):
+        if isinstance(x, np.ndarray):
+            x = torch.from_numpy(x).float()
+        if isinstance(z, np.ndarray):
+            z = torch.from_numpy(z).float()
+
+        with torch.no_grad():
+            mi = -self.forward(x, z, z_marg)
+        return mi
+
+
+class MINEModel(BackboneHeadModel):
+    """
+    Here we hypothesize that the sensitive attribute is statistically
+    independent of the identity attribute and penalize their estimated
+    mutual information.
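+
+    Training proceeds in three phases (see ``training_step``):
+
+    1. epochs ``[0, demographic_epochs)``: train the demographic head only;
+    2. epochs up to ``demographic_epochs + identity_epochs``: train the
+       identity head with a frozen backbone;
+    3. remaining epochs: unfreeze the backbone and minimize the identity loss
+       plus the MINE estimate between face and demographic embeddings.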
+    """
+
+    def __init__(
+        self,
+        backbone,
+        identity_head,
+        demographic_head,
+        loss_fn,
+        optimizer_fn,
+        identity_factor=1.0,
+        mine_factor=1.0,
+        backbone_checkpoint_path=None,
+        demographic_epochs=30,
+        identity_epochs=50,
+        mine_dimension=512,
+        **kwargs,
+    ):
+        pl.LightningModule.__init__(self, **kwargs)
+        self.backbone = backbone
+        self.identity_head = identity_head
+        self.demographic_head = demographic_head
+        self.loss_fn = loss_fn
+        self.optimizer_fn = optimizer_fn
+
+        self.identity_factor = identity_factor
+        self.mine_factor = mine_factor
+
+        self.demographic_epochs = demographic_epochs  # 1. First train demographics
+
+        self.identity_epochs = (
+            demographic_epochs + identity_epochs
+        )  # 2. Train identities
+
+        self.backbone_checkpoint_path = backbone_checkpoint_path
+        self.last_op = None
+
+        # Control the networks that will be updated
+        self.demographic_switch = False
+        self.identity_switch = False
+        self.mine_switch = False
+
+        # Defining the dimension of the MINE model
+        dim = (
+            self.backbone.features.num_features
+            if hasattr(self.backbone, "features")
+            else mine_dimension
+        )
+
+        # Statistics network for the MINE estimator
+        self.mine = Mine(T(dim, dim))
+
+    def training_epoch_end(self, training_step_outputs):
+        if self.backbone_checkpoint_path:
+
+            state = self.backbone.state_dict()
+            torch.save(state, self.backbone_checkpoint_path)
+
+    def training_step(self, batch, batch_idx):
+
+        data = batch["data"]
+        label = batch["label"]
+        demography = batch["demography"]
+
+        embedding = self.backbone(data)
+
+        if self.current_epoch < self.demographic_epochs:
+            ## Phase 1: train the demographic classifier
+
+            if not self.demographic_switch:
+                self.demographic_switch = True
+
+                self.demographic_head = switch(
+                    self.demographic_head, True
+                )  # Enable updates for the demographic classifier
+
+                self.identity_head = switch(
+                    self.identity_head, False
+                )  # Freeze the identity classifier
+
+            # Demographic CLASSIFICATION loss
+            _, demographic_logits = self.demographic_head(embedding, demography)
+
+            loss_demography = self.loss_fn(demographic_logits, demography)
+            self.log("train/loss_demography", loss_demography)
+
+            acc = (
+                sum(
+                    np.argmax(demographic_logits.cpu().detach().numpy(), axis=1)
+                    == demography.cpu().detach().numpy()
+                )
+                / demography.shape[0]
+            )
+            self.log("train/acc_demography_before_MINE", acc)
+
+            return loss_demography
+
+        if self.current_epoch < self.identity_epochs:
+            ## Phase 2: train the identity classifier
+
+            if not self.identity_switch:
+                self.identity_switch = True
+
+                # Freeze the backbone
+                self.backbone = switch(self.backbone, False)
+
+                self.demographic_head = switch(
+                    self.demographic_head, False
+                )  # Freeze the demographic classifier
+
+                self.identity_head = switch(
+                    self.identity_head, True
+                )  # Enable updates for the identity classifier
+
+            # Identity loss
+            logits_identity = self.identity_head(embedding, label)
+            loss_identity = self.loss_fn(logits_identity, label)
+            self.log("train/loss_identity", loss_identity)
+
+            acc = (
+                sum(
+                    np.argmax(logits_identity.cpu().detach().numpy(), axis=1)
+                    == label.cpu().detach().numpy()
+                )
+                / label.shape[0]
+            )
+            self.log("train/acc_identity_before_MINE", acc)
+
+            return loss_identity
+
+        #########################################
+        # Phase 3: joint identity + MINE training
+        #########################################
+
+        # Unfreeze the backbone
+        if not self.mine_switch:
+            self.mine_switch = True
+
+            self.backbone = switch(self.backbone, True)
+
+            self.demographic_head = switch(
+                self.demographic_head, False
+            )  # Freeze the demographic classifier
+
+            self.identity_head = switch(
+                self.identity_head, True
+            )  # Enable updates for the identity classifier
+
+        # Identity loss
+        logits_identity = self.identity_head(embedding, label)
+        loss_identity = self.loss_fn(logits_identity, label)
+
+        # Demographic CLASSIFICATION loss
+        demographic_embedding, demographic_logits = self.demographic_head(
+            embedding, demography
+        )
+
+        # Demographic
+        acc = (
+            sum(
+                np.argmax(demographic_logits.cpu().detach().numpy(), axis=1)
+                == demography.cpu().detach().numpy()
+            )
+            / demography.shape[0]
+        )
+        self.log("train/acc_demography_after_MINE", acc)
+
+        # Identity
+        acc = (
+            sum(
+                np.argmax(logits_identity.cpu().detach().numpy(), axis=1)
+                == label.cpu().detach().numpy()
+            )
+            / label.shape[0]
+        )
+        self.log("train/acc_identity_after_MINE", acc)
+
+        # MUTUAL INFORMATION LOSS (negative DV bound between the two embeddings)
+        loss_mine = self.mine(embedding, demographic_embedding)
+
+        self.log("train/loss_MINE", loss_mine)
+
+        total_loss = self.identity_factor * loss_identity + self.mine_factor * loss_mine
+
+        self.log("train/total_loss", total_loss)
+
+        return total_loss
+
+    def configure_optimizers(self):
+        # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
+        return self.optimizer_fn(params=self.parameters())
+
+
+class T(nn.Module):
+    """Statistics network T(x, z) used by the MINE estimator."""
+
+    def __init__(self, x_dim, z_dim):
+        super().__init__()
+        self.t = nn.Sequential(
+            nn.Linear(x_dim + z_dim, 100),
+            nn.ReLU(),
+            nn.Linear(100, 100),
+            nn.ReLU(),
+            nn.Linear(100, 1),
+        )
+
+    def forward(self, x, z):
+
+        x_z = torch.cat((x, z), 1)
+        return self.t(x_z)
diff --git a/bob/bio/demographics/regularizers/trainers.py b/bob/bio/demographics/regularizers/trainers.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e84d272a637771840114e30d2495459d4c2c587
--- /dev/null
+++ b/bob/bio/demographics/regularizers/trainers.py
@@ -0,0 +1,239 @@
+import os
+from functools import partial
+
+import pytorch_lightning as pl
+import torch
+import yaml
+from pytorch_lightning.callbacks import ModelCheckpoint
+from pytorch_lightning.loggers import TensorBoardLogger
+
+from bob.learn.pytorch.head import ArcFace
+from bob.bio.demographics.regularizers.independence import (
+    DemographicRegularHead,
+    OrthogonalityModel,
+    MINEModel,
+)
+
+
+def ortogonality_trainer(
+    output_dir,
+    identity_factor,
+    orthogonality_factor,
+    max_epochs,
+    demographic_epochs,
+    identity_epochs,
+    batch_size,
+    train_dataloader,
+    backbone_model,
+    transform,
+):
+
+    """
+    Trains a Pytorch CNN using the ortogonality hypothesis
+    """
+
+    # Defing the variables of the experiment, so we don't get lost
+    os.makedirs(output_dir, exist_ok=True)
+    with open(f"{output_dir}/config.yml", "w") as file:
+        dict_file = dict()
+        dict_file["demographic_epochs"] = demographic_epochs
+        dict_file["identity_epochs"] = identity_epochs
+        dict_file["max_epochs"] = max_epochs
+        dict_file["batch_size"] = batch_size
+        dict_file["identity_factor"] = identity_factor
+        dict_file["orthogonality_factor"] = orthogonality_factor
+        dict_file["hypothesis"] = "Orthogonality"
+        yaml.dump(dict_file, file)
+
+    backbone_checkpoint_path = f"{output_dir}/model.pth"
+    checkpoint_dir = f"{output_dir}/last.ckpt"
+
+    #####################
+    ## IDENTITY
+    num_class = len(list(train_dataloader.dataset.labels.values()))
+    identity_head = ArcFace(
+        feat_dim=backbone_model.features.num_features, num_class=num_class
+    )
+
+    ######################
+    ## DEMOGRAPHIC
+    num_class = len(list(train_dataloader.dataset.demographic_keys.values()))
+    demographic_head = DemographicRegularHead(
+        feat_dim=backbone_model.features.num_features, num_class=num_class
+    )
+
+    ################
+    ## Trainer
+    optimizer = partial(torch.optim.SGD, lr=0.001, momentum=0.9)
+
+    # Preparing the lightning model
+    model = OrthogonalityModel(
+        backbone=backbone_model,
+        identity_head=identity_head,
+        demographic_head=demographic_head,
+        loss_fn=torch.nn.CrossEntropyLoss(),
+        optimizer_fn=optimizer,
+        identity_factor=identity_factor,
+        orthogonality_factor=orthogonality_factor,
+        backbone_checkpoint_path=backbone_checkpoint_path,
+        demographic_epochs=demographic_epochs,
+        identity_epochs=identity_epochs,
+    )
+
+    """
+    from bob.bio.face.pytorch.callbacks import VanillaBiometricsCallback
+
+
+    vanilla_callback = VanillaBiometricsCallback(
+        config="/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies/src/bob.bio.demographics/bob/bio/demographics/fair_transformers/transformers.py",
+        output_path="./vanilla-callback",
+    )
+    """
+    model_checkpoint = ModelCheckpoint(
+        output_dir, every_n_train_steps=100, save_last=True
+    )
+    logger = TensorBoardLogger(os.path.join(output_dir, "tb_logs"))
+
+    # Be careful with
+    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
+    resume_from_checkpoint = checkpoint_dir if os.path.exists(checkpoint_dir) else None
+
+    # To enable the VanillaBiometricsCallback above:
+    # callbacks = [model_checkpoint, vanilla_callback]
+    callbacks = [model_checkpoint]
+    trainer = pl.Trainer(
+        callbacks=callbacks,
+        logger=logger,
+        max_epochs=max_epochs,
+        gpus=-1 if torch.cuda.is_available() else None,
+        resume_from_checkpoint=resume_from_checkpoint,
+        # debug flags
+        # limit_train_batches=10,  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#limit-train-batches
+        # limit_val_batches=1,
+        amp_level="00",  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#amp-level
+        log_every_n_steps=5,
+    )
+
+    trainer.fit(
+        model=model, train_dataloaders=train_dataloader,
+    )
+
+
+def mine_trainer(
+    output_dir,
+    identity_factor,
+    mine_factor,
+    max_epochs,
+    demographic_epochs,
+    identity_epochs,
+    batch_size,
+    train_dataloader,
+    backbone_model,
+    transform,
+):
+
+    """
+    Trains a Pytorch CNN using the ortogonality hypothesis
+    """
+
+    # Defing the variables of the experiment, so we don't get lost
+    os.makedirs(output_dir, exist_ok=True)
+    with open(f"{output_dir}/config.yml", "w") as file:
+        dict_file = dict()
+        dict_file["demographic_epochs"] = demographic_epochs
+        dict_file["identity_epochs"] = identity_epochs
+        dict_file["max_epochs"] = max_epochs
+        dict_file["batch_size"] = batch_size
+        dict_file["identity_factor"] = identity_factor
+        dict_file["mine_factor"] = mine_factor
+        dict_file["hypothesis"] = "MINE"
+        yaml.dump(dict_file, file)
+
+    backbone_checkpoint_path = f"{output_dir}/model.pth"
+    checkpoint_dir = f"{output_dir}/last.ckpt"
+
+    #####################
+    ## IDENTITY
+    num_class = len(list(train_dataloader.dataset.labels.values()))
+    identity_head = ArcFace(
+        feat_dim=backbone_model.features.num_features, num_class=num_class
+    )
+
+    ######################
+    ## DEMOGRAPHIC
+    num_class = len(list(train_dataloader.dataset.demographic_keys.values()))
+    demographic_head = DemographicRegularHead(
+        feat_dim=backbone_model.features.num_features, num_class=num_class
+    )
+
+    ################
+    ## Trainer
+    optimizer = partial(torch.optim.SGD, lr=0.001, momentum=0.9)
+
+    # Preparing the lightning model
+    model = MINEModel(
+        backbone=backbone_model,
+        identity_head=identity_head,
+        demographic_head=demographic_head,
+        loss_fn=torch.nn.CrossEntropyLoss(),
+        optimizer_fn=optimizer,
+        identity_factor=identity_factor,
+        mine_factor=mine_factor,
+        backbone_checkpoint_path=backbone_checkpoint_path,
+        demographic_epochs=demographic_epochs,
+        identity_epochs=identity_epochs,
+    )
+
+    """
+    from bob.bio.face.pytorch.callbacks import VanillaBiometricsCallback
+
+
+    vanilla_callback = VanillaBiometricsCallback(
+        config="/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies/src/bob.bio.demographics/bob/bio/demographics/fair_transformers/transformers.py",
+        output_path="./vanilla-callback",
+    )
+    """
+    model_checkpoint = ModelCheckpoint(
+        output_dir, every_n_train_steps=100, save_last=True
+    )
+    logger = TensorBoardLogger(os.path.join(output_dir, "tb_logs"))
+
+    # Be careful with
+    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
+    resume_from_checkpoint = checkpoint_dir if os.path.exists(checkpoint_dir) else None
+
+    # To enable the VanillaBiometricsCallback above:
+    # callbacks = [model_checkpoint, vanilla_callback]
+    callbacks = [model_checkpoint]
+    trainer = pl.Trainer(
+        callbacks=callbacks,
+        logger=logger,
+        max_epochs=max_epochs,
+        gpus=-1 if torch.cuda.is_available() else None,
+        resume_from_checkpoint=resume_from_checkpoint,
+        # debug flags
+        # limit_train_batches=10,  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#limit-train-batches
+        # limit_val_batches=1,
+        amp_level="00",  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#amp-level
+        log_every_n_steps=5,
+    )
+
+    trainer.fit(
+        model=model, train_dataloaders=train_dataloader,
+    )
diff --git a/bob/bio/demographics/reports.py b/bob/bio/demographics/reports.py
index a6c34552edd6c3a40ab9cedd5b499ae8e51f14b3..2b786a1d8cfedbc914213be87fc1213beadebb29 100644
--- a/bob/bio/demographics/reports.py
+++ b/bob/bio/demographics/reports.py
@@ -72,8 +72,8 @@ def meds_report(
         taus = [compute_fmr_thresholds(d, fmr_thresholds) for d in negatives_dev]
 
         fig = plot_fdr(
-            negatives_dev,
-            positives_dev,
+            negatives_eval,
+            positives_eval,
             titles,
             variable_suffix,
             fmr_thresholds,
@@ -123,7 +123,7 @@ def morph_report(
     output_filename,
     scores_eval=None,
     fmr_thresholds=[10 ** i for i in list(range(-8, 0))],
-    percentile=0.01,
+    percentile=0.05,
     titles=None,
     possible_races=["A", "B", "H", "W"],
     genders_considered=["M"],
@@ -178,16 +178,16 @@ def morph_report(
 
         taus = [compute_fmr_thresholds(d, fmr_thresholds) for d in negatives_dev]
 
-        # fig = plot_fdr(
-        #    negatives_dev,
-        #    positives_dev,
-        #    titles,
-        #    variable_suffix,
-        #    fmr_thresholds,
-        #    taus=taus,
-        # )
+        fig = plot_fdr(
+            negatives_eval,
+            positives_eval,
+            titles,
+            variable_suffix,
+            fmr_thresholds,
+            taus=taus,
+        )
 
-    # pdf.savefig(fig)
+    pdf.savefig(fig)
 
     for i, (n_dev, p_dev, n_eval, p_eval) in enumerate(
         zip(negatives_dev, positives_dev, negatives_eval, positives_eval)
@@ -209,17 +209,17 @@ def morph_report(
         pdf.savefig(fig)
 
         #### PLOTTING THE FMR AND FNMR TRADE OFF
-        # fig = plot_fmr_fnmr_tradeoff(
-        #    n_dev,
-        #    p_dev,
-        #    variable_suffix=variable_suffix,
-        #    fmr_thresholds=fmr_thresholds,
-        #    negatives_eval=n_eval,
-        #    positives_eval=p_eval,
-        #    print_fmr_fnmr=True,
-        #    label_lookup_table=label_lookup_table,
-        # )
-        # pdf.savefig(fig)
+        fig = plot_fmr_fnmr_tradeoff(
+            n_dev,
+            p_dev,
+            variable_suffix=variable_suffix,
+            fmr_thresholds=fmr_thresholds,
+            negatives_eval=n_eval,
+            positives_eval=p_eval,
+            print_fmr_fnmr=True,
+            label_lookup_table=label_lookup_table,
+        )
+        pdf.savefig(fig)
 
     pdf.close()
 
diff --git a/bob/bio/demographics/script/commands.py b/bob/bio/demographics/script/commands.py
index 082603a030fe6f13e4a0c492f7c208d44b669b64..626bb6ffe95d4fea7460c2f81fc4ba55df260859 100644
--- a/bob/bio/demographics/script/commands.py
+++ b/bob/bio/demographics/script/commands.py
@@ -81,7 +81,7 @@ def morph(ctx, scores, evaluation, output, titles, percentile, dask_client, **ka
         scores_eval,
         percentile=percentile,
         titles=titles,
-        fmr_thresholds=[10 ** i for i in list(range(-6, -2))],
+        fmr_thresholds=[10 ** i for i in list(range(-8, 0))],
     )
 
     dask_client.shutdown()
diff --git a/bob/bio/demographics/test/test_mine.py b/bob/bio/demographics/test/test_mine.py
new file mode 100644
index 0000000000000000000000000000000000000000..409700480b804407791780a9221f0601492ae5cb
--- /dev/null
+++ b/bob/bio/demographics/test/test_mine.py
@@ -0,0 +1,45 @@
+import torch
+import torch.nn as nn
+
+
+from bob.bio.demographics.regularizers.independence import Mine, T
+import numpy as np
+
+
+def run_mine(is_mine_f):
+    np.random.seed(10)
+    N = 1000
+    d = 1
+    EPOCHS = 1000
+
+    # Generate correlated toy data: Z is a noisy copy of the binary variable X
+    X = np.sign(np.random.normal(0.0, 1.0, [N, d]))
+    Z = X + np.random.normal(0.0, np.sqrt(0.2), [N, d])
+
+    from sklearn.feature_selection import mutual_info_regression
+
+    # Reference estimate of I(X; Z) from sklearn's k-NN based estimator
+    mi_numerical = mutual_info_regression(X.reshape(-1, 1), Z.ravel())[0]
+
+    x_dim = X.shape[1]
+    z_dim = Z.shape[1]
+    t = T(x_dim, z_dim)
+    model = Mine(T=t, is_mine_f=is_mine_f)
+
+    X = torch.tensor(X.astype("float32"))
+    Z = torch.tensor(Z.astype("float32"))
+
+    solver = torch.optim.Adam(model.parameters(), lr=1e-4)
+
+    for _ in range(EPOCHS):
+        solver.zero_grad()
+        loss = model(X, Z)
+        loss.backward()
+        solver.step()
+
+    mi = model.mi(X, Z)
+    assert np.allclose(mi, mi_numerical, atol=0.1)
+
+
+def test_mine():
+    run_mine(True)
+    run_mine(False)
diff --git a/bob/bio/demographics/test/test_regularizer.py b/bob/bio/demographics/test/test_regularizer.py
index 54181f12f0e088cd64593853930b58a73b148e69..8f6b73050396cc4464626e1542a33a616367f65c 100644
--- a/bob/bio/demographics/test/test_regularizer.py
+++ b/bob/bio/demographics/test/test_regularizer.py
@@ -10,10 +10,13 @@ from bob.learn.pytorch.head import ArcFace, Regular
 from bob.bio.demographics.regularizers.independence import (
     DemographicRegularHead,
     OrthogonalityModel,
+    MINEModel,
 )
 import pytorch_lightning as pl
 import torch
 from functools import partial
+import torchvision.transforms as transforms
+import bob.io.image
 
 
 @pytest.mark.skipif(
@@ -87,6 +90,90 @@ def test_orthogonality():
         model=model, train_dataloaders=train_dataloader,
     )
 
+
+@pytest.mark.skipif(
+    rc.get("bob.bio.demographics.directory") is None,
+    reason="Demographics features directory not available. Please do `bob config set bob.bio.demographics.directory [PATH]` to set the base features path.",
+)
+def test_mine():
+
+    database_path = os.path.join(
+        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
+    )
+
+    transform = transforms.Compose(
+        [
+            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
+            # transforms.ToPILImage(mode="RGB"),
+            # transforms.RandomHorizontalFlip(p=0.5),
+            # transforms.RandomRotation(degrees=(-3, 3)),
+            # transforms.RandomAutocontrast(p=0.1),
+            transforms.ToTensor(),
+            lambda x: (x - 127.5) / 128.0,
+        ]
+    )
+
+    dataset = MedsTorchDataset(
+        protocol="verification_fold1", database_path=database_path, transform=transform
+    )
+
+    # train_dataloader = DataLoader(
+    #    dataset, batch_size=64, shuffle=True, pin_memory=True, num_workers=2
+    # )
+    train_dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
+
+    backbone = iresnet34(
+        pretrained="/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet34-5b0d0e90.pth"
+    )
+
+    #####################
+    ## IDENTITY
+    num_class = len(list(train_dataloader.dataset.labels.values()))
+    identity_head = ArcFace(
+        feat_dim=backbone.features.num_features, num_class=num_class
+    )
+
+    ######################
+    ## DEMOGRAPHIC
+    num_class = len(list(train_dataloader.dataset.demographic_keys.values()))
+    demographic_head = DemographicRegularHead(
+        feat_dim=backbone.features.num_features, num_class=num_class
+    )
+
+    ################
+    ## Trainer
+    optimizer = partial(torch.optim.SGD, lr=0.1, momentum=0.9)
+
+    # Preparing lightining model
+    model = MINEModel(
+        backbone=backbone,
+        identity_head=identity_head,
+        demographic_head=demographic_head,
+        loss_fn=torch.nn.CrossEntropyLoss(),
+        optimizer_fn=optimizer,
+        demographic_epochs=1,
+        identity_epochs=1,
+    )
+
+    trainer = pl.Trainer(
+        # callbacks=..... # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#callbacks
+        # logger=logger,...
+        max_epochs=4,
+        gpus=-1 if torch.cuda.is_available() else None,
+        # resume_from_checkpoint=resume_from_checkpoint, #https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#resume-from-checkpoint
+        # debug flags
+        # limit_train_batches=10,  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#limit-train-batches
+        # limit_val_batches=1,
+        amp_level="00",  # https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#amp-level
+    )
+
+    trainer.fit(
+        model=model, train_dataloaders=train_dataloader,
+    )
+
     pass
     # head = ArcFace(feat_dim=30, num_class=10)
     # head = Regular(feat_dim=84, num_class=10)
diff --git a/experiments/regularization/mine/meds/meds_evaluate.sh b/experiments/regularization/mine/meds/meds_evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..042e1270ad98265e522b4e75f46bf395e190dd76
--- /dev/null
+++ b/experiments/regularization/mine/meds/meds_evaluate.sh
@@ -0,0 +1,26 @@
+BASE_PATH=/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies
+
+
+# The MINE models share the orthogonality evaluation entry point; only the
+# model/output paths and the factor combinations differ.
+OUTPUT_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/mine/meds/
+MODELS_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/mine/meds/
+
+BACKBONE=iresnet100
+
+
+array=( "identity-1.0_mine-1.0" )
+for CNN in "${array[@]}"
+do
+    ./bin/python $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py $BACKBONE $MODELS_PATH/$BACKBONE/$CNN/model.pth $OUTPUT_PATH/$BACKBONE/$CNN/
+
+done
+
+# -l sge
diff --git a/experiments/regularization/mine/meds/meds_train.sh b/experiments/regularization/mine/meds/meds_train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..547bf69f03a579f4c1a9c508128c31d6eb7028b3
--- /dev/null
+++ b/experiments/regularization/mine/meds/meds_train.sh
@@ -0,0 +1,49 @@
+BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/meds/
+
+BACKBONE=iresnet100
+
+COMMAND=./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/meds.py
+
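+# Submit one GPU job per (identity, orthogonality) loss-weight combination.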
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    # Recover the loss weights from the folder name (identity-<I>_orthogonality-<O>)
+    IDENTITY_FACTOR=${CNN#identity-}; IDENTITY_FACTOR=${IDENTITY_FACTOR%%_*}
+    ORTHOGONALITY_FACTOR=${CNN##*orthogonality-}
+
+    ./bin/jman submit --name ORTH -q sgpu -- ./bin/python $COMMAND $BASE_PATH/$BACKBONE/$CNN \
+        --identity-factor $IDENTITY_FACTOR --orthogonality-factor $ORTHOGONALITY_FACTOR \
+        --backbone $BACKBONE \
+        --max-epochs 1000
+done
+
+
+./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/mine/meds.py /idiap/temp/tpereira/2.FRDemographics/regularization/models/mine/meds/iresnet100/identity-1.0_mine-1.0 \
+    --identity-factor 1. --mine 1. \
+    --backbone $BACKBONE \
+    --max-epochs 1000
diff --git a/experiments/regularization/ortogonality/meds/meds_evaluate.sh b/experiments/regularization/ortogonality/meds/meds_evaluate.sh
index 927d0a67a8083a769c83723c1dd4c9715afd1825..042e1270ad98265e522b4e75f46bf395e190dd76 100644
--- a/experiments/regularization/ortogonality/meds/meds_evaluate.sh
+++ b/experiments/regularization/ortogonality/meds/meds_evaluate.sh
@@ -1,5 +1,26 @@
 BASE_PATH=/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies
 
-$BASE_PATH/bin/bob bio pipelines vanilla-biometrics $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py --output /idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/iresnet100_identity_factor1_orthogonality_factor1 --dask-partition-size 50 -c -g dev -g eval -l sge 
+#$BASE_PATH/bin/bob bio pipelines vanilla-biometrics $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py --output /idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/iresnet100_identity_factor1_orthogonality_factor1 --dask-partition-size 50 -c -g dev -g eval -l sge 
+
+#/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/meds/analysis
+
+
+OUTPUT_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/meds/
+MODELS_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/meds/
+
+BACKBONE=iresnet100
+
+
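+# Score each trained (identity, orthogonality) combination with the evaluation script.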
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    ./bin/python $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py $BACKBONE $MODELS_PATH/$BACKBONE/$CNN/model.pth $OUTPUT_PATH/$BACKBONE/$CNN/
+
+done
+
+# -l sge
 
 
diff --git a/experiments/regularization/ortogonality/meds/meds_evaluate_irenet50.sh b/experiments/regularization/ortogonality/meds/meds_evaluate_irenet50.sh
new file mode 100644
index 0000000000000000000000000000000000000000..01617021493437f307dec588688b7a5eb01f7dda
--- /dev/null
+++ b/experiments/regularization/ortogonality/meds/meds_evaluate_irenet50.sh
@@ -0,0 +1,26 @@
+BASE_PATH=/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies
+
+#$BASE_PATH/bin/bob bio pipelines vanilla-biometrics $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality_hypothesis/meds.py --output /idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/iresnet100_identity_factor1_orthogonality_factor1 --dask-partition-size 50 -c -g dev -g eval -l sge 
+
+#/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/meds/analysis
+
+
+OUTPUT_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/meds/
+MODELS_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/meds/
+
+BACKBONE=iresnet50
+
+
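+# Score each trained (identity, orthogonality) combination with the evaluation script.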
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    ./bin/python $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality/meds.py $BACKBONE $MODELS_PATH/$BACKBONE/$CNN/model.pth $OUTPUT_PATH/$BACKBONE/$CNN/
+
+done
+
+# -l sge
diff --git a/experiments/regularization/ortogonality/meds/meds_train.sh b/experiments/regularization/ortogonality/meds/meds_train.sh
index 589f7409679fcef8b47172ef74376761d6022a6e..f2f525dfa2248eeab154bdac27725f21eb60a214 100644
--- a/experiments/regularization/ortogonality/meds/meds_train.sh
+++ b/experiments/regularization/ortogonality/meds/meds_train.sh
@@ -1,18 +1,41 @@
-BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality_hypothesis/meds/
-
-./bin/jman submit --name ORTH -q sgpu -- ./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py $BASE_PATH/identity-1.0_orthogonality-1.0 \
-    --identity-factor 1. --orthogonality-factor 1. --max-epochs 1000
-    
-./bin/jman submit --name ORTH -q sgpu -- ./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py $BASE_PATH/identity-5.0_orthogonality-1.0 \
-    --identity-factor 5. --orthogonality-factor 1. --max-epochs 1000
-    
-./bin/jman submit --name ORTH -q sgpu -- ./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py $BASE_PATH/identity-10.0_orthogonality-1.0 \
-    --identity-factor 10. --orthogonality-factor 1. --max-epochs 1000
-    
-    
-./bin/jman submit --name ORTH -q sgpu -- ./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py $BASE_PATH/identity-1.0_orthogonality-5.0 \
-    --identity-factor 1. --orthogonality-factor 5. --max-epochs 1000
-    
-    
-./bin/jman submit --name ORTH -q sgpu -- ./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality_hypothesis/meds.py $BASE_PATH/identity-1.0_orthogonality-10.0 \
-    --identity-factor 1. --orthogonality-factor 10. --max-epochs 1000
+BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/meds/
+
+BACKBONE=iresnet100
+
+COMMAND=./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/meds.py
+
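+# Submit one GPU job per (identity, orthogonality) loss-weight combination.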
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    # Recover the loss weights from the folder name (identity-<I>_orthogonality-<O>)
+    IDENTITY_FACTOR=${CNN#identity-}; IDENTITY_FACTOR=${IDENTITY_FACTOR%%_*}
+    ORTHOGONALITY_FACTOR=${CNN##*orthogonality-}
+
+    ./bin/jman submit --name ORTH -q sgpu -- ./bin/python $COMMAND $BASE_PATH/$BACKBONE/$CNN \
+        --identity-factor $IDENTITY_FACTOR --orthogonality-factor $ORTHOGONALITY_FACTOR \
+        --backbone $BACKBONE \
+        --max-epochs 1000
+done
+
diff --git a/experiments/regularization/ortogonality/meds/meds_train_iresnet50.sh b/experiments/regularization/ortogonality/meds/meds_train_iresnet50.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bf84881f561259c894fa3b28d10511962661b199
--- /dev/null
+++ b/experiments/regularization/ortogonality/meds/meds_train_iresnet50.sh
@@ -0,0 +1,41 @@
+BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/meds/
+
+BACKBONE=iresnet50
+
+COMMAND=./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/meds.py
+
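+# Submit one GPU job per (identity, orthogonality) loss-weight combination.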
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    # Recover the loss weights from the folder name (identity-<I>_orthogonality-<O>)
+    IDENTITY_FACTOR=${CNN#identity-}; IDENTITY_FACTOR=${IDENTITY_FACTOR%%_*}
+    ORTHOGONALITY_FACTOR=${CNN##*orthogonality-}
+
+    ./bin/jman submit --name MEDS-ORTH -q sgpu -- ./bin/python $COMMAND $BASE_PATH/$BACKBONE/$CNN \
+        --identity-factor $IDENTITY_FACTOR --orthogonality-factor $ORTHOGONALITY_FACTOR \
+        --backbone $BACKBONE \
+        --max-epochs 1000
+done
+
diff --git a/experiments/regularization/ortogonality/morph/morph_evaluate.sh b/experiments/regularization/ortogonality/morph/morph_evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e0e8491ca141cf6e610189e6fb4baae89c554fb4
--- /dev/null
+++ b/experiments/regularization/ortogonality/morph/morph_evaluate.sh
@@ -0,0 +1,23 @@
+BASE_PATH=/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies
+
+OUTPUT_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/morph/
+MODELS_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/
+
+BACKBONE=iresnet100
+
+
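+# Score each trained (identity, orthogonality) combination with the evaluation script.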
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    ./bin/python $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality/morph.py $BACKBONE $MODELS_PATH/$BACKBONE/$CNN/model.pth $OUTPUT_PATH/$BACKBONE/$CNN/ -l sge-gpu
+
+done
+
+# -l sge
diff --git a/experiments/regularization/ortogonality/morph/morph_evaluate_iresnet50.sh b/experiments/regularization/ortogonality/morph/morph_evaluate_iresnet50.sh
new file mode 100644
index 0000000000000000000000000000000000000000..72f0b6b5ba287c2efd5aefca5873a25dec2b0a38
--- /dev/null
+++ b/experiments/regularization/ortogonality/morph/morph_evaluate_iresnet50.sh
@@ -0,0 +1,23 @@
+BASE_PATH=/remote/idiap.svm/user.active/tpereira/gitlab/bob/bob.nightlies
+
+OUTPUT_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/experiments/orthogonality/morph/
+MODELS_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/
+
+BACKBONE=iresnet50
+
+
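+# Score each trained (identity, orthogonality) combination with the evaluation script.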
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    ./bin/python $BASE_PATH/src/bob.bio.demographics/bob/bio/demographics/experiments/evaluation/ortogonality/morph.py $BACKBONE $MODELS_PATH/$BACKBONE/$CNN/model.pth $OUTPUT_PATH/$BACKBONE/$CNN/ -l sge
+
+done
+
+# -l sge
diff --git a/experiments/regularization/ortogonality/morph/morph_iresnet50_train.sh b/experiments/regularization/ortogonality/morph/morph_iresnet50_train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9cd714fcc69c98847ad5ea5e9f707b5ce0b6e05c
--- /dev/null
+++ b/experiments/regularization/ortogonality/morph/morph_iresnet50_train.sh
@@ -0,0 +1,50 @@
+BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/
+
+BACKBONE=iresnet50
+
+COMMAND=./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/morph.py
+
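+# Submit one GPU job per (identity, orthogonality) loss-weight combination.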
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    # Recover the loss weights from the folder name (identity-<I>_orthogonality-<O>)
+    IDENTITY_FACTOR=${CNN#identity-}; IDENTITY_FACTOR=${IDENTITY_FACTOR%%_*}
+    ORTHOGONALITY_FACTOR=${CNN##*orthogonality-}
+
+    ./bin/jman submit --name MORPH-ORTH -q sgpu -- ./bin/python $COMMAND $BASE_PATH/$BACKBONE/$CNN \
+        --identity-factor $IDENTITY_FACTOR --orthogonality-factor $ORTHOGONALITY_FACTOR \
+        --backbone $BACKBONE \
+        --max-epochs 1000
+done
+
+#./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/morph.py /idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/iresnet100/identity-1.0_orthogonality-1.0 \
+#    --identity-factor 1. --orthogonality-factor 1. --identity-epochs 1 --demographic-epochs 1 \
+#    --backbone iresnet100 \
+#    --max-epochs 1000
+
diff --git a/experiments/regularization/ortogonality/morph/morph_train.sh b/experiments/regularization/ortogonality/morph/morph_train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c1a874d22592317e899a2dfd8d089a757fc2a075
--- /dev/null
+++ b/experiments/regularization/ortogonality/morph/morph_train.sh
@@ -0,0 +1,50 @@
+BASE_PATH=/idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/
+
+BACKBONE=iresnet100
+
+COMMAND=./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/morph.py
+
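+# Submit one GPU job per (identity, orthogonality) loss-weight combination.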
+array=( "identity-1.0_orthogonality-1.0" "identity-1.0_orthogonality-5.0" "identity-1.0_orthogonality-10.0" "identity-1.0_orthogonality-100.0" "identity-5.0_orthogonality-1.0" "identity-10.0_orthogonality-1.0")
+for CNN in "${array[@]}"
+do
+    # Recover the loss weights from the folder name (identity-<I>_orthogonality-<O>)
+    IDENTITY_FACTOR=${CNN#identity-}; IDENTITY_FACTOR=${IDENTITY_FACTOR%%_*}
+    ORTHOGONALITY_FACTOR=${CNN##*orthogonality-}
+
+    ./bin/jman submit --name MORPH-ORTH -q sgpu -- ./bin/python $COMMAND $BASE_PATH/$BACKBONE/$CNN \
+        --identity-factor $IDENTITY_FACTOR --orthogonality-factor $ORTHOGONALITY_FACTOR \
+        --backbone $BACKBONE \
+        --max-epochs 1000
+done
+
+#./bin/python ./src/bob.bio.demographics/bob/bio/demographics/experiments/train/ortogonality/morph.py /idiap/temp/tpereira/2.FRDemographics/regularization/models/orthogonality/morph/iresnet100/identity-1.0_orthogonality-1.0 \
+#    --identity-factor 1. --orthogonality-factor 1. --identity-epochs 1 --demographic-epochs 1 \
+#    --backbone iresnet100 \
+#    --max-epochs 1000
+