Commit 8e329713 authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira

Merge branch 'transition' into 'master'

Transition

See merge request !1
parents d02339d3 5920462a
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

from torch.utils.data import Dataset

from bob.bio.face.database import MEDSDatabase, MorphDatabase


class DemographicTorchDataset(Dataset):
    """Base torch ``Dataset`` serving samples from a bob database together
    with an identity label and a demographic label."""

    def __init__(self, bob_dataset, transform=None):
        self.bob_dataset = bob_dataset

        self.bucket = [s for sset in self.bob_dataset.zprobes() for s in sset]
        self.bucket += [s for sset in self.bob_dataset.treferences() for s in sset]

        # Defining keys and labels
        keys = [sset.subject_id for sset in self.bob_dataset.zprobes()] + [
            sset.subject_id for sset in self.bob_dataset.treferences()
        ]
        self.labels = dict(zip(keys, range(len(keys))))

        self.demographic_keys = self.load_demographics()
        self.transform = transform

    def __len__(self):
        return len(self.bucket)

    def __getitem__(self, idx):
        sample = self.bucket[idx]

        image = sample.data if self.transform is None else self.transform(sample.data)
        label = self.labels[sample.subject_id]
        demography = self.get_demographics(sample)

        return {"data": image, "label": label, "demography": demography}
class MedsTorchDataset(DemographicTorchDataset):
    def __init__(
        self, protocol, database_path, database_extension=".h5", transform=None
    ):
        bob_dataset = MEDSDatabase(
            protocol=protocol,
            dataset_original_directory=database_path,
            dataset_original_extension=database_extension,
        )
        super().__init__(bob_dataset, transform=transform)

    def load_demographics(self):
        # In the MEDS metadata, race is stored under the `rac` attribute
        target_metadata = "rac"
        metadata_keys = set(
            [getattr(sset, target_metadata) for sset in self.bob_dataset.zprobes()]
            + [
                getattr(sset, target_metadata)
                for sset in self.bob_dataset.treferences()
            ]
        )
        metadata_keys = dict(zip(metadata_keys, range(len(metadata_keys))))
        return metadata_keys

    def get_demographics(self, sample):
        demographic_key = getattr(sample, "rac")
        return self.demographic_keys[demographic_key]
class MorphTorchDataset(DemographicTorchDataset):
    def __init__(
        self, protocol, database_path, database_extension=".h5", transform=None
    ):
        self.bob_dataset = MorphDatabase(
            protocol=protocol,
            dataset_original_directory=database_path,
            dataset_original_extension=database_extension,
        )

        # The MORPH protocol has an intersection between zprobes and
        # treferences; the overlapping subjects below are excluded and the
        # buckets rebuilt here instead of delegating to super().__init__()
        self.excluding_list = [
            "190276",
            "332158",
            "111942",
            "308129",
            "334074",
            "350814",
            "131677",
            "168724",
            "276055",
            "275589",
            "286810",
        ]

        self.bucket = [s for sset in self.bob_dataset.zprobes() for s in sset]
        self.bucket += [
            s
            for sset in self.bob_dataset.treferences()
            for s in sset
            if sset.subject_id not in self.excluding_list
        ]

        # Defining keys and labels
        keys = [b.subject_id for b in self.bucket]
        self.labels = dict(zip(keys, range(len(keys))))

        self.demographic_keys = self.load_demographics()
        self.transform = transform

    def load_demographics(self):
        # In MORPH the demographic label combines race (`rac`) and sex
        metadata_keys = set(
            [f"{sset.rac}-{sset.sex}" for sset in self.bob_dataset.zprobes()]
            + [
                f"{sset.rac}-{sset.sex}"
                for sset in self.bob_dataset.treferences()
                if sset.subject_id not in self.excluding_list
            ]
        )
        metadata_keys = dict(zip(metadata_keys, range(len(metadata_keys))))
        return metadata_keys

    def get_demographics(self, sample):
        demographic_key = f"{sample.rac}-{sample.sex}"
        return self.demographic_keys[demographic_key]
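
# Usage sketch (illustrative, not part of the module; the database path below
# is hypothetical):
#
#   from torch.utils.data import DataLoader
#
#   dataset = MedsTorchDataset(
#       protocol="verification_fold1",
#       database_path="/path/to/meds/samplewrapper",
#   )
#   loader = DataLoader(dataset, batch_size=64, shuffle=True)
#   batch = next(iter(loader))
#   # `batch` contains the "data", "label" and "demography" entries
#   # assembled by DemographicTorchDataset.__getitem__ above.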
import click

from bob.bio.face.embeddings.pytorch import iresnet_template
from bob.bio.demographics.fair_transformers import RunnableTransformer
from functools import partial
import os

from bob.extension.scripts.click_helper import ResourceOption
from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics
from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
from bob.pipelines.distributed import VALID_DASK_CLIENT_STRINGS
from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
@click.command()
@click.argument("BACKBONE")
@click.argument("CHECKPOINT-PATH")
@click.argument("EXPERIMENT-PATH")
@click.option(
    "--dask-client",
    "-l",
    entry_point_group="dask.client",
    string_exceptions=VALID_DASK_CLIENT_STRINGS,
    default="single-threaded",
    help="Dask client for the execution of the pipeline.",
    cls=ResourceOption,
)
def meds_experiment(backbone, checkpoint_path, experiment_path, dask_client, **kwargs):

    annotation_type = "eyes-center"
    fixed_positions = None
    memory_demanding = False

    backbone = AVAILABLE_BACKBONES[backbone]["structure"]

    # Loading the pipeline
    pipeline = iresnet_template(
        embedding=RunnableTransformer(
            partial(backbone, pretrained=checkpoint_path),
            memory_demanding=memory_demanding,
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )

    face_crop_path = os.path.join(experiment_path, "../face-crop/")

    # Checkpointing the pipeline
    pipeline = checkpoint_vanilla_biometrics(pipeline, base_dir=experiment_path)
    pipeline.transformer[0].features_dir = face_crop_path

    dask_partition_size = 100

    # Database
    from bob.bio.face.database import MEDSDatabase

    protocol = "verification_fold1"
    database = MEDSDatabase(protocol=protocol)
    groups = ["dev", "eval"]

    execute_vanilla_biometrics(
        pipeline,
        database,
        dask_client,
        groups,
        experiment_path,
        write_metadata_scores=True,
        checkpoint=False,
        dask_partition_size=dask_partition_size,
        dask_n_workers=10,
        allow_scoring_with_all_biometric_references=True,
    )


if __name__ == "__main__":
    meds_experiment()
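
# Example invocation (a sketch; both paths are hypothetical). BACKBONE must be
# a key of AVAILABLE_BACKBONES; scores are written under EXPERIMENT-PATH and
# face crops are checkpointed one level above it, in ../face-crop/:
#
#   python meds_experiment.py iresnet100 \
#       /path/to/checkpoints/iresnet100.pth \
#       /path/to/experiments/meds \
#       --dask-client single-threaded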
import click

from bob.bio.face.embeddings.pytorch import iresnet_template
from bob.bio.demographics.fair_transformers import RunnableTransformer
from functools import partial
import os

from bob.extension.scripts.click_helper import ResourceOption
from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics
from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
from bob.pipelines.distributed import VALID_DASK_CLIENT_STRINGS
from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
@click.command()
@click.argument("BACKBONE")
@click.argument("CHECKPOINT-PATH")
@click.argument("EXPERIMENT-PATH")
@click.option(
    "--dask-client",
    "-l",
    entry_point_group="dask.client",
    string_exceptions=VALID_DASK_CLIENT_STRINGS,
    default="single-threaded",
    help="Dask client for the execution of the pipeline.",
    cls=ResourceOption,
)
def morph_experiment(backbone, checkpoint_path, experiment_path, dask_client, **kwargs):

    annotation_type = "eyes-center"
    fixed_positions = None
    memory_demanding = False

    backbone = AVAILABLE_BACKBONES[backbone]["structure"]

    # Loading the pipeline
    pipeline = iresnet_template(
        embedding=RunnableTransformer(
            partial(backbone, pretrained=checkpoint_path),
            memory_demanding=memory_demanding,
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )

    face_crop_path = os.path.join(experiment_path, "../face-crop/")

    # Checkpointing the pipeline
    pipeline = checkpoint_vanilla_biometrics(pipeline, base_dir=experiment_path)
    pipeline.transformer[0].features_dir = face_crop_path

    dask_partition_size = 100

    # Database
    from bob.bio.face.database import MorphDatabase

    protocol = "verification_fold1"
    database = MorphDatabase(protocol=protocol)
    groups = ["dev", "eval"]

    execute_vanilla_biometrics(
        pipeline,
        database,
        dask_client,
        groups,
        experiment_path,
        write_metadata_scores=True,
        checkpoint=False,
        dask_partition_size=dask_partition_size,
        dask_n_workers=10,
        allow_scoring_with_all_biometric_references=True,
    )


if __name__ == "__main__":
    morph_experiment()
from bob.bio.demographics.datasets import MedsTorchDataset

# https://pytorch.org/docs/stable/data.html
from torch.utils.data import DataLoader

from bob.extension import rc
import os
import bob.io.image

import torchvision.transforms as transforms
import click

from bob.bio.demographics.regularizers.trainers import mine_trainer
@click.command()
@click.argument("OUTPUT_DIR")
@click.option("--identity-factor", default=1.0, help="Identity factor")
@click.option("--mine-factor", default=1.0, help="MINE factor")
@click.option("--max-epochs", default=600, help="Max number of epochs")
@click.option(
    "--demographic-epochs",
    default=100,
    help="Number of epochs to train the demographic classifier",
)
@click.option(
    "--identity-epochs",
    default=200,
    help="Number of epochs to train the identity classifier",
)
@click.option("--batch-size", default=64, help="Batch size")
@click.option("--backbone", default="iresnet100", help="Backbone")
def mine_meds(
    output_dir,
    identity_factor,
    mine_factor,
    max_epochs,
    demographic_epochs,
    identity_epochs,
    batch_size,
    backbone,
):
    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES

    database_path = os.path.join(
        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
    )

    transform = transforms.Compose(
        [
            # bob images are (C, H, W); to_matplotlib moves them to (H, W, C)
            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
            # Optional augmentations (currently disabled):
            # transforms.ToPILImage(mode="RGB"),
            # transforms.RandomHorizontalFlip(p=0.5),
            # transforms.RandomRotation(degrees=(-3, 3)),
            # transforms.RandomAutocontrast(p=0.1),
            transforms.ToTensor(),
            # Normalize pixel values to roughly [-1, 1]
            lambda x: (x - 127.5) / 128.0,
        ]
    )

    dataset = MedsTorchDataset(
        protocol="verification_fold1",
        database_path=database_path,
        transform=transform,
    )

    train_dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
    )

    backbone_model = AVAILABLE_BACKBONES[backbone]()

    mine_trainer(
        output_dir,
        identity_factor,
        mine_factor,
        max_epochs,
        demographic_epochs,
        identity_epochs,
        batch_size,
        train_dataloader,
        backbone_model,
        transform,
    )


if __name__ == "__main__":
    mine_meds()
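
# Example invocation (a sketch; the output directory and factor values are
# hypothetical, and omitted options fall back to the defaults declared above).
# Images are read from <bob.bio.demographics.directory>/meds/samplewrapper, so
# that rc variable must be configured beforehand:
#
#   python mine_meds.py /path/to/output \
#       --identity-factor 1.0 --mine-factor 0.1 --backbone iresnet100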
from bob.bio.demographics.datasets import MedsTorchDataset

# https://pytorch.org/docs/stable/data.html
from torch.utils.data import DataLoader

from bob.extension import rc
import os
import bob.io.image

import torchvision.transforms as transforms
import click

from bob.bio.demographics.regularizers.trainers import ortogonality_trainer
@click.command()
@click.argument("OUTPUT_DIR")
@click.option("--identity-factor", default=1.0, help="Identity factor")
@click.option("--orthogonality-factor", default=1.0, help="Orthogonality factor")
@click.option("--max-epochs", default=600, help="Max number of epochs")
@click.option(
    "--demographic-epochs",
    default=100,
    help="Number of epochs to train the demographic classifier",
)
@click.option(
    "--identity-epochs",
    default=200,
    help="Number of epochs to train the identity classifier",
)
@click.option("--batch-size", default=64, help="Batch size")
@click.option("--backbone", default="iresnet100", help="Backbone")
def ortogonality_meds(
    output_dir,
    identity_factor,
    orthogonality_factor,
    max_epochs,
    demographic_epochs,
    identity_epochs,
    batch_size,
    backbone,
):
    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES

    database_path = os.path.join(
        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
    )

    transform = transforms.Compose(
        [
            # bob images are (C, H, W); to_matplotlib moves them to (H, W, C)
            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
            # Optional augmentations (currently disabled):
            # transforms.ToPILImage(mode="RGB"),
            # transforms.RandomHorizontalFlip(p=0.5),
            # transforms.RandomRotation(degrees=(-3, 3)),
            # transforms.RandomAutocontrast(p=0.1),
            transforms.ToTensor(),
            # Normalize pixel values to roughly [-1, 1]
            lambda x: (x - 127.5) / 128.0,
        ]
    )

    dataset = MedsTorchDataset(
        protocol="verification_fold1",
        database_path=database_path,
        transform=transform,
    )

    train_dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
    )

    backbone_model = AVAILABLE_BACKBONES[backbone]()

    ortogonality_trainer(
        output_dir,
        identity_factor,
        orthogonality_factor,
        max_epochs,
        demographic_epochs,
        identity_epochs,
        batch_size,
        train_dataloader,
        backbone_model,
        transform,
    )


if __name__ == "__main__":
    ortogonality_meds()
from bob.bio.demographics.datasets import MorphTorchDataset

# https://pytorch.org/docs/stable/data.html
from torch.utils.data import DataLoader

from bob.extension import rc
import os
import bob.io.image

import torchvision.transforms as transforms
import click

from bob.bio.demographics.regularizers.trainers import ortogonality_trainer
@click.command()
@click.argument("OUTPUT_DIR")
@click.option("--identity-factor", default=1.0, help="Identity factor")
@click.option("--orthogonality-factor", default=1.0, help="Orthogonality factor")
@click.option("--max-epochs", default=600, help="Max number of epochs")
@click.option(
    "--demographic-epochs",
    default=100,
    help="Number of epochs to train the demographic classifier",
)
@click.option(
    "--identity-epochs",
    default=200,
    help="Number of epochs to train the identity classifier",
)
@click.option("--batch-size", default=64, help="Batch size")
@click.option("--backbone", default="iresnet100", help="Backbone")
def ortogonality_morph(
    output_dir,
    identity_factor,
    orthogonality_factor,
    max_epochs,
    demographic_epochs,
    identity_epochs,
    batch_size,
    backbone,
):
    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES

    database_path = os.path.join(
        rc.get("bob.bio.demographics.directory"), "morph", "samplewrapper"
    )

    transform = transforms.Compose(
        [
            # bob images are (C, H, W); to_matplotlib moves them to (H, W, C)
            lambda x: bob.io.image.to_matplotlib(x.astype("float32")),
            # Optional augmentations (currently disabled):
            # transforms.ToPILImage(mode="RGB"),
            # transforms.RandomHorizontalFlip(p=0.5),
            # transforms.RandomRotation(degrees=(-3, 3)),
            # transforms.RandomAutocontrast(p=0.1),
            transforms.ToTensor(),
            # Normalize pixel values to roughly [-1, 1]
            lambda x: (x - 127.5) / 128.0,
        ]
    )

    dataset = MorphTorchDataset(
        protocol="verification_fold1",
        database_path=database_path,
        transform=transform,
    )

    train_dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
    )

    backbone_model = AVAILABLE_BACKBONES[backbone]()

    ortogonality_trainer(
        output_dir,
        identity_factor,
        orthogonality_factor,
        max_epochs,
        demographic_epochs,
        identity_epochs,
        batch_size,
        train_dataloader,
        backbone_model,
        transform,
    )


if __name__ == "__main__":
    ortogonality_morph()
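
# Example invocation for the two orthogonality trainers (a sketch; the output
# directory is hypothetical). ortogonality_meds reads images from
# <bob.bio.demographics.directory>/meds/samplewrapper and ortogonality_morph
# from <bob.bio.demographics.directory>/morph/samplewrapper:
#
#   python ortogonality_morph.py /path/to/output \
#       --identity-factor 1.0 --orthogonality-factor 1.0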
# from .facecrop import facecrop_pipeline
from .transformers import