Commit 617a5159 authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Update

parent 3deb5e25
Pipeline #58346 failed with stages
in 1 minute and 57 seconds
# Backbone config: IResNet-100 with pre-trained weights.
# Defines the module-level `backbone` consumed by the head configs, and
# lowers BATCH_SIZE so this larger model fits in memory.
from bob.bio.face.pytorch.backbones.iresnet import iresnet100
import logging
logger = logging.getLogger(__name__)
logger.info("Loading BACKBONE")
# NOTE(review): hard-coded Idiap cluster path — assumes the checkpoint file
# exists at this location; verify before running on another filesystem.
backbone = iresnet100(
"/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet100-73e07ba7.pth"
)
# Reducing the batch size
BATCH_SIZE = 64
# Backbone config: IResNet-34 with pre-trained weights.
# Defines the module-level `backbone` consumed by the head configs.
from bob.bio.face.pytorch.backbones.iresnet import iresnet34
import logging
logger = logging.getLogger(__name__)
logger.info("Loading BACKBONE")
# NOTE(review): hard-coded Idiap cluster path — assumes the checkpoint file
# exists at this location; verify before running on another filesystem.
backbone = iresnet34(
"/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet34-5b0d0e90.pth"
)
# Backbone config: IResNet-50 with pre-trained weights.
# Defines the module-level `backbone` consumed by the head configs.
from bob.bio.face.pytorch.backbones.iresnet import iresnet50
import logging
logger = logging.getLogger(__name__)
logger.info("Loading BACKBONE")
# NOTE(review): hard-coded Idiap cluster path — assumes the checkpoint file
# exists at this location; verify before running on another filesystem.
backbone = iresnet50(
"/idiap/temp/tpereira/bob/data/pytorch/iresnet-91a5de61/iresnet50-7f187506.pth"
)
# Mobio training-data config for the "mobile0-male-female" protocol.
# Exposes BATCH_SIZE, PROTOCOL, transform, train_dataset, train_dataloader
# and validation_dataset (None) at module level.
from bob.bio.face.pytorch.datasets import MobioTorchDataset
# https://pytorch.org/docs/stable/data.html
from torch.utils.data import DataLoader
from bob.extension import rc
import torch
from functools import partial
import torchvision.transforms as transforms
from bob.bio.face.pytorch.preprocessing import get_standard_data_augmentation
import os
BATCH_SIZE = 128
PROTOCOL = "mobile0-male-female"
# NOTE(review): rc.get(...) returns None when the key is unset, which would
# make os.path.join raise — assumes the rc entry is configured.
DATABASE_PATH = os.path.join(
rc.get("bob.bio.demographics.directory"), "mobio", "samplewrapper"
)
DATABASE_EXTENSION = ".h5"
import logging
logger = logging.getLogger(__name__)
logger.info(f"Loading protocol {PROTOCOL} from {DATABASE_PATH}")
# Shared data-augmentation pipeline used by the sibling dataset configs.
transform = get_standard_data_augmentation()
train_dataset = MobioTorchDataset(
protocol=PROTOCOL,
database_path=DATABASE_PATH,
database_extension=DATABASE_EXTENSION,
transform=transform,
)
# validation_dataset = VGG2TorchDataset(
# protocol=PROTOCOL,
# database_path=DATABASE_PATH,
# database_extension=DATABASE_EXTENSION,
# transform=transform,
# train=False,
# )
train_dataloader = DataLoader(
train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
pin_memory=True,
num_workers=4,
)
# Validation is deliberately disabled here (see truncated note below).
# NOTE(review): validation_dataloader is never defined in this config —
# confirm that whatever consumes these globals does not expect it.
# For some reason we have an issue with
validation_dataset = None
# validation_dataloader = DataLoader(
# validation_dataset,
# batch_size=BATCH_SIZE,
# shuffle=False,
# pin_memory=False,
# num_workers=1,
# )
# VGG2 "vgg2-full" protocol: training and validation datasets plus loaders.
# https://pytorch.org/docs/stable/data.html
from bob.bio.face.pytorch.datasets import VGG2TorchDataset
from torch.utils.data import DataLoader
from bob.extension import rc
import torch
from functools import partial
import torchvision.transforms as transforms
from bob.bio.face.pytorch.preprocessing import get_standard_data_augmentation
import logging

logger = logging.getLogger(__name__)

# Honor a BATCH_SIZE set by an earlier config (e.g. a backbone config that
# lowers it); fall back to 128 otherwise.
if locals().get("BATCH_SIZE") is None:
    BATCH_SIZE = 128

PROTOCOL = "vgg2-full"
DATABASE_PATH = rc.get("bob.bio.face.vgg2-crops.directory")
DATABASE_EXTENSION = ".jpg"

logger.info(f"Loading protocol {PROTOCOL} from {DATABASE_PATH}")

# Shared augmentation pipeline for both splits.
transform = get_standard_data_augmentation()

# The train flag selects the split within the same protocol.
train_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
)
validation_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
    train=False,
)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    pin_memory=True,
    num_workers=4,
)
validation_dataloader = DataLoader(
    validation_dataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    pin_memory=False,
    num_workers=1,
)
# VGG2 "vgg2-full" protocol: training-only setup (validation disabled).
# https://pytorch.org/docs/stable/data.html
from bob.bio.face.pytorch.datasets import VGG2TorchDataset
from torch.utils.data import DataLoader
from bob.extension import rc
import torch
from functools import partial
import torchvision.transforms as transforms
from bob.bio.face.pytorch.preprocessing import get_standard_data_augmentation
import logging

# CONSISTENCY FIX: honor a BATCH_SIZE set by an earlier (backbone) config,
# as the sibling VGG2 configs do, instead of unconditionally forcing 128.
if locals().get("BATCH_SIZE") is None:
    BATCH_SIZE = 128

PROTOCOL = "vgg2-full"
DATABASE_PATH = rc.get("bob.bio.face.vgg2-crops.directory")
DATABASE_EXTENSION = ".jpg"

logger = logging.getLogger(__name__)
logger.info(f"Loading protocol {PROTOCOL} from {DATABASE_PATH}")

transform = get_standard_data_augmentation()

train_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    pin_memory=True,
    num_workers=4,
)

# Validation is deliberately disabled for this setup ("For some reason we
# have an issue with" it, per the original truncated note).
validation_dataset = None
# BUGFIX: this assignment was accidentally commented out, leaving the name
# undefined for consumers that expect both loader globals.
validation_dataloader = None
# VGG2 "vgg2-short" protocol: training and validation datasets plus loaders.
# https://pytorch.org/docs/stable/data.html
from bob.bio.face.pytorch.datasets import VGG2TorchDataset
from torch.utils.data import DataLoader
from bob.extension import rc
import torch
from functools import partial
import torchvision.transforms as transforms
from bob.bio.face.pytorch.preprocessing import get_standard_data_augmentation
import logging

logger = logging.getLogger(__name__)

# Honor a BATCH_SIZE set by an earlier config; fall back to 128 otherwise.
if locals().get("BATCH_SIZE") is None:
    BATCH_SIZE = 128

PROTOCOL = "vgg2-short"
DATABASE_PATH = rc.get("bob.bio.face.vgg2-crops.directory")
DATABASE_EXTENSION = ".jpg"

logger.info(f"Loading protocol {PROTOCOL} from {DATABASE_PATH}")

# Shared augmentation pipeline for both splits.
transform = get_standard_data_augmentation()

# The train flag selects the split within the same protocol.
train_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
)
validation_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
    train=False,
)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    pin_memory=True,
    num_workers=4,
)
validation_dataloader = DataLoader(
    validation_dataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    pin_memory=False,
    num_workers=1,
)
# VGG2 "vgg2-short" protocol: training-only setup (validation disabled).
# https://pytorch.org/docs/stable/data.html
from bob.bio.face.pytorch.datasets import VGG2TorchDataset
from torch.utils.data import DataLoader
from bob.extension import rc
import torch
from functools import partial
import torchvision.transforms as transforms
from bob.bio.face.pytorch.preprocessing import get_standard_data_augmentation
import logging

# Honor a BATCH_SIZE set by an earlier (backbone) config; default to 128.
if locals().get("BATCH_SIZE") is None:
    BATCH_SIZE = 128

PROTOCOL = "vgg2-short"
DATABASE_PATH = rc.get("bob.bio.face.vgg2-crops.directory")
DATABASE_EXTENSION = ".jpg"

logger = logging.getLogger(__name__)
logger.info(f"Loading protocol {PROTOCOL} from {DATABASE_PATH}")

transform = get_standard_data_augmentation()

train_dataset = VGG2TorchDataset(
    protocol=PROTOCOL,
    database_path=DATABASE_PATH,
    database_extension=DATABASE_EXTENSION,
    transform=transform,
)

train_dataloader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    pin_memory=True,
    num_workers=2,
)

# Validation is deliberately disabled for this setup.
validation_dataset = None
# CONSISTENCY FIX: define validation_dataloader as well (it was only present
# in commented-out code), so consumers expecting both loader globals do not
# hit a NameError.
validation_dataloader = None
from bob.bio.face.pytorch.head import ArcFace

### train_dataloader AND BACKBONES needs to be loaded before
# Demographic-balanced variant: the class count is the number of demographic
# groups exposed by the dataset (its .values() was used originally, so
# demographic_keys is presumably a dict-like mapping — confirm), not the
# number of identities.
#NUM_CLASS = train_dataloader.dataset.n_classes
#WEIGHT = train_dataloader.dataset.get_demographic_class_weights()
# IDIOM FIX: len() of the mapping equals len(list(mapping.values())) without
# materializing an intermediate list; also dropped the trailing no-op `pass`.
NUM_CLASS = len(train_dataloader.dataset.demographic_keys)
identity_head = ArcFace(feat_dim=backbone.features.num_features, num_class=NUM_CLASS)
from bob.bio.face.pytorch.head import MagFace

### train_dataloader AND BACKBONES needs to be loaded before
# Demographic-balanced variant: the class count is the number of demographic
# groups exposed by the dataset (its .values() was used originally, so
# demographic_keys is presumably a dict-like mapping — confirm), not the
# number of identities.
#NUM_CLASS = train_dataloader.dataset.n_classes
#WEIGHT = train_dataloader.dataset.get_demographic_class_weights()
# IDIOM FIX: len() of the mapping equals len(list(mapping.values())) without
# materializing an intermediate list; also dropped the trailing no-op `pass`.
NUM_CLASS = len(train_dataloader.dataset.demographic_keys)
identity_head = MagFace(feat_dim=backbone.features.num_features, num_class=NUM_CLASS)
from bob.bio.face.pytorch.head import ArcFace

### train_dataloader AND BACKBONES needs to be loaded before
# Identity-classification head: one class per training identity.
NUM_CLASS = train_dataloader.dataset.n_classes
# Per-demographic class weights — presumably consumed by the training loss;
# confirm against the trainer.
WEIGHT = train_dataloader.dataset.get_demographic_class_weights()
identity_head = ArcFace(feat_dim=backbone.features.num_features, num_class=NUM_CLASS)
# IDIOM FIX: removed a trailing no-op `pass` statement.
from bob.bio.face.pytorch.head import MagFace

### train_dataloader AND BACKBONES needs to be loaded before
# Identity-classification head: one class per training identity.
NUM_CLASS = train_dataloader.dataset.n_classes
# Per-demographic class weights — presumably consumed by the training loss;
# confirm against the trainer.
WEIGHT = train_dataloader.dataset.get_demographic_class_weights()
identity_head = MagFace(feat_dim=backbone.features.num_features, num_class=NUM_CLASS)
# IDIOM FIX: removed a trailing no-op `pass` statement.
# REGULAR TRAINING SETUP
import torch
from functools import partial

# Hard cap on the number of training epochs.
MAX_EPOCHS = 50

# SGD factory: the trainer binds the model parameters later, so only the
# hyper-parameters are frozen here.
optimizer = partial(torch.optim.SGD, lr=0.1, momentum=0.9)

# A step-wise learning-rate decay was tried and left disabled:
# lr_schedule = torch.optim.lr_scheduler.MultiStepLR(
#     optimizer, milestones=[5, 10, 20], gamma=0.1)
import click
from bob.bio.face.embeddings.pytorch import PyTorchModel, iresnet_template
from bob.bio.demographics.fair_transformers import RunnableTransformer
from functools import partial
import os
from bob.extension.scripts.click_helper import ResourceOption
from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics
from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics
from bob.pipelines.distributed import VALID_DASK_CLIENT_STRINGS
from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES
# BACKBONES = dict()
# BACKBONES["iresnet100"] = iresnet100
@click.command()
@click.argument("BACKBONE")
@click.argument("CHECKPOINT-PATH")
@click.argument("EXPERIMENT-PATH")
@click.option(
    "--dask-client",
    "-l",
    entry_point_group="dask.client",
    string_exceptions=VALID_DASK_CLIENT_STRINGS,
    default="single-threaded",
    help="Dask client for the execution of the pipeline.",
    cls=ResourceOption,
)
def experiment_regularized_model(
    backbone, checkpoint_path, experiment_path, dask_client, **kwargs
):
    """Run a vanilla-biometrics experiment with a regularized backbone.

    BACKBONE is a key of AVAILABLE_BACKBONES; CHECKPOINT-PATH points to its
    pretrained weights; EXPERIMENT-PATH is where checkpoints and scores are
    written. Scores are computed on the MEDS ``verification_fold1`` protocol
    for the dev and eval groups.
    """
    annotation_type = "eyes-center"
    fixed_positions = None
    memory_demanding = False

    # Resolve the backbone name to its network constructor.
    backbone = AVAILABLE_BACKBONES[backbone]["structure"]

    # Loading the pipeline
    pipeline = iresnet_template(
        embedding=RunnableTransformer(
            partial(backbone, pretrained=checkpoint_path),
            memory_demanding=memory_demanding,
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )

    # face_crop_path = os.path.join(experiment_path, "../face-crop/")

    # Checkpointing the pipeline
    pipeline = checkpoint_vanilla_biometrics(pipeline, base_dir=experiment_path)
    # BUGFIX: the next line had been mangled into a bare `p` statement
    # (a NameError at runtime); the intended assignment stays disabled:
    # pipeline.transformer[0].features_dir = face_crop_path

    dask_partition_size = 100

    #### DATABASE
    from bob.bio.face.database import MEDSDatabase

    protocol = "verification_fold1"
    database = MEDSDatabase(protocol=protocol)
    groups = ["dev", "eval"]

    execute_vanilla_biometrics(
        pipeline,
        database,
        dask_client,
        groups,
        experiment_path,
        write_metadata_scores=True,
        checkpoint=False,
        dask_partition_size=dask_partition_size,
        dask_n_workers=10,
        allow_scoring_with_all_biometric_references=True,
    )


if __name__ == "__main__":
    # BUGFIX: previously called the undefined name `meds_experiment()`.
    experiment_regularized_model()
from bob.bio.demographics.datasets import MedsTorchDataset
# https://pytorch.org/docs/stable/data.html
from torch.utils.data import DataLoader
from bob.extension import rc
import os
import bob.io.image
import torch
from functools import partial
import torchvision.transforms as transforms
import click
import yaml
from bob.bio.demographics.regularizers.trainers import balance_trainer
@click.command()
@click.argument("OUTPUT_DIR")
@click.option("--max-epochs", default=600, help="Max number of epochs")
@click.option("--batch-size", default=64, help="Batch size")
@click.option("--backbone", default="iresnet100", help="Backbone")
def balance_meds(
    output_dir,
    max_epochs,
    batch_size,
    backbone,
):
    """Fine-tune the chosen backbone on MEDS using the balance trainer.

    OUTPUT_DIR receives the training artifacts; the backbone name is looked
    up in AVAILABLE_BACKBONES and its "prior" constructor instantiated.
    """
    from bob.bio.demographics.regularizers import AVAILABLE_BACKBONES

    meds_samples_dir = os.path.join(
        rc.get("bob.bio.demographics.directory"), "meds", "samplewrapper"
    )

    def _to_float_image(img):
        # Presumably converts bob's planar layout to the HxWxC array that
        # torchvision expects — confirm against bob.io.image docs.
        return bob.io.image.to_matplotlib(img.astype("float32"))

    def _normalize(tensor):
        # Shift/scale raw [0, 255] pixel values to roughly [-1, 1].
        return (tensor - 127.5) / 128.0

    transform = transforms.Compose(
        [
            _to_float_image,
            transforms.ToTensor(),
            _normalize,
        ]
    )

    dataset = MedsTorchDataset(
        protocol="verification_fold1",
        database_path=meds_samples_dir,
        transform=transform,
    )
    train_dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=2
    )

    backbone_model = AVAILABLE_BACKBONES[backbone]["prior"]()

    balance_trainer(
        output_dir,
        max_epochs,
        batch_size,
        train_dataloader,
        backbone_model,
        transform,
    )


if __name__ == "__main__":
    balance_meds()
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.