Commit fcb1057f authored by Olegs NIKISINS's avatar Olegs NIKISINS
Browse files

Added configs allowing Conv AE fine-tuning on MC BATL data

parent 07aa13df
Pipeline #26306 passed with stage
in 29 minutes and 35 seconds
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torchvision import transforms
from bob.pad.face.database import BatlPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 50       # Maximum number of epochs
BATCH_SIZE = 32       # Size of the batch
LEARNING_RATE = 1e-3  # Learning rate
NUM_WORKERS = 8       # The number of workers for the DataLoader
"""
Transformations to be applied sequentially to the input PIL image.
Note: the variable name ``transform`` must be the same in all configuration files.
"""
# Convert the PIL image to a tensor, then shift channel values from [0, 1]
# into [-1, 1] (mean 0.5, std 0.5 per channel).
_transform_steps = [
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
]
transform = transforms.Compose(_transform_steps)
"""
Set the parameters of the DataFolder dataset class.
Note: do not change the name ``kwargs``.
"""
ORIGINAL_DIRECTORY = ""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
PROTOCOL = 'grandtest-color*infrared*depth-10' # use 10 frames for PAD experiments
annotations_temp_dir = ""
bob_hldi_instance = BatlPadDatabase(protocol=PROTOCOL,
original_directory=ORIGINAL_DIRECTORY,
original_extension=ORIGINAL_EXTENSION,
annotations_temp_dir = annotations_temp_dir,
landmark_detect_method="mtcnn", # detect annotations using mtcnn
exclude_attacks_list=['makeup'],
exclude_pai_all_sets=True, # exclude makeup from all the sets, which is the default behavior for grandtest protocol
append_color_face_roi_annot=False)
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
kwargs["transform"] = transform
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real']
kwargs["allow_missing_files"] = True
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import ConvAutoencoder as Network
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.MSELoss()
"""
OPTIONAL: if not defined loss will be computed in the training script.
See training script for details
Define the function to compute the loss. Don't change the signature of this
function.
"""
# we don't define the loss_function for this configuration
#def loss_function(output, img, target):
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torchvision import transforms
from bob.pad.face.database import BatlPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 50       # Maximum number of epochs
BATCH_SIZE = 32       # Size of the batch
LEARNING_RATE = 1e-3  # Learning rate
NUM_WORKERS = 8       # The number of workers for the DataLoader
"""
Transformations to be applied sequentially to the input PIL image.
Note: the variable name ``transform`` must be the same in all configuration files.
"""
# Convert the PIL image to a tensor, then shift channel values from [0, 1]
# into [-1, 1] (mean 0.5, std 0.5 per channel).
_transform_steps = [
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
]
transform = transforms.Compose(_transform_steps)
"""
Set the parameters of the DataFolder dataset class.
Note: do not change the name ``kwargs``.
"""
ORIGINAL_DIRECTORY = ""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
PROTOCOL = 'grandtest-color*infrared*depth-10' # use 10 frames for PAD experiments
annotations_temp_dir = ""
bob_hldi_instance = BatlPadDatabase(protocol=PROTOCOL,
original_directory=ORIGINAL_DIRECTORY,
original_extension=ORIGINAL_EXTENSION,
annotations_temp_dir = annotations_temp_dir,
landmark_detect_method="mtcnn", # detect annotations using mtcnn
exclude_attacks_list=['makeup'],
exclude_pai_all_sets=True, # exclude makeup from all the sets, which is the default behavior for grandtest protocol
append_color_face_roi_annot=False)
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
kwargs["transform"] = transform
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real']
kwargs["allow_missing_files"] = True
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import ConvAutoencoder as Network
"""
Only parameters defined in the ``param_idx_that_requires_grad`` list will be tuned if the list is defined
"""
param_idx_that_requires_grad = [0,16,17] # input Conv2d layers, output ConvTranspose2d+Tanh layers
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.MSELoss()
"""
OPTIONAL: if not defined loss will be computed in the training script.
See training script for details
Define the function to compute the loss. Don't change the signature of this
function.
"""
# we don't define the loss_function for this configuration
#def loss_function(output, img, target):
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment