Commit 15496c1b authored by Guillaume HEUSCH

Merge branch 'cleaning-mcae' into 'master'

cleaning

See merge request !27
parents 7a2609cc 8afc7c98
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torch import nn
#==============================================================================
# Define the network:
class ConvAutoencoder(nn.Module):
"""
A class defining a simple convolutional autoencoder.
"""
A class defining a simple convolutional autoencoder.
Attributes
Attributes
----------
return_latent_embedding : bool
returns the encoder output if true, the reconstructed image otherwise.
"""
    def __init__(self, return_latent_embedding=False):
        """
        Init function

        Parameters
        ----------
        return_latent_embedding : bool
            If set to ``True`` the forward() method returns a latent
            embedding (encoder output), otherwise a reconstructed
            image is returned. Default: ``False``
        """
        super(ConvAutoencoder, self).__init__()
        self.return_latent_embedding = return_latent_embedding
        # Encoder: for a 3x64x64 input, produces a 16x5x5 latent map
        self.encoder = nn.Sequential(nn.Conv2d(3, 16, 5, padding=2),
                                     nn.ReLU(True),
                                     nn.MaxPool2d(2),
                                     nn.Conv2d(16, 16, 5, padding=2),
                                     nn.ReLU(True),
                                     nn.MaxPool2d(2),
                                     nn.Conv2d(16, 16, 3, padding=2),
                                     nn.ReLU(True),
                                     nn.MaxPool2d(2),
                                     nn.Conv2d(16, 16, 3, padding=2),
                                     nn.ReLU(True),
                                     nn.MaxPool2d(2))

        # Decoder: reconstructs an image of the input shape from the latent map
        self.decoder = nn.Sequential(nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
                                     nn.ReLU(True),
                                     nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
                                     nn.ReLU(True),
                                     nn.ConvTranspose2d(16, 16, 5, stride=2, padding=2),
                                     nn.ReLU(True),
                                     nn.ConvTranspose2d(16, 3, 5, stride=2, padding=2),
                                     nn.ReLU(True),
                                     nn.ConvTranspose2d(3, 3, 2, stride=1, padding=1),
                                     nn.Tanh())
    def forward(self, x):
        """
        Propagate data through the network.

        Parameters
        ----------
        x : :py:class:`torch.Tensor`
            The input image batch.

        Returns
        -------
        :py:class:`torch.Tensor`
            Either the encoder output or the reconstructed image.
        """
        x = self.encoder(x)
        if self.return_latent_embedding:
            return x
        x = self.decoder(x)
        return x
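# ----------------------------------------------------------------------------
# Minimal usage sketch (an added illustration, not part of the original
# module): for a 3x64x64 input batch the encoder yields a 16x5x5 latent map,
# while the full autoencoder reconstructs a tensor of the input shape.
if __name__ == "__main__":
    import torch
    net = ConvAutoencoder()
    images = torch.randn(2, 3, 64, 64)
    assert net(images).shape == images.shape  # reconstruction path
    encoder_only = ConvAutoencoder(return_latent_embedding=True)
    assert list(encoder_only(images).shape) == [2, 16, 5, 5]  # latent path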
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torchvision import transforms
from bob.pad.face.database import BatlPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 50 # Maximum number of epochs
BATCH_SIZE = 32 # Size of the batch
LEARNING_RATE = 1e-3 # Learning rate
NUM_WORKERS = 8 # The number of workers for the DataLoader
"""
Transformations to be applied sequentially to the input PIL image.
Note: the variable name ``transform`` must be the same in all configuration files.
"""
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                ])
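# With mean=0.5 and std=0.5 per channel, Normalize maps the [0, 1] tensors
# produced by ToTensor to [-1, 1], matching the Tanh output of the
# autoencoder's decoder.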
"""
Set the parameters of the DataFolder dataset class.
Note: do not change the name ``kwargs``.
"""
ORIGINAL_DIRECTORY = ""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
PROTOCOL = 'grandtest-color*infrared*depth-10' # use 10 frames for PAD experiments
annotations_temp_dir = ""
bob_hldi_instance = BatlPadDatabase(protocol=PROTOCOL,
                                    original_directory=ORIGINAL_DIRECTORY,
                                    original_extension=ORIGINAL_EXTENSION,
                                    annotations_temp_dir=annotations_temp_dir,
                                    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
                                    exclude_attacks_list=['makeup'],
                                    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for the grandtest protocol
                                    append_color_face_roi_annot=False)
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
kwargs["transform"] = transform
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real']
kwargs["allow_missing_files"] = True
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import ConvAutoencoder as Network
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.MSELoss()
"""
OPTIONAL: define the function to compute the loss. If it is not defined, the
loss will be computed in the training script; see the training script for
details. Do not change the signature of this function.
"""
# we don't define the loss_function for this configuration
#def loss_function(output, img, target):
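# A hypothetical sketch of such a function, kept commented out since this
# configuration relies on ``loss_type`` instead (illustration only):
#
# def loss_function(output, img, target):
#     # reconstruction loss; the class target is ignored
#     return nn.MSELoss()(output, img)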
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torchvision import transforms
from bob.pad.face.database import BatlPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 50 # Maximum number of epochs
BATCH_SIZE = 32 # Size of the batch
LEARNING_RATE = 1e-3 # Learning rate
NUM_WORKERS = 8 # The number of workers for the DataLoader
"""
Transformations to be applied sequentially to the input PIL image.
Note: the variable name ``transform`` must be the same in all configuration files.
"""
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                ])
"""
Set the parameters of the DataFolder dataset class.
Note: do not change the name ``kwargs``.
"""
ORIGINAL_DIRECTORY = ""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
PROTOCOL = 'grandtest-color*infrared*depth-10' # use 10 frames for PAD experiments
annotations_temp_dir = ""
bob_hldi_instance = BatlPadDatabase(protocol=PROTOCOL,
                                    original_directory=ORIGINAL_DIRECTORY,
                                    original_extension=ORIGINAL_EXTENSION,
                                    annotations_temp_dir=annotations_temp_dir,
                                    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
                                    exclude_attacks_list=['makeup'],
                                    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for the grandtest protocol
                                    append_color_face_roi_annot=False)
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
kwargs["transform"] = transform
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real']
kwargs["allow_missing_files"] = True
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import ConvAutoencoder as Network
"""
If the list ``param_idx_that_requires_grad`` is defined, only the parameters at the listed indices will be tuned.
"""
param_idx_that_requires_grad = [0,16,17] # input Conv2d layers, output ConvTranspose2d+Tanh layers
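# A hypothetical illustration (not part of this configuration) of how a
# training script may apply the list above, assuming parameters are
# enumerated in ``model.parameters()`` order:
#
# for idx, param in enumerate(model.parameters()):
#     param.requires_grad = idx in param_idx_that_requires_grad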
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.MSELoss()
"""
OPTIONAL: define the function to compute the loss. If it is not defined, the
loss will be computed in the training script; see the training script for
details. Do not change the signature of this function.
"""
# we don't define the loss_function for this configuration
#def loss_function(output, img, target):
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from torchvision import transforms
from bob.pad.face.database import CELEBAPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 70 # Maximum number of epochs
BATCH_SIZE = 32 # Size of the batch
LEARNING_RATE = 1e-3 # Learning rate
NUM_WORKERS = 8 # The number of workers for the DataLoader
"""
Transformations to be applied sequentially to the input PIL image.
Note: the variable name ``transform`` must be the same in all configuration files.
"""
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                ])
"""
Set the parameters of the DataFolder dataset class.
Note: do not change the name ``kwargs``.
"""
bob_hldi_instance = CELEBAPadDatabase(original_directory="", original_extension="")
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
kwargs["transform"] = transform
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real']
kwargs["allow_missing_files"] = True
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import ConvAutoencoder as Network
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.MSELoss()
"""
OPTIONAL: define the function to compute the loss. If it is not defined, the
loss will be computed in the training script; see the training script for
details. Do not change the signature of this function.
"""
# we don't define the loss_function for this configuration
#def loss_function(output, img, target):
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Olegs Nikisins
"""
#==============================================================================
# Import here:
from bob.pad.face.database import BatlPadDatabase
from torch import nn
#==============================================================================
# Define parameters here:
"""
Note: do not change names of the below constants.
"""
NUM_EPOCHS = 100 # Maximum number of epochs
BATCH_SIZE = 64 # Size of the batch
LEARNING_RATE = 1e-4 # Learning rate
NUM_WORKERS = 8 # The number of workers for the DataLoader
"""
Set the kwargs of the "dataset" instance of the DataFolder class.
Note: do not change the name ``kwargs``.
"""
# Initialize HLDI for data folder:
ORIGINAL_DIRECTORY = "" # this arguments is not important in this case
ORIGINAL_EXTENSION = ".h5" # extension of the data files
ANNOTATIONS_TEMP_DIR = "" # this argument is not used here
PROTOCOL = 'grandtest-color*infrared*depth-10' # 3 channels are used here, 10 frames
bob_hldi_instance = BatlPadDatabase(protocol=PROTOCOL,
                                    original_directory=ORIGINAL_DIRECTORY,
                                    original_extension=ORIGINAL_EXTENSION,
                                    annotations_temp_dir=ANNOTATIONS_TEMP_DIR,  # annotations computed here will not be saved, because ANNOTATIONS_TEMP_DIR is an empty string
                                    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
                                    exclude_attacks_list=['makeup'],
                                    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for the grandtest protocol
                                    append_color_face_roi_annot=False)  # annotations defining the ROI in the cropped face image are not important here
kwargs = {}
kwargs["data_folder"] = "NO NEED TO SET HERE, WILL BE SET IN THE TRAINING SCRIPT"
# NOTE: ``kwargs["transform"] = transform`` is re-defined below, after ``transform()`` method is defined
kwargs["transform"] = None # keep None for now, re-define below
kwargs["extension"] = '.hdf5'
kwargs["bob_hldi_instance"] = bob_hldi_instance
kwargs["hldi_type"] = "pad"
kwargs["groups"] = ['train']
kwargs["protocol"] = 'grandtest'
kwargs["purposes"] = ['real', 'attack']
kwargs["allow_missing_files"] = True
"""
Transformations to be applied to the input data sample.
Note: the variable or function name ``transform`` must be the same in
all configuration files. This transformation is handled in DataFolder.
"""
from bob.learn.pytorch.utils import MeanStdNormalizer
transform = MeanStdNormalizer(kwargs)
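# Conceptually, each feature is normalized as (x - mean) / std; the actual
# implementation is the MeanStdNormalizer imported above.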
"""
Set the kwargs of the "dataset" instance of the DataFolder class.
Note: do not change the name ``kwargs``.
"""
# NOTE: re-define the ``transform`` parameter now that the ``transform`` object is defined.
# In this case the transformation is mean-std normalization, given the mean and std of each feature:
kwargs["transform"] = transform
"""
Define the network to be trained as a class, named ``Network``.
Note: Do not change the name of the below class.
"""
from bob.learn.pytorch.architectures import TwoLayerMLP as Network
"""
Define the kwargs to be used for ``Network`` in the dictionary namely
``network_kwargs``.
"""
network_kwargs = {}
network_kwargs['in_features'] = 1296
network_kwargs['n_hidden_relu'] = 10
network_kwargs['apply_sigmoid'] = True
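# The training script is then expected to build the model along these lines
# (a sketch, assuming the documented convention):
#
# model = Network(**network_kwargs)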
"""
Define the loss to be used for training.
Note: do not change the name of the below variable.
"""
loss_type = nn.BCELoss()
"""
OPTIONAL: define the function to compute the loss. If it is not defined, the
``loss_type`` defined **above** will be used by the training script; see the
training script for details. Do not change the signature of this function:
``loss_function(output, input, target)``
"""
from bob.learn.pytorch.utils import weighted_bce_loss as loss_function
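# A rough sketch of the idea behind a weighted binary cross-entropy
# (illustration only; the actual implementation is the function imported
# above): per-sample weights compensate for class imbalance in the batch.
#
# import torch
# def example_weighted_bce(output, input, target):  # hypothetical helper
#     n_pos = target.sum()
#     n_neg = target.numel() - n_pos
#     weights = torch.where(target > 0.5,
#                           target.numel() / (2.0 * n_pos + 1e-8),
#                           target.numel() / (2.0 * n_neg + 1e-8))
#     return nn.functional.binary_cross_entropy(output, target, weight=weights)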
@@ -43,8 +43,6 @@ def test_architectures():
    assert embedding.shape == torch.Size([1, 256])

    # LightCNN29
    a = numpy.random.rand(1, 1, 128, 128).astype("float32")
    t = torch.from_numpy(a)
    from ..architectures import LightCNN29
    net = LightCNN29()
    output, embedding = net.forward(t)
@@ -52,8 +50,6 @@ def test_architectures():
    assert embedding.shape == torch.Size([1, 256])

    # LightCNN29v2
    a = numpy.random.rand(1, 1, 128, 128).astype("float32")
    t = torch.from_numpy(a)
    from ..architectures import LightCNN29v2
    net = LightCNN29v2()
    output, embedding = net.forward(t)
@@ -69,8 +65,6 @@ def test_architectures():
    assert output.shape == torch.Size([1, 1])

    # MCCNNv2
    a = numpy.random.rand(1, 4, 128, 128).astype("float32")
    t = torch.from_numpy(a)
    from ..architectures import MCCNNv2
    net = MCCNNv2(num_channels=4)
    output = net.forward(t)
@@ -117,7 +111,6 @@ def test_architectures():
    assert output.shape == torch.Size([1, 3, 64, 64])

    # Conditional GAN
    d = numpy.random.rand(1, 3, 64, 64).astype("float32")
    t = torch.from_numpy(d)
    cfm = numpy.zeros((1, 13, 64, 64), dtype="float32")
    cfm[:, 0, :, :] = 1
@@ -127,7 +120,6 @@ def test_architectures():
    output = discriminator.forward(t, cfmt)
    assert output.shape == torch.Size([1])

    g = numpy.random.rand(1, 100, 1, 1).astype("float32")
    t = torch.from_numpy(g)
    oh = numpy.zeros((1, 13, 1, 1), dtype="float32")
    oh[0] = 1
@@ -136,6 +128,16 @@ def test_architectures():
    generator = ConditionalGAN_generator(100, 13)
    output = generator.forward(t, oht)
    assert output.shape == torch.Size([1, 3, 64, 64])

    # Convolutional Autoencoder
    from bob.learn.pytorch.architectures import ConvAutoencoder
    batch = torch.randn(1, 3, 64, 64)
    model = ConvAutoencoder()
    output = model(batch)
    assert batch.shape == output.shape
    model_embeddings = ConvAutoencoder(return_latent_embedding=True)
    embedding = model_embeddings(batch)
    assert list(embedding.shape) == [1, 16, 5, 5]
def test_transforms():
@@ -186,15 +188,11 @@ def test_map_labels():
    assert '0' in new_labels, "new_labels = {}".format(new_labels)
    assert '1' in new_labels, "new_labels = {}".format(new_labels)
    assert '2' in new_labels, "new_labels = {}".format(new_labels)

    new_labels = map_labels(labels, start_index=5)
    assert '5' in new_labels, "new_labels = {}".format(new_labels)
    assert '6' in new_labels, "new_labels = {}".format(new_labels)
    assert '7' in new_labels, "new_labels = {}".format(new_labels)
from torch.utils.data import Dataset
@@ -390,22 +388,6 @@ def test_ConditionalGANTrainer():
    os.remove('netG_epoch_0.pth')

def test_conv_autoencoder():
    """
    Test the ConvAutoencoder class.
    """
    from bob.learn.pytorch.architectures import ConvAutoencoder