[black]

parent c83fd608
Pipeline #46610 passed with stage
in 14 minutes and 1 second
# Declare this package as a namespace package so that other distributions
# can contribute modules under the same top-level package name.
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not on the import module.

    Parameters
    ----------
    *args
        The objects that you want sphinx to believe that are declared here.

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    for obj in args:
        obj.__module__ = __name__


__appropriate__()

# Export every public (non-underscore) name defined in this module.
__all__ = [_ for _ in dir() if not _.startswith("_")]
import torch import torch
from .utils import make_conv_layers from .utils import make_conv_layers
# Layer configuration consumed by make_conv_layers: integers are conv output
# channel counts, "M" presumably inserts a max-pooling layer
# (NOTE(review): confirm against .utils.make_conv_layers).
CASIA_CONFIG = [32, 64, "M", 64, 128, "M", 96, 192, "M", 128, 256, "M", 160, 320]
class CASIANet(torch.nn.Module):
    """The CASIA-Net CNN model.

    This class implements the CNN described in:
    "Learning Face Representation From Scratch", D. Yi, Z. Lei, S. Liao and S.z. Li, 2014

    Attributes
    ----------
    num_classes : int
        The number of classes.
    drop_rate : float
        The probability for dropout.
    conv : :py:class:`torch.nn.Module`
        The convolutional (and maxpool) layers built from ``CASIA_CONFIG``.
    avgpool : :py:class:`torch.nn.Module`
        The average pooling layer; its flattened output is used as the feature.
    classifier : :py:class:`torch.nn.Module`
        The last linear layer, producing the logits.
    """

    def __init__(self, num_cls, drop_rate=0.5):
        """Init method

        Parameters
        ----------
        num_cls : int
            The number of classes.
        drop_rate : float
            The probability for dropout.
        """
        super(CASIANet, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.conv = make_conv_layers(CASIA_CONFIG)
        self.avgpool = torch.nn.AvgPool2d(8)
        self.classifier = torch.nn.Linear(320, self.num_classes)

    def forward(self, x):
        """Propagate data through the network.

        Parameters
        ----------
        x : :py:class:`torch.Tensor`
            The data to forward through the network.

        Returns
        -------
        tuple of :py:class:`torch.Tensor`
            The logits and the flattened (post-dropout) feature vector.
        """
        features = self.avgpool(self.conv(x))
        features = features.view(features.size(0), -1)
        # dropout is only active in training mode
        features = torch.nn.functional.dropout(
            features, p=self.drop_rate, training=self.training
        )
        logits = self.classifier(features)
        return logits, features  # features kept as the embedding
...@@ -8,10 +8,11 @@ import torch.nn.functional as F ...@@ -8,10 +8,11 @@ import torch.nn.functional as F
from .utils import make_conv_layers from .utils import make_conv_layers
# Layer configuration consumed by make_conv_layers: integers are conv output
# channel counts, "M" presumably inserts a max-pooling layer
# (NOTE(review): confirm against .utils.make_conv_layers).
CNN8_CONFIG = [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M"]
class CNN8(nn.Module):
    """The CNN8 model.

    Attributes
    ----------
    num_classes : int
        The number of classes.
    drop_rate : float
        The probability for dropout.
    conv : :py:class:`torch.nn.Module`
        The convolutional (and maxpool) layers built from ``CNN8_CONFIG``.
    avgpool : :py:class:`torch.nn.Module`
        The average pooling layer; its flattened output is used as the feature.
    classifier : :py:class:`torch.nn.Module`
        The last linear layer, producing the logits.
    """

    def __init__(self, num_cls, drop_rate=0.5):
        """Init method

        Parameters
        ----------
        num_cls : int
            The number of classes.
        drop_rate : float
            The probability for dropout.
        """
        super(CNN8, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.conv = make_conv_layers(CNN8_CONFIG)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(512, self.num_classes)

    def forward(self, x):
        """Propagate data through the network.

        Parameters
        ----------
        x : :py:class:`torch.Tensor`
            The data to forward through the network.

        Returns
        -------
        tuple of :py:class:`torch.Tensor`
            The logits and the flattened (post-dropout) feature vector.
        """
        out = self.avgpool(self.conv(x))
        out = out.view(out.size(0), -1)
        # dropout is only active in training mode
        out = F.dropout(out, p=self.drop_rate, training=self.training)
        logits = self.classifier(out)
        return logits, out  # out kept as the embedding
...@@ -4,8 +4,9 @@ ...@@ -4,8 +4,9 @@
import torch import torch
import torch.nn as nn import torch.nn as nn
class ConditionalGAN_generator(nn.Module):
    """Class implementing the conditional GAN generator.

    This network is introduced in the following publication:
    Mehdi Mirza, Simon Osindero: "Conditional Generative Adversarial Nets"

    Attributes
    ----------
    ngpu : int
        The number of available GPU devices
    conditional_dim : int
        The dimension of the conditioning variable
    main : :py:class:`torch.nn.Sequential`
        The sequential container
    """

    def __init__(self, noise_dim, conditional_dim, channels=3, ngpu=1):
        """Init function

        Parameters
        ----------
        noise_dim : int
            The dimension of the noise input
        conditional_dim : int
            The dimension of the conditioning variable
        channels : int
            The number of channels in the generated image
        ngpu : int
            The number of available GPU devices
        """
        super(ConditionalGAN_generator, self).__init__()
        self.ngpu = ngpu
        self.conditional_dim = conditional_dim

        ngf = 64  # base number of feature maps (output dimension)
        layers = [
            # input: the concatenated (noise + condition) vector, seen as a
            # (noise_dim + conditional_dim) x 1 x 1 map
            nn.ConvTranspose2d(
                noise_dim + conditional_dim, ngf * 8, 4, 1, 0, bias=False
            ),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size: (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size: (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size: (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size: (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, channels, 4, 2, 1, bias=False),
            nn.Tanh(),
            # state size: (channels) x 64 x 64
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, z, y):
        """Forward function

        Parameters
        ----------
        z : :py:class:`torch.Tensor`
            The minibatch of noise
        y : :py:class:`torch.Tensor`
            The conditioning variable (concatenable with ``z`` along dim 1)

        Returns
        -------
        :py:class:`torch.Tensor`
            the output of the generator (i.e. an image)
        """
        # single-GPU path only; the multi-GPU data_parallel branch was
        # deliberately dropped upstream
        generator_input = torch.cat((z, y), 1)
        return self.main(generator_input)
class ConditionalGAN_discriminator(nn.Module):
    """Class implementing the conditional GAN discriminator.

    Attributes
    ----------
    conditional_dim : int
        The dimension of the conditioning variable
    ngpu : int
        The number of available GPU devices
    main : :py:class:`torch.nn.Sequential`
        The sequential container
    """

    def __init__(self, conditional_dim, channels=3, ngpu=1):
        """Init function

        Parameters
        ----------
        conditional_dim : int
            The dimension of the conditioning variable
        channels : int
            The number of channels in the input image
        ngpu : int
            The number of available GPU devices
        """
        super(ConditionalGAN_discriminator, self).__init__()
        self.conditional_dim = conditional_dim
        self.ngpu = ngpu

        ndf = 64  # base number of feature maps (input dimension)
        layers = [
            # input: (channels + conditional_dim) x 64 x 64
            nn.Conv2d(channels + conditional_dim, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, images, y):
        """Forward function

        Parameters
        ----------
        images : :py:class:`torch.Tensor`
            The minibatch of input images
        y : :py:class:`torch.Tensor`
            The conditioning variable (concatenable with ``images`` along dim 1)

        Returns
        -------
        :py:class:`torch.Tensor`
            the output of the discriminator, one probability per example
        """
        # single-GPU path only; the multi-GPU data_parallel branch was
        # deliberately dropped upstream
        input_discriminator = torch.cat((images, y), 1)
        return self.main(input_discriminator).view(-1, 1).squeeze(1)
...@@ -3,8 +3,9 @@ ...@@ -3,8 +3,9 @@
from torch import nn from torch import nn
class ConvAutoencoder(nn.Module):
    """A simple convolutional autoencoder.

    Attributes
    ----------
    return_latent_embedding : bool
        returns the encoder output if true, the reconstructed image otherwise.
    """

    def __init__(self, return_latent_embedding=False):
        """
        Init function

        Parameters
        ----------
        return_latent_embedding : bool
            returns the encoder output if true, the reconstructed image otherwise.
        """
        super(ConvAutoencoder, self).__init__()
        self.return_latent_embedding = return_latent_embedding

        encoder_layers = [
            nn.Conv2d(3, 16, 5, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 5, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
        ]
        self.encoder = nn.Sequential(*encoder_layers)

        # roughly mirrors the encoder; the final stride-1 transposed conv
        # trims the spatial size back to the input resolution
        decoder_layers = [
            nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 16, 5, stride=2, padding=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 3, 5, stride=2, padding=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(3, 3, 2, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Propagate data through the network.

        Parameters
        ----------
        x : :py:class:`torch.Tensor`
            The image to forward through the autoencoder.

        Returns
        -------
        :py:class:`torch.Tensor`
            either the encoder output or the reconstructed image
        """
        latent = self.encoder(x)
        if self.return_latent_embedding:
            return latent
        return self.decoder(latent)
...@@ -6,7 +6,7 @@ import torch.nn as nn ...@@ -6,7 +6,7 @@ import torch.nn as nn
class DCGAN_generator(nn.Module):
    """Class implementing the generator part of the Deeply Convolutional GAN.

    This network is introduced in the following publication:
    Alec Radford, Luke Metz, Soumith Chintala: "Unsupervised Representation
    Learning with Deep Convolutional Generative Adversarial Networks"

    Attributes
    ----------
    ngpu : int
        The number of available GPU devices
    main : :py:class:`torch.nn.Sequential`
        The sequential container
    """

    def __init__(self, ngpu):
        """Init function

        Parameters
        ----------
        ngpu : int
            The number of available GPU devices
        """
        super(DCGAN_generator, self).__init__()
        self.ngpu = ngpu

        # just to test - will soon be args
        nz = 100  # noise dimension
        ngf = 64  # number of features map on the first layer
        nc = 3  # number of channels

        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
def forward(self, input):