bob / bob.learn.pytorch · Commits

Commit e4a7d224, authored Dec 15, 2020 by Tiago de Freitas Pereira

    [black]

Parent: c83fd608
Pipeline #46610 passed in 14 minutes and 1 second
Changes: 59 · Pipelines: 1
bob/__init__.py

# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
bob/learn/__init__.py

# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
bob/learn/pytorch/__init__.py

# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, an not on the import module.

    Parameters:
    ...

@@ -11,8 +10,10 @@ def __appropriate__(*args):
      <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    for obj in args:
        obj.__module__ = __name__


__appropriate__()

-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
bob/learn/pytorch/architectures/CASIANet.py

import torch

from .utils import make_conv_layers

-CASIA_CONFIG = [32, 64, 'M', 64, 128, 'M', 96, 192, 'M', 128, 256, 'M', 160, 320]
+CASIA_CONFIG = [32, 64, "M", 64, 128, "M", 96, 192, "M", 128, 256, "M", 160, 320]


class CASIANet(torch.nn.Module):
    """ The class defining the CASIA-Net CNN model.

    This class implements the CNN described in:
    "Learning Face Representation From Scratch", D. Yi, Z. Lei, S. Liao and S.z. Li, 2014

    ...

@@ -22,10 +23,10 @@ class CASIANet(torch.nn.Module):
    classifier: :py:class:`torch.nn.Module`
        The output of the last linear (logits)
    """

    def __init__(self, num_cls, drop_rate=0.5):
        """ Init method

        Parameters
        ----------
        ...

@@ -35,16 +36,16 @@ class CASIANet(torch.nn.Module):
            The probability for dropout.
        """
        super(CASIANet, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.conv = make_conv_layers(CASIA_CONFIG)
        self.avgpool = torch.nn.AvgPool2d(8)
        self.classifier = torch.nn.Linear(320, self.num_classes)

    def forward(self, x):
        """ Propagate data through the network

        Parameters
        ----------
        ...

@@ -58,9 +59,9 @@ class CASIANet(torch.nn.Module):
        """
        x = self.conv(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = torch.nn.functional.dropout(x, p=self.drop_rate, training=self.training)
        out = self.classifier(x)
        return out, x  # x for feature
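A minimal usage sketch, not part of the commit: the forward pass returns both the logits and the pooled 320-dimensional feature. The import path follows the file layout shown in this diff; the 128x128 input size and the class count are illustrative assumptions (four "M" pooling stages leave an 8x8 map for the AvgPool2d(8) layer).

import torch

from bob.learn.pytorch.architectures.CASIANet import CASIANet  # path assumed from this diff

net = CASIANet(num_cls=10575, drop_rate=0.5)  # class count is illustrative only
net.eval()

images = torch.randn(4, 3, 128, 128)          # dummy batch of 128x128 RGB crops
with torch.no_grad():
    logits, features = net(images)

print(logits.shape)    # torch.Size([4, 10575])
print(features.shape)  # torch.Size([4, 320])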
bob/learn/pytorch/architectures/CNN8.py

...
@@ -8,10 +8,11 @@ import torch.nn.functional as F

from .utils import make_conv_layers

-CNN8_CONFIG = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M']
+CNN8_CONFIG = [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M"]


class CNN8(nn.Module):
    """ The class defining the CNN8 model.

    Attributes
    ----------
    ...

@@ -26,10 +27,10 @@ class CNN8(nn.Module):
    classifier: :py:class:`torch.nn.Module`
        The output of the last linear (logits)
    """

    def __init__(self, num_cls, drop_rate=0.5):
        """ Init method

        Parameters
        ----------
        ...

@@ -39,16 +40,16 @@ class CNN8(nn.Module):
            The probability for dropout.
        """
        super(CNN8, self).__init__()
        self.num_classes = num_cls
        self.drop_rate = float(drop_rate)
        self.conv = make_conv_layers(CNN8_CONFIG)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(512, self.num_classes)

    def forward(self, x):
        """ Propagate data through the network

        Parameters
        ----------
        ...

@@ -62,10 +63,9 @@ class CNN8(nn.Module):
        """
        x = self.conv(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = F.dropout(x, p=self.drop_rate, training=self.training)
        out = self.classifier(x)
        return out, x  # x for feature
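The configuration list above is consumed by make_conv_layers, which is not part of this diff. Assuming the usual VGG-style convention that each integer adds a 3x3 convolution followed by a ReLU and each "M" adds a 2x2 max-pooling, a hand-written sketch of the resulting feature extractor would look like the following; the function name and the padding choice are assumptions, and the real helper in .utils may differ (e.g. batch-norm options).

import torch
import torch.nn as nn

CNN8_CONFIG = [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M"]


def sketch_conv_layers(config, in_channels=3):
    # hypothetical stand-in for bob.learn.pytorch.architectures.utils.make_conv_layers
    layers = []
    for v in config:
        if v == "M":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            layers += [nn.Conv2d(in_channels, v, kernel_size=3, padding=1), nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)


features = sketch_conv_layers(CNN8_CONFIG)
print(features(torch.randn(1, 3, 128, 128)).shape)  # torch.Size([1, 512, 8, 8])

With 128x128 inputs this leaves the 512-channel 8x8 map that the AvgPool2d(8) and Linear(512, num_cls) layers above expect.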
bob/learn/pytorch/architectures/ConditionalGAN.py

...
@@ -4,8 +4,9 @@

import torch
import torch.nn as nn


class ConditionalGAN_generator(nn.Module):
    """ Class implementating the conditional GAN generator

    This network is introduced in the following publication:
    Mehdi Mirza, Simon Osindero: "Conditional Generative Adversarial Nets"
    ...

@@ -18,8 +19,9 @@ class ConditionalGAN_generator(nn.Module):
        The sequential container
    """

    def __init__(self, noise_dim, conditional_dim, channels=3, ngpu=1):
        """Init function

        Parameters
        ----------
        ...

@@ -33,38 +35,40 @@ class ConditionalGAN_generator(nn.Module):
            The number of available GPU devices
        """
        super(ConditionalGAN_generator, self).__init__()
        self.ngpu = ngpu
        self.conditional_dim = conditional_dim

        # output dimension
        ngf = 64

        self.main = nn.Sequential(
            # input is Z, going into a convolution
-            nn.ConvTranspose2d((noise_dim + conditional_dim), ngf * 8, 4, 1, 0, bias=False),
+            nn.ConvTranspose2d(
+                (noise_dim + conditional_dim), ngf * 8, 4, 1, 0, bias=False
+            ),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, channels, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, z, y):
        """Forward function

        Parameters
        ----------
        ...

@@ -79,19 +83,19 @@ class ConditionalGAN_generator(nn.Module):
            the output of the generator (i.e. an image)
        """
        generator_input = torch.cat((z, y), 1)
-        #if isinstance(generator_input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+        # if isinstance(generator_input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
        # output = nn.parallel.data_parallel(self.main, generator_input, range(self.ngpu))
-        #else:
+        # else:
        # output = self.main(generator_input)
        # let's assume that we will never face the case where more than a GPU is used ...
        output = self.main(generator_input)
        return output


class ConditionalGAN_discriminator(nn.Module):
    """ Class implementating the conditional GAN discriminator

    Attributes
    ----------
    ...

@@ -104,9 +108,10 @@ class ConditionalGAN_discriminator(nn.Module):
    main : :py:class:`torch.nn.Sequential`
        The sequential container
    """

    def __init__(self, conditional_dim, channels=3, ngpu=1):
        """Init function

        Parameters
        ----------
        ...

@@ -118,36 +123,35 @@ class ConditionalGAN_discriminator(nn.Module):
            The number of available GPU devices
        """
        super(ConditionalGAN_discriminator, self).__init__()
        self.conditional_dim = conditional_dim
        self.ngpu = ngpu

        # input dimension
        ndf = 64

        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d((channels + conditional_dim), ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
-            nn.Sigmoid()
-        )
+            nn.Sigmoid(),
+        )

    def forward(self, images, y):
        """Forward function

        Parameters
        ----------
        ...

@@ -161,12 +165,12 @@ class ConditionalGAN_discriminator(nn.Module):
        :py:class:`torch.Tensor`
            the output of the discriminator
        """
        input_discriminator = torch.cat((images, y), 1)
-        #if isinstance(input_discriminator.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+        # if isinstance(input_discriminator.data, torch.cuda.FloatTensor) and self.ngpu > 1:
        # output = nn.parallel.data_parallel(self.main, input_discriminator, range(self.ngpu))
-        #else:
+        # else:
        # output = self.main(input_discriminator)
        # let's assume that we will never face the case where more than a GPU is used ...
        output = self.main(input_discriminator)
        return output.view(-1, 1).squeeze(1)
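A minimal sketch, not part of the commit, of how the two modules above fit together. The class names and constructor arguments come from this diff; the noise and condition dimensions, the import path and the spatially replicated condition map fed to the discriminator are assumptions, chosen so the shapes line up with the 64x64 architecture shown here.

import torch

from bob.learn.pytorch.architectures.ConditionalGAN import (  # path assumed from this diff
    ConditionalGAN_discriminator,
    ConditionalGAN_generator,
)

noise_dim, cond_dim, batch = 100, 13, 4   # illustrative values
G = ConditionalGAN_generator(noise_dim, cond_dim)
D = ConditionalGAN_discriminator(cond_dim)

z = torch.randn(batch, noise_dim, 1, 1)            # latent noise
y = torch.zeros(batch, cond_dim, 1, 1)
y[:, 0] = 1                                        # one-hot condition for the generator

fake = G(z, y)                                     # (batch, 3, 64, 64)

# the discriminator concatenates the condition with the image, so replicate it spatially
y_map = y.expand(batch, cond_dim, 64, 64)
score = D(fake, y_map)                             # (batch,) sigmoid outputs
print(fake.shape, score.shape)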
bob/learn/pytorch/architectures/ConvAutoencoder.py

...
@@ -3,8 +3,9 @@

from torch import nn


class ConvAutoencoder(nn.Module):
    """
    A class defining a simple convolutional autoencoder.

    Attributes
    ...

@@ -13,8 +14,9 @@ class ConvAutoencoder(nn.Module):
        returns the encoder output if true, the reconstructed image otherwise.
    """

    def __init__(self, return_latent_embedding=False):
        """
        Init function

        Parameters
        ...

@@ -22,36 +24,40 @@ class ConvAutoencoder(nn.Module):
        return_latent_embedding : bool
            returns the encoder output if true, the reconstructed image otherwise.
        """
        super(ConvAutoencoder, self).__init__()
        self.return_latent_embedding = return_latent_embedding

        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, 5, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 5, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3, padding=2),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3, padding=2),
            nn.ReLU(True),
-            nn.MaxPool2d(2))
+            nn.MaxPool2d(2),
+        )

        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 16, 5, stride=2, padding=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 3, 5, stride=2, padding=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(3, 3, 2, stride=1, padding=1),
-            nn.Tanh())
+            nn.Tanh(),
+        )

    def forward(self, x):
        """ Propagate data through the network

        Parameters
        ----------
        ...

@@ -64,8 +70,8 @@ class ConvAutoencoder(nn.Module):
            either the encoder output or the reconstructed image
        """
        x = self.encoder(x)
        if self.return_latent_embedding:
            return x
        x = self.decoder(x)
        return x
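A minimal round-trip sketch, not part of the commit, assuming 64x64 RGB inputs, a size for which this particular encoder/decoder stack reproduces the input resolution; with return_latent_embedding=True the same call returns the 16-channel 5x5 bottleneck instead. The import path is assumed from the file layout shown in this diff.

import torch

from bob.learn.pytorch.architectures.ConvAutoencoder import ConvAutoencoder  # path assumed

images = torch.randn(2, 3, 64, 64)               # dummy 64x64 RGB batch

autoencoder = ConvAutoencoder()                  # default: return the reconstruction
print(autoencoder(images).shape)                 # torch.Size([2, 3, 64, 64])

encoder_only = ConvAutoencoder(return_latent_embedding=True)
print(encoder_only(images).shape)                # torch.Size([2, 16, 5, 5])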
bob/learn/pytorch/architectures/DCGAN.py

...
@@ -6,7 +6,7 @@ import torch.nn as nn


class DCGAN_generator(nn.Module):
    """ Class implementating the generator part of the Deeply Convolutional GAN

    This network is introduced in the following publication:
    Alec Radford, Luke Metz, Soumith Chintala: "Unsupervised Representation
    ...

@@ -21,8 +21,9 @@ class DCGAN_generator(nn.Module):
        The number of available GPU devices
    """

    def __init__(self, ngpu):
        """Init function

        Parameters
        ----------
        ...

@@ -30,39 +31,39 @@ class DCGAN_generator(nn.Module):
            The number of available GPU devices
        """
        super(DCGAN_generator, self).__init__()
        self.ngpu = ngpu

        # just to test - will soon be args
        nz = 100  # noise dimension
        ngf = 64  # number of features map on the first layer
        nc = 3  # number of channels

        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        """Forward function