Commit f9f2355d authored by Tiago Pereira's avatar Tiago Pereira

Cleaning up

[sphinx] Fixed reference
parent b0436a50
Pipeline #10772 passed with stages
in 13 minutes and 55 seconds
......@@ -89,6 +89,7 @@ class Base(object):
self.label_ph = None
# Prefetch variables
self.prefetch = prefetch
self.prefetch_capacity = prefetch_capacity
self.data_ph_from_queue = None
self.label_ph_from_queue = None
......
......@@ -6,6 +6,7 @@
import tensorflow as tf
from .Base import Base
class OnlineSampling(object):
"""
This data shuffler uses the current state of the network to select the samples.
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Mon 05 Sep 2016 16:35 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
from .Initialization import Initialization
import tensorflow as tf
class Constant(Initialization):
    """
    Implements the constant initialization.

    This is usually used to initialize biases.
    This tip was extracted from
    http://www.deeplearningbook.org/contents/optimization.html
    page: 302

    **Parameters**

    constant_value: Value the variable is filled with (default 0.1)
    use_gpu: Place the variable in the GPU?
    seed: Seed forwarded to the base class (a constant fill itself needs no
          randomness)
    """

    def __init__(self, constant_value=0.1, use_gpu=False, seed=None):
        self.constant_value = constant_value
        # BUGFIX: the seed argument was silently discarded (super() was always
        # called with seed=None); forward the user-provided value instead.
        super(Constant, self).__init__(seed=seed, use_gpu=use_gpu)

    def __call__(self, shape, name, scope, init_value=None):
        """
        Create (or reuse) a constant-filled variable.

        **Parameters**

        shape: Shape of the variable
        name: Name of the variable
        scope: Tensorflow variable-scope name
        init_value: Unused; kept for interface compatibility
        """
        initializer = tf.constant(self.constant_value, shape=shape)
        device = "/gpu:0" if self.use_gpu else "/cpu"
        try:
            # First attempt: create a fresh variable under the scope.
            with tf.variable_scope(scope):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
        except ValueError:
            # The variable already exists: reopen the scope with reuse=True.
            with tf.variable_scope(scope, reuse=True):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Mon 05 Sep 2016 16:35 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
from .Initialization import Initialization
import tensorflow as tf
class Gaussian(Initialization):
    """
    Implements Gaussian initialization (via a truncated normal distribution).

    **Parameters**

    mean: Mean of the gaussian
    std: Standard deviation
    seed: Seed of the random number generator
    use_gpu: Place the variables in the GPU?
    """

    def __init__(self, mean=0.,
                 std=1.,
                 seed=10.,
                 use_gpu=False):
        self.mean = mean
        self.std = std
        super(Gaussian, self).__init__(seed, use_gpu=use_gpu)

    def __call__(self, shape, name, scope, init_value=None):
        """
        Create (or reuse) a gaussian-initialized variable.

        **Parameters**

        shape: Shape of the variable
        name: Name of the variable
        scope: Tensorflow variable-scope name
        init_value: Unused; kept for interface compatibility
        """
        # CLEANUP: removed a dead fan-in/fan-out ("in_out") computation that
        # was never used and would raise IndexError for 1-D shapes.
        initializer = tf.truncated_normal(shape,
                                          mean=self.mean,
                                          stddev=self.std,
                                          seed=self.seed)
        device = "/gpu:0" if self.use_gpu else "/cpu"
        try:
            # First attempt: create a fresh variable under the scope.
            with tf.variable_scope(scope):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
        except ValueError:
            # The variable already exists: reopen the scope with reuse=True.
            with tf.variable_scope(scope, reuse=True):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Mon 05 Sep 2016 16:35 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
class Initialization(object):
    """
    Base class for weight/bias initialization strategies.
    """

    def __init__(self, seed=10., use_gpu=False):
        """
        Default constructor

        **Parameters**

        seed: Seed for the pseudo random number generator
        use_gpu: Store the variable in the GPU?
        """
        self.seed = seed
        self.use_gpu = use_gpu
        # Fix the graph-level seed so repeated runs are reproducible.
        tf.set_random_seed(seed)

    def variable_exist(self, var):
        # A variable "exists" when some global variable lives under a
        # top-level scope whose name equals `var`.
        return var in [v.name.split("/")[0] for v in tf.global_variables()]

    def __call__(self, shape, name, scope, init_value=None):
        # BUGFIX: the exception was instantiated but never raised, so a
        # subclass that forgot to override __call__ silently returned None.
        raise NotImplementedError("Please implement this function in derived classes")
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Mon 05 Sep 2016 16:35 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
from .Initialization import Initialization
import tensorflow as tf
class SimplerXavier(Initialization):
    """
    Implements a SIMPLER version of the classic Xavier initialization as in

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of training deep feedforward neural networks." Aistats. Vol. 9. 2010.

    The initialization is a Gaussian distribution with mean 0 and variance:

    Var(W) = 1/sqrt(n_{in});

    where n is the total number of input parameters.
    """

    def __init__(self, seed=10., use_gpu=False):
        super(SimplerXavier, self).__init__(seed, use_gpu=use_gpu)

    def __call__(self, shape, name, scope, init_value=None):
        """
        Create (or reuse) a gaussian-initialized variable.

        **Parameters**

        shape: Shape of the variable
        name: Name of the variable
        scope: Tensorflow scope name
        """
        import math

        # Fan-in only: a conv kernel uses height * width * in_channels,
        # anything else just uses the first dimension.
        fan_in = shape[0] * shape[1] * shape[2] if len(shape) == 4 else shape[0]
        stddev = math.sqrt(1.0 / fan_in)  # XAVIER INITIALIZER (GAUSSIAN)
        initializer = tf.truncated_normal(shape, stddev=stddev, seed=self.seed)

        device = "/gpu:0" if self.use_gpu else "/cpu"
        try:
            with tf.variable_scope(scope):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
        except ValueError:
            # Variable exists already; reopen the scope for reuse.
            with tf.variable_scope(scope, reuse=True):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Pavel Korshunov <pavel.korshunov@idiap.ch>
# @date: Wed 09 Nov 2016 13:55:22 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
from .Initialization import Initialization
import tensorflow as tf
class Uniform(Initialization):
    """
    Implements Random Uniform initialization.
    """

    def __init__(self, seed=10., use_gpu=False):
        super(Uniform, self).__init__(seed, use_gpu=use_gpu)

    def __call__(self, shape, name, scope, init_value=None):
        """
        Create (or reuse) a uniformly-initialized variable in [-1/sqrt(v), 1/sqrt(v)].

        **Parameters**

        shape: Shape of the variable
        name: Name of the variable
        scope: Tensorflow scope name
        init_value: Normalization value; defaults to shape[0] when omitted
        """
        import math

        # We use init_value as normalization value, but it can be used
        # differently in different initializations.
        norm = shape[0] if init_value is None else init_value
        bound = 1.0 / math.sqrt(norm)  # RANDOM UNIFORM INITIALIZATION
        initializer = tf.random_uniform(shape,
                                        minval=-bound,
                                        maxval=bound,
                                        seed=self.seed)

        device = "/gpu:0" if self.use_gpu else "/cpu"
        try:
            with tf.variable_scope(scope):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
        except ValueError:
            # Variable exists already; reopen the scope for reuse.
            with tf.variable_scope(scope, reuse=True):
                with tf.device(device):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Mon 05 Sep 2016 16:35 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
from .Initialization import Initialization
import tensorflow as tf
class Xavier(Initialization):
    """
    Implements the classic and well used Xavier initialization as in

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of training deep feedforward neural networks." Aistats. Vol. 9. 2010.

    Basically the initialization is a Gaussian distribution with mean 0 and variance:

    Var(W) = 1/sqrt(n_{in} + n_{out});

    where n is the total number of parameters for input and output.
    """

    def __init__(self, seed=10., use_gpu=False):
        super(Xavier, self).__init__(seed, use_gpu=use_gpu)

    def __call__(self, shape, name, scope, init_value=None):
        """
        Create (or reuse) a gaussian-initialized variable.

        **Parameters**

        shape: Shape of the variable
        name: Name of the variable
        scope: Tensorflow scope name
        init_value: Unused; kept for interface compatibility
        """
        # Fan-in + fan-out: conv kernels use h*w*in_channels + out_channels.
        if len(shape) == 4:
            in_out = shape[0] * shape[1] * shape[2] + shape[3]
        else:
            in_out = shape[0] + shape[1]

        import math
        stddev = math.sqrt(3.0 / in_out)  # XAVIER INITIALIZER (GAUSSIAN)
        initializer = tf.truncated_normal(shape, stddev=stddev, seed=self.seed)

        # Reuse the scope when a variable with this top-level name already exists.
        reuse = self.variable_exist(scope)

        # NOTE(review): GPU placement is force-disabled here, unlike the other
        # initializers in this package — looks like leftover debugging; confirm
        # before removing.
        self.use_gpu = False

        # CLEANUP: removed a large unreachable block (the old try/except
        # implementation wrapped in a dead string literal after the return).
        with tf.variable_scope(scope, reuse=reuse):
            if self.use_gpu:
                with tf.device("/gpu:0"):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
            else:
                with tf.device("/cpu:0"):
                    return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
from .Initialization import Initialization
from .Xavier import Xavier
from .SimplerXavier import SimplerXavier
from .Gaussian import Gaussian
from .Constant import Constant
from .Uniform import Uniform
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
"""Says object was actually declared here, an not on the import module.
Parameters:
*args: An iterable of objects to modify
Resolves `Sphinx referencing issues
<https://github.com/sphinx-doc/sphinx/issues/3048>`
"""
for obj in args: obj.__module__ = __name__
# Re-home the public classes so Sphinx attributes them to this package.
__appropriate__(
    Initialization,
    Xavier,
    SimplerXavier,
    Gaussian,
    Constant,
    Uniform,
    )
# Export every public (non-underscore) name defined above.
__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 17:38 CEST
import tensorflow as tf
from .MaxPooling import MaxPooling
class AveragePooling(MaxPooling):
    """
    Wraps the tensorflow average pooling.

    **Parameters**

    name: str
      The name of the layer

    shape:
      Shape of the pooling kernel

    strides:
      Shape of the stride

    batch_norm: bool
      Do batch norm?

    activation: bool
      Tensor Flow activation
    """

    def __init__(self, name, shape=[1, 2, 2, 1],
                 strides=[1, 1, 1, 1],
                 batch_norm=False,
                 activation=None):
        super(AveragePooling, self).__init__(name, activation=activation, batch_norm=batch_norm)
        self.shape = shape
        self.strides = strides

    def get_graph(self, training_phase=True):
        with tf.name_scope(str(self.name)):
            pooled = tf.nn.avg_pool(self.input_layer, ksize=self.shape,
                                    strides=self.strides, padding='SAME')
            # Optional batch normalization, then optional activation.
            if self.batch_norm:
                pooled = self.batch_normalize(pooled, training_phase)
            if self.activation is not None:
                pooled = self.activation(pooled)
            return pooled
......@@ -6,8 +6,8 @@
import tensorflow as tf
from .Layer import Layer
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant
#from bob.learn.tensorflow.initialization import Xavier
#from bob.learn.tensorflow.initialization import Constant
class Conv1D(Layer):
......@@ -49,9 +49,9 @@ class Conv1D(Layer):
kernel_size=300,
filters=20,
stride=100,
weights_initialization=Xavier(),
weights_initialization=tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=10),
init_value=None,
bias_initialization=Constant(),
bias_initialization=tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=10),
batch_norm=False,
use_gpu=False
):
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 17:38 CEST
import tensorflow as tf
from .Layer import Layer
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant
class Conv2D(Layer):
    """
    2D Convolution layer.

    **Parameters**

    name: str
      The name of the layer

    activation:
      Tensor Flow activation

    kernel_size: int
      Size of the convolutional kernel

    filters: int
      Number of filters

    stride:
      Shape of the stride

    weights_initialization: py:class:`bob.learn.tensorflow.initialization.Initialization`
      Initialization type for the weights

    bias_initialization: py:class:`bob.learn.tensorflow.initialization.Initialization`
      Initialization type for the biases

    batch_norm: bool
      Do batch norm?

    use_gpu: bool
      Store data in the GPU
    """

    def __init__(self, name, activation=None,
                 kernel_size=3,
                 filters=8,
                 stride=[1, 1, 1, 1],
                 weights_initialization=Xavier(),
                 bias_initialization=Constant(),
                 batch_norm=False,
                 use_gpu=False
                 ):
        super(Conv2D, self).__init__(name=name,
                                     activation=activation,
                                     weights_initialization=weights_initialization,
                                     bias_initialization=bias_initialization,
                                     batch_norm=batch_norm,
                                     use_gpu=use_gpu,
                                     )
        self.kernel_size = kernel_size
        self.filters = filters
        # Weight and bias tensors are created lazily in create_variables.
        self.W = None
        self.b = None
        self.stride = stride

    def create_variables(self, input_layer):
        self.input_layer = input_layer

        # Convolution requires a rank-4 input (batch, height, width, channels).
        rank = len(input_layer.get_shape().as_list())
        if rank != 4:
            raise ValueError("The input as a convolutional layer must have 4 dimensions, "
                             "but {0} were provided".format(rank))
        n_channels = input_layer.get_shape().as_list()[3]

        if self.W is None:
            # Reuse an existing weight variable when one with this name exists.
            w_name = "w_" + str(self.name)
            existing_w = self.get_varible_by_name(w_name)
            if existing_w is not None:
                self.W = existing_w
            else:
                self.W = self.weights_initialization(
                    shape=[self.kernel_size, self.kernel_size, n_channels, self.filters],
                    name=w_name,
                    scope=w_name
                )

            # Same lookup-or-create logic for the bias variable.
            b_name = "b_" + str(self.name) + "bias"
            existing_b = self.get_varible_by_name(b_name)
            if existing_b is not None:
                self.b = existing_b
            else:
                self.b = self.bias_initialization(shape=[self.filters],
                                                  name=b_name,
                                                  scope="b_" + str(self.name))

    def get_graph(self, training_phase=True):
        with tf.name_scope(str(self.name)):
            conv = tf.nn.conv2d(self.input_layer, self.W, strides=self.stride, padding='SAME')
            if self.batch_norm:
                conv = self.batch_normalize(conv, training_phase)
            biased = tf.nn.bias_add(conv, self.b)
            # Apply the activation when one was configured.
            return self.activation(biased) if self.activation is not None else biased
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 17:38 CEST
import tensorflow as tf
from .Layer import Layer
from operator import mul
class Dropout(Layer):
    """
    Dropout layer.

    **Parameters**

    name: str
      The name of the layer

    keep_prob: float
      With probability keep_prob, outputs the input element scaled up by
      1 / keep_prob, otherwise outputs 0.
    """

    def __init__(self, name,
                 keep_prob=0.99
                 ):
        super(Dropout, self).__init__(name=name)
        self.keep_prob = keep_prob

    def create_variables(self, input_layer):
        # Dropout has no trainable parameters; just record the input tensor.
        self.input_layer = input_layer
        return

    def get_graph(self, training_phase=True):
        with tf.name_scope(str(self.name)):
            return tf.nn.dropout(self.input_layer, self.keep_prob, name=self.name)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 17:38 CEST
import tensorflow as tf
from .Layer import Layer
from operator import mul
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant
import numpy
class FullyConnected(Layer):
"""
Fully Connected layer
**Parameters**
name: str
The name of the layer
output_dim: int
Size of the output
activation:
Tensor Flow activation
weights_initialization: py:class:`bob.learn.tensorflow.initialization.Initialization`
Initialization type for the weights
bias_initialization: py:class:`bob.learn.tensorflow.initialization.Initialization`
Initialization type for the weights
batch_norm: bool
Do batch norm?
use_gpu: bool
Store data in the GPU
"""
def __init__(self, name,
output_dim,
activation=None,