Set up the documentation

parent ba9a7fe1
@@ -4,4 +4,5 @@ __path__ = extend_path(__path__, __name__)
 from DataShuffler import *
 # gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
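Note: the `__all__` idiom added throughout these `__init__.py` files controls both `from package import *` and which names Sphinx autodoc picks up. A minimal, self-contained sketch of what it does (a hypothetical toy module, not this repo's code):

    # Two public names and one private helper in a toy module.
    def shuffle_data():
        pass

    def _internal_helper():
        pass

    # Export every module-level name that does not start with an underscore,
    # which is exactly the set Sphinx autodoc will then document.
    __all__ = [_ for _ in dir() if not _.startswith('_')]

    assert 'shuffle_data' in __all__
    assert '_internal_helper' not in __all__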
@@ -54,9 +54,9 @@ class Conv2D(Layer):
         if self.W is None:
             self.W = create_weight_variables([self.kernel_size, self.kernel_size, n_channels, self.filters],
-                                             seed=self.seed, name=str(self.name), use_gpu=self.use_gpu)
+                                             seed=self.seed, name="w_" + str(self.name), use_gpu=self.use_gpu)

         if self.activation is not None:
-            self.b = create_bias_variables([self.filters], name=str(self.name) + "bias", use_gpu=self.use_gpu)
+            self.b = create_bias_variables([self.filters], name="b_" + str(self.name) + "bias", use_gpu=self.use_gpu)

     def get_graph(self):
@@ -64,7 +64,7 @@ class Conv2D(Layer):
         conv2d = tf.nn.conv2d(self.input_layer, self.W, strides=[1, 1, 1, 1], padding='SAME')

         if self.activation is not None:
-            non_linear_conv2d = tf.nn.tanh(tf.nn.bias_add(conv2d, self.b))
+            non_linear_conv2d = self.activation(tf.nn.bias_add(conv2d, self.b))
             output = non_linear_conv2d
         else:
             output = conv2d
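Note: the change above swaps a hard-coded tf.nn.tanh for the activation callable stored on the layer, so any TensorFlow op (tf.nn.relu, tf.nn.sigmoid, ...) can be chosen at construction time. A framework-free sketch of the pattern (TinyLayer is hypothetical, not this repo's Conv2D):

    # Store the activation as a callable; None means a purely linear output.
    class TinyLayer(object):
        def __init__(self, activation=None):
            self.activation = activation  # e.g. tf.nn.tanh, tf.nn.relu, or None

        def get_graph(self, pre_activation):
            if self.activation is not None:
                return self.activation(pre_activation)
            return pre_activation

    linear = TinyLayer()                   # passes values through unchanged
    nonlinear = TinyLayer(activation=abs)  # abs() stands in for tf.nn.tanh here
    assert linear.get_graph(-3) == -3
    assert nonlinear.get_graph(-3) == 3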
@@ -43,9 +43,9 @@ class FullyConnected(Layer):
         input_dim = reduce(mul, self.input_layer.get_shape().as_list())
         self.W = create_weight_variables([input_dim, self.output_dim],
-                                         seed=self.seed, name=str(self.name), use_gpu=self.use_gpu)
+                                         seed=self.seed, name="W_" + str(self.name), use_gpu=self.use_gpu)
         #if self.activation is not None:
-        self.b = create_bias_variables([self.output_dim], name=str(self.name) + "_bias", use_gpu=self.use_gpu)
+        self.b = create_bias_variables([self.output_dim], name="b_" + str(self.name), use_gpu=self.use_gpu)

     def get_graph(self):
@@ -58,7 +58,7 @@ class FullyConnected(Layer):
         fc = self.input_layer

         if self.activation is not None:
-            non_linear_fc = tf.nn.tanh(tf.matmul(fc, self.W) + self.b)
+            non_linear_fc = self.activation(tf.matmul(fc, self.W) + self.b)
             output = non_linear_fc
         else:
             output = tf.matmul(fc, self.W) + self.b
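Note: input_dim above is the product of the entries of the previous layer's shape list, i.e. the flattened feature count. The same computation in isolation (caveat: if the shape list still contains the batch axis, or a None placeholder, this product is wrong):

    from operator import mul
    from functools import reduce  # plain builtin reduce under the Python 2 used here

    shape = [28, 28, 32]           # height x width x channels, batch axis excluded
    input_dim = reduce(mul, shape)
    assert input_dim == 25088      # 28 * 28 * 32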
@@ -17,8 +17,11 @@ class Layer(object):
         Base constructor

         **Parameters**

         input: Layer input
+        name: Name of the layer
+        activation: TensorFlow activation operation (https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html)
+        initialization: Weight initialization scheme (still to be implemented)
+        use_gpu: If True, place this layer's variables on the GPU
+        seed: Seed for the TensorFlow random-number generators used at initialization
         """
         self.name = name
         self.initialization = initialization
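Note: the documented parameters imply a constructor roughly like the sketch below; the default values are assumptions for illustration, not the repo's actual defaults:

    # Hypothetical reconstruction of Layer.__init__ from the docstring above.
    class Layer(object):
        def __init__(self, name, activation=None, initialization='xavier',
                     use_gpu=False, seed=10):
            self.name = name                      # prefix for the "w_"/"b_" variable names
            self.activation = activation          # callable such as tf.nn.tanh, or None
            self.initialization = initialization  # weight-initialization scheme (TODO upstream)
            self.use_gpu = use_gpu                # place this layer's variables on the GPU
            self.seed = seed                      # seed for the TensorFlow initializers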
@@ -9,4 +9,6 @@ from .FullyConnected import FullyConnected
 from .MaxPooling import MaxPooling
 from .InputLayer import InputLayer
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -4,6 +4,8 @@ __path__ = extend_path(__path__, __name__)
 from .BaseLoss import BaseLoss
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -43,9 +43,9 @@ class Lenet(SequenceNetwork):
         """
         super(Lenet, self).__init__()

-        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size, filters=conv1_output, activation=True))
+        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size, filters=conv1_output, activation=tf.nn.tanh))
         self.add(MaxPooling(name="pooling1"))
-        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size, filters=conv2_output, activation=True))
+        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size, filters=conv2_output, activation=tf.nn.tanh))
         self.add(MaxPooling(name="pooling2"))
-        self.add(FullyConnected(name="fc1", output_dim=fc1_output, activation=True))
+        self.add(FullyConnected(name="fc1", output_dim=fc1_output, activation=tf.nn.tanh))
         self.add(FullyConnected(name="fc2", output_dim=n_classes, activation=None))
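Note: Lenet builds on the add()/compute_graph() machinery of SequenceNetwork, both visible elsewhere in this commit. A framework-free sketch of that pattern (TinySequence is hypothetical; the real class chains TensorFlow ops instead of plain functions):

    # Layers are applied in the order they were add()-ed.
    class TinySequence(object):
        def __init__(self):
            self.layers = []

        def add(self, layer_fn):
            self.layers.append(layer_fn)

        def compute_graph(self, x):
            for layer_fn in self.layers:
                x = layer_fn(x)
            return x

    net = TinySequence()
    net.add(lambda x: x * 2)  # stand-in for Conv2D(..., activation=tf.nn.tanh)
    net.add(lambda x: x + 1)  # stand-in for MaxPooling(...)
    assert net.compute_graph(3) == 7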
@@ -5,3 +5,5 @@ __path__ = extend_path(__path__, __name__)
 from .SequenceNetwork import SequenceNetwork
 from .Lenet import Lenet
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -23,9 +23,9 @@ class Trainer(object):
                  ###### training options ##########
                  convergence_threshold=0.01,
                  iterations=5000,
-                 base_lr=0.00001,
+                 base_lr=0.001,
                  momentum=0.9,
-                 weight_decay=0.0005,
+                 weight_decay=0.95,
                  # The learning rate policy
                  snapshot=100):
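Note: base_lr feeds tf.train.exponential_decay below, and the jump of weight_decay from 0.0005 to 0.95 reads like the decay *rate* of that schedule rather than an L2 penalty; that reading is an inference from this diff alone. What the schedule computes, in plain Python with an assumed decay_steps:

    # decayed_lr = base_lr * decay_rate ** (global_step / decay_steps)
    def exponential_decay(base_lr, global_step, decay_steps, decay_rate):
        return base_lr * decay_rate ** (float(global_step) / decay_steps)

    base_lr, decay_rate, decay_steps = 0.001, 0.95, 1000  # decay_steps assumed
    print(exponential_decay(base_lr, 0, decay_steps, decay_rate))     # 0.001
    print(exponential_decay(base_lr, 5000, decay_steps, decay_rate))  # ~0.00077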
@@ -59,18 +59,13 @@ class Trainer(object):
             raise ValueError("The variable `architecture` must be an instance of "
                              "`bob.learn.tensorflow.network.SequenceNetwork`")

-        #input_layer = InputLayer(name="input", input_data=train_placeholder_data)
-        #import ipdb;
-        #ipdb.set_trace();
         train_graph = self.architecture.compute_graph(train_placeholder_data)
         validation_graph = self.architecture.compute_graph(validation_placeholder_data)

         loss_train = tf.reduce_mean(self.loss(train_graph, train_placeholder_labels))
         loss_validation = tf.reduce_mean(self.loss(validation_graph, validation_placeholder_labels))

         batch = tf.Variable(0)
         learning_rate = tf.train.exponential_decay(
             self.base_lr,  # Learning rate
@@ -81,7 +76,6 @@ class Trainer(object):
         optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_train,
                                                                               global_step=batch)

         train_prediction = tf.nn.softmax(train_graph)
         validation_prediction = tf.nn.softmax(validation_graph)
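Note: tf.nn.softmax turns the network's raw logits into per-class probabilities; the validation accuracy below is computed from these. The same transform in numpy, for reference:

    import numpy

    def softmax(logits):
        e = numpy.exp(logits - logits.max(axis=1, keepdims=True))  # stabilized
        return e / e.sum(axis=1, keepdims=True)

    probs = softmax(numpy.array([[2.0, 1.0, 0.1]]))
    assert abs(probs.sum() - 1.0) < 1e-9  # each row is a probability distribution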
@@ -100,7 +94,7 @@ class Trainer(object):
                 feed_dict = {train_placeholder_data: train_data,
                              train_placeholder_labels: train_labels}

-                _, l, lr, _ = session.run([optimizer, loss_train,
+                _, l, lr, __ = session.run([optimizer, loss_train,
                                            learning_rate, train_prediction], feed_dict=feed_dict)

                 if step % self.snapshot == 0:
@@ -108,16 +102,9 @@ class Trainer(object):
                     feed_dict = {validation_placeholder_data: validation_data,
                                  validation_placeholder_labels: validation_labels}

-                    #import ipdb;
-                    #ipdb.set_trace();
                     l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
                     accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
+                    print "Step {0}. Loss = {1}, Acc Validation={2}".format(step, l, accuracy)
-                    #accuracy = util.evaluate_softmax(validation_data, validation_labels, session, validation_prediction,
-                    #                                 validation_data_node)
-                    #print("Step {0}. Loss = {1}, Lr={2}, Accuracy validation = {3}".format(step, l, lr, accuracy))
-                    #sys.stdout.flush()

             train_writer.close()
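Note: the accuracy expression above compares the argmax of each softmax row against the integer labels. On toy data:

    import numpy

    predictions = numpy.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])  # softmax rows
    validation_labels = numpy.array([0, 1, 1])                       # class ids
    accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
    assert abs(accuracy - 100. * 2 / 3) < 1e-9  # 2 of 3 correct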
@@ -19,4 +19,7 @@ def evaluate_softmax(data, labels, session, graph, data_node):
     return 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]