Commit 5f92d884 authored by Tiago de Freitas Pereira

Implemented the LightCNN9

parent 8235f252
from .Layer import Layer
from .Conv1D import Conv1D
from .Maxout import maxout
# gets sphinx autodoc done right - don't remove it
......
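The Maxout.py module re-exported above is not part of this diff, so the snippet below is only a sketch, under an assumed signature, of what such a maxout op typically computes: the channel axis is split into num_units groups and the element-wise maximum of each group is kept, which corresponds to the Max-Feature-Map activation used throughout Light CNN-9 when each group holds two channels.

import tensorflow as tf


def maxout(inputs, num_units, name='maxout'):
    # Sketch only (assumed signature): split the last (channel) axis into
    # `num_units` groups and keep the element-wise maximum of each group.
    # Assumes only the batch dimension is unknown at graph-construction time.
    shape = inputs.get_shape().as_list()
    num_channels = shape[-1]
    if num_channels % num_units != 0:
        raise ValueError('num_units must evenly divide the channel axis')
    with tf.name_scope(name):
        shape[0] = -1  # let reshape infer the batch size
        shape[-1] = num_units
        shape.append(num_channels // num_units)
        return tf.reduce_max(tf.reshape(inputs, shape), axis=-1)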
@@ -4,7 +4,7 @@
# @date: Wed 11 May 2016 09:39:36 CEST
import tensorflow as tf
from bob.learn.tensorflow.layers import maxout
class LightCNN9(object):
"""Creates the graph for the Light CNN-9 in
@@ -28,16 +28,16 @@ class LightCNN9(object):
with tf.device(self.device):
initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=self.seed)
graph = slim.conv2d(inputs, 96, [5, 5], activation_fn=tf.nn.relu,
stride=1,
weights_initializer=initializer,
scope='Conv1',
reuse=reuse)
graph = slim.maxout(graph,
num_units=48,
scope='Maxout1')
graph = maxout(graph,
num_units=48,
name='Maxout1')
graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool1')
@@ -49,9 +49,9 @@ class LightCNN9(object):
scope='Conv2a',
reuse=reuse)
graph = slim.maxout(graph,
num_units=48,
scope='Maxout2a')
graph = maxout(graph,
num_units=48,
name='Maxout2a')
graph = slim.conv2d(graph, 192, [3, 3], activation_fn=tf.nn.relu,
stride=1,
@@ -59,9 +59,9 @@ class LightCNN9(object):
scope='Conv2',
reuse=reuse)
graph = slim.maxout(graph,
num_units=96,
scope='Maxout2')
graph = maxout(graph,
num_units=96,
name='Maxout2')
graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool2')
@@ -73,9 +73,9 @@ class LightCNN9(object):
scope='Conv3a',
reuse=reuse)
graph = slim.maxout(graph,
num_units=96,
scope='Maxout3a')
graph = maxout(graph,
num_units=96,
name='Maxout3a')
graph = slim.conv2d(graph, 384, [3, 3], activation_fn=tf.nn.relu,
stride=1,
@@ -83,9 +83,9 @@ class LightCNN9(object):
scope='Conv3',
reuse=reuse)
graph = slim.maxout(graph,
num_units=192,
scope='Maxout3')
graph = maxout(graph,
num_units=192,
name='Maxout3')
graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool3')
@@ -97,9 +97,9 @@ class LightCNN9(object):
scope='Conv4a',
reuse=reuse)
graph = slim.maxout(graph,
num_units=192,
scope='Maxout4a')
graph = maxout(graph,
num_units=192,
name='Maxout4a')
graph = slim.conv2d(graph, 256, [3, 3], activation_fn=tf.nn.relu,
stride=1,
@@ -107,9 +107,9 @@ class LightCNN9(object):
scope='Conv4',
reuse=reuse)
graph = slim.maxout(graph,
num_units=128,
scope='Maxout4')
graph = maxout(graph,
num_units=128,
name='Maxout4')
#####
@@ -119,9 +119,9 @@ class LightCNN9(object):
scope='Conv5a',
reuse=reuse)
graph = slim.maxout(graph,
num_units=128,
scope='Maxout5a')
graph = maxout(graph,
num_units=128,
name='Maxout5a')
graph = slim.conv2d(graph, 256, [3, 3], activation_fn=tf.nn.relu,
stride=1,
@@ -129,9 +129,9 @@ class LightCNN9(object):
scope='Conv5',
reuse=reuse)
graph = slim.maxout(graph,
num_units=128,
scope='Maxout5')
graph = maxout(graph,
num_units=128,
name='Maxout5')
graph = slim.max_pool2d(graph, [2, 2], stride=2, padding="SAME", scope='Pool4')
@@ -142,9 +142,9 @@ class LightCNN9(object):
activation_fn=tf.nn.relu,
scope='fc1',
reuse=reuse)
graph = slim.maxout(graph,
num_units=256,
scope='Maxoutfc1')
graph = maxout(graph,
num_units=256,
name='Maxoutfc1')
graph = slim.fully_connected(graph, self.n_classes,
weights_initializer=initializer,
......
from .Chopra import Chopra
from .LightCNN9 import LightCNN9
from .Dummy import Dummy
from .MLP import MLP
from .Embedding import Embedding
@@ -21,6 +22,7 @@ __appropriate__(
Chopra,
Dummy,
MLP,
LightCNN9,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
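With LightCNN9 now exported from bob.learn.tensorflow.network, building the graph follows the same pattern as the other architectures in the package. The lines below are only a usage sketch: the 128x128 grayscale input shape and the constructor arguments are copied from test_lightcnn_trainer further down, everything else is assumed.

import tensorflow as tf
from bob.learn.tensorflow.network import LightCNN9

# Usage sketch: arguments mirror the (disabled) test_lightcnn_trainer below.
inputs = tf.placeholder(tf.float32, shape=(None, 128, 128, 1), name="data")
architecture = LightCNN9(seed=10, n_classes=2)
logits = architecture(inputs)  # builds the Conv/maxout stack and the fully connected layers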
@@ -4,8 +4,8 @@
# @date: Thu 13 Oct 2016 13:35 CEST
import numpy
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, ImageAugmentation, ScaleFactor
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, ImageAugmentation, ScaleFactor, Linear
from bob.learn.tensorflow.network import Chopra, LightCNN9
from bob.learn.tensorflow.loss import BaseLoss, ContrastiveLoss, TripletLoss
from bob.learn.tensorflow.trainers import Trainer, SiameseTrainer, TripletTrainer, constant
from .test_cnn_scratch import validate_network
@@ -26,7 +26,7 @@ batch_size = 32
validation_batch_size = 400
iterations = 300
seed = 10
numpy.random.seed(seed)
def dummy_experiment(data_s, embedding):
"""
@@ -123,6 +123,64 @@ def test_cnn_trainer():
del trainer
del graph
"""
def test_lightcnn_trainer():
# generating fake data
train_data = numpy.random.normal(0, 0.2, size=(100, 128, 128, 1))
train_data = numpy.vstack((train_data, numpy.random.normal(2, 0.2, size=(100, 128, 128, 1))))
train_labels = numpy.hstack((numpy.zeros(100), numpy.ones(100))).astype("uint64")
validation_data = numpy.random.normal(0, 0.2, size=(100, 128, 128, 1))
validation_data = numpy.vstack((validation_data, numpy.random.normal(2, 0.2, size=(100, 128, 128, 1))))
validation_labels = numpy.hstack((numpy.zeros(100), numpy.ones(100))).astype("uint64")
# Creating datashufflers
data_augmentation = ImageAugmentation()
train_data_shuffler = Memory(train_data, train_labels,
input_shape=[None, 128, 128, 1],
batch_size=batch_size,
data_augmentation=data_augmentation,
normalizer=Linear())
directory = "./temp/cnn"
# Loss for the softmax
loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
# Preparing the architecture
architecture = LightCNN9(seed=seed,
n_classes=2)
input_pl = train_data_shuffler("data", from_queue=True)
graph = architecture(input_pl)
embedding = Embedding(train_data_shuffler("data", from_queue=False), graph)
# One graph trainer
trainer = Trainer(train_data_shuffler,
iterations=50,
analizer=None,
temp_dir=directory
)
trainer.create_network_from_scratch(graph=graph,
loss=loss,
learning_rate=constant(0.01, name="regular_lr"),
optimizer=tf.train.GradientDescentOptimizer(0.01),
)
trainer.train()
#trainer.train(validation_data_shuffler)
# Using embedding to compute the accuracy
accuracy = validate_network(embedding, validation_data, validation_labels, input_shape=[None, 128, 128, 1], normalizer=Linear())
# Expect at least 80% accuracy
assert accuracy > 80.
shutil.rmtree(directory)
del trainer
del graph
"""
def test_siamesecnn_trainer():
train_data, train_labels, validation_data, validation_labels = load_mnist()
......
@@ -4,7 +4,7 @@
# @date: Thu 13 Oct 2016 13:35 CEST
import numpy
from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation, ScaleFactor
from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation, ScaleFactor, Linear
from bob.learn.tensorflow.network import Embedding
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.trainers import Trainer, constant
@@ -41,12 +41,12 @@ def scratch_network(train_data_shuffler, reuse=False):
return graph
def validate_network(embedding, validation_data, validation_labels):
def validate_network(embedding, validation_data, validation_labels, input_shape=[None, 28, 28, 1], normalizer=ScaleFactor()):
# Testing
validation_data_shuffler = Memory(validation_data, validation_labels,
input_shape=[None, 28, 28, 1],
input_shape=input_shape,
batch_size=validation_batch_size,
normalizer=ScaleFactor())
normalizer=normalizer)
[data, labels] = validation_data_shuffler.get_batch()
predictions = embedding(data)
......
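The tail of validate_network is truncated by the diff above. A continuation consistent with the accuracy > 80. assertions in the tests, assuming embedding(data) returns per-class scores and the result is reported as a percentage, could look like the lines below (an illustration, not the file's actual code):

    # Hypothetical continuation after `predictions = embedding(data)`:
    # compare the arg-max class against the labels and report a percentage.
    accuracy = 100. * numpy.sum(numpy.argmax(predictions, axis=1) == labels) / len(labels)
    return accuracy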