Skip to content
Snippets Groups Projects
Commit 564be738 authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Documenting and unit tests

parent 66868b60
No related branches found
No related tags found
1 merge request!4Issue 19
Pipeline #
......@@ -25,28 +25,25 @@ import imp
def main():
args = docopt(__doc__, version='Train Neural Net')
#ITERATIONS = int(args['--iterations'])
#VALIDATION_TEST = int(args['--validation-interval'])
#USE_GPU = args['--use-gpu']
#OUTPUT_DIR = str(args['--output-dir'])
#PREFETCH = args['--prefetch']
#if args['--pretrained-net'] is None:
# PRETRAINED_NET = ""
#else:
# PRETRAINED_NET = str(args['--pretrained-net'])
OUTPUT_DIR = str(args['--output-dir'])
PREFETCH = args['--prefetch']
ITERATIONS = int(args['--iterations'])
PRETRAINED_NET = ""
if not args['--pretrained-net'] is None:
PRETRAINED_NET = str(args['--pretrained-net'])
config = imp.load_source('config', args['<configuration>'])
trainer = config.Trainer(architecture=config.architecture,
loss=config.loss,
iterations=int(args['--iterations']),
iterations=ITERATIONS,
analizer=None,
prefetch=args['--prefetch'],
prefetch=PREFETCH,
learning_rate=config.learning_rate,
temp_dir=args['--output-dir'],
model_from_file=config.model_from_file
temp_dir=OUTPUT_DIR,
model_from_file=PRETRAINED_NET
)
import ipdb; ipdb.set_trace();
trainer.train(config.train_data_shuffler)
from bob.learn.tensorflow.datashuffler import SiameseMemory
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.trainers import SiameseTrainer as Trainer
from bob.learn.tensorflow.trainers import constant
from bob.learn.tensorflow.loss import ContrastiveLoss
from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
import numpy
BATCH_SIZE = 32
INPUT_SHAPE = [28, 28, 1]
SEED = 10
### PREPARING DATASHUFFLER ###
train_data, train_labels, validation_data, validation_labels = \
load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
train_data_shuffler = SiameseMemory(train_data, train_labels,
input_shape=INPUT_SHAPE,
batch_size=BATCH_SIZE)
### ARCHITECTURE ###
architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
### LOSS ###
loss = ContrastiveLoss(contrastive_margin=4.)
### SOLVER ###
optimizer = tf.train.GradientDescentOptimizer(0.001)
### LEARNING RATE ###
learning_rate = constant(base_learning_rate=0.001)
### Trainer ###
trainer = Trainer
from bob.learn.tensorflow.datashuffler import Memory
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.trainers import Trainer, constant
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
import numpy
BATCH_SIZE = 32
INPUT_SHAPE = [28, 28, 1]
SEED = 10
USE_GPU = False
### PREPARING DATASHUFFLER ###
train_data, train_labels, validation_data, validation_labels = \
load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
train_data_shuffler = Memory(train_data, train_labels,
input_shape=INPUT_SHAPE,
batch_size=BATCH_SIZE)
### ARCHITECTURE ###
architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=USE_GPU)
### LOSS ###
loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
### SOLVER ###
optimizer = tf.train.GradientDescentOptimizer(0.001)
### LEARNING RATE ###
learning_rate = constant(base_learning_rate=0.001)
### Trainer ###
trainer = Trainer
from bob.learn.tensorflow.datashuffler import TripletMemory
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.trainers import TripletTrainer as Trainer
from bob.learn.tensorflow.trainers import constant
from bob.learn.tensorflow.loss import TripletLoss
from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
import numpy
BATCH_SIZE = 32
INPUT_SHAPE = [28, 28, 1]
SEED = 10
### PREPARING DATASHUFFLER ###
train_data, train_labels, validation_data, validation_labels = \
load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
train_data_shuffler = TripletMemory(train_data, train_labels,
input_shape=INPUT_SHAPE,
batch_size=BATCH_SIZE)
### ARCHITECTURE ###
architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
### LOSS ###
loss = TripletLoss(margin=4.)
### SOLVER ###
optimizer = tf.train.GradientDescentOptimizer(0.001)
### LEARNING RATE ###
learning_rate = constant(base_learning_rate=0.001)
### Trainer ###
trainer = Trainer
......@@ -12,7 +12,7 @@ from bob.learn.tensorflow.trainers import Trainer, constant, TripletTrainer, Sia
from bob.learn.tensorflow.utils import load_mnist
from bob.learn.tensorflow.network import SequenceNetwork
from bob.learn.tensorflow.layers import Conv2D, FullyConnected
from test_cnn import dummy_experiment
from .test_cnn import dummy_experiment
import tensorflow as tf
import shutil
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Thu 13 Oct 2016 13:35 CEST
import pkg_resources
import shutil
def test_train_script_softmax():
directory = "./temp/train-script"
train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/softmax.py')
#from subprocess import call
#call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
#shutil.rmtree(directory)
assert True
def test_train_script_triplet():
directory = "./temp/train-script"
train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/triplet.py')
#from subprocess import call
#call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
#shutil.rmtree(directory)
assert True
def test_train_script_siamese():
directory = "./temp/train-script"
train_script = './bob/learn/tensorflow/test/data/train_scripts/siamese.py'
#from subprocess import call
#call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
#shutil.rmtree(directory)
assert True
......@@ -3,14 +3,14 @@
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
from ..network import SequenceNetwork
from .Trainer import Trainer
import os
import logging
logger = logging.getLogger("bob.learn")
class SiameseTrainer(Trainer):
......
......@@ -22,7 +22,6 @@ import logging
logger = logging.getLogger("bob.learn")
class Trainer(object):
"""
One graph trainer.
......
......@@ -3,19 +3,16 @@
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST
import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import threading
from ..analyzers import ExperimentAnalizer
from ..network import SequenceNetwork
import bob.io.base
from .Trainer import Trainer
import os
import sys
from .learning_rate import constant
import logging
logger = logging.getLogger("bob.learn")
class TripletTrainer(Trainer):
......
......@@ -13,7 +13,6 @@ Quick start
Before explain the base elements of this library, lets first do a simple example.
The example consists in training a very simple **CNN** with `MNIST` dataset in 4 steps.
1. Preparing your input data
.. code-block:: python
......@@ -34,17 +33,26 @@ The example consists in training a very simple **CNN** with `MNIST` dataset in 4
>>> architecture.add(bob.learn.tensorflow.layers.Conv2D(name="conv1", kernel_size=3, filters=10, activation=tf.nn.tanh))
>>> architecture.add(bob.learn.tensorflow.layers.FullyConnected(name="fc1", output_dim=10, activation=None))
3. Defining a loss and training
3. Defining a loss and training algorithm
.. code-block:: python
>>> loss = bob.learn.tensorflow.loss.BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
>>> trainer = bob.learn.tensorflow.trainers.Trainer(architecture=architecture, loss=loss, iterations=100, temp_dir="./cnn")
>>> trainer.train(train_data_shuffler)
>>> from bob.learn.tensorflow.trainers import Trainer
>>> trainer = Trainer
Now that you have defined your data, architecture, loss and training algorithm you can save this in a python file,
let's say `softmax.py`, and run:
.. code-block:: shell
>>> ./bin/train.py softmax.py
4. Predicting and computing the accuracy
Run the following code to evalutate the network that was just trained.
.. code-block:: python
>>> # Loading the model
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment