Commit 9950b499 authored by Tiago Pereira

Fixed issues with training by script

parent 055b4354
@@ -20,7 +20,7 @@ Options:
 from docopt import docopt
 import imp
+import bob.learn.tensorflow
 
 def main():
     args = docopt(__doc__, version='Train Neural Net')
@@ -36,16 +36,28 @@ def main():
     config = imp.load_source('config', args['<configuration>'])
 
-    trainer = config.Trainer(architecture=config.architecture,
-                             loss=config.loss,
+    # One graph trainer
+    trainer = config.Trainer(config.train_data_shuffler,
                              iterations=ITERATIONS,
                              analizer=None,
-                             prefetch=PREFETCH,
-                             learning_rate=config.learning_rate,
-                             temp_dir=OUTPUT_DIR,
-                             snapshot=100,
-                             model_from_file=PRETRAINED_NET,
-                             use_gpu=USE_GPU
-                             )
+                             temp_dir=OUTPUT_DIR)
+
+    # Preparing the architecture
+    input_pl = config.train_data_shuffler("data", from_queue=False)
+    if isinstance(trainer, bob.learn.tensorflow.trainers.SiameseTrainer):
+        graph = dict()
+        graph['left'] = config.architecture(input_pl['left'])
+        graph['right'] = config.architecture(input_pl['right'])
+    elif isinstance(trainer, bob.learn.tensorflow.trainers.TripletTrainer):
+        graph = dict()
+        graph['anchor'] = config.architecture(input_pl['anchor'])
+        graph['positive'] = config.architecture(input_pl['positive'])
+        graph['negative'] = config.architecture(input_pl['negative'])
+    else:
+        graph = config.architecture(input_pl)
+
+    trainer.create_network_from_scratch(graph, loss=config.loss,
+                                        learning_rate=config.learning_rate,
+                                        optimizer=config.optimizer)
 
     trainer.train(config.train_data_shuffler)
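
For reference, the rewritten script only requires the configuration module to expose Trainer, train_data_shuffler, architecture, loss, learning_rate and optimizer. Below is a minimal sketch of such a module for the softmax case, assembled from the softmax config hunk further down; the import paths, the input_shape keyword and the learning_rate/optimizer values are assumptions for illustration, since they are read by the script but not shown in this diff.

# Hypothetical softmax configuration module (sketch, not part of this commit).
import tensorflow as tf
import numpy

# Import paths assumed from the package layout; only the trainers module
# is confirmed by the isinstance() checks in the script above.
from bob.learn.tensorflow.datashuffler import Memory
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.trainers import Trainer
from bob.learn.tensorflow.utils import load_mnist

BATCH_SIZE = 32
INPUT_SHAPE = [None, 28, 28, 1]
SEED = 10

# Data shuffler fed from memory, mirroring the softmax config below
train_data, train_labels, validation_data, validation_labels = load_mnist()
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
train_data_shuffler = Memory(train_data, train_labels,
                             input_shape=INPUT_SHAPE,   # keyword name assumed
                             batch_size=BATCH_SIZE)

# Architecture and loss, mirroring the softmax config below
architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)
loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)

# Read by the script but not shown in this diff; illustrative values only
learning_rate = 0.001
optimizer = tf.train.GradientDescentOptimizer(learning_rate)

With such a module saved as, say, softmax.py, the script is invoked as ./bin/train.py --iterations 5 --output-dir ./temp/train-script softmax.py, which is exactly what the updated tests below do.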
@@ -8,12 +8,12 @@ import tensorflow as tf
 import numpy
 
 BATCH_SIZE = 32
-INPUT_SHAPE = [28, 28, 1]
+INPUT_SHAPE = [None, 28, 28, 1]
 SEED = 10
 
 ### PREPARING DATASHUFFLER ###
 train_data, train_labels, validation_data, validation_labels = \
-    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+    load_mnist()
 train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
 
 train_data_shuffler = SiameseMemory(train_data, train_labels,
@@ -21,7 +21,7 @@ train_data_shuffler = SiameseMemory(train_data, train_labels,
                                     batch_size=BATCH_SIZE)
 
 ### ARCHITECTURE ###
-architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)
 
 ### LOSS ###
 loss = ContrastiveLoss(contrastive_margin=4.)
@@ -7,14 +7,13 @@ import tensorflow as tf
 import numpy
 
 BATCH_SIZE = 32
-INPUT_SHAPE = [28, 28, 1]
+INPUT_SHAPE = [None, 28, 28, 1]
 SEED = 10
-USE_GPU = False
 
 ### PREPARING DATASHUFFLER ###
-train_data, train_labels, validation_data, validation_labels = \
-    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+train_data, train_labels, validation_data, validation_labels = load_mnist()
 train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
 
 train_data_shuffler = Memory(train_data, train_labels,
@@ -22,7 +21,7 @@ train_data_shuffler = Memory(train_data, train_labels,
                              batch_size=BATCH_SIZE)
 
 ### ARCHITECTURE ###
-architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=USE_GPU)
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)
 
 ### LOSS ###
 loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
@@ -8,12 +8,12 @@ import tensorflow as tf
 import numpy
 
 BATCH_SIZE = 32
-INPUT_SHAPE = [28, 28, 1]
+INPUT_SHAPE = [None, 28, 28, 1]
 SEED = 10
 
 ### PREPARING DATASHUFFLER ###
 train_data, train_labels, validation_data, validation_labels = \
-    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+    load_mnist()
 train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
 
 train_data_shuffler = TripletMemory(train_data, train_labels,
@@ -21,7 +21,7 @@ train_data_shuffler = TripletMemory(train_data, train_labels,
                                     batch_size=BATCH_SIZE)
 
 ### ARCHITECTURE ###
-architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)
 
 ### LOSS ###
 loss = TripletLoss(margin=4.)
@@ -11,10 +11,9 @@ def test_train_script_softmax():
     directory = "./temp/train-script"
     train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/softmax.py')
 
-    #from subprocess import call
-    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
-    #shutil.rmtree(directory)
+    from subprocess import call
+    call(["./bin/train.py", "--iterations", "5", "--output-dir", directory, train_script])
+    shutil.rmtree(directory)
 
     assert True
@@ -22,9 +21,9 @@ def test_train_script_triplet():
     directory = "./temp/train-script"
     train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/triplet.py')
 
-    #from subprocess import call
-    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
-    #shutil.rmtree(directory)
+    from subprocess import call
+    call(["./bin/train.py", "--iterations", "5", "--output-dir", directory, train_script])
+    shutil.rmtree(directory)
 
     assert True
@@ -33,8 +32,8 @@ def test_train_script_siamese():
     directory = "./temp/train-script"
     train_script = './bob/learn/tensorflow/test/data/train_scripts/siamese.py'
 
-    #from subprocess import call
-    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
-    #shutil.rmtree(directory)
+    from subprocess import call
+    call(["./bin/train.py", "--iterations", "5", "--output-dir", directory, train_script])
+    shutil.rmtree(directory)
 
     assert True
@@ -17,10 +17,11 @@ def compute_euclidean_distance(x, y):
     d = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x, y)), 1))
     return d
 
-def load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/", perc_train=0.9):
+def load_mnist(perc_train=0.9):
     import bob.db.mnist
-    db = bob.db.mnist.Database(data_dir)
+    db = bob.db.mnist.Database()
     raw_data = db.data()
 
     # data = raw_data[0].astype(numpy.float64)