Developing triplet selection

parent f4b7c953
@@ -112,8 +112,6 @@ class Base(object):
return bob_image
def rescale(self, data):
"""
Rescale a single sample to input_shape
@@ -146,3 +144,10 @@ class Base(object):
return dst
else:
return data
def reshape_for_deploy(self, data):
shape = tuple([1] + list(data.shape))
return numpy.reshape(data, shape)
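# Illustration only, not part of this commit: reshape_for_deploy prepends a
# batch dimension of 1 so a single sample matches the batched input the
# network expects at deploy time, e.g. (28, 28, 1) -> (1, 28, 28, 1).
import numpy
sample = numpy.zeros((28, 28, 1))
batched = numpy.reshape(sample, tuple([1] + list(sample.shape)))
assert batched.shape == (1, 28, 28, 1)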
@@ -9,14 +9,11 @@ from .Disk import Disk
from .SiameseMemory import SiameseMemory
from .TripletMemory import TripletMemory
from .TripletWithSelectionMemory import TripletWithSelectionMemory
from .SiameseDisk import SiameseDisk
from .TripletDisk import TripletDisk
# Data Augmentation
from .DataAugmentation import DataAugmentation
from .ImageAugmentation import ImageAugmentation
from .TripletWithSelectionDisk import TripletWithSelectionDisk
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -102,7 +102,10 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
if feature_layer is None:
feature_layer = self.default_feature_layer
return session.run([self.compute_graph(feature_placeholder, feature_layer, training=False)], feed_dict=feed_dict)[0]
feature = session.run([self.compute_graph(feature_placeholder, feature_layer, training=False)], feed_dict=feed_dict)[0]
del feature_placeholder
return feature
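# Annotation, not in the commit: "del feature_placeholder" only unbinds the
# local Python name; in graph-mode TensorFlow the placeholder op stays in the
# default graph, so repeated forward calls can still grow the graph.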
def dump_variables(self):
"""Return all the tensorflow `variables <https://www.tensorflow.org/versions/r0.11/api_docs/python/state_ops.html#Variable>`_ used in the graph
@@ -22,7 +22,7 @@ from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
from bob.learn.tensorflow.datashuffler import TripletMemory, TripletWithSelectionMemory
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy
from bob.learn.tensorflow.trainers import TripletTrainer
from bob.learn.tensorflow.loss import TripletLoss
@@ -44,22 +44,37 @@ def main():
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
input_shape=[28, 28, 1],
scale=True,
batch_size=BATCH_SIZE)
#train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
# input_shape=[28, 28, 1],
# scale=True,
# batch_size=BATCH_SIZE)
validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
input_shape=[28, 28, 1],
scale=True,
batch_size=VALIDATION_BATCH_SIZE)
#validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
# input_shape=[28, 28, 1],
# scale=True,
# batch_size=VALIDATION_BATCH_SIZE)
validation_data_shuffler = TripletMemory(train_data, train_labels,
input_shape=[28, 28, 1],
scale=True,
batch_size=VALIDATION_BATCH_SIZE)
train_data_shuffler = TripletWithSelectionMemory(train_data, train_labels,
input_shape=[28, 28, 1],
scale=True,
batch_size=BATCH_SIZE)
#train_data_shuffler = TripletMemory(train_data, train_labels,
# input_shape=[28, 28, 1],
# scale=True,
# batch_size=BATCH_SIZE)
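# Illustration only, not part of this commit: the "WithSelection" shufflers
# are meant to mine triplets online instead of sampling them at random. A
# minimal numpy sketch of one such selection rule (hardest positive and
# hardest negative for a given anchor, using the current embeddings):
import numpy

def select_hard_triplet(embeddings, labels, anchor_index):
    anchor = embeddings[anchor_index]
    distances = numpy.linalg.norm(embeddings - anchor, axis=1)
    same = labels == labels[anchor_index]
    same[anchor_index] = False  # the anchor cannot be its own positive
    positive = numpy.argmax(numpy.where(same, distances, -numpy.inf))
    negative = numpy.argmin(numpy.where(same, numpy.inf, distances))
    return anchor_index, positive, negative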
# Preparing the architecture
n_classes = len(train_data_shuffler.possible_labels)
#n_classes = 200
cnn = True
if cnn:
architecture = Chopra(seed=SEED, fc1_output=n_classes)
architecture = Chopra(seed=SEED, fc1_output=n_classes, use_gpu=USE_GPU)
#architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16,use_gpu=USE_GPU)
#architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
#architecture = Dummy(seed=SEED)
@@ -71,8 +86,8 @@ def main():
loss=loss,
iterations=ITERATIONS,
snapshot=VALIDATION_TEST,
temp_dir="cnn-triplet",
prefetch=True,
temp_dir="triplet/cnn-triplet-SELECTION",
prefetch=False,
optimizer=optimizer
)
trainer.train(train_data_shuffler, validation_data_shuffler)
@@ -86,5 +101,6 @@ def main():
temp_dir="dnn-triplet",
iterations=ITERATIONS,
snapshot=VALIDATION_TEST)
trainer.train(train_data_shuffler, validation_data_shuffler)
#trainer.train(train_data_shuffler, validation_data_shuffler)
trainer.train(train_data_shuffler)
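# Illustration only, not part of this commit: TripletLoss(margin=4.) above is
# presumably the standard hinge-style triplet loss,
# L = max(0, ||f(a)-f(p)||^2 - ||f(a)-f(n)||^2 + margin). A numpy sketch over
# precomputed embeddings:
import numpy

def triplet_loss(anchor, positive, negative, margin=4.0):
    d_pos = numpy.sum((anchor - positive) ** 2)
    d_neg = numpy.sum((anchor - negative) ** 2)
    return max(0.0, d_pos - d_neg + margin)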
@@ -22,7 +22,7 @@ from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
from bob.learn.tensorflow.datashuffler import TripletWithSelectionDisk, TripletDisk
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy
from bob.learn.tensorflow.trainers import SiameseTrainer, Trainer, TripletTrainer
from bob.learn.tensorflow.loss import ContrastiveLoss, BaseLoss, TripletLoss
@@ -53,9 +53,9 @@ def main():
directory=directory,
extension=".hdf5")
for o in train_objects]
train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
input_shape=[125, 125, 3],
batch_size=BATCH_SIZE)
train_data_shuffler = TripletWithSelectionDisk(train_file_names, train_labels,
input_shape=[125, 125, 3],
batch_size=BATCH_SIZE)
# Preparing train set
validation_objects = db_mobio.objects(protocol="male", groups="dev")
@@ -66,12 +66,12 @@ def main():
directory=directory,
extension=".hdf5")
for o in validation_objects]
validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
validation_data_shuffler = TripletDisk(validation_file_names, validation_labels,
input_shape=[125, 125, 3],
batch_size=VALIDATION_BATCH_SIZE)
# Preparing the architecture
#architecture = Chopra(seed=SEED, fc1_output=n_classes)
architecture = Chopra(seed=SEED)
architecture = Chopra(seed=SEED, fc1_output=n_classes)
optimizer = tf.train.GradientDescentOptimizer(0.00000001)
@@ -82,18 +82,19 @@ def main():
# optimizer=optimizer,
# temp_dir="./LOGS/cnn")
loss = ContrastiveLoss(contrastive_margin=4.)
trainer = SiameseTrainer(architecture=architecture, loss=loss,
#loss = ContrastiveLoss(contrastive_margin=4.)
#trainer = SiameseTrainer(architecture=architecture, loss=loss,
# iterations=ITERATIONS,
# prefetch=False,
# optimizer=optimizer,
# temp_dir="./LOGS_MOBIO/siamese-cnn-prefetch")
loss = TripletLoss(margin=4.)
trainer = TripletTrainer(architecture=architecture, loss=loss,
iterations=ITERATIONS,
prefetch=False,
optimizer=optimizer,
temp_dir="./LOGS_MOBIO/siamese-cnn-prefetch")
#loss = TripletLoss(margin=4.)
#trainer = TripletTrainer(architecture=architecture, loss=loss,
# iterations=ITERATIONS,
# prefetch=True,
# optimizer=optimizer,
# temp_dir="./LOGS_MOBIO/triplet-cnn-prefetch")
temp_dir="./LOGS_MOBIO/triplet-cnn")
trainer.train(train_data_shuffler, validation_data_shuffler)
#trainer.train(train_data_shuffler, validation_data_shuffler)
trainer.train(train_data_shuffler)
@@ -12,6 +12,8 @@ import bob.core
from ..analyzers import SoftmaxAnalizer
from tensorflow.core.framework import summary_pb2
import time
from bob.learn.tensorflow.datashuffler.OnlineSampling import OnLineSampling
logger = bob.core.log.setup("bob.learn.tensorflow")
@@ -277,6 +279,9 @@ class Trainer(object):
tf.initialize_all_variables().run()
if isinstance(train_data_shuffler, OnLineSampling):
train_data_shuffler.set_feature_extractor(self.architecture, session=session)
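# Illustration only, hypothetical flow: with the extractor registered, an
# OnLineSampling shuffler can embed candidate samples with the current weights
# inside its batch method and apply a selection rule such as the hard-triplet
# sketch shown earlier, e.g.:
#   embeddings = self.feature_extractor(candidates)   # hypothetical helper
#   a, p, n = select_hard_triplet(embeddings, candidate_labels, anchor_index)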
# Start a thread to enqueue data asynchronously, and hide I/O latency.
if self.prefetch:
self.thread_pool = tf.train.Coordinator()
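# Illustration only, not part of this commit: one common form of the
# coordinator-based prefetch pattern in TensorFlow 0.x/1.x, with queue runners
# feeding batches while the training loop consumes them:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=session)
# ... run training steps that dequeue prefetched batches ...
coord.request_stop()
coord.join(threads)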
@@ -314,4 +319,4 @@ class Trainer(object):
self.thread_pool.request_stop()
self.thread_pool.join(threads)
session.close()# For some reason the session is not closed after the context manager finishes
session.close() # For some reason the session is not closed after the context manager finishes