Documenting

parent db9747e1
@@ -60,7 +60,7 @@ class Memory(Base):
indexes = numpy.array(range(self.data.shape[0]))
numpy.random.shuffle(indexes)
-selected_data = self.data[indexes[0:self.batch_size], :, :, :]
+selected_data = self.data[indexes[0:self.batch_size], ...]
selected_labels = self.labels[indexes[0:self.batch_size]]
# Applying the data augmentation
...
@@ -75,13 +75,10 @@ class SiameseDisk(Siamese, Disk):
genuine = True
for i in range(self.shape[0]):
    file_name, file_name_p = self.get_genuine_or_not(self.data, self.labels, genuine=genuine)
-   sample_l[i, ...] = self.load_from_file(str(file_name))
-   sample_r[i, ...] = self.load_from_file(str(file_name_p))
+   sample_l[i, ...] = self.normalize_sample(self.load_from_file(str(file_name)))
+   sample_r[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_p)))
    labels_siamese[i] = not genuine
    genuine = not genuine
-sample_l = self.normalize_sample(sample_l)
-sample_r = self.normalize_sample(sample_r)
return sample_l, sample_r, labels_siamese
@@ -78,12 +78,8 @@ class TripletDisk(Triplet, Disk):
for i in range(self.shape[0]):
    file_name_a, file_name_p, file_name_n = self.get_one_triplet(self.data, self.labels)
-   sample_a[i, ...] = self.load_from_file(str(file_name_a))
-   sample_p[i, ...] = self.load_from_file(str(file_name_p))
-   sample_n[i, ...] = self.load_from_file(str(file_name_n))
+   sample_a[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_a)))
+   sample_p[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_p)))
+   sample_n[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_n)))
-sample_a = self.normalize_sample(sample_a)
-sample_p = self.normalize_sample(sample_p)
-sample_n = self.normalize_sample(sample_n)
return [sample_a, sample_p, sample_n]
@@ -322,8 +322,8 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
session = Session.instance().session
self.sequence_net = pickle.loads(open(path+"_sequence_net.pickle").read())
-#saver = tf.train.import_meta_graph(path + ".meta", clear_devices=clear_devices)
-saver = tf.train.import_meta_graph(path + ".meta")
+saver = tf.train.import_meta_graph(path + ".meta", clear_devices=clear_devices)
+#saver = tf.train.import_meta_graph(path + ".meta")
saver.restore(session, path)
self.inference_graph = tf.get_collection("inference_graph")[0]
self.inference_placeholder = tf.get_collection("inference_placeholder")[0]
...
@@ -71,10 +71,13 @@ class VGG16_mod(SequenceNetwork):
default_feature_layer="fc8",
seed=10,
+do_dropout=True,
use_gpu=False):
super(VGG16_mod, self).__init__(default_feature_layer=default_feature_layer,
                                use_gpu=use_gpu)
# First convolutional block
self.conv1_1_kernel_size = conv1_1_kernel_size
@@ -223,6 +226,9 @@ class VGG16_mod(SequenceNetwork):
))
self.add(AveragePooling(name="pooling5", strides=[1, 2, 2, 1]))
+if do_dropout:
+    self.add(Dropout(name="dropout", keep_prob=0.4))
self.add(FullyConnected(name="fc8", output_dim=n_classes,
                        activation=None,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
...
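The new do_dropout flag guards an extra Dropout layer between pooling5 and fc8. A toy, self-contained sketch of the same flag-guarded layer pattern; the class and layer names below are stand-ins, not the bob.learn.tensorflow implementations:

class ToySequence(object):
    def __init__(self, do_dropout=True):
        self.layers = []
        self.add("pooling5")
        if do_dropout:
            # Only appended when the flag is set, mirroring the diff above
            self.add(("dropout", 0.4))
        self.add("fc8")

    def add(self, layer):
        self.layers.append(layer)

assert ("dropout", 0.4) in ToySequence(do_dropout=True).layers
assert ("dropout", 0.4) not in ToySequence(do_dropout=False).layers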
@@ -8,12 +8,11 @@ from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.loss import BaseLoss, ContrastiveLoss, TripletLoss
from bob.learn.tensorflow.trainers import Trainer, SiameseTrainer, TripletTrainer, constant
+from .test_cnn_scratch import validate_network
-# from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
import bob.io.base
-import os
import shutil
from scipy.spatial.distance import cosine
import bob.measure
@@ -28,7 +27,7 @@ iterations = 50
seed = 10
-def dummy_experiment(data_s, architecture, session):
+def dummy_experiment(data_s, architecture):
    """
    Create a dummy experiment and return the EER
    """
@@ -38,12 +37,12 @@ def dummy_experiment(data_s, architecture, session):
# Extracting features for enrollment
enroll_data, enroll_labels = data_shuffler.get_batch()
-enroll_features = architecture(enroll_data, session=session)
+enroll_features = architecture(enroll_data)
del enroll_data
# Extracting features for probing
probe_data, probe_labels = data_shuffler.get_batch()
-probe_features = architecture(probe_data, session=session)
+probe_features = architecture(probe_data)
del probe_data
# Creating models
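# Creating models">
dummy_experiment goes on to score probe features against the enrolled models and report an EER. A hedged sketch of how such an EER can be computed with bob.measure from genuine and impostor score lists; the score distributions below are fabricated, purely for illustration:

import numpy
import bob.measure

# Fabricated score distributions: impostor (negatives) vs. genuine (positives)
negatives = numpy.random.normal(0.4, 0.1, 200)
positives = numpy.random.normal(0.8, 0.1, 200)

threshold = bob.measure.eer_threshold(negatives, positives)
far, frr = bob.measure.farfrr(negatives, positives, threshold)
eer = (far + frr) / 2.0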
@@ -102,26 +101,14 @@ def test_cnn_trainer():
prefetch=False,
temp_dir=directory)
trainer.train(train_data_shuffler)
-del trainer #Just to clean tf.variables
-with tf.Session() as session:
-    # Testing
-    chopra = Chopra(seed=seed, fc1_output=10)
-    chopra.load(session, os.path.join(directory, "model.ckp"))
-    validation_data_shuffler = Memory(validation_data, validation_labels,
-                                      input_shape=[28, 28, 1],
-                                      batch_size=validation_batch_size)
-    [data, labels] = validation_data_shuffler.get_batch()
-    predictions = chopra(data, session=session)
-    accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
-    # At least 80% of accuracy
-    assert accuracy > 80.
-    shutil.rmtree(directory)
-    del chopra
+accuracy = validate_network(validation_data, validation_labels, architecture)
+# At least 80% of accuracy
+assert accuracy > 80.
+shutil.rmtree(directory)
+del trainer
+del architecture
def test_siamesecnn_trainer():
@@ -155,19 +142,15 @@ def test_siamesecnn_trainer():
temp_dir=directory)
trainer.train(train_data_shuffler)
-del trainer # Just to clean tf.variables
-with tf.Session() as session:
-    # Testing
-    chopra = Chopra(seed=seed, fc1_output=10)
-    chopra.load(session, os.path.join(directory, "model.ckp"))
-    eer = dummy_experiment(validation_data_shuffler, chopra, session)
-    # At least 80% of accuracy
-    assert eer < 0.25
-    shutil.rmtree(directory)
-    del chopra
+eer = dummy_experiment(validation_data_shuffler, architecture)
+# At least 80% of accuracy
+assert eer < 0.25
+shutil.rmtree(directory)
+del architecture
+del trainer # Just to clean tf.variables
def test_tripletcnn_trainer():
@@ -201,17 +184,13 @@ def test_tripletcnn_trainer():
temp_dir=directory)
trainer.train(train_data_shuffler)
-del trainer # Just to clean tf.variables
-with tf.Session() as session:
-    # Testing
-    chopra = Chopra(seed=seed, fc1_output=10)
-    chopra.load(session, os.path.join(directory, "model.ckp"))
-    eer = dummy_experiment(validation_data_shuffler, chopra, session)
-    # At least 80% of accuracy
-    assert eer < 0.25
-    shutil.rmtree(directory)
-    del chopra
+# Testing
+eer = dummy_experiment(validation_data_shuffler, architecture)
+# At least 80% of accuracy
+assert eer < 0.25
+shutil.rmtree(directory)
+del architecture
+del trainer # Just to clean tf.variables
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Thu 13 Oct 2016 13:35 CEST

"""
Some unit tests that create networks on the fly
"""

import numpy
import pkg_resources
from bob.learn.tensorflow.utils import load_mnist
from bob.learn.tensorflow.network import SequenceNetwork
from bob.learn.tensorflow.datashuffler import Memory


def validate_network(validation_data, validation_labels, network):
    # Testing
    validation_data_shuffler = Memory(validation_data, validation_labels,
                                      input_shape=[28, 28, 1],
                                      batch_size=400)

    [data, labels] = validation_data_shuffler.get_batch()
    predictions = network.predict(data)
    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]

    return accuracy


"""
def test_load_test_cnn():
    _, _, validation_data, validation_labels = load_mnist()

    # Creating datashufflers
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    network = SequenceNetwork()
    network.load(pkg_resources.resource_filename(__name__, 'data/cnn_mnist/model.ckp'))

    accuracy = validate_network(validation_data, validation_labels, network)
    assert accuracy > 80
    del network
"""
@@ -9,7 +9,10 @@ import os
from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.trainers import Trainer, constant
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
+from bob.learn.tensorflow.network import SequenceNetwork
+from bob.learn.tensorflow.layers import Conv2D, FullyConnected
import tensorflow as tf
import shutil
@@ -22,10 +25,36 @@ validation_batch_size = 400
iterations = 50
seed = 10
-from test_cnn_scratch import scratch_network, validate_network
+def scratch_network():
+    # Creating a random network
+    scratch = SequenceNetwork(default_feature_layer="fc1")
+    scratch.add(Conv2D(name="conv1", kernel_size=3,
+                       filters=10,
+                       activation=tf.nn.tanh,
+                       batch_norm=False))
+    scratch.add(FullyConnected(name="fc1", output_dim=10,
+                               activation=None,
+                               batch_norm=False
+                               ))
+    return scratch
+
+def validate_network(validation_data, validation_labels, network):
+    # Testing
+    validation_data_shuffler = Memory(validation_data, validation_labels,
+                                      input_shape=[28, 28, 1],
+                                      batch_size=validation_batch_size)
+
+    [data, labels] = validation_data_shuffler.get_batch()
+    predictions = network.predict(data)
+    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+    return accuracy
-def test_cnn_trainer_scratch():
+def test_cnn_pretrained():
train_data, train_labels, validation_data, validation_labels = load_mnist()
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
@@ -55,8 +84,7 @@ def test_cnn_trainer_scratch():
learning_rate=constant(0.05, name="lr"),
temp_dir=directory)
trainer.train(train_data_shuffler)
-accuracy = validate_network(validation_data, validation_labels, directory)
+accuracy = validate_network(validation_data, validation_labels, scratch)
assert accuracy > 85
del scratch
@@ -77,7 +105,12 @@ def test_cnn_trainer_scratch():
trainer.train(train_data_shuffler)
-accuracy = validate_network(validation_data, validation_labels, directory2)
+accuracy = validate_network(validation_data, validation_labels, scratch)
-assert accuracy > 85
+assert accuracy > 90
shutil.rmtree(directory)
shutil.rmtree(directory2)
+del scratch
+del loss
+del trainer
@@ -7,12 +7,11 @@ import numpy
import bob.io.base
import os
from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation
-from bob.learn.tensorflow.initialization import Xavier, Constant
from bob.learn.tensorflow.network import SequenceNetwork
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.trainers import Trainer
from bob.learn.tensorflow.utils import load_mnist
-from bob.learn.tensorflow.layers import Conv2D, FullyConnected, MaxPooling
+from bob.learn.tensorflow.layers import Conv2D, FullyConnected
import tensorflow as tf
import shutil
@@ -33,13 +32,9 @@ def scratch_network():
scratch.add(Conv2D(name="conv1", kernel_size=3,
                   filters=10,
                   activation=tf.nn.tanh,
-                  weights_initialization=Xavier(seed=seed, use_gpu=False),
-                  bias_initialization=Constant(use_gpu=False),
                   batch_norm=False))
scratch.add(FullyConnected(name="fc1", output_dim=10,
                           activation=None,
-                          weights_initialization=Xavier(seed=seed, use_gpu=False),
-                          bias_initialization=Constant(use_gpu=False),
                           batch_norm=False
                           ))
...
@@ -6,7 +6,7 @@
import numpy
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, Disk, SiameseDisk, TripletDisk
import pkg_resources
-from ..util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
import os
"""
...
@@ -6,7 +6,7 @@
import numpy
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, Disk, SiameseDisk, TripletDisk, ImageAugmentation
import pkg_resources
-from ..util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
import os
"""
...
@@ -8,13 +8,9 @@ from bob.learn.tensorflow.datashuffler import Memory
from bob.learn.tensorflow.network import MLP
from bob.learn.tensorflow.loss import BaseLoss
from bob.learn.tensorflow.trainers import Trainer, constant
-# from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
-import bob.io.base
-import os
import shutil
-import bob.measure
"""
Some unit tests for the datashuffler
@@ -26,14 +22,25 @@ iterations = 200
seed = 10
+def validate_network(validation_data, validation_labels, network):
+    # Testing
+    validation_data_shuffler = Memory(validation_data, validation_labels,
+                                      input_shape=[784],
+                                      batch_size=validation_batch_size)
+
+    [data, labels] = validation_data_shuffler.get_batch()
+    predictions = network.predict(data)
+    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+    return accuracy
+
def test_dnn_trainer():
train_data, train_labels, validation_data, validation_labels = load_mnist()
-train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
-validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
# Creating datashufflers
train_data_shuffler = Memory(train_data, train_labels,
-                             input_shape=[28, 28, 1],
+                             input_shape=[784],
                             batch_size=batch_size)
directory = "./temp/dnn"
@@ -53,21 +60,12 @@ def test_dnn_trainer():
learning_rate=constant(0.05, name="dnn_lr"),
temp_dir=directory)
trainer.train(train_data_shuffler)
-del trainer# Just to clean the variables
-with tf.Session() as session:
-    # Testing
-    mlp = MLP(10, hidden_layers=[15, 20])
-    mlp.load(session, os.path.join(directory, "model.ckp"))
-    validation_data_shuffler = Memory(validation_data, validation_labels,
-                                      input_shape=[28, 28, 1],
-                                      batch_size=validation_batch_size)
-    [data, labels] = validation_data_shuffler.get_batch()
-    predictions = mlp(data, session=session)
-    accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
-    # At least 50% of accuracy for the DNN
-    assert accuracy > 50.
-    shutil.rmtree(directory)
-    session.close()
+accuracy = validate_network(validation_data, validation_labels, architecture)
+# At least 50% of accuracy for the DNN
+assert accuracy > 50.
+shutil.rmtree(directory)
+del architecture
+del trainer # Just to clean the variables
@@ -49,9 +49,6 @@ class SiameseTrainer(Trainer):
temp_dir="cnn",
# Learning rate
-#base_learning_rate=0.001,
-#weight_decay=0.9,
-#decay_steps=1000,
learning_rate=constant(),
###### training options ##########
@@ -76,9 +73,6 @@ class SiameseTrainer(Trainer):
temp_dir=temp_dir,
# Learning rate
-#base_learning_rate=base_learning_rate,
-#weight_decay=weight_decay,
-#decay_steps=decay_steps,
learning_rate=learning_rate,
###### training options ##########
@@ -207,7 +201,7 @@ class SiameseTrainer(Trainer):
return feed_dict
-def fit(self, session, step):
+def fit(self, step):
    """
    Run one iteration (`forward` and `backward`)
@@ -217,19 +211,19 @@ class SiameseTrainer(Trainer):
    """
if self.prefetch:
-    _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+    _, l, bt_class, wt_class, lr, summary = self.session.run([self.optimizer,
         self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
         self.learning_rate, self.summaries_train])
else:
    feed_dict = self.get_feed_dict(self.train_data_shuffler)
-    _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+    _, l, bt_class, wt_class, lr, summary = self.session.run([self.optimizer,
         self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
         self.learning_rate, self.summaries_train], feed_dict=feed_dict)
logger.info("Loss training set step={0} = {1}".format(step, l))
self.train_summary_writter.add_summary(summary, step)
-def compute_validation(self, session, data_shuffler, step):
+def compute_validation(self, data_shuffler, step):
    """
    Computes the loss in the validation set
@@ -245,9 +239,9 @@ class SiameseTrainer(Trainer):
    self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)
    feed_dict = self.get_feed_dict(data_shuffler)
-   l, bt_class, wt_class = session.run([self.validation_graph,
+   l, bt_class, wt_class = self.session.run([self.validation_graph,
        self.between_class_graph_validation, self.within_class_graph_validation],
        feed_dict=feed_dict)
    summaries = []
    summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
@@ -268,7 +262,7 @@ class SiameseTrainer(Trainer):
tf.scalar_summary('lr', self.learning_rate, name="train")
return tf.merge_all_summaries()
-def load_and_enqueue(self, session):
+def load_and_enqueue(self):
    """
    Injecting data in the place holder queue
@@ -285,4 +279,4 @@ class SiameseTrainer(Trainer):
    placeholder_right_data: batch_right,
    placeholder_label: labels}
-   session.run(self.enqueue_op, feed_dict=feed_dict)
+   self.session.run(self.enqueue_op, feed_dict=feed_dict)
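The trainer hunks above drop the session argument from fit, compute_validation and load_and_enqueue and run everything through a session owned by the trainer. A minimal sketch of that ownership pattern, assuming TensorFlow 1.x graph-mode APIs; ToyTrainer is illustrative, not the library class:

import numpy
import tensorflow as tf

class ToyTrainer(object):
    def __init__(self):
        self.data = tf.placeholder(tf.float32, shape=[None])
        self.loss = tf.reduce_mean(tf.square(self.data))
        # The trainer owns its session instead of receiving one per call
        self.session = tf.Session()

    def fit(self, batch):
        # No session parameter: every run goes through self.session
        return self.session.run(self.loss, feed_dict={self.data: batch})

trainer = ToyTrainer()
print(trainer.fit(numpy.ones(4)))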
@@ -306,7 +306,7 @@ class Trainer(object):
"""