Worked on milestone #3

parent 33f090cb
@@ -21,7 +21,7 @@ class Analizer:
     """
-    def __init__(self, data_shuffler, machine, feature_placeholder, session):
+    def __init__(self, data_shuffler, machine, session):
         """
         Use the CNN as a feature extractor for an n-class classification
@@ -34,7 +34,6 @@ class Analizer:
         self.data_shuffler = data_shuffler
         self.machine = machine
-        self.feature_placeholder = feature_placeholder
         self.session = session

         # Statistics
@@ -43,17 +42,17 @@ class Analizer:
         self.far100 = []
         self.far1000 = []

-    def extract_features(self):
-        data, labels = self.data_shuffler.get_batch(train_dataset=False)
-        feed_dict = {self.feature_placeholder: data}
-        return self.machine(feed_dict, self.session)
-
     def __call__(self):
-        # Extracting features
-        enroll_features, enroll_labels = self.extract_features()
-        probe_features, probe_labels = self.extract_features()
+        # Extracting features for enrollment
+        enroll_data, enroll_labels = self.data_shuffler.get_batch(train_dataset=False)
+        enroll_features = self.machine(enroll_data, self.session)
+        del enroll_data
+
+        # Extracting features for probing
+        probe_data, probe_labels = self.data_shuffler.get_batch(train_dataset=False)
+        probe_features = self.machine(probe_data, self.session)
+        del probe_data

         # Creating models
         models = []
@@ -64,7 +63,6 @@ class Analizer:
         # Probing
         positive_scores = numpy.zeros(shape=0)
         negative_scores = numpy.zeros(shape=0)
-
         for i in range(self.data_shuffler.total_labels):
             # Positive scoring
             indexes = probe_labels == i
......
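With this change the analyzer no longer carries a feature placeholder around; it hands raw batches straight to the network, which builds its own placeholder internally (see the SequenceNetwork diff below). A minimal usage sketch, assuming an existing `data_shuffler` and a trained `SequenceNetwork` named `machine` (both names are assumptions for illustration):

import tensorflow as tf
from bob.learn.tensorflow.analyzers import Analizer

with tf.Session() as session:
    analizer = Analizer(data_shuffler, machine, session)
    analizer()                # enroll, probe, and score one validation batch
    print(analizer.eer[-1])   # most recent equal-error-rate estimate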
@@ -45,11 +45,9 @@ class Conv2D(Layer):
         self.input_layer = input_layer

-        # TODO: Do an assert here
+        if len(input_layer.get_shape().as_list()) != 4:
+            raise ValueError("The input of a convolutional layer must have 4 dimensions, "
+                             "but {0} were provided".format(len(input_layer.get_shape().as_list())))
         n_channels = input_layer.get_shape().as_list()[3]

         if self.W is None:
......
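The new check enforces the usual TensorFlow layout of (batch, height, width, channels). A quick illustration with hypothetical shapes (not from this commit): a grayscale 28x28 batch needs an explicit channel axis before it can feed a Conv2D layer.

import numpy
import tensorflow as tf

images = numpy.random.rand(16, 28, 28).astype("float32")   # 3-D: would now be rejected
batch = images.reshape(16, 28, 28, 1)                      # 4-D: accepted
input_layer = tf.placeholder(tf.float32, shape=batch.shape)
assert len(input_layer.get_shape().as_list()) == 4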
@@ -7,13 +7,13 @@
 Class that creates the lenet architecture
 """
 from ..util import *
 import tensorflow as tf
 import abc
 import six
+import os
 from collections import OrderedDict

-from bob.learn.tensorflow.layers import Layer
+from bob.learn.tensorflow.layers import Layer, MaxPooling
class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
@@ -31,6 +31,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         self.sequence_net = OrderedDict()
         self.feature_layer = feature_layer
+        self.saver = None

     def add(self, layer):
         """
@@ -47,7 +48,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         **Parameter**

         input_data:
-        cut:
+        cut: Name of the layer that you want to cut.
         """
         input_offset = input_data
@@ -64,6 +65,54 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
     def compute_projection_graph(self, placeholder):
         return self.compute_graph(placeholder, cut=True)

-    def __call__(self, feed_dict, session):
-        #placeholder
-        return session.run([self.graph], feed_dict=feed_dict)[0]
+    def __call__(self, data, session=None):
+        if session is None:
+            session = tf.Session()
+
+        batch_size = data.shape[0]
+        width = data.shape[1]
+        height = data.shape[2]
+        channels = data.shape[3]
+
+        # Feeding the placeholder
+        feature_placeholder = tf.placeholder(tf.float32, shape=(batch_size, width, height, channels), name="feature")
+        feed_dict = {feature_placeholder: data}
+        return session.run([self.compute_projection_graph(feature_placeholder)], feed_dict=feed_dict)[0]
+
+    def dump_variables(self):
+        variables = {}
+        for k in self.sequence_net:
+            # TODO: It is not smart to test the layer type this way
+            if not isinstance(self.sequence_net[k], MaxPooling):
+                variables[self.sequence_net[k].W.name] = self.sequence_net[k].W
+                variables[self.sequence_net[k].b.name] = self.sequence_net[k].b
+
+        return variables
+
+    def save(self, session, path, step=None):
+        if self.saver is None:
+            self.saver = tf.train.Saver(self.dump_variables())
+
+        if step is None:
+            return self.saver.save(session, os.path.join(path, "model.ckpt"))
+        else:
+            return self.saver.save(session, os.path.join(path, "model" + str(step) + ".ckpt"))
+
+    def load(self, path, shape, session=None):
+        if session is None:
+            session = tf.Session()
+
+        # Loading variables
+        place_holder = tf.placeholder(tf.float32, shape=shape, name="load")
+        self.compute_graph(place_holder)
+        tf.initialize_all_variables().run(session=session)
+
+        if self.saver is None:
+            self.saver = tf.train.Saver(self.dump_variables())
+        self.saver.restore(session, path)
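These additions give SequenceNetwork a self-contained inference and checkpointing API: `__call__` now builds its own placeholder (note that every call adds a fresh placeholder and projection graph to the default graph, so graphs grow with repeated calls), `dump_variables()` collects only the layers that actually own parameters (pooling layers have no `W`/`b`), and `save()`/`load()` wrap a `tf.train.Saver` around that dict. A hypothetical round trip, assuming a trained `lenet` living in `session` and 28x28 grayscale inputs (directory and shapes are illustrative):

import numpy

batch = numpy.zeros((128, 28, 28, 1), dtype="float32")
features = lenet(batch, session=session)        # project through the configured feature_layer

checkpoint = lenet.save(session, "/tmp/lenet")  # saver.save returns "/tmp/lenet/model.ckpt"

fresh = Lenet(feature_layer="fc2")
fresh.load(checkpoint, shape=(1, 28, 28, 1))    # rebuilds the graph, then restores W and b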
@@ -47,7 +47,12 @@ def main():
     lenet = Lenet(feature_layer="fc2")
     loss = ContrastiveLoss()
-    trainer = SiameseTrainer(architecture=lenet, loss=loss, iterations=ITERATIONS, base_lr=0.00001)
+    trainer = SiameseTrainer(architecture=lenet,
+                             loss=loss,
+                             iterations=ITERATIONS,
+                             base_lr=0.00001,
+                             save_intermediate=False,
+                             snapshot=VALIDATION_TEST)
     trainer.train(data_shuffler)
@@ -8,6 +8,8 @@ logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
 from ..analyzers import Analizer
 from ..network import SequenceNetwork
+import bob.io.base
+import os

 class SiameseTrainer(object):
@@ -17,6 +19,8 @@ class SiameseTrainer(object):
                  architecture=None,
                  use_gpu=False,
                  loss=None,
+                 temp_dir="",
+                 save_intermediate=False,

                  ###### training options ##########
                  convergence_threshold = 0.01,
@@ -31,6 +35,8 @@ class SiameseTrainer(object):
         self.loss = loss
         self.loss_instance = None
         self.optimizer = None
+        self.temp_dir = temp_dir
+        self.save_intermediate = save_intermediate

         self.architecture = architecture
         self.use_gpu = use_gpu
@@ -42,16 +48,16 @@ class SiameseTrainer(object):
         self.weight_decay = weight_decay
         self.convergence_threshold = convergence_threshold

     def train(self, data_shuffler):
         """
         Do the loop forward --> backward --|
                        ^-------------------|
         """
+        bob.io.base.create_directories_safe(os.path.join(self.temp_dir, 'OUTPUT'))
+
         train_placeholder_left_data, train_placeholder_labels = data_shuffler.get_placeholders(name="train_left")
         train_placeholder_right_data, _ = data_shuffler.get_placeholders(name="train_right")
-        feature_placeholder, _ = data_shuffler.get_placeholders(name="feature", train_dataset=False)
+        # feature_placeholder, _ = data_shuffler.get_placeholders(name="feature", train_dataset=False)

         #validation_placeholder_data, validation_placeholder_labels = data_shuffler.get_placeholders(name="validation",
         #                                                             train_dataset=False)
@@ -87,10 +93,9 @@ class SiameseTrainer(object):
         print("Initializing !!")

         # Training
         with tf.Session() as session:
-            analizer = Analizer(data_shuffler, self.architecture, feature_placeholder, session)
+            analizer = Analizer(data_shuffler, self.architecture, session)

-            train_writer = tf.train.SummaryWriter('./LOGS/train',
-                                                  session.graph)
+            train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'LOGS'), session.graph)

             # Tensorboard data
             tf.scalar_summary('loss', loss_train)
@@ -113,7 +118,10 @@ class SiameseTrainer(object):
                 if step % self.snapshot == 0:
                     analizer()
-                    print str(step) + " - " + str(analizer.eer[-1])
+                    if self.save_intermediate:
+                        self.architecture.save(session, os.path.join(self.temp_dir, 'OUTPUT'), step)
+                    print str(step) + " - " + str(analizer.eer[-1])

+            self.architecture.save(session, os.path.join(self.temp_dir, 'OUTPUT'))
             train_writer.close()
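Taken together, the new `temp_dir` and `save_intermediate` options route TensorBoard logs to `<temp_dir>/LOGS` and checkpoints to `<temp_dir>/OUTPUT`. A sketch of the whole flow under the MNIST-style setup of the script above (the directory is illustrative, the remaining names come from this commit):

trainer = SiameseTrainer(architecture=lenet,
                         loss=ContrastiveLoss(),
                         iterations=ITERATIONS,
                         base_lr=0.00001,
                         temp_dir="/tmp/siamese",   # LOGS/ and OUTPUT/ are created here
                         save_intermediate=True,    # checkpoint at every snapshot step
                         snapshot=VALIDATION_TEST)
trainer.train(data_shuffler)                        # final model: /tmp/siamese/OUTPUT/model.ckpt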