Porting to TensorFlow 1.0.0

parent 9f46e635
Pipeline #7461 failed with stages in 4 minutes and 17 seconds
@@ -29,7 +29,7 @@ class Initialization(object):
         tf.set_random_seed(seed)

     def variable_exist(self, var):
-        return var in [v.name.split("/")[0] for v in tf.all_variables()]
+        return var in [v.name.split("/")[0] for v in tf.global_variables()]

     def __call__(self, shape, name, scope, init_value=None):
         NotImplementedError("Please implement this function in derived classes")
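TF 1.0 renamed the collection accessors: `tf.all_variables()` is deprecated in favour of `tf.global_variables()` (likewise `tf.initialize_all_variables()` → `tf.global_variables_initializer()`, which appears further down). A minimal sketch of the `variable_exist` check under TF 1.x; the scope name `conv1` is purely illustrative:

```python
import tensorflow as tf  # assumes TF 1.x

def variable_exist(var):
    # A variable named "conv1/w:0" lives under the top-level scope "conv1",
    # so comparing against the first path component detects an existing scope.
    return var in [v.name.split("/")[0] for v in tf.global_variables()]

with tf.variable_scope("conv1"):  # hypothetical scope name
    w = tf.get_variable("w", shape=[3, 3, 1, 16])

print(variable_exist("conv1"))  # True
```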
@@ -64,7 +64,7 @@ class Layer(object):
         NotImplementedError("Please implement this function in derived classes")

     def variable_exist(self, var):
-        return var in [v.name.split("/")[0] for v in tf.all_variables()]
+        return var in [v.name.split("/")[0] for v in tf.global_variables()]

     def batch_normalize(self, x, phase_train):
         """
@@ -124,7 +124,7 @@ class Layer(object):
         Doing this because of https://github.com/tensorflow/tensorflow/issues/1325
         """
-        for v in tf.all_variables():
+        for v in tf.global_variables():
             if (len(v.name.split("/")) > 1) and (var in v.name.split("/")[1]):
                 return v
......
@@ -20,4 +20,4 @@ class BaseLoss(object):
         self.name = name

     def __call__(self, graph, label):
-        return self.operation(self.loss(graph, label), name=self.name)
+        return self.operation(self.loss(logits=graph, labels=label), name=self.name)
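The keyword-only call tracks a breaking change in TF 1.0: the cross-entropy losses grew a `_sentinel` argument that rejects positional use, so `logits=` and `labels=` must be spelled out. A minimal sketch, assuming `self.loss` wraps one of those functions:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])

# TF 0.x accepted positional arguments; TF 1.0 requires the keywords.
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss = tf.reduce_mean(xent, name="loss")

with tf.Session() as session:
    print(session.run(loss))
```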
@@ -44,10 +44,10 @@ class ContrastiveLoss(BaseLoss):
             one = tf.constant(1.0)
             d = compute_euclidean_distance(left_feature, right_feature)

-            between_class = tf.mul(one - label, tf.square(d))  # (1-Y)*(d^2)
+            between_class = tf.multiply(one - label, tf.square(d))  # (1-Y)*(d^2)

             max_part = tf.square(tf.maximum(self.contrastive_margin - d, 0))
-            within_class = tf.mul(label, max_part)  # (Y) * max((margin - d)^2, 0)
+            within_class = tf.multiply(label, max_part)  # (Y) * max((margin - d)^2, 0)

             loss = 0.5 * (within_class + between_class)
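`tf.mul`, `tf.sub`, and `tf.neg` were removed in 1.0 in favour of the spelled-out `tf.multiply`, `tf.subtract`, and `tf.negative`; semantics are unchanged. A self-contained sketch of the contrastive loss with the new names (the free-function signature is illustrative):

```python
import tensorflow as tf

def contrastive_loss(left, right, label, margin=1.0):
    # Per-pair Euclidean distance between the two embeddings
    d = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(left, right)), 1))
    between_class = tf.multiply(1.0 - label, tf.square(d))  # (1-Y) * d^2
    within_class = tf.multiply(label, tf.square(tf.maximum(margin - d, 0.0)))  # Y * max(margin-d, 0)^2
    return 0.5 * tf.reduce_mean(within_class + between_class)
```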
......
@@ -25,9 +25,9 @@ class NegLogLoss(BaseLoss):
         rank = len(shape)
         flat_params = tf.reshape(params, [-1])
         if rank > 2:
-            indices_unpacked = tf.unpack(tf.transpose(indices, [rank - 1] + range(0, rank - 1), name))
+            indices_unpacked = tf.unstack(tf.transpose(indices, [rank - 1] + range(0, rank - 1), name))
         elif rank == 2:
-            indices_unpacked = tf.unpack(indices)
+            indices_unpacked = tf.unstack(indices)
         else:
             indices_unpacked = indices
         flat_indices = [i * rank + indices_unpacked[i] for i in range(0, len(indices_unpacked))]
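`tf.unpack`/`tf.pack` became `tf.unstack`/`tf.stack` with identical behaviour. Note in passing that `[rank - 1] + range(0, rank - 1)` only works on Python 2; under Python 3 it would need `list(range(...))`. A minimal sketch of the rename:

```python
import tensorflow as tf

indices = tf.constant([[0, 2], [1, 0]])

# tf.unstack splits a rank-2 tensor into a list of rank-1 rows,
# exactly what tf.unpack used to do.
rows = tf.unstack(indices)

with tf.Session() as session:
    print([session.run(r) for r in rows])  # [array([0, 2]), array([1, 0])]
```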
@@ -38,6 +38,6 @@ class NegLogLoss(BaseLoss):
         log_probabilities = tf.nn.log_softmax(graph)
         # negative of the log-probability that corresponds to the correct label
         correct_probabilities = self.gather_nd(log_probabilities, label)
-        neg_log_prob = tf.neg(correct_probabilities)
+        neg_log_prob = tf.negative(correct_probabilities)
         # use negative log likelihood as the loss
         return self.operation(neg_log_prob)
@@ -48,10 +48,10 @@ class TripletLoss(BaseLoss):
             positive_embedding = tf.nn.l2_normalize(positive_embedding, 1, 1e-10)
             negative_embedding = tf.nn.l2_normalize(negative_embedding, 1, 1e-10)

-            d_positive = tf.reduce_sum(tf.square(tf.sub(anchor_embedding, positive_embedding)), 1)
-            d_negative = tf.reduce_sum(tf.square(tf.sub(anchor_embedding, negative_embedding)), 1)
+            d_positive = tf.reduce_sum(tf.square(tf.subtract(anchor_embedding, positive_embedding)), 1)
+            d_negative = tf.reduce_sum(tf.square(tf.subtract(anchor_embedding, negative_embedding)), 1)

-            basic_loss = tf.add(tf.sub(d_positive, d_negative), self.margin)
+            basic_loss = tf.add(tf.subtract(d_positive, d_negative), self.margin)
             loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

             return loss, tf.reduce_mean(d_negative), tf.reduce_mean(d_positive)
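The same mechanical rename applies in the triplet loss; for clarity, a standalone version as it reads after the port (the free-function form and default margin are illustrative):

```python
import tensorflow as tf

def triplet_loss(anchor, positive, negative, margin=0.2):
    anchor = tf.nn.l2_normalize(anchor, 1, 1e-10)
    positive = tf.nn.l2_normalize(positive, 1, 1e-10)
    negative = tf.nn.l2_normalize(negative, 1, 1e-10)

    # Squared L2 distance between the anchor and each arm of the triplet
    d_positive = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    d_negative = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

    basic_loss = tf.add(tf.subtract(d_positive, d_negative), margin)
    return tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
```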
@@ -160,13 +160,13 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         """Attach a lot of summaries to a Tensor."""
         with tf.name_scope('summaries'):
             mean = tf.reduce_mean(var)
-            tf.scalar_summary('mean/' + name, mean)
+            tf.summary.scalar('mean/' + name, mean)
             with tf.name_scope('stddev'):
                 stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
-            tf.scalar_summary('sttdev/' + name, stddev)
-            tf.scalar_summary('max/' + name, tf.reduce_max(var))
-            tf.scalar_summary('min/' + name, tf.reduce_min(var))
-            tf.histogram_summary(name, var)
+            tf.summary.scalar('stddev/' + name, stddev)
+            tf.summary.scalar('max/' + name, tf.reduce_max(var))
+            tf.summary.scalar('min/' + name, tf.reduce_min(var))
+            tf.summary.histogram(name, var)

     def generate_summaries(self):
         for k in self.sequence_net.keys():
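All free-standing summary ops moved into the `tf.summary` module in 1.0: `tf.scalar_summary` → `tf.summary.scalar` and `tf.histogram_summary` → `tf.summary.histogram`, with the tag still first and the tensor second. A compact sketch of the helper above (it keeps the `reduce_sum` from the original, which is not a true standard deviation):

```python
import tensorflow as tf

def variable_summaries(var, name):
    """Attach mean/stddev/max/min/histogram summaries to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean/' + name, mean)
        stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))  # as in the code above
        tf.summary.scalar('stddev/' + name, stddev)
        tf.summary.scalar('max/' + name, tf.reduce_max(var))
        tf.summary.scalar('min/' + name, tf.reduce_min(var))
        tf.summary.histogram(name, var)
```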
@@ -310,7 +310,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         # Loading variables
         place_holder = tf.placeholder(tf.float32, shape=shape, name="load")
         self.compute_graph(place_holder)
-        tf.initialize_all_variables().run(session=session)
+        tf.global_variables_initializer().run(session=session)
         self.load_variables_only(hdf5, session)

     def save(self, saver, path):
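`tf.initialize_all_variables()` is deprecated in 1.0; `tf.global_variables_initializer()` returns the same kind of op and must still be run inside a session. A minimal sketch:

```python
import tensorflow as tf

x = tf.get_variable("x", initializer=tf.zeros([2, 2]))

with tf.Session() as session:
    # TF 1.0 spelling; tf.initialize_all_variables() still works but warns.
    tf.global_variables_initializer().run(session=session)
    print(session.run(x))
```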
......
@@ -75,6 +75,7 @@ def dummy_experiment(data_s, architecture):
 def test_cnn_trainer():
     train_data, train_labels, validation_data, validation_labels = load_mnist()
     train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
......
@@ -294,7 +294,7 @@ class SiameseTrainer(Trainer):
         """
         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)

         self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)
         feed_dict = self.get_feed_dict(data_shuffler)
@@ -315,11 +315,11 @@ class SiameseTrainer(Trainer):
         """

         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('between_class_loss', self.between_class_graph_train)
+        tf.summary.scalar('within_class_loss', self.within_class_graph_train)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def load_and_enqueue(self):
         """
......
@@ -229,7 +229,7 @@ class Trainer(object):
         l = self.session.run(self.validation_graph, feed_dict=feed_dict)

         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'), self.session.graph)

         summaries = [summary_pb2.Summary.Value(tag="loss", simple_value=float(l))]
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
@@ -240,9 +240,9 @@ class Trainer(object):
         Creates a simple TensorBoard summary with the value of the loss and the learning rate
         """

         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def start_thread(self):
         """
@@ -281,7 +281,6 @@ class Trainer(object):
         """
         Create all the necessary graphs for training, validation and inference
         """
-        # Creating train graph
         self.training_graph = self.compute_graph(train_data_shuffler, prefetch=self.prefetch, name="train")
         tf.add_to_collection("training_graph", self.training_graph)
@@ -420,10 +419,10 @@ class Trainer(object):
         tf.add_to_collection("summaries_train", self.summaries_train)

-        tf.initialize_all_variables().run(session=self.session)
+        tf.global_variables_initializer().run(session=self.session)

         # Original tensorflow saver object
-        saver = tf.train.Saver(var_list=tf.all_variables())
+        saver = tf.train.Saver(var_list=tf.global_variables())

         if isinstance(train_data_shuffler, OnlineSampling):
             train_data_shuffler.set_feature_extractor(self.architecture, session=self.session)
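`tf.train.Saver` itself is unchanged; only the collection passed as `var_list` needed the rename. A minimal checkpointing sketch (the checkpoint path is illustrative):

```python
import tensorflow as tf

w = tf.get_variable("w", initializer=tf.ones([3]))
saver = tf.train.Saver(var_list=tf.global_variables())

with tf.Session() as session:
    tf.global_variables_initializer().run(session=session)
    saver.save(session, './model.ckpt')  # illustrative path
```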
@@ -435,7 +434,7 @@ class Trainer(object):
         threads = self.start_thread()

         # TENSOR BOARD SUMMARY
-        self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), self.session.graph)
+        self.train_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'train'), self.session.graph)

         for step in range(start_step, self.iterations):
             start = time.time()
......
@@ -304,7 +304,7 @@ class TripletTrainer(Trainer):
         """
         if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'),
+            self.validation_summary_writter = tf.summary.FileWriter(os.path.join(self.temp_dir, 'validation'),
                                                                      self.session.graph)

         self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)
@@ -326,11 +326,11 @@ class TripletTrainer(Trainer):
         """

         # Train summary
-        tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
-        tf.scalar_summary('lr', self.learning_rate, name="train")
-        return tf.merge_all_summaries()
+        tf.summary.scalar('loss', self.training_graph)
+        tf.summary.scalar('between_class_loss', self.between_class_graph_train)
+        tf.summary.scalar('within_class_loss', self.within_class_graph_train)
+        tf.summary.scalar('lr', self.learning_rate)
+        return tf.summary.merge_all()

     def load_and_enqueue(self):
         """
......
@@ -14,7 +14,7 @@ def compute_euclidean_distance(x, y):
     """
     with tf.name_scope('euclidean_distance') as scope:
-        d = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x, y)), 1))
+        d = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x, y)), 1))
         return d
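A quick check of the ported helper; the input values are made up so the expected distances are easy to verify by hand:

```python
import tensorflow as tf

x = tf.constant([[0.0, 0.0], [1.0, 1.0]])
y = tf.constant([[3.0, 4.0], [1.0, 1.0]])

# Row-wise Euclidean distance: sqrt(sum((x - y)^2, axis=1))
d = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x, y)), 1))

with tf.Session() as session:
    print(session.run(d))  # [5.0, 0.0]
```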
......