Commit 4dc72f0a authored by Amir MOHAMMADI

Use tf.train.get_or_create_global_step (introduced in 1.2) instead of the contrib one

parent 82a80a54
Pipeline #14648 failed in 36 minutes and 16 seconds
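For context, this is the pattern the commit applies throughout the code base: the deprecated contrib helper is swapped for the core API that ships with TensorFlow 1.2 and later. A minimal sketch (graph-mode TF 1.x):

    import tensorflow as tf

    # Old, deprecated contrib helper being removed by this commit:
    # global_step = tf.contrib.framework.get_or_create_global_step()

    # Core replacement, available since TensorFlow 1.2:
    global_step = tf.train.get_or_create_global_step()

    # Either call returns (creating if needed) the singleton global-step
    # variable that an optimizer increments once per training step:
    # train_op = optimizer.minimize(loss, global_step=global_step)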
@@ -301,7 +301,7 @@ class LogitsCenterLoss(estimator.Estimator):
                 tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
                                                           self.extra_checkpoint["scopes"])
-            global_step = tf.contrib.framework.get_or_create_global_step()
+            global_step = tf.train.get_or_create_global_step()
             train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
                                 centers)
         return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
@@ -43,7 +43,7 @@ class Siamese(estimator.Estimator):
            return loss_set_of_ops(logits, labels)

-       extra_checkpoint = {"checkpoint_path":model_dir,
+       extra_checkpoint = {"checkpoint_path":model_dir,
                            "scopes": dict({"Dummy/": "Dummy/"}),
                            "is_trainable": False
                           }
@@ -60,35 +60,35 @@ class Siamese(estimator.Estimator):
        - tf.train.GradientDescentOptimizer
        - tf.train.AdagradOptimizer
        - ....

    config:

    loss_op:
        Pointer to a function that computes the loss.

    embedding_validation:
        Run the validation using embeddings? [default: False]

    model_dir:
        Model path

    validation_batch_size:
        Size of the batch for validation. This value is used when the
        validation with embeddings is used. This is a hack.

    params:
-       Extra params for the model function
+       Extra params for the model function
        (please see https://www.tensorflow.org/extend/estimators for more info)

    extra_checkpoint: dict()
        In case you want to use another model to initialize some variables.
        This argument should be in the following format:
-       extra_checkpoint = {"checkpoint_path": <YOUR_CHECKPOINT>,
+       extra_checkpoint = {"checkpoint_path": <YOUR_CHECKPOINT>,
                            "scopes": dict({"<SOURCE_SCOPE>/": "<TARGET_SCOPE>/"}),
                            "is_trainable": <IF_THOSE_LOADED_VARIABLES_ARE_TRAINABLE>
                           }
    """

    def __init__(self,
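The docstring above specifies the extra_checkpoint contract. As a usage illustration only (the checkpoint path, architecture builder, and loss function below are hypothetical, not from this commit):

    import tensorflow as tf

    # Hypothetical values; "checkpoint_path" must point at an existing
    # checkpoint, and "scopes" maps source scopes to target scopes in the
    # format consumed by tf.contrib.framework.init_from_checkpoint.
    extra_checkpoint = {"checkpoint_path": "/path/to/pretrained/checkpoint",
                        "scopes": dict({"Dummy/": "Dummy/"}),
                        "is_trainable": False}

    estimator = Siamese(architecture=my_architecture,   # hypothetical builder fn
                        optimizer=tf.train.GradientDescentOptimizer(0.01),
                        loss_op=my_loss_op,              # hypothetical loss fn
                        model_dir="/tmp/siamese_model",
                        extra_checkpoint=extra_checkpoint)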
@@ -99,18 +99,18 @@ class Siamese(estimator.Estimator):
                 model_dir="",
                 validation_batch_size=None,
                 params=None,
-                extra_checkpoint=None
+                extra_checkpoint=None
                ):

        self.architecture = architecture
        self.optimizer=optimizer
        self.loss_op=loss_op
        self.loss = None
-       self.extra_checkpoint = extra_checkpoint
+       self.extra_checkpoint = extra_checkpoint

        if self.architecture is None:
            raise ValueError("Please specify a function to build the architecture !!")

        if self.optimizer is None:
            raise ValueError("Please specify an optimizer (https://www.tensorflow.org/api_guides/python/train) !!")
@@ -119,8 +119,8 @@ class Siamese(estimator.Estimator):
        def _model_fn(features, labels, mode, params, config):

-           if mode == tf.estimator.ModeKeys.TRAIN:
+           if mode == tf.estimator.ModeKeys.TRAIN:

                # Building one graph; by default everything is trainable
                if self.extra_checkpoint is None:
                    is_trainable = True
@@ -138,12 +138,12 @@ class Siamese(estimator.Estimator):
                if self.extra_checkpoint is not None:
                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
                                                              self.extra_checkpoint["scopes"])

                # Compute Loss (for both TRAIN and EVAL modes)
                self.loss = self.loss_op(prelogits_left, prelogits_right, labels)

                # Configure the Training Op (for TRAIN mode)
-               global_step = tf.contrib.framework.get_or_create_global_step()
+               global_step = tf.train.get_or_create_global_step()
                train_op = self.optimizer.minimize(self.loss, global_step=global_step)
                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
@@ -162,9 +162,9 @@ class Siamese(estimator.Estimator):
            predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
            eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
            return tf.estimator.EstimatorSpec(mode=mode, loss=tf.reduce_mean(1), eval_metric_ops=eval_metric_ops)

        super(Siamese, self).__init__(model_fn=_model_fn,
                                      model_dir=model_dir,
@@ -51,18 +51,18 @@ class Triplet(estimator.Estimator):
        - tf.train.GradientDescentOptimizer
        - tf.train.AdagradOptimizer
        - ....

    config:

    n_classes:
        Number of classes of your problem. The logits will be appended in this class.

    loss_op:
        Pointer to a function that computes the loss.

    embedding_validation:
        Run the validation using embeddings? [default: False]

    model_dir:
        Model path
@@ -97,7 +97,7 @@ class Triplet(estimator.Estimator):
        if self.architecture is None:
            raise ValueError("Please specify a function to build the architecture !!")

        if self.optimizer is None:
            raise ValueError("Please specify an optimizer (https://www.tensorflow.org/api_guides/python/train) !!")
@@ -132,7 +132,7 @@ class Triplet(estimator.Estimator):
            # Compute Loss (for both TRAIN and EVAL modes)
            self.loss = self.loss_op(prelogits_anchor, prelogits_positive, prelogits_negative)

            # Configure the Training Op (for TRAIN mode)
-           global_step = tf.contrib.framework.get_or_create_global_step()
+           global_step = tf.train.get_or_create_global_step()
            train_op = self.optimizer.minimize(self.loss, global_step=global_step)
            return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
                                              train_op=train_op)
@@ -150,7 +150,7 @@ class Triplet(estimator.Estimator):
            predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
            eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
            return tf.estimator.EstimatorSpec(mode=mode, loss=tf.reduce_mean(1), eval_metric_ops=eval_metric_ops)

        super(Triplet, self).__init__(model_fn=_model_fn,
@@ -95,22 +95,22 @@ class SiameseTrainer(Trainer):
        self.graph = None
        self.validation_graph = None
        self.loss = None
-       self.validation_predictor = None
+       self.validation_predictor = None
        self.optimizer_class = None
        self.learning_rate = None

        # Training variables used in the fit
        self.optimizer = None
        self.data_ph = None
        self.label_ph = None
        self.validation_data_ph = None
        self.validation_label_ph = None
        self.saver = None

        bob.core.log.set_verbosity_level(logger, verbosity_level)
@@ -140,7 +140,7 @@ class SiameseTrainer(Trainer):
        self.optimizer_class = optimizer
        self.learning_rate = learning_rate

-       self.global_step = tf.contrib.framework.get_or_create_global_step()
+       self.global_step = tf.train.get_or_create_global_step()

        # Saving all the variables
        self.saver = tf.train.Saver(var_list=tf.global_variables())
@@ -215,7 +215,7 @@ class SiameseTrainer(Trainer):
    def fit(self, step):
        feed_dict = self.get_feed_dict(self.train_data_shuffler)
        _, l, bt_class, wt_class, lr, summary = self.session.run([
            self.optimizer,
            self.loss['loss'], self.loss['between_class'],
@@ -96,23 +96,23 @@ class TripletTrainer(Trainer):
        self.graph = None
        self.validation_graph = None
        self.loss = None
-       self.validation_predictor = None
+       self.validation_predictor = None
        self.optimizer_class = None
        self.learning_rate = None

        # Training variables used in the fit
        self.optimizer = None
        self.data_ph = None
        self.label_ph = None
        self.validation_data_ph = None
        self.validation_label_ph = None
        self.saver = None

        bob.core.log.set_verbosity_level(logger, verbosity_level)
@@ -141,7 +141,7 @@ class TripletTrainer(Trainer):
        self.optimizer_class = optimizer
        self.learning_rate = learning_rate

-       self.global_step = tf.contrib.framework.get_or_create_global_step()
+       self.global_step = tf.train.get_or_create_global_step()

        # Saving all the variables
        self.saver = tf.train.Saver(var_list=tf.global_variables())
@@ -18,7 +18,7 @@ def exponential_decay(base_learning_rate=0.05,
    staircase: Boolean. If True, decay the learning rate at discrete intervals
    """

-   global_step = tf.contrib.framework.get_or_create_global_step()
+   global_step = tf.train.get_or_create_global_step()

    return tf.train.exponential_decay(learning_rate=base_learning_rate,
                                      global_step=global_step,
                                      decay_steps=decay_steps,
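The helper above simply resolves the global step and delegates to tf.train.exponential_decay. A sketch of how the resulting schedule would typically feed an optimizer (the hyperparameter values are illustrative, not from the commit):

    import tensorflow as tf

    global_step = tf.train.get_or_create_global_step()

    # Illustrative hyperparameters: decay the rate by 4% every 1000 steps.
    learning_rate = tf.train.exponential_decay(learning_rate=0.05,
                                               global_step=global_step,
                                               decay_steps=1000,
                                               decay_rate=0.96,
                                               staircase=True)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate)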