Commit b066530f authored by Amir MOHAMMADI

code clean-up

parent 55f2356e
Merge request !33: Changes to the biogenerator
@@ -73,7 +73,8 @@ def append_image_augmentation(image, gray_scale=False,
     if output_shape is not None:
         assert len(output_shape) == 2
-        image = tf.image.resize_image_with_crop_or_pad(image, output_shape[0], output_shape[1])
+        image = tf.image.resize_image_with_crop_or_pad(
+            image, output_shape[0], output_shape[1])
 
     if random_flip:
         image = tf.image.random_flip_left_right(image)
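For reference, the two TF 1.x image ops this hunk re-wraps behave as follows; a minimal sketch (the full append_image_augmentation signature is not shown in this diff):

    import tensorflow as tf

    # resize_image_with_crop_or_pad center-crops inputs larger than the
    # target and zero-pads smaller ones, so the result is always exactly
    # (output_shape[0], output_shape[1]).
    image = tf.placeholder(tf.float32, shape=(224, 224, 3))
    resized = tf.image.resize_image_with_crop_or_pad(image, 160, 160)

    # Mirrors the image along its width axis with probability 0.5.
    flipped = tf.image.random_flip_left_right(resized)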
@@ -136,15 +137,18 @@ def triplets_random_generator(input_data, input_labels):
     input_labels = numpy.array(input_labels)
     total_samples = input_data.shape[0]
 
-    indexes_per_labels = arrange_indexes_by_label(input_labels, possible_labels)
+    indexes_per_labels = arrange_indexes_by_label(
+        input_labels, possible_labels)
 
     # searching for random triplets
     offset_class = 0
     for i in range(total_samples):
-        anchor_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(len(indexes_per_labels[possible_labels[offset_class]]))], ...]
+        anchor_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(
+            len(indexes_per_labels[possible_labels[offset_class]]))], ...]
 
-        positive_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(len(indexes_per_labels[possible_labels[offset_class]]))], ...]
+        positive_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(
+            len(indexes_per_labels[possible_labels[offset_class]]))], ...]
 
         # Changing the class
         offset_class += 1
@@ -152,10 +156,11 @@ def triplets_random_generator(input_data, input_labels):
         if offset_class == len(possible_labels):
             offset_class = 0
 
-        negative_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(len(indexes_per_labels[possible_labels[offset_class]]))], ...]
+        negative_sample = input_data[indexes_per_labels[possible_labels[offset_class]][numpy.random.randint(
+            len(indexes_per_labels[possible_labels[offset_class]]))], ...]
 
         append(str(anchor_sample), str(positive_sample), str(negative_sample))
-        #yield anchor, positive, negative
+        # yield anchor, positive, negative
 
     return anchor, positive, negative
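All of the re-wrapped one-liners above perform the same operation: pick a random sample index from a per-label index table. A NumPy sketch of that selection, with arrange_indexes_by_label paraphrased from the commented-out loop visible in the next hunk (names here are illustrative, not the library API):

    import numpy

    def arrange_indexes_by_label(input_labels, possible_labels):
        # Group sample indexes by label and shuffle each group.
        indexes_per_labels = dict()
        for l in possible_labels:
            indexes_per_labels[l] = numpy.where(input_labels == l)[0]
            numpy.random.shuffle(indexes_per_labels[l])
        return indexes_per_labels

    labels = numpy.array([0, 0, 1, 1, 2, 2])
    table = arrange_indexes_by_label(labels, [0, 1, 2])

    # Anchor and positive come from the current class; the negative is
    # drawn after offset_class advances to the next label (round-robin).
    anchor = table[0][numpy.random.randint(len(table[0]))]
    positive = table[0][numpy.random.randint(len(table[0]))]
    negative = table[1][numpy.random.randint(len(table[1]))]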
@@ -191,13 +196,16 @@ def siamease_pairs_generator(input_data, input_labels):
     # Filtering the samples by label and shuffling all the indexes
     #indexes_per_labels = dict()
-    #for l in possible_labels:
+    # for l in possible_labels:
     #    indexes_per_labels[l] = numpy.where(input_labels == l)[0]
     #    numpy.random.shuffle(indexes_per_labels[l])
-    indexes_per_labels = arrange_indexes_by_label(input_labels, possible_labels)
+    indexes_per_labels = arrange_indexes_by_label(
+        input_labels, possible_labels)
 
-    left_possible_indexes = numpy.random.choice(possible_labels, total_samples, replace=True)
-    right_possible_indexes = numpy.random.choice(possible_labels, total_samples, replace=True)
+    left_possible_indexes = numpy.random.choice(
+        possible_labels, total_samples, replace=True)
+    right_possible_indexes = numpy.random.choice(
+        possible_labels, total_samples, replace=True)
 
     genuine = True
     for i in range(total_samples):
@@ -207,10 +215,12 @@ def siamease_pairs_generator(input_data, input_labels):
             class_index = left_possible_indexes[i]
 
             # Now selecting the samples for the pair
-            left = input_data[indexes_per_labels[class_index][numpy.random.randint(len(indexes_per_labels[class_index]))]]
-            right = input_data[indexes_per_labels[class_index][numpy.random.randint(len(indexes_per_labels[class_index]))]]
+            left = input_data[indexes_per_labels[class_index][numpy.random.randint(
+                len(indexes_per_labels[class_index]))]]
+            right = input_data[indexes_per_labels[class_index][numpy.random.randint(
+                len(indexes_per_labels[class_index]))]]
             append(left, right, 0)
-            #yield left, right, 0
+            # yield left, right, 0
         else:
             # Selecting the 2 classes
             class_index = list()
@@ -219,7 +229,7 @@ def siamease_pairs_generator(input_data, input_labels):
             # Finding the right pair
             j = i
            # TODO: Lame solution. Fix this
-            while j < total_samples: # Here is an unidiretinal search for the negative pair
+            while j < total_samples:  # Here is an unidiretinal search for the negative pair
                 if left_possible_indexes[i] != right_possible_indexes[j]:
                     class_index.append(right_possible_indexes[j])
                     break
@@ -227,11 +237,12 @@ def siamease_pairs_generator(input_data, input_labels):
             if j < total_samples:
                 # Now selecting the samples for the pair
-                left = input_data[indexes_per_labels[class_index[0]][numpy.random.randint(len(indexes_per_labels[class_index[0]]))]]
-                right = input_data[indexes_per_labels[class_index[1]][numpy.random.randint(len(indexes_per_labels[class_index[1]]))]]
+                left = input_data[indexes_per_labels[class_index[0]][numpy.random.randint(
+                    len(indexes_per_labels[class_index[0]]))]]
+                right = input_data[indexes_per_labels[class_index[1]][numpy.random.randint(
+                    len(indexes_per_labels[class_index[1]]))]]
                 append(left, right, 1)
 
         genuine = not genuine
     return left_data, right_data, labels
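The control flow re-wrapped in this function alternates between genuine pairs (both samples from one class) and impostor pairs (two different classes, found by the forward scan that the TODO flags). A compact standalone sketch of that alternation (illustrative, not the library API):

    import numpy

    total = 20
    left_choices = numpy.random.choice(3, total, replace=True)
    right_choices = numpy.random.choice(3, total, replace=True)

    genuine = True
    for i in range(total):
        if genuine:
            # Genuine pair: both samples from class left_choices[i].
            pair = (left_choices[i], left_choices[i])
        else:
            # Impostor pair: scan forward for the first differing label.
            j = i
            while j < total and left_choices[i] == right_choices[j]:
                j += 1
            if j < total:
                pair = (left_choices[i], right_choices[j])
        genuine = not genuine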
@@ -296,3 +307,30 @@ def tf_repeat(tensor, repeats):
     tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
     repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
     return repeated_tesnor
+
+
+def all_patches(image, label, key, size):
+    """Extracts all patches of an image
+
+    Parameters
+    ----------
+    image
+        The image should be channels_last format and already batched.
+    label
+        The label for the image
+    key
+        The key for the image
+    size : (int, int)
+        The height and width of the blocks.
+
+    Returns
+    -------
+    (blocks, label, key)
+        The non-overlapping blocks of size from image and labels and keys are
+        repeated.
+    """
+    blocks, n_blocks = blocks_tensorflow(image, size)
+
+    # duplicate label and key as n_blocks
+    label = tf_repeat(label, [n_blocks])
+    key = tf_repeat(key, [n_blocks])
+
+    return blocks, label, key
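The new all_patches helper leans on tf_repeat (its tail is the context at the top of this hunk) so that every block extracted from an image carries that image's label and key. A small sketch of the repeat semantics, inlining the expand-tile-reshape pattern shown above (blocks_tensorflow is a repository helper not shown in this diff):

    import tensorflow as tf

    labels = tf.constant([7, 9])
    n_blocks = 3

    expanded = tf.expand_dims(labels, -1)               # shape (2, 1)
    tiled = tf.tile(expanded, multiples=[1, n_blocks])  # shape (2, 3)
    # Reshape to tf.shape(labels) * n_blocks, i.e. (6,):
    repeated = tf.reshape(tiled, tf.shape(labels) * n_blocks)
    # repeated == [7, 7, 7, 9, 9, 9]: block i keeps its source label.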
@@ -3,14 +3,6 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 
 import tensorflow as tf
-import threading
-import os
-import bob.io.base
-import bob.core
-from tensorflow.core.framework import summary_pb2
-import time
-
-#logger = bob.core.log.setup("bob.learn.tensorflow")
 from bob.learn.tensorflow.network.utils import append_logits
 from tensorflow.python.estimator import estimator
 from bob.learn.tensorflow.utils import predict_using_tensors
@@ -28,102 +20,88 @@ class Logits(estimator.Estimator):
     The **architecture** function should follow the following pattern:
 
-    def my_beautiful_function(placeholder):
-        end_points = dict()
-        graph = convXX(placeholder)
-        end_points['conv'] = graph
-        ....
-        return graph, end_points
+    def my_beautiful_architecture(placeholder, **kwargs):
+        end_points = dict()
+        graph = convXX(placeholder)
+        end_points['conv'] = graph
+        ....
+        return graph, end_points
 
     The **loss** function should follow the following pattern:
 
-    def my_beautiful_loss(logits, labels):
+    def my_beautiful_loss(logits, labels, **kwargs):
         return loss_set_of_ops(logits, labels)
 
-    Variables, scopes... from other models can be loaded by the model_fn.
-    For that, please, wrap the the path of the OTHER checkpoint and the list
-    of variables in a dictionary with the key "load_variable_from_checkpoint" an provide them to the keyword `params`:
-
-    {"load_variable_from_checkpoint": {"checkpoint_path":"mypath",
-                                       "scopes":{"my_scope/": my_scope/}}}
-
-    **Parameters**
+    Parameters
+    ----------
 
     architecture:
        Pointer to a function that builds the graph.
 
     optimizer:
-       One of the tensorflow solvers (https://www.tensorflow.org/api_guides/python/train)
+       One of the tensorflow solvers
+       (https://www.tensorflow.org/api_guides/python/train)
        - tf.train.GradientDescentOptimizer
        - tf.train.AdagradOptimizer
        - ....
 
     config:
 
     n_classes:
-       Number of classes of your problem. The logits will be appended in this class
+       Number of classes of your problem. The logits will be appended in this
+       class
 
     loss_op:
       Pointer to a function that computes the loss.
 
     embedding_validation:
       Run the validation using embeddings?? [default: False]
 
     model_dir:
      Model path
 
     validation_batch_size:
      Size of the batch for validation. This value is used when the
      validation with embeddings is used. This is a hack.
 
     params:
-      Extra params for the model function (please see https://www.tensorflow.org/extend/estimators for more info)
+      Extra params for the model function (please see
+      https://www.tensorflow.org/extend/estimators for more info)
 
-    extra_checkpoint: dict()
+    extra_checkpoint: dict
      In case you want to use other model to initialize some variables.
      This argument should be in the following format
-      extra_checkpoint = {"checkpoint_path": <YOUR_CHECKPOINT>,
-                          "scopes": dict({"<SOURCE_SCOPE>/": "<TARGET_SCOPE>/"}),
-                          "is_trainable": <IF_THOSE_LOADED_VARIABLES_ARE_TRAINABLE>
-                         }
+      extra_checkpoint = {
+          "checkpoint_path": <YOUR_CHECKPOINT>,
+          "scopes": dict({"<SOURCE_SCOPE>/": "<TARGET_SCOPE>/"}),
+          "is_trainable": <IF_THOSE_LOADED_VARIABLES_ARE_TRAINABLE>
+      }
     """
 
     def __init__(self,
-                 architecture=None,
-                 optimizer=None,
+                 architecture,
+                 optimizer,
+                 loss_op,
+                 n_classes,
                  config=None,
-                 n_classes=0,
-                 loss_op=None,
                  embedding_validation=False,
                  model_dir="",
                  validation_batch_size=None,
                  params=None,
                  extra_checkpoint=None
-                ):
+                 ):
 
         self.architecture = architecture
-        self.optimizer=optimizer
-        self.n_classes=n_classes
-        self.loss_op=loss_op
+        self.optimizer = optimizer
+        self.n_classes = n_classes
+        self.loss_op = loss_op
+        self.loss = None
         self.embedding_validation = embedding_validation
         self.extra_checkpoint = extra_checkpoint
 
-        if self.architecture is None:
-            raise ValueError("Please specify a function to build the architecture !!")
-
-        if self.optimizer is None:
-            raise ValueError("Please specify a optimizer (https://www.tensorflow.org/api_guides/python/train) !!")
-
-        if self.loss_op is None:
-            raise ValueError("Please specify a function to build the loss !!")
-
-        if self.n_classes <= 0:
-            raise ValueError("Number of classes must be greated than 0")
-
         def _model_fn(features, labels, mode, params, config):
 
            check_features(features)
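Since architecture, optimizer, loss_op, and n_classes are now required arguments (the None defaults and the corresponding runtime ValueError checks are gone), construction looks roughly like the sketch below. This assumes the class is importable as bob.learn.tensorflow.estimators.Logits; the toy architecture and loss are illustrative stand-ins, not repository code:

    import tensorflow as tf
    from bob.learn.tensorflow.estimators import Logits

    def architecture(placeholder, is_trainable=True, **kwargs):
        # Toy network: a single flatten layer stands in for the prelogits.
        end_points = dict()
        graph = tf.layers.flatten(placeholder)
        end_points['flatten'] = graph
        return graph, end_points

    def loss_op(logits, labels, **kwargs):
        # Matches the keyword call self.loss_op(logits=..., labels=...).
        return tf.losses.sparse_softmax_cross_entropy(
            labels=labels, logits=logits)

    estimator = Logits(
        architecture,
        tf.train.GradientDescentOptimizer(learning_rate=0.01),
        loss_op,
        n_classes=10,
        model_dir="/tmp/logits_model")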
@@ -131,11 +109,11 @@ class Logits(estimator.Estimator):
             key = features['key']
 
             # Building one graph, by default everything is trainable
-            if self.extra_checkpoint is None:            
+            if self.extra_checkpoint is None:
                 is_trainable = True
             else:
                 is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
 
             prelogits = self.architecture(data, is_trainable=is_trainable)[0]
             logits = append_logits(prelogits, n_classes)
@@ -143,41 +121,50 @@ class Logits(estimator.Estimator):
                 # Compute the embeddings
                 embeddings = tf.nn.l2_normalize(prelogits, 1)
                 predictions = {
-                    "embeddings": embeddings
+                    "embeddings": embeddings,
+                    "key": key,
                 }
             else:
+                probabilities = tf.nn.softmax(logits, name="softmax_tensor")
                 predictions = {
                     # Generate predictions (for PREDICT and EVAL mode)
                     "classes": tf.argmax(input=logits, axis=1),
-                    # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
-                    # `logging_hook`.
-                    "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
+                    # Add `softmax_tensor` to the graph. It is used for PREDICT
+                    # and by the `logging_hook`.
+                    "probabilities": probabilities,
+                    "key": key,
                 }
 
             if mode == tf.estimator.ModeKeys.PREDICT:
-                return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+                return tf.estimator.EstimatorSpec(mode=mode,
+                                                  predictions=predictions)
 
             # Compute Loss (for both TRAIN and EVAL modes)
-            self.loss = self.loss_op(logits, labels)
+            self.loss = self.loss_op(logits=logits, labels=labels)
 
             # Configure the Training Op (for TRAIN mode)
             if mode == tf.estimator.ModeKeys.TRAIN:
 
                 if self.extra_checkpoint is not None:
-                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
-                                                              self.extra_checkpoint["scopes"])
+                    tf.contrib.framework.init_from_checkpoint(
+                        self.extra_checkpoint["checkpoint_path"],
+                        self.extra_checkpoint["scopes"])
 
-                global_step = tf.contrib.framework.get_or_create_global_step()
-                train_op = self.optimizer.minimize(self.loss, global_step=global_step)
+                global_step = tf.train.get_or_create_global_step()
+                train_op = self.optimizer.minimize(
+                    self.loss, global_step=global_step)
                 return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
                                                   train_op=train_op)
 
             # Validation
             if self.embedding_validation:
-                predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
-                eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
-                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss, eval_metric_ops=eval_metric_ops)
+                predictions_op = predict_using_tensors(
+                    predictions["embeddings"], labels,
+                    num=validation_batch_size)
+                eval_metric_ops = {"accuracy": tf.metrics.accuracy(
+                    labels=labels, predictions=predictions_op)}
+                return tf.estimator.EstimatorSpec(
+                    mode=mode, loss=self.loss, eval_metric_ops=eval_metric_ops)
             else:
                 # Add evaluation metrics (for EVAL mode)
                 eval_metric_ops = {
@@ -211,7 +198,8 @@ class LogitsCenterLoss(estimator.Estimator):
       Pointer to a function that builds the graph.
 
     optimizer:
-      One of the tensorflow solvers (https://www.tensorflow.org/api_guides/python/train)
+      One of the tensorflow solvers
+      (https://www.tensorflow.org/api_guides/python/train)
       - tf.train.GradientDescentOptimizer
       - tf.train.AdagradOptimizer
       - ....
@@ -219,7 +207,8 @@ class LogitsCenterLoss(estimator.Estimator):
     config:
 
     n_classes:
-      Number of classes of your problem. The logits will be appended in this class
+      Number of classes of your problem. The logits will be appended in this
+      class
 
     loss_op:
       Pointer to a function that computes the loss.
@@ -233,10 +222,11 @@ class LogitsCenterLoss(estimator.Estimator):
     validation_batch_size:
      Size of the batch for validation. This value is used when the
      validation with embeddings is used. This is a hack.
 
     params:
-      Extra params for the model function (please see https://www.tensorflow.org/extend/estimators for more info)
+      Extra params for the model function (please see
+      https://www.tensorflow.org/extend/estimators for more info)
 
     """
 
     def __init__(self,
@@ -250,8 +240,8 @@ class LogitsCenterLoss(estimator.Estimator):
                  factor=0.01,
                  validation_batch_size=None,
                  params=None,
-                 extra_checkpoint=None,
-                ):
+                 extra_checkpoint=None,
+                 ):
 
         self.architecture = architecture
         self.optimizer = optimizer
@@ -263,10 +253,13 @@ class LogitsCenterLoss(estimator.Estimator):
         self.extra_checkpoint = extra_checkpoint
 
         if self.architecture is None:
-            raise ValueError("Please specify a function to build the architecture !!")
+            raise ValueError(
+                "Please specify a function to build the architecture !!")
 
         if self.optimizer is None:
-            raise ValueError("Please specify a optimizer (https://www.tensorflow.org/api_guides/python/train) !!")
+            raise ValueError(
+                "Please specify a optimizer (https://www.tensorflow.org/"
+                "api_guides/python/train) !!")
 
         if self.n_classes <= 0:
             raise ValueError("Number of classes must be greated than 0")
@@ -278,11 +271,11 @@ class LogitsCenterLoss(estimator.Estimator):
             key = features['key']
 
             # Building one graph, by default everything is trainable
-            if self.extra_checkpoint is None:            
+            if self.extra_checkpoint is None:
                 is_trainable = True
             else:
                 is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
 
             prelogits = self.architecture(data)[0]
             logits = append_logits(prelogits, n_classes)
@@ -294,7 +287,8 @@ class LogitsCenterLoss(estimator.Estimator):
                 # Compute the embeddings
                 embeddings = tf.nn.l2_normalize(prelogits, 1)
                 predictions = {
-                    "embeddings": embeddings
+                    "embeddings": embeddings,
+                    "key": key,
                 }
             else:
                 predictions = {
@@ -302,13 +296,13 @@ class LogitsCenterLoss(estimator.Estimator):
                     "classes": tf.argmax(input=logits, axis=1),
                     # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
                     # `logging_hook`.
-                    "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
+                    "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
+                    "key": key,
                 }
 
             if mode == tf.estimator.ModeKeys.PREDICT:
                 return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
             self.loss = loss_dict['loss']
             centers = loss_dict['centers']
@@ -327,10 +321,11 @@ class LogitsCenterLoss(estimator.Estimator):
                 return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
                                                   train_op=train_op)
 
             if self.embedding_validation:
-                predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
-                eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
+                predictions_op = predict_using_tensors(
+                    predictions["embeddings"], labels, num=validation_batch_size)
+                eval_metric_ops = {"accuracy": tf.metrics.accuracy(
+                    labels=labels, predictions=predictions_op)}
                 return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss, eval_metric_ops=eval_metric_ops)
             else:
@@ -344,4 +339,3 @@ class LogitsCenterLoss(estimator.Estimator):
         super(LogitsCenterLoss, self).__init__(model_fn=_model_fn,
                                                model_dir=model_dir,
                                                config=config)
-
@@ -3,12 +3,14 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 
 import tensorflow as tf
-slim = tf.contrib.slim
+import tensorflow.contrib.slim as slim
 
-def append_logits(graph, n_classes, reuse=False, l2_regularizer=0.001, weights_std=0.1):
-    return slim.fully_connected(graph, n_classes, activation_fn=None,
-                                weights_initializer=tf.truncated_normal_initializer(stddev=weights_std),
-                                weights_regularizer=slim.l2_regularizer(l2_regularizer),
-                                scope='Logits', reuse=reuse)
+
+def append_logits(graph, n_classes, reuse=False, l2_regularizer=0.001,
+                  weights_std=0.1):
+    return slim.fully_connected(
+        graph, n_classes, activation_fn=None,
+        weights_initializer=tf.truncated_normal_initializer(
+            stddev=weights_std),
+        weights_regularizer=slim.l2_regularizer(l2_regularizer),
+        scope='Logits', reuse=reuse)
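For reference, a minimal sketch of calling the reformatted helper (TF 1.x with tf.contrib.slim, matching the import shown earlier in the estimator module):

    import tensorflow as tf
    from bob.learn.tensorflow.network.utils import append_logits

    # Assume prelogits is the embedding output of some network.
    prelogits = tf.placeholder(tf.float32, shape=(None, 128))

    # Appends a linear fully connected layer (no activation) named
    # 'Logits', with truncated-normal initialization and L2-regularized
    # weights, producing one output per class.
    logits = append_logits(prelogits, n_classes=10)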