Commit d9b4b431 authored by Amir MOHAMMADI

Merge branch 'cleanup' into 'master'

Code clean-up

Closes #55

See merge request !51
parents dc2a07b0 69fba930
Pipeline #20941 failed in 29 minutes and 30 seconds
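
The hunks below are almost entirely mechanical re-wrapping: long call and def signatures are split with one argument per line, and over-long statements are broken across continuation lines, without changing behaviour. A minimal before/after sketch of the convention, reusing a call that appears in the diff (the exact formatter and its settings behind this clean-up are an assumption, not stated in the commit):

# Sketch only: illustrates the line-wrapping convention applied throughout
# this merge; it is not code from the repository.
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()

# Before the clean-up, long calls sat on a single over-long line:
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)

# After the clean-up, the same call is wrapped onto continuation lines:
variable_averages = tf.train.ExponentialMovingAverage(
    0.9999, global_step)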
@@ -97,8 +97,10 @@ def append_image_augmentation(image,
image = tf.image.random_saturation(image, lower=0, upper=0.5)
if random_rotate:
image = tf.contrib.image.rotate(image, angles=numpy.random.randint(-5,5),
interpolation="BILINEAR")
image = tf.contrib.image.rotate(
image,
angles=numpy.random.randint(-5, 5),
interpolation="BILINEAR")
if gray_scale:
image = tf.image.rgb_to_grayscale(image, name="rgb_to_gray")
@@ -80,9 +80,10 @@ class BioGenerator(object):
self._output_shapes = (data.shape, tf.TensorShape([]),
tf.TensorShape([]))
logger.info("Initializing a dataset with %d files and %s types "
"and %s shapes", len(self.biofiles), self.output_types,
self.output_shapes)
logger.info(
"Initializing a dataset with %d files and %s types "
"and %s shapes", len(self.biofiles), self.output_types,
self.output_shapes)
@property
def labels(self):
@@ -136,8 +136,10 @@ class Logits(estimator.Estimator):
# Compute the moving average of all individual losses and the total loss.
if apply_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())
variable_averages = tf.train.ExponentialMovingAverage(
0.9999, global_step)
variable_averages_op = variable_averages.apply(
tf.trainable_variables())
else:
variable_averages_op = tf.no_op(name='noop')
@@ -147,15 +149,20 @@ class Logits(estimator.Estimator):
self.loss = self.loss_op(logits=logits, labels=labels)
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(tf.get_collection(tf.GraphKeys.LOSSES))
loss_averages = tf.train.ExponentialMovingAverage(
0.9, name='avg')
loss_averages_op = loss_averages.apply(
tf.get_collection(tf.GraphKeys.LOSSES))
for l in tf.get_collection(tf.GraphKeys.LOSSES):
tf.summary.scalar(l.op.name+"_averaged", loss_averages.average(l))
tf.summary.scalar(l.op.name + "_averaged",
loss_averages.average(l))
global_step = tf.train.get_or_create_global_step()
train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
variable_averages_op, loss_averages_op)
train_op = tf.group(
self.optimizer.minimize(
self.loss, global_step=global_step),
variable_averages_op, loss_averages_op)
return tf.estimator.EstimatorSpec(
mode=mode, loss=self.loss, train_op=train_op)
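
For readers skimming the hunk above: the reformatted code keeps exponential moving averages of the trainable variables and of every loss in the LOSSES collection, then groups those update ops with the optimizer step so a single training op runs all of them. A condensed, self-contained TF 1.x sketch of the same pattern (the toy variable, loss, and optimizer are illustrative, not the project's code):

import tensorflow as tf

# Toy variable and loss so the sketch runs on its own.
x = tf.get_variable("x", shape=[], initializer=tf.zeros_initializer())
loss = tf.square(x - 1.0)
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

global_step = tf.train.get_or_create_global_step()

# Moving average of the trainable variables, decay schedule tied to the step.
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())

# Moving average of every collected loss, used for smoother summaries.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(tf.get_collection(tf.GraphKeys.LOSSES))

# One grouped op: the optimizer step plus both averaging updates.
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_op = tf.group(
    optimizer.minimize(loss, global_step=global_step),
    variable_averages_op, loss_averages_op)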
@@ -285,21 +292,19 @@ class LogitsCenterLoss(estimator.Estimator):
"""
def __init__(
self,
architecture=None,
optimizer=None,
config=None,
n_classes=0,
embedding_validation=False,
model_dir="",
alpha=0.9,
factor=0.01,
validation_batch_size=None,
params=None,
extra_checkpoint=None,
apply_moving_averages=True
):
def __init__(self,
architecture=None,
optimizer=None,
config=None,
n_classes=0,
embedding_validation=False,
model_dir="",
alpha=0.9,
factor=0.01,
validation_batch_size=None,
params=None,
extra_checkpoint=None,
apply_moving_averages=True):
self.architecture = architecture
self.optimizer = optimizer
@@ -344,8 +349,10 @@ class LogitsCenterLoss(estimator.Estimator):
# Compute the moving average of all individual losses and the total loss.
if apply_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())
variable_averages = tf.train.ExponentialMovingAverage(
0.9999, global_step)
variable_averages_op = variable_averages.apply(
tf.trainable_variables())
else:
variable_averages_op = tf.no_op(name='noop')
@@ -363,8 +370,10 @@ class LogitsCenterLoss(estimator.Estimator):
centers = loss_dict['centers']
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(tf.get_collection(tf.GraphKeys.LOSSES))
loss_averages = tf.train.ExponentialMovingAverage(
0.9, name='avg')
loss_averages_op = loss_averages.apply(
tf.get_collection(tf.GraphKeys.LOSSES))
for l in tf.get_collection(tf.GraphKeys.LOSSES):
tf.summary.scalar(l.op.name, loss_averages.average(l))
@@ -376,7 +385,8 @@ class LogitsCenterLoss(estimator.Estimator):
train_op = tf.group(
self.optimizer.minimize(
self.loss, global_step=global_step), centers, variable_averages_op, loss_averages_op)
self.loss, global_step=global_step), centers,
variable_averages_op, loss_averages_op)
return tf.estimator.EstimatorSpec(
mode=mode, loss=self.loss, train_op=train_op)
@@ -179,4 +179,3 @@ class Siamese(estimator.Estimator):
model_dir=model_dir,
params=params,
config=config)
@@ -172,4 +172,3 @@ class Triplet(estimator.Estimator):
super(Triplet, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
@@ -71,4 +71,3 @@ class MaxOut(base.Layer):
outputs.set_shape(shape)
return outputs
@@ -33,8 +33,9 @@ def mean_cross_entropy_loss(logits, labels, add_regularization_losses=True):
if add_regularization_losses:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_loss] + regularization_losses, name="total_loss")
total_loss = tf.add_n(
[cross_loss] + regularization_losses, name="total_loss")
return total_loss
else:
return cross_loss
@@ -68,15 +68,13 @@ def contrastive_loss(left_embedding,
loss = tf.reduce_mean(loss, name="total_loss_raw")
tf.summary.scalar('loss_raw', loss)
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
## Appending the regularization loss
#regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
#loss = tf.add_n([loss] + regularization_losses, name="total_loss")
tf.summary.scalar('loss', loss)
tf.summary.scalar('between_class', between_class_loss)
tf.summary.scalar('within_class', within_class_loss)
return loss
@@ -57,19 +57,19 @@ def triplet_loss(anchor_embedding,
with tf.name_scope("TripletLoss"):
# Between
between_class_loss = tf.reduce_mean(d_negative)
tf.summary.scalar('between_class', between_class_loss)
tf.summary.scalar('between_class', between_class_loss)
tf.add_to_collection(tf.GraphKeys.LOSSES, between_class_loss)
# Within
within_class_loss = tf.reduce_mean(d_positive)
tf.summary.scalar('within_class', within_class_loss)
tf.summary.scalar('within_class', within_class_loss)
tf.add_to_collection(tf.GraphKeys.LOSSES, within_class_loss)
# Total loss
loss = tf.reduce_mean(
tf.maximum(basic_loss, 0.0), 0, name="total_loss")
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
tf.summary.scalar('loss_raw', loss)
tf.summary.scalar('loss_raw', loss)
# Appending the regularization loss
#regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
@@ -107,4 +107,3 @@ def chopra(
end_points['fc1'] = graph
return graph, end_points
@@ -62,4 +62,3 @@ def dummy(inputs,
end_points[name] = graph
return graph, end_points
@@ -26,6 +26,7 @@ import tensorflow as tf
import tensorflow.contrib.slim as slim
from .utils import is_trainable
# Inception-Resnet-A
def block35(net,
scale=1.0,
@@ -254,15 +255,16 @@ def reduction_b(net, reuse=None, trainable_variables=True):
3)
return net
def inception_resnet_v1_batch_norm(inputs,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1',
mode=tf.estimator.ModeKeys.TRAIN,
trainable_variables=None,
weight_decay=1e-5,
**kwargs):
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1',
mode=tf.estimator.ModeKeys.TRAIN,
trainable_variables=None,
weight_decay=1e-5,
**kwargs):
"""
Creates the Inception Resnet V1 model applying batch norm to each
Convolutional and FullyConnected layer.
@@ -292,7 +294,6 @@ def inception_resnet_v1_batch_norm(inputs,
end_points: the set of end_points from the inception model.
"""
batch_norm_params = {
# Decay for the moving averages.
@@ -304,20 +305,22 @@ def inception_resnet_v1_batch_norm(inputs,
# Moving averages end up in the trainable variables collection
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
}
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(inputs,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1',
mode=mode,
trainable_variables=None,)
return inception_resnet_v1(
inputs,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1',
mode=mode,
trainable_variables=None,
)
def inception_resnet_v1(inputs,
@@ -327,7 +330,7 @@ def inception_resnet_v1(inputs,
scope='InceptionResnetV1',
mode=tf.estimator.ModeKeys.TRAIN,
trainable_variables=None,
**kwargs):
**kwargs):
"""
Creates the Inception Resnet V1 model.
@@ -363,7 +366,6 @@ def inception_resnet_v1(inputs,
[slim.batch_norm, slim.dropout],
is_training=(mode == tf.estimator.ModeKeys.TRAIN)):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
@@ -382,7 +384,7 @@ def inception_resnet_v1(inputs,
trainable=trainable,
reuse=reuse)
end_points[name] = net
# 147 x 147 x 32
name = "Conv2d_2a_3x3"
trainable = is_trainable(name, trainable_variables)
@@ -395,23 +397,18 @@ def inception_resnet_v1(inputs,
trainable=trainable,
reuse=reuse)
end_points[name] = net
# 147 x 147 x 64
name = "Conv2d_2b_3x3"
trainable = is_trainable(name, trainable_variables)
net = slim.conv2d(
net,
64,
3,
scope=name,
trainable=trainable,
reuse=reuse)
net, 64, 3, scope=name, trainable=trainable, reuse=reuse)
end_points[name] = net
# 73 x 73 x 64
net = slim.max_pool2d(
net, 3, stride=2, padding='VALID', scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
name = "Conv2d_3b_1x1"
trainable = is_trainable(name, trainable_variables)
@@ -437,7 +434,7 @@ def inception_resnet_v1(inputs,
trainable=trainable,
reuse=reuse)
end_points[name] = net
# 35 x 35 x 256
name = "Conv2d_4b_3x3"
trainable = is_trainable(name, trainable_variables)
@@ -495,9 +492,7 @@ def inception_resnet_v1(inputs,
trainable = is_trainable(name, trainable_variables)
with tf.variable_scope(name):
net = reduction_b(
net,
trainable_variables=trainable,
reuse=reuse)
net, trainable_variables=trainable, reuse=reuse)
end_points[name] = net
# 5 x Inception-Resnet-C
@@ -38,8 +38,7 @@ def model_fn(features, labels, mode, params, config):
else:
reuse = True
with tf.variable_scope('SimpleCNN', reuse=reuse):
net, _ = simplecnn_arch(
patches[:, i], mode, **simplecnn_kwargs)
net, _ = simplecnn_arch(patches[:, i], mode, **simplecnn_kwargs)
if i == 0:
simplecnn_embeddings = net
else:
@@ -44,8 +44,16 @@ from __future__ import print_function
import tensorflow as tf
def create_conv_layer(inputs, mode, data_format, endpoints, number, filters,
kernel_size, pool_size, pool_strides, skip_pool=False):
def create_conv_layer(inputs,
mode,
data_format,
endpoints,
number,
filters,
kernel_size,
pool_size,
pool_strides,
skip_pool=False):
bn_axis = 1 if data_format.lower() == 'channels_first' else 3
training = mode == tf.estimator.ModeKeys.TRAIN
@@ -74,8 +82,12 @@ def create_conv_layer(inputs, mode, data_format, endpoints, number, filters,
pool = bn_act
else:
pool = tf.layers.max_pooling2d(
inputs=bn_act, pool_size=pool_size, strides=pool_strides,
padding='same', data_format=data_format, name=name)
inputs=bn_act,
pool_size=pool_size,
strides=pool_strides,
padding='same',
data_format=data_format,
name=name)
endpoints[name] = pool
return pool
@@ -101,8 +113,11 @@ def create_dense_layer(inputs, mode, endpoints, number, units):
return bn_act
def base_architecture(input_layer, mode, data_format,
skip_first_two_pool=False, **kwargs):
def base_architecture(input_layer,
mode,
data_format,
skip_first_two_pool=False,
**kwargs):
training = mode == tf.estimator.ModeKeys.TRAIN
# Keep track of all the endpoints
endpoints = {}
@@ -110,37 +125,69 @@ def base_architecture(input_layer, mode, data_format,
# ======================
# Convolutional Layer #1
pool1 = create_conv_layer(
inputs=input_layer, mode=mode, data_format=data_format,
endpoints=endpoints, number=1, filters=50, kernel_size=(5, 5),
pool_size=(2, 2), pool_strides=2, skip_pool=skip_first_two_pool)
inputs=input_layer,
mode=mode,
data_format=data_format,
endpoints=endpoints,
number=1,
filters=50,
kernel_size=(5, 5),
pool_size=(2, 2),
pool_strides=2,
skip_pool=skip_first_two_pool)
# ======================
# Convolutional Layer #2
pool2 = create_conv_layer(
inputs=pool1, mode=mode, data_format=data_format,
endpoints=endpoints, number=2, filters=100, kernel_size=(3, 3),
pool_size=(2, 2), pool_strides=2, skip_pool=skip_first_two_pool)
inputs=pool1,
mode=mode,
data_format=data_format,
endpoints=endpoints,
number=2,
filters=100,
kernel_size=(3, 3),
pool_size=(2, 2),
pool_strides=2,
skip_pool=skip_first_two_pool)
# ======================
# Convolutional Layer #3
pool3 = create_conv_layer(
inputs=pool2, mode=mode, data_format=data_format,
endpoints=endpoints, number=3, filters=150, kernel_size=(3, 3),
pool_size=(3, 3), pool_strides=2)
inputs=pool2,
mode=mode,
data_format=data_format,
endpoints=endpoints,
number=3,
filters=150,
kernel_size=(3, 3),
pool_size=(3, 3),
pool_strides=2)
# ======================
# Convolutional Layer #4
pool4 = create_conv_layer(
inputs=pool3, mode=mode, data_format=data_format,
endpoints=endpoints, number=4, filters=200, kernel_size=(3, 3),
pool_size=(2, 2), pool_strides=2)
inputs=pool3,
mode=mode,
data_format=data_format,
endpoints=endpoints,
number=4,
filters=200,
kernel_size=(3, 3),
pool_size=(2, 2),
pool_strides=2)
# ======================
# Convolutional Layer #5
pool5 = create_conv_layer(
inputs=pool4, mode=mode, data_format=data_format,
endpoints=endpoints, number=5, filters=250, kernel_size=(3, 3),
pool_size=(2, 2), pool_strides=2)
inputs=pool4,
mode=mode,
data_format=data_format,
endpoints=endpoints,
number=5,
filters=250,
kernel_size=(3, 3),
pool_size=(2, 2),
pool_strides=2)
# ========================
# Flatten tensor into a batch of vectors
@@ -151,7 +198,10 @@ def base_architecture(input_layer, mode, data_format,
# ========================
# Fully Connected Layer #1
fc1 = create_dense_layer(
inputs=pool5_flat, mode=mode, endpoints=endpoints, number=1,
inputs=pool5_flat,
mode=mode,
endpoints=endpoints,
number=1,
units=1000)
# ========================
@@ -164,8 +214,7 @@ def base_architecture(input_layer, mode, data_format,
# ========================
# Fully Connected Layer #2
fc2 = create_dense_layer(
inputs=dropout, mode=mode, endpoints=endpoints, number=2,
units=400)
inputs=dropout, mode=mode, endpoints=endpoints, number=2, units=400)
return fc2, endpoints
@@ -179,11 +228,12 @@ def architecture(input_layer,
regularizer=None,
**kwargs):
with tf.variable_scope('PatchCNN', reuse=reuse,
regularizer=regularizer):
with tf.variable_scope('PatchCNN', reuse=reuse, regularizer=regularizer):
fc2, endpoints = base_architecture(
input_layer=input_layer, mode=mode, data_format=data_format,
input_layer=input_layer,
mode=mode,
data_format=data_format,
skip_first_two_pool=skip_first_two_pool)
# Logits layer
logits = tf.layers.dense(inputs=fc2, units=n_classes)
@@ -252,14 +302,12 @@ def model_fn(features, labels, mode, params=None, config=None):
staircase=staircase)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum)
learning_rate=learning_rate, momentum=momentum)
# for batch normalization statistics to be updated as well:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
loss=loss, global_step=global_step)
train_op = optimizer.minimize(loss=loss, global_step=global_step)
# Log accuracy and loss
with tf.name_scope('train_metrics'):
@@ -4,9 +4,17 @@ from .utils import is_trainable
from ..estimators import get_trainable_variables
def create_conv_layer(inputs, mode, data_format, endpoints, number, filters,
kernel_size, pool_size, pool_strides,
add_batch_norm=False, trainable_variables=None):
def create_conv_layer(inputs,
mode,
data_format,
endpoints,
number,
filters,
kernel_size,
pool_size,
pool_strides,
add_batch_norm=False,
trainable_variables=None):
bn_axis = 1 if data_format.lower() == 'channels_first' else 3
training = mode == tf.estimator.ModeKeys.TRAIN
@@ -42,15 +50,22 @@ def create_conv_layer(inputs, mode, data_format, endpoints, number, filters,
name = 'pool{}'.format(number)
pool = tf.layers.max_pooling2d(
inputs=bn_act, pool_size=pool_size, strides=pool_strides,
padding='same', data_format=data_format)
inputs=bn_act,
pool_size=pool_size,
strides=pool_strides,
padding='same',
data_format=data_format)
endpoints[name] = pool