Commit 1de62edf authored by Amir MOHAMMADI

Changes to simplecnn and jointcnn archs

parent 05a066c5
Merge request !57: Updates to the logits estimator
@@ -4,13 +4,7 @@ import numpy as np
 import tensorflow as tf
 
 
-def model_fn(features, labels, mode, params, config):
-    """The model function for join face and patch PAD. The input to the model
-    is 160x160 faces."""
-    faces = features['data']
-    key = features['key']
-
-
+def architecture(faces, mode, **kwargs):
     # construct patches inside the model
     ksizes = strides = [1, 28, 28, 1]
     rates = [1, 1, 1, 1]
@@ -19,18 +13,12 @@ def model_fn(features, labels, mode, params, config):
     # n_blocks should be 25 for 160x160 faces
     patches = tf.reshape(patches, [-1, n_blocks, 28, 28, 3])
 
-    # organize the parameters
-    params = params or {}
-    learning_rate = params.get('learning_rate', 1e-4)
-    apply_moving_averages = params.get('apply_moving_averages', True)
-    n_classes = params.get('n_classes', 2)
-    add_histograms = params.get('add_histograms')
-
     simplecnn_kwargs = {
         'kernerl_size': (3, 3),
         'data_format': 'channels_last',
         'add_batch_norm': True,
     }
+    endpoints = {}
     # construct simplecnn from patches
     for i in range(n_blocks):
         if i == 0:
@@ -38,19 +26,43 @@ def model_fn(features, labels, mode, params, config):
         else:
             reuse = True
         with tf.variable_scope('SimpleCNN', reuse=reuse):
-            net, _ = simplecnn_arch(patches[:, i], mode, **simplecnn_kwargs)
+            net, temp = simplecnn_arch(patches[:, i], mode, **simplecnn_kwargs)
         if i == 0:
             simplecnn_embeddings = net
+            endpoints.update(temp)
         else:
             simplecnn_embeddings += net
 
     # average the embeddings of patches
     simplecnn_embeddings /= n_blocks
 
     # construct inception_resnet_v2 from faces
-    incresv2_embeddings, _ = inception_resnet_v2_batch_norm(faces, mode=mode)
+    incresv2_embeddings, temp = inception_resnet_v2_batch_norm(
+        faces, mode=mode)
+    endpoints.update(temp)
 
     embeddings = tf.concat([simplecnn_embeddings, incresv2_embeddings], 1)
+    endpoints['final_embeddings'] = embeddings
+
+    return embeddings, endpoints
+
+
+def model_fn(features, labels, mode, params, config):
+    """The model function for join face and patch PAD. The input to the model
+    is 160x160 faces."""
+
+    faces = features['data']
+    key = features['key']
+
+    # organize the parameters
+    params = params or {}
+    learning_rate = params.get('learning_rate', 1e-4)
+    apply_moving_averages = params.get('apply_moving_averages', True)
+    n_classes = params.get('n_classes', 2)
+    add_histograms = params.get('add_histograms')
+
+    embeddings, _ = architecture(faces, mode)
 
     # Logits layer
     logits = tf.layers.dense(inputs=embeddings, units=n_classes, name='logits')
...
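
The refactor above extracts the network body into architecture(faces, mode, **kwargs), which returns (embeddings, endpoints) so that model_fn only adds the logits layer on top. Two details worth noting: reuse=True after the first loop iteration means all 25 patches go through one shared set of SimpleCNN weights, and the final embedding concatenates the averaged patch embedding with the Inception-ResNet-v2 face embedding. A toy TF1 sketch of the sharing-and-averaging pattern (toy_arch is a hypothetical stand-in for simplecnn_arch):

import tensorflow as tf

def toy_arch(x, mode):
    # hypothetical stand-in for simplecnn_arch: returns (net, endpoints)
    net = tf.layers.dense(tf.layers.flatten(x), 8, name='fc')
    return net, {'fc': net}

patches = tf.placeholder(tf.float32, [None, 25, 28, 28, 3])
embeddings = 0.
for i in range(25):
    # reuse=True for i > 0 shares the SimpleCNN/fc variables across patches
    with tf.variable_scope('SimpleCNN', reuse=i > 0):
        net, _ = toy_arch(patches[:, i], tf.estimator.ModeKeys.PREDICT)
    embeddings += net
embeddings /= 25  # average the per-patch embeddings, as in the diff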
@@ -184,10 +184,11 @@ def model_fn(features, labels, mode, params=None, config=None):
     params = params or {}
     learning_rate = params.get('learning_rate', 1e-5)
     apply_moving_averages = params.get('apply_moving_averages', False)
-    extra_checkpoint = params.get('extra_checkpoint', None)
+    extra_checkpoint = params.get('extra_checkpoint')
     trainable_variables = get_trainable_variables(extra_checkpoint)
     loss_weights = params.get('loss_weights', 1.0)
-    add_histograms = params.get('add_histograms', None)
+    add_histograms = params.get('add_histograms')
+    nnet_optimizer = params.get('nnet_optimizer') or 'sgd'
 
     arch_kwargs = {
         'kernerl_size': params.get('kernerl_size', None),
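
Two small cleanups in this hunk: params.get('key', None) is redundant because get already defaults to None, and the new nnet_optimizer line uses `or 'sgd'`, which falls back to 'sgd' not only when the key is absent but also when it is explicitly set to None or an empty string. A short plain-Python illustration (hypothetical params dict):

params = {'nnet_optimizer': None}
# .get with a default does not catch an explicit None ...
assert params.get('nnet_optimizer', 'sgd') is None
# ... while `or` treats None (and '') as "use the default"
assert (params.get('nnet_optimizer') or 'sgd') == 'sgd'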
@@ -260,8 +261,12 @@ def model_fn(features, labels, mode, params=None, config=None):
     if mode == tf.estimator.ModeKeys.TRAIN:
-        optimizer = tf.train.GradientDescentOptimizer(
-            learning_rate=learning_rate)
+        if nnet_optimizer == 'sgd':
+            optimizer = tf.train.GradientDescentOptimizer(
+                learning_rate=learning_rate)
+        else:
+            optimizer = tf.train.AdamOptimizer(
+                learning_rate=learning_rate)
         train_op = tf.group(
             optimizer.minimize(loss, global_step=global_step),
             variable_averages_op, loss_averages_op)
...
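
The final hunk makes the training optimizer configurable: 'sgd' keeps the previous GradientDescentOptimizer behavior, and any other value selects Adam. A minimal sketch of the same switch factored into a helper (make_optimizer is a hypothetical name, not part of the commit):

import tensorflow as tf

def make_optimizer(nnet_optimizer, learning_rate):
    # 'sgd' preserves the old default; anything else selects Adam,
    # mirroring the if/else added in this commit
    if nnet_optimizer == 'sgd':
        return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    return tf.train.AdamOptimizer(learning_rate=learning_rate)

optimizer = make_optimizer('adam', learning_rate=1e-5)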