Commit 74c00208 authored by Tiago Pereira's avatar Tiago Pereira

[tensorflow-1.1.x] Fixed variable reuse issues with TensorFlow 1.1.x

parent b1738e9b
@@ -81,7 +81,7 @@ class Chopra(object):
         self.device = device
         self.batch_norm = batch_norm

-    def __call__(self, inputs):
+    def __call__(self, inputs, reuse=False):
         slim = tf.contrib.slim

         with tf.device(self.device):
@@ -91,13 +91,14 @@ class Chopra(object):
             graph = slim.conv2d(inputs, self.conv1_output, self.conv1_kernel_size, activation_fn=tf.nn.relu,
                                 stride=1,
                                 weights_initializer=initializer,
-                                scope='conv1')
+                                scope='conv1',
+                                reuse=reuse)
             graph = slim.max_pool2d(graph, self.pooling1_size, scope='pool1')

             graph = slim.conv2d(graph, self.conv2_output, self.conv2_kernel_size, activation_fn=tf.nn.relu,
                                 stride=1,
                                 weights_initializer=initializer,
-                                scope='conv2')
+                                scope='conv2', reuse=reuse)
             graph = slim.max_pool2d(graph, self.pooling2_size, scope='pool2')

             graph = slim.flatten(graph, scope='flatten1')
@@ -105,5 +106,6 @@ class Chopra(object):
             graph = slim.fully_connected(graph, self.fc1_output,
                                          weights_initializer=initializer,
                                          activation_fn=None,
-                                         scope='fc1')
+                                         scope='fc1',
+                                         reuse=reuse)
         return graph
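
Note: under TensorFlow 1.1.x variable scoping, calling a slim layer a second time with the same scope raises a ValueError unless reuse=True is passed on that call; passing it is exactly what lets the branches of a Siamese or triplet network share one set of weights. A minimal self-contained sketch of the pattern (layer sizes and names are illustrative, not taken from this commit):

import tensorflow as tf

slim = tf.contrib.slim

def branch(inputs, reuse=False):
    # Same scopes on every call; reuse=True makes later calls bind to
    # the variables created by the first call instead of raising.
    net = slim.conv2d(inputs, 16, [3, 3], scope='conv1', reuse=reuse)
    net = slim.flatten(net, scope='flatten1')
    return slim.fully_connected(net, 10, scope='fc1', reuse=reuse)

left_pl = tf.placeholder(tf.float32, (None, 28, 28, 1))
right_pl = tf.placeholder(tf.float32, (None, 28, 28, 1))

left = branch(left_pl)                # creates conv1/* and fc1/*
right = branch(right_pl, reuse=True)  # reuses those same variables

# One set of parameters serves both branches:
assert len(tf.trainable_variables()) == 4  # conv1 W+b, fc1 W+b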
@@ -22,6 +22,7 @@ from docopt import docopt
 import imp

 import bob.learn.tensorflow

+
 def main():
     args = docopt(__doc__, version='Train Neural Net')
@@ -47,13 +48,13 @@ def main():

     if isinstance(trainer, bob.learn.tensorflow.trainers.SiameseTrainer):
         graph = dict()
         graph['left'] = config.architecture(input_pl['left'])
-        graph['right'] = config.architecture(input_pl['right'])
+        graph['right'] = config.architecture(input_pl['right'], reuse=True)
     elif isinstance(trainer, bob.learn.tensorflow.trainers.TripletTrainer):
         graph = dict()
         graph['anchor'] = config.architecture(input_pl['anchor'])
-        graph['positive'] = config.architecture(input_pl['positive'])
-        graph['negative'] = config.architecture(input_pl['negative'])
+        graph['positive'] = config.architecture(input_pl['positive'], reuse=True)
+        graph['negative'] = config.architecture(input_pl['negative'], reuse=True)
     else:
         graph = config.architecture(input_pl)
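
Note: the call order in the dispatch above matters: the first branch built creates the variables, and every later branch must pass reuse=True to bind to them. A hypothetical helper (not part of this commit) that captures the pattern for any number of branches:

def build_branches(architecture, placeholders, keys):
    # First branch creates the variables; the remaining branches
    # reuse them, so all branches share one set of weights.
    graph = dict()
    graph[keys[0]] = architecture(placeholders[keys[0]])
    for key in keys[1:]:
        graph[key] = architecture(placeholders[key], reuse=True)
    return graph

# e.g. graph = build_branches(config.architecture, input_pl,
#                             ['anchor', 'positive', 'negative'])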
......
-from bob.learn.tensorflow.datashuffler import SiameseMemory
+from bob.learn.tensorflow.datashuffler import SiameseMemory, ScaleFactor
 from bob.learn.tensorflow.network import Chopra
 from bob.learn.tensorflow.trainers import SiameseTrainer as Trainer
 from bob.learn.tensorflow.trainers import constant
@@ -18,7 +18,8 @@ train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
 train_data_shuffler = SiameseMemory(train_data, train_labels,
                                     input_shape=INPUT_SHAPE,
-                                    batch_size=BATCH_SIZE)
+                                    batch_size=BATCH_SIZE,
+                                    normalizer=ScaleFactor())

 ### ARCHITECTURE ###
 architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)

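
Note: normalizer=ScaleFactor() tells the data shuffler to rescale raw pixel values before batching. A rough stand-in for what a scale-factor normalizer does (the real class lives in bob.learn.tensorflow.datashuffler; the default factor used here is an assumption):

import numpy

class ScaleFactorSketch(object):
    # Hypothetical stand-in for ScaleFactor: multiply each sample
    # by a constant rescaling factor.
    def __init__(self, scale_factor=1. / 255.):  # assumed default
        self.scale_factor = scale_factor

    def __call__(self, x):
        return x * self.scale_factor

batch = numpy.random.randint(0, 256, (16, 28, 28, 1)).astype('float32')
print(ScaleFactorSketch()(batch).max() <= 1.0)  # pixels squashed into [0, 1]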
......
@@ -149,7 +149,7 @@ def test_siamesecnn_trainer():
     input_pl = train_data_shuffler("data")
     graph = dict()
     graph['left'] = architecture(input_pl['left'])
-    graph['right'] = architecture(input_pl['right'])
+    graph['right'] = architecture(input_pl['right'], reuse=True)

     trainer = SiameseTrainer(train_data_shuffler,
                              iterations=iterations,
@@ -196,8 +196,8 @@ def test_tripletcnn_trainer():
     input_pl = train_data_shuffler("data")
     graph = dict()
     graph['anchor'] = architecture(input_pl['anchor'])
-    graph['positive'] = architecture(input_pl['positive'])
-    graph['negative'] = architecture(input_pl['negative'])
+    graph['positive'] = architecture(input_pl['positive'], reuse=True)
+    graph['negative'] = architecture(input_pl['negative'], reuse=True)

     # One graph trainer
     trainer = TripletTrainer(train_data_shuffler,
......
@@ -29,7 +29,7 @@ iterations = 300
 seed = 10


-def scratch_network(input_pl):
+def scratch_network(input_pl, reuse=False):
     # Creating a random network
     slim = tf.contrib.slim
@@ -39,13 +39,13 @@ def scratch_network(input_pl):
     scratch = slim.conv2d(input_pl, 16, [3, 3], activation_fn=tf.nn.relu,
                           stride=1,
                           weights_initializer=initializer,
-                          scope='conv1')
+                          scope='conv1', reuse=reuse)
     scratch = slim.max_pool2d(scratch, kernel_size=[2, 2], scope='pool1')
     scratch = slim.flatten(scratch, scope='flatten1')

     scratch = slim.fully_connected(scratch, 10,
                                    weights_initializer=initializer,
                                    activation_fn=None,
-                                   scope='fc1')
+                                   scope='fc1', reuse=reuse)

     return scratch
@@ -134,8 +134,8 @@ def test_triplet_cnn_pretrained():
     input_pl = train_data_shuffler("data", from_queue=False)
     graph = dict()
     graph['anchor'] = scratch_network(input_pl['anchor'])
-    graph['positive'] = scratch_network(input_pl['positive'])
-    graph['negative'] = scratch_network(input_pl['negative'])
+    graph['positive'] = scratch_network(input_pl['positive'], reuse=True)
+    graph['negative'] = scratch_network(input_pl['negative'], reuse=True)

     # Loss for the softmax
     loss = TripletLoss(margin=4.)
@@ -204,7 +204,7 @@ def test_siamese_cnn_pretrained():
     input_pl = train_data_shuffler("data")
     graph = dict()
     graph['left'] = scratch_network(input_pl['left'])
-    graph['right'] = scratch_network(input_pl['right'])
+    graph['right'] = scratch_network(input_pl['right'], reuse=True)

     # Loss for the softmax
     loss = ContrastiveLoss(contrastive_margin=4.)
......
@@ -25,18 +25,18 @@ directory = "./temp/cnn_scratch"
 slim = tf.contrib.slim


-def scratch_network(train_data_shuffler):
+def scratch_network(train_data_shuffler, reuse=False):
     inputs = train_data_shuffler("data", from_queue=False)

     # Creating a random network
     initializer = tf.contrib.layers.xavier_initializer(seed=seed)

     graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1, scope='conv1',
-                        weights_initializer=initializer)
+                        weights_initializer=initializer, reuse=reuse)
     graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
     graph = slim.flatten(graph, scope='flatten1')
     graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1',
-                                 weights_initializer=initializer)
+                                 weights_initializer=initializer, reuse=reuse)

     return graph
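
Note: without the reuse flag, building the second branch is what fails under TensorFlow 1.1.x. A self-contained sketch of the failure mode these fixes address (illustrative only, not from the test suite):

import tensorflow as tf

slim = tf.contrib.slim

def net(inputs, reuse=False):
    return slim.fully_connected(inputs, 10, scope='fc1', reuse=reuse)

x = tf.placeholder(tf.float32, (None, 784))

net(x)  # first call creates fc1/weights and fc1/biases
try:
    net(x)  # second call, same scope, no reuse flag
except ValueError as e:
    print('as expected:', e)  # "Variable fc1/weights already exists ..."

net(x, reuse=True)  # shares fc1/weights instead of raising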
......