Commit 0c5f373a authored by Amir MOHAMMADI

disable slow tests for deprecated stuff

parent 89e4b78b
Merge request !79: Add keras-based models, add pixel-wise loss, other improvements
Pipeline #36815 canceled
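
Note on the mechanism being disabled: the tests touched by this commit are tagged with nose's attrib plugin, which is what normally allows a test run to skip them. A minimal sketch of that tagging follows (standard nose usage, not code from this repository):

from nose.plugins.attrib import attr

@attr('slow')
def test_expensive_thing():
    # Builds a large graph and trains for many steps; only runs when
    # slow tests are explicitly requested.
    assert True

# Tagged tests can be deselected on the command line, e.g.:
#   nosetests -a '!slow'
# This commit goes one step further and comments the slow tests out
# entirely, since the code they exercise is deprecated.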
@@ -6,97 +6,97 @@ from nose.plugins.attrib import attr
import tensorflow as tf
from bob.learn.tensorflow.network import inception_resnet_v2, inception_resnet_v2_batch_norm,\
    inception_resnet_v1, inception_resnet_v1_batch_norm,\
    vgg_19, vgg_16, mlp_with_batchnorm_and_dropout

# @attr('slow')
# def test_inceptionv2():
#     tf.reset_default_graph()
#     # Testing WITHOUT batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v2(inputs)
#     assert len(tf.trainable_variables()) == 490
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0
#     # Testing WITH batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v2_batch_norm(inputs)
#     assert len(tf.trainable_variables()) == 490, len(tf.trainable_variables())
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0

# @attr('slow')
# def test_inceptionv2_adaptation():
#     tf.reset_default_graph()
#     for n, trainable_variables in [
#         (490, None),
#         (0, []),
#         (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
#         (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
#              'Conv2d_2a_3x3_BN']),
#         (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
#              'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#         (1, ['Conv2d_1a_3x3_BN']),
#         (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
#         (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#     ]:
#         input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#         net, end_points = inception_resnet_v2_batch_norm(
#             input, trainable_variables=trainable_variables)
#         l = len(tf.trainable_variables())
#         assert l == n, (l, n)
#         tf.reset_default_graph()
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0

# @attr('slow')
# def test_inceptionv1():
#     tf.reset_default_graph()
#     # Testing WITHOUT batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v1(inputs)
#     assert len(tf.trainable_variables()) == 266
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0
#     # Testing WITH batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v1_batch_norm(inputs)
#     assert len(tf.trainable_variables()) == 266
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0

# @attr('slow')
# def test_inceptionv1_adaptation():
#     tf.reset_default_graph()
#     for n, trainable_variables in [
#         (266, None),
#         (0, []),
#         (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
#         (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
#              'Conv2d_2a_3x3_BN']),
#         (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
#              'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#         (1, ['Conv2d_1a_3x3_BN']),
#         (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
#         (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#     ]:
#         input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#         net, end_points = inception_resnet_v1_batch_norm(
#             input, trainable_variables=trainable_variables)
#         l = len(tf.trainable_variables())
#         assert l == n, (l, n)
#         tf.reset_default_graph()
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0

def test_vgg():

@@ -149,4 +149,4 @@ def test_mlp():
    inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
    graph, _ = mlp_with_batchnorm_and_dropout(inputs, [6, 5], mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0
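
The disabled adaptation tests above all follow the same pattern: build the network with a trainable_variables filter, count what the graph registered as trainable, then reset. A minimal sketch of that pattern in TF1 graph mode (scope names and shapes below are made up for illustration, they are not the Inception scopes):

import tensorflow as tf

tf.reset_default_graph()
with tf.variable_scope("first_conv"):
    tf.get_variable("weights", shape=(3, 3, 1, 32), trainable=True)
with tf.variable_scope("frozen_block"):
    tf.get_variable("weights", shape=(3, 3, 32, 32), trainable=False)

# Only variables created with trainable=True end up in this collection,
# which is how the tests above verified that the requested layers were
# left trainable and everything else was frozen.
assert len(tf.trainable_variables()) == 1
tf.reset_default_graph()
assert len(tf.global_variables()) == 0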
@@ -37,126 +37,126 @@ epochs = 6
steps = 5000
reproducible.set_seed()

# @attr('slow')
# def test_logitstrainer():
#     # Trainer logits
#     try:
#         embedding_validation = False
#         _, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(
#             keep_checkpoint_max=10, save_checkpoints_steps=100, save_checkpoints_secs=None)
#         trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             config=run_config)
#         run_logitstrainer_mnist(trainer, augmentation=True)
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass

# @attr('slow')
# def test_logitstrainer_embedding():
#     try:
#         embedding_validation = True
#         _, run_config, _, _, _ = reproducible.set_seed()
#         trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             config=run_config)
#         run_logitstrainer_mnist(trainer)
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass

# @attr('slow')
# def test_logitstrainer_centerloss():
#     try:
#         embedding_validation = False
#         _, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(save_checkpoints_steps=1000)
#         trainer = LogitsCenterLoss(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             factor=0.01,
#             config=run_config)
#         run_logitstrainer_mnist(trainer)
#         # Checking if the centers were updated
#         sess = tf.Session()
#         checkpoint_path = tf.train.get_checkpoint_state(
#             model_dir).model_checkpoint_path
#         saver = tf.train.import_meta_graph(
#             checkpoint_path + ".meta", clear_devices=True)
#         saver.restore(sess, tf.train.latest_checkpoint(model_dir))
#         centers = tf.get_collection(
#             tf.GraphKeys.GLOBAL_VARIABLES, scope="center_loss/centers:0")[0]
#         assert numpy.sum(numpy.abs(centers.eval(sess))) > 0.0
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass

# @attr('slow')
# def test_logitstrainer_centerloss_embedding():
#     try:
#         embedding_validation = True
#         _, run_config, _, _, _ = reproducible.set_seed()
#         trainer = LogitsCenterLoss(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             factor=0.01,
#             config=run_config)
#         run_logitstrainer_mnist(trainer)
#         # Checking if the centers were updated
#         sess = tf.Session()
#         checkpoint_path = tf.train.get_checkpoint_state(
#             model_dir).model_checkpoint_path
#         saver = tf.train.import_meta_graph(
#             checkpoint_path + ".meta", clear_devices=True)
#         saver.restore(sess, tf.train.latest_checkpoint(model_dir))
#         centers = tf.get_collection(
#             tf.GraphKeys.GLOBAL_VARIABLES, scope="center_loss/centers:0")[0]
#         assert numpy.sum(numpy.abs(centers.eval(sess))) > 0.0
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass

def run_logitstrainer_mnist(trainer, augmentation=False):

@@ -220,117 +220,117 @@ def run_logitstrainer_mnist(trainer, augmentation=False):
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

# @attr('slow')
# def test_moving_average_trainer():
#     # define a fixed input data
#     # train the same network with the same initialization
#     # evaluate it
#     # train and evaluate it again with moving average
#     # Accuracy should be lower when moving average is on
#     try:
#         # Creating tf records for mnist
#         train_data, train_labels, validation_data, validation_labels = load_mnist()
#         create_mnist_tfrecord(
#             tfrecord_train, train_data, train_labels, n_samples=6000)
#         create_mnist_tfrecord(
#             tfrecord_validation,
#             validation_data,
#             validation_labels,
#             n_samples=validation_batch_size)
#         def input_fn():
#             return batch_data_and_labels(
#                 tfrecord_train,
#                 data_shape,
#                 data_type,
#                 batch_size,
#                 epochs=1)
#         def input_fn_validation():
#             return batch_data_and_labels(
#                 tfrecord_validation,
#                 data_shape,
#                 data_type,
#                 validation_batch_size,
#                 epochs=1)
#         from bob.learn.tensorflow.network.Dummy import dummy as architecture
#         run_config = reproducible.set_seed(183, 183)[1]
#         run_config = run_config.replace(save_checkpoints_steps=2000)
#         def _estimator(apply_moving_averages):
#             return Logits(
#                 architecture,
#                 tf.train.GradientDescentOptimizer(1e-1),
#                 tf.losses.sparse_softmax_cross_entropy,
#                 10,
#                 model_dir=model_dir,
#                 config=run_config,
#                 apply_moving_averages=apply_moving_averages,
#             )
#         def _evaluate(estimator, delete=True):
#             try:
#                 estimator.train(input_fn)
#                 evaluations = estimator.evaluate(input_fn_validation)
#             finally:
#                 if delete:
#                     shutil.rmtree(estimator.model_dir, ignore_errors=True)
#             return evaluations
#         estimator = _estimator(False)
#         evaluations = _evaluate(estimator, delete=True)
#         no_moving_average_acc = evaluations['accuracy']
#         # same as above with moving average
#         estimator = _estimator(True)
#         evaluations = _evaluate(estimator, delete=False)
#         with_moving_average_acc = evaluations['accuracy']
#         assert no_moving_average_acc > with_moving_average_acc, \
#             (no_moving_average_acc, with_moving_average_acc)
#         # Can it resume training?
#         del estimator
#         tf.reset_default_graph()
#         estimator = _estimator(True)
#         _evaluate(estimator, delete=True)
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass

# @attr('slow')
# def test_saver_with_moving_average():
#     try:
#         _, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(
#             keep_checkpoint_max=10, save_checkpoints_steps=100,
#             save_checkpoints_secs=None)
#         estimator = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=False,
#             validation_batch_size=validation_batch_size,
#             config=run_config)
#         run_logitstrainer_mnist(estimator, augmentation=True)
#         ckpt = tf.train.get_checkpoint_state(estimator.model_dir)
#         assert ckpt, "Failed to get any checkpoint!"
#         assert len(
#             ckpt.all_model_checkpoint_paths) == 10, ckpt.all_model_checkpoint_paths
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#         except Exception:
#             pass
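
The two moving-average tests disabled above compare training with and without apply_moving_averages. In TensorFlow 1.x this kind of option is typically backed by tf.train.ExponentialMovingAverage; a minimal sketch of the idea follows (the decay value and the variable are illustrative, not taken from the Logits estimator):

import tensorflow as tf

w = tf.Variable(1.0, name="w")
opt_step = w.assign_sub(0.1)  # stand-in for an optimizer update

ema = tf.train.ExponentialMovingAverage(decay=0.99)
with tf.control_dependencies([opt_step]):
    train_op = ema.apply([w])  # also maintains a shadow copy of w

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)
    # The shadow value lags behind the raw weight; evaluating with the
    # shadow copies is what apply_moving_averages=True switches on.
    print(sess.run([w, ema.average(w)]))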
@@ -79,141 +79,141 @@ filenames = [
]
labels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]

# @attr('slow')
# def test_siamesetrainer():
#     # Trainer logits
#     try:
#         # Setting seed
#         session_config, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(save_checkpoints_steps=500)
#         trainer = Siamese(
#             model_dir=model_dir,
#             architecture=dummy,
#             config=run_config,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             loss_op=contrastive_loss,
#             validation_batch_size=validation_batch_size)
#         run_siamesetrainer(trainer)
#     finally:
#         try:
#             shutil.rmtree(model_dir, ignore_errors=True)
#             # pass
#         except Exception:
#             pass

# @attr('slow')
# def test_siamesetrainer_transfer():
#     def logits_input_fn():
#         return single_batch(
#             filenames,
#             labels,
#             data_shape,
#             data_type,
#             batch_size,
#             epochs=epochs,
#             output_shape=output_shape)
#     # Trainer logits first than siamese
#     try:
#         # Setting seed
#         session_config, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(save_checkpoints_steps=500)
#         extra_checkpoint = {
#             "checkpoint_path": model_dir,
#             "scopes": dict({
#                 "Dummy/": "Dummy/"
#             }),
#             "trainable_variables": []
#         }
#         # LOGISTS
#         logits_trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=2,
#             config=run_config,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=False,
#             validation_batch_size=validation_batch_size)
#         logits_trainer.train(logits_input_fn, steps=steps)
#         # NOW THE FUCKING SIAMESE
#         trainer = Siamese(
#             model_dir=model_dir_adapted,
#             architecture=dummy_adapted,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             config=run_config,
#             loss_op=contrastive_loss,
#             validation_batch_size=validation_batch_size,
#             extra_checkpoint=extra_checkpoint)
#         run_siamesetrainer(trainer)
#     finally:
#         try:
#             shutil.rmtree(model_dir, ignore_errors=True)
#             shutil.rmtree(model_dir_adapted, ignore_errors=True)
#         except Exception:
#             pass

# @attr('slow')
# def test_siamesetrainer_transfer_extraparams():
#     def logits_input_fn():
#         return single_batch(
#             filenames,
#             labels,
#             data_shape,
#             data_type,
#             batch_size,
#             epochs=epochs,
#             output_shape=output_shape)
#     # Trainer logits first than siamese
#     try:
#         extra_checkpoint = {
#             "checkpoint_path": model_dir,
#             "scopes": dict({
#                 "Dummy/": "Dummy/"
#             }),
#             "trainable_variables": ["Dummy"]
#         }
#         # Setting seed
#         session_config, run_config, _, _, _ = reproducible.set_seed()
#         run_config = run_config.replace(save_checkpoints_steps=500)
#         # LOGISTS
#         logits_trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=2,
#             config=run_config,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=False,
#             validation_batch_size=validation_batch_size)
#         logits_trainer.train(logits_input_fn, steps=steps)
#         # NOW THE FUCKING SIAMESE
#         trainer = Siamese(
#             model_dir=model_dir_adapted,
#             architecture=dummy_adapted,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             loss_op=contrastive_loss,
#             config=run_config,
#             validation_batch_size=validation_batch_size,
#             extra_checkpoint=extra_checkpoint)
#         run_siamesetrainer(trainer)
#     finally:
#         try:
#             shutil.rmtree(model_dir, ignore_errors=True)
#             shutil.rmtree(model_dir_adapted, ignore_errors=True)
#         except Exception:
#             pass

def run_siamesetrainer(trainer):
...
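
For reference, the Siamese tests disabled above train with contrastive_loss. A textbook contrastive loss has roughly the shape sketched below; this is the standard margin-based formulation, not necessarily the exact implementation in bob.learn.tensorflow:

import tensorflow as tf

def contrastive_loss_sketch(left, right, labels, margin=1.0):
    # labels: 1.0 for genuine (same-class) pairs, 0.0 for impostor pairs.
    distances = tf.sqrt(tf.reduce_sum(tf.square(left - right), axis=1) + 1e-9)
    genuine_term = labels * tf.square(distances)
    impostor_term = (1.0 - labels) * tf.square(tf.maximum(margin - distances, 0.0))
    return tf.reduce_mean(genuine_term + impostor_term) / 2.0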
@@ -42,7 +42,7 @@ def dummy_adapted(inputs,
    Parameters
    ----------
    inputs:
    reuse:
    mode:

@@ -81,101 +81,101 @@ def dummy_adapted(inputs,
    return graph, end_points

# @attr('slow')
# def test_logitstrainer():
#     # Trainer logits
#     try:
#         _, run_config, _, _, _ = reproducible.set_seed()
#         embedding_validation = False
#         trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             config=run_config)
#         run_logitstrainer_mnist(trainer, augmentation=True)
#         del trainer
#         ## Again
#         extra_checkpoint = {
#             "checkpoint_path": "./temp",
#             "scopes": dict({
#                 "Dummy/": "Dummy/"
#             }),
#             "trainable_variables": []
#         }
#         trainer = Logits(
#             model_dir=model_dir_adapted,
#             architecture=dummy_adapted,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             extra_checkpoint=extra_checkpoint,
#             config=run_config)
#         run_logitstrainer_mnist(trainer, augmentation=True)
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#             shutil.rmtree(model_dir_adapted, ignore_errors=True)
#             pass
#         except Exception:
#             pass

# @attr('slow')
# def test_logitstrainer_center_loss():
#     # Trainer logits
#     try:
#         embedding_validation = False
#         _, run_config, _, _, _ = reproducible.set_seed()
#         trainer = LogitsCenterLoss(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             apply_moving_averages=False,
#             config=run_config)
#         run_logitstrainer_mnist(trainer, augmentation=True)
#         del trainer
#         ## Again
#         extra_checkpoint = {
#             "checkpoint_path": "./temp",
#             "scopes": dict({
#                 "Dummy/": "Dummy/"
#             }),
#             "trainable_variables": ["Dummy"]
#         }
#         trainer = LogitsCenterLoss(
#             model_dir=model_dir_adapted,
#             architecture=dummy_adapted,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=10,
#             embedding_validation=embedding_validation,
#             validation_batch_size=validation_batch_size,
#             extra_checkpoint=extra_checkpoint,
#             apply_moving_averages=False,
#             config=run_config)
#         run_logitstrainer_mnist(trainer, augmentation=True)
#     finally:
#         try:
#             os.unlink(tfrecord_train)
#             os.unlink(tfrecord_validation)
#             shutil.rmtree(model_dir, ignore_errors=True)
#             shutil.rmtree(model_dir_adapted, ignore_errors=True)
#         except Exception:
#             pass
@@ -77,73 +77,73 @@ filenames = [
]
labels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]

# @attr('slow')
# def test_triplet_estimator():
#     # Trainer logits
#     try:
#         trainer = Triplet(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             loss_op=triplet_loss,
#             validation_batch_size=validation_batch_size)
#         run_triplet_estimator(trainer)
#     finally:
#         try:
#             shutil.rmtree(model_dir, ignore_errors=True)
#             # pass
#         except Exception:
#             pass

# @attr('slow')
# def test_triplettrainer_transfer():
#     def logits_input_fn():
#         return single_batch(
#             filenames,
#             labels,
#             data_shape,
#             data_type,
#             batch_size,
#             epochs=epochs,
#             output_shape=output_shape)
#     # Trainer logits first than siamese
#     try:
#         extra_checkpoint = {
#             "checkpoint_path": model_dir,
#             "scopes": dict({
#                 "Dummy/": "Dummy/"
#             }),
#             "trainable_variables": []
#         }
#         # LOGISTS
#         logits_trainer = Logits(
#             model_dir=model_dir,
#             architecture=dummy,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             n_classes=2,
#             loss_op=mean_cross_entropy_loss,
#             embedding_validation=False,
#             validation_batch_size=validation_batch_size)
#         logits_trainer.train(logits_input_fn, steps=steps)
#         # NOW THE FUCKING SIAMESE
#         trainer = Triplet(
#             model_dir=model_dir_adapted,
#             architecture=dummy_adapted,
#             optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#             loss_op=triplet_loss,
#             validation_batch_size=validation_batch_size,
#             extra_checkpoint=extra_checkpoint)
#         run_triplet_estimator(trainer)
#     finally:
#         try:
#             shutil.rmtree(model_dir, ignore_errors=True)
#             shutil.rmtree(model_dir_adapted, ignore_errors=True)
#         except Exception:
#             pass

def run_triplet_estimator(trainer):
...
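
Similarly, the disabled triplet tests rely on triplet_loss; the usual FaceNet-style margin formulation is sketched below as a reference point (the function and parameter names are illustrative, not the library's API):

import tensorflow as tf

def triplet_loss_sketch(anchor, positive, negative, margin=0.2):
    # Pull the anchor towards the positive and push it away from the
    # negative until the margin is satisfied.
    d_pos = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    d_neg = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    return tf.reduce_mean(tf.maximum(d_pos - d_neg + margin, 0.0))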
@@ -4,60 +4,60 @@ from tensorflow import keras
import tensorflow as tf
import tensorflow.contrib.slim as slim

# @attr('slow')
# def test_regressor():
#     boston_housing = keras.datasets.boston_housing
#     (train_data, train_labels), (test_data,
#      test_labels) = boston_housing.load_data()
#     mean = train_data.mean(axis=0)
#     std = train_data.std(axis=0)
#     train_data = (train_data - mean) / std
#     test_data = (test_data - mean) / std
#     def input_fn(mode):
#         if mode == tf.estimator.ModeKeys.TRAIN:
#             features, labels = train_data, train_labels
#         else:
#             features, labels, = test_data, test_labels
#         dataset = tf.data.Dataset.from_tensor_slices(
#             (features, labels, [str(x) for x in labels]))
#         dataset = dataset.batch(1)
#         if mode == tf.estimator.ModeKeys.TRAIN:
#             dataset = dataset.apply(
#                 tf.contrib.data.shuffle_and_repeat(len(labels), 2))
#         data, label, key = dataset.make_one_shot_iterator().get_next()
#         return {'data': data, 'key': key}, label
#     def train_input_fn():
#         return input_fn(tf.estimator.ModeKeys.TRAIN)
#     def eval_input_fn():
#         return input_fn(tf.estimator.ModeKeys.EVAL)
#     def architecture(data, mode, **kwargs):
#         endpoints = {}
#         with tf.variable_scope('DNN'):
#             name = 'fc1'
#             net = slim.fully_connected(data, 64, scope=name)
#             endpoints[name] = net
#             name = 'fc2'
#             net = slim.fully_connected(net, 64, scope=name)
#             endpoints[name] = net
#         return net, endpoints
#     estimator = Regressor(architecture)
#     estimator.train(train_input_fn)
#     list(estimator.predict(eval_input_fn))
#     evaluations = estimator.evaluate(eval_input_fn)
#     assert 'rmse' in evaluations
#     assert 'loss' in evaluations
@@ -25,7 +25,7 @@ CONFIG = '''
from bob.learn.tensorflow.network import dummy
architecture = dummy
import pkg_resources
checkpoint_dir = "./temp/"
style_end_points = ["conv1"]

@@ -47,34 +47,34 @@ batch_size = 32
epochs = 1
steps = 100

# @attr('slow')
# def test_style_transfer():
#     with open(dummy_config, 'w') as f:
#         f.write(CONFIG)
#     # Trainer logits
#     # CREATING FAKE MODEL USING MNIST
#     _, run_config,_,_,_ = reproducible.set_seed()
#     trainer = Logits(
#         model_dir=model_dir,
#         architecture=dummy,
#         optimizer=tf.train.GradientDescentOptimizer(learning_rate),
#         n_classes=10,
#         loss_op=mean_cross_entropy_loss,
#         config=run_config)
#     run_logitstrainer_mnist(trainer)
#     # Style transfer using this fake model
#     runner = CliRunner()
#     result = runner.invoke(style_transfer,
#                            args=[pkg_resources.resource_filename( __name__, 'data/dummy_image_database/m301_01_p01_i0_0_GRAY.png'),
#                                  output_style_image, dummy_config])
#     try:
#         os.unlink(dummy_config)
#         shutil.rmtree(model_dir, ignore_errors=True)
#     except Exception:
#         pass