Commit 0c5f373a authored by Amir MOHAMMADI

disable slow tests for deprecated stuff

parent 89e4b78b
Pipeline #36815 canceled with stage in 28 minutes and 52 seconds
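This commit disables the slow tests for deprecated code by commenting them out wholesale. The tests were already tagged with nose's @attr('slow') decorator (imported at the top of each test module), so, as a hedged sketch of how the nose attrib plugin is normally used rather than a statement of how this project runs its CI, they could also have been deselected at run time:

# Sketch only: deselecting slow-tagged tests with the nose attrib plugin
# that these test modules already import. The test name is hypothetical.
from nose.plugins.attrib import attr

@attr('slow')
def test_expensive_network():
    ...  # builds a large graph; skipped when slow tests are excluded

# From the command line, tests carrying the 'slow' attribute can be skipped with:
#   nosetests -a '!slow' bob.learn.tensorflow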
@@ -6,97 +6,97 @@ from nose.plugins.attrib import attr
import tensorflow as tf
from bob.learn.tensorflow.network import inception_resnet_v2, inception_resnet_v2_batch_norm,\
inception_resnet_v1, inception_resnet_v1_batch_norm,\
vgg_19, vgg_16, mlp_with_batchnorm_and_dropout
@attr('slow')
def test_inceptionv2():
tf.reset_default_graph()
# Testing WITHOUT batch norm
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
graph, _ = inception_resnet_v2(inputs)
assert len(tf.trainable_variables()) == 490
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
# Testing WITH batch norm
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
graph, _ = inception_resnet_v2_batch_norm(inputs)
assert len(tf.trainable_variables()) == 490, len(tf.trainable_variables())
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
@attr('slow')
def test_inceptionv2_adaptation():
tf.reset_default_graph()
for n, trainable_variables in [
(490, None),
(0, []),
(2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
(4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
'Conv2d_2a_3x3_BN']),
(6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
(1, ['Conv2d_1a_3x3_BN']),
(2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
(3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
]:
input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
net, end_points = inception_resnet_v2_batch_norm(
input, trainable_variables=trainable_variables)
l = len(tf.trainable_variables())
assert l == n, (l, n)
tf.reset_default_graph()
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
@attr('slow')
def test_inceptionv1():
tf.reset_default_graph()
# Testing WITHOUT batch norm
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
graph, _ = inception_resnet_v1(inputs)
assert len(tf.trainable_variables()) == 266
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
# Testing WITH batch norm
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
graph, _ = inception_resnet_v1_batch_norm(inputs)
assert len(tf.trainable_variables()) == 266
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
@attr('slow')
def test_inceptionv1_adaptation():
tf.reset_default_graph()
for n, trainable_variables in [
(266, None),
(0, []),
(2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
(4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
'Conv2d_2a_3x3_BN']),
(6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
(1, ['Conv2d_1a_3x3_BN']),
(2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
(3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
]:
input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
net, end_points = inception_resnet_v1_batch_norm(
input, trainable_variables=trainable_variables)
l = len(tf.trainable_variables())
assert l == n, (l, n)
tf.reset_default_graph()
tf.reset_default_graph()
assert len(tf.global_variables()) == 0
vgg_19, vgg_16, mlp_with_batchnorm_and_dropout
# @attr('slow')
# def test_inceptionv2():
# tf.reset_default_graph()
# # Testing WITHOUT batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v2(inputs)
# assert len(tf.trainable_variables()) == 490
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# # Testing WITH batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v2_batch_norm(inputs)
# assert len(tf.trainable_variables()) == 490, len(tf.trainable_variables())
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv2_adaptation():
# tf.reset_default_graph()
# for n, trainable_variables in [
# (490, None),
# (0, []),
# (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
# (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
# 'Conv2d_2a_3x3_BN']),
# (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
# 'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# (1, ['Conv2d_1a_3x3_BN']),
# (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
# (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# ]:
# input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# net, end_points = inception_resnet_v2_batch_norm(
# input, trainable_variables=trainable_variables)
# l = len(tf.trainable_variables())
# assert l == n, (l, n)
# tf.reset_default_graph()
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv1():
# tf.reset_default_graph()
# # Testing WITHOUT batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v1(inputs)
# assert len(tf.trainable_variables()) == 266
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# # Testing WITH batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v1_batch_norm(inputs)
# assert len(tf.trainable_variables()) == 266
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv1_adaptation():
# tf.reset_default_graph()
# for n, trainable_variables in [
# (266, None),
# (0, []),
# (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
# (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
# 'Conv2d_2a_3x3_BN']),
# (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
# 'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# (1, ['Conv2d_1a_3x3_BN']),
# (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
# (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# ]:
# input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# net, end_points = inception_resnet_v1_batch_norm(
# input, trainable_variables=trainable_variables)
# l = len(tf.trainable_variables())
# assert l == n, (l, n)
# tf.reset_default_graph()
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
def test_vgg():
@@ -149,4 +149,4 @@ def test_mlp():
inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
graph, _ = mlp_with_batchnorm_and_dropout(inputs, [6, 5], mode=tf.estimator.ModeKeys.PREDICT)
assert len(tf.trainable_variables()) == 0
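The disabled network tests above all follow the same pattern: build the network once, count tf.trainable_variables() to check which scopes the trainable_variables argument left trainable, then reset the default graph so the next case starts from a clean slate. A minimal sketch of that pattern under the TF 1.x API; the helper name count_trainables is made up for illustration, and the expected count of 2 is taken from one of the disabled cases:

import tensorflow as tf  # TF 1.x API, as in the tests above
from bob.learn.tensorflow.network import inception_resnet_v2_batch_norm

def count_trainables(trainable_variables, expected):
    # Build the network with only the requested scopes trainable.
    inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
    inception_resnet_v2_batch_norm(inputs, trainable_variables=trainable_variables)
    # Check how many variables ended up trainable.
    n = len(tf.trainable_variables())
    assert n == expected, (n, expected)
    # Reset so the next case starts from an empty graph.
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

# Case copied from the disabled test: only the first conv layer and its
# batch-norm parameters remain trainable, giving two trainable variables.
count_trainables(['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN'], expected=2)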
@@ -79,141 +79,141 @@ filenames = [
]
labels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
@attr('slow')
def test_siamesetrainer():
# Trainer logits
try:
# Setting seed
session_config, run_config, _, _, _ = reproducible.set_seed()
run_config = run_config.replace(save_checkpoints_steps=500)
trainer = Siamese(
model_dir=model_dir,
architecture=dummy,
config=run_config,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
loss_op=contrastive_loss,
validation_batch_size=validation_batch_size)
run_siamesetrainer(trainer)
finally:
try:
shutil.rmtree(model_dir, ignore_errors=True)
# pass
except Exception:
pass
@attr('slow')
def test_siamesetrainer_transfer():
def logits_input_fn():
return single_batch(
filenames,
labels,
data_shape,
data_type,
batch_size,
epochs=epochs,
output_shape=output_shape)
# Trainer logits first than siamese
try:
# Setting seed
session_config, run_config, _, _, _ = reproducible.set_seed()
run_config = run_config.replace(save_checkpoints_steps=500)
extra_checkpoint = {
"checkpoint_path": model_dir,
"scopes": dict({
"Dummy/": "Dummy/"
}),
"trainable_variables": []
}
# LOGISTS
logits_trainer = Logits(
model_dir=model_dir,
architecture=dummy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=2,
config=run_config,
loss_op=mean_cross_entropy_loss,
embedding_validation=False,
validation_batch_size=validation_batch_size)
logits_trainer.train(logits_input_fn, steps=steps)
# NOW THE FUCKING SIAMESE
trainer = Siamese(
model_dir=model_dir_adapted,
architecture=dummy_adapted,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
config=run_config,
loss_op=contrastive_loss,
validation_batch_size=validation_batch_size,
extra_checkpoint=extra_checkpoint)
run_siamesetrainer(trainer)
finally:
try:
shutil.rmtree(model_dir, ignore_errors=True)
shutil.rmtree(model_dir_adapted, ignore_errors=True)
except Exception:
pass
@attr('slow')
def test_siamesetrainer_transfer_extraparams():
def logits_input_fn():
return single_batch(
filenames,
labels,
data_shape,
data_type,
batch_size,
epochs=epochs,
output_shape=output_shape)
# Trainer logits first than siamese
try:
extra_checkpoint = {
"checkpoint_path": model_dir,
"scopes": dict({
"Dummy/": "Dummy/"
}),
"trainable_variables": ["Dummy"]
}
# Setting seed
session_config, run_config, _, _, _ = reproducible.set_seed()
run_config = run_config.replace(save_checkpoints_steps=500)
# LOGISTS
logits_trainer = Logits(
model_dir=model_dir,
architecture=dummy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=2,
config=run_config,
loss_op=mean_cross_entropy_loss,
embedding_validation=False,
validation_batch_size=validation_batch_size)
logits_trainer.train(logits_input_fn, steps=steps)
# NOW THE FUCKING SIAMESE
trainer = Siamese(
model_dir=model_dir_adapted,
architecture=dummy_adapted,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
loss_op=contrastive_loss,
config=run_config,
validation_batch_size=validation_batch_size,
extra_checkpoint=extra_checkpoint)
run_siamesetrainer(trainer)
finally:
try:
shutil.rmtree(model_dir, ignore_errors=True)
shutil.rmtree(model_dir_adapted, ignore_errors=True)
except Exception:
pass
# @attr('slow')
# def test_siamesetrainer():
# # Trainer logits
# try:
# # Setting seed
# session_config, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(save_checkpoints_steps=500)
# trainer = Siamese(
# model_dir=model_dir,
# architecture=dummy,
# config=run_config,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# loss_op=contrastive_loss,
# validation_batch_size=validation_batch_size)
# run_siamesetrainer(trainer)
# finally:
# try:
# shutil.rmtree(model_dir, ignore_errors=True)
# # pass
# except Exception:
# pass
# @attr('slow')
# def test_siamesetrainer_transfer():
# def logits_input_fn():
# return single_batch(
# filenames,
# labels,
# data_shape,
# data_type,
# batch_size,
# epochs=epochs,
# output_shape=output_shape)
# # Trainer logits first than siamese
# try:
# # Setting seed
# session_config, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(save_checkpoints_steps=500)
# extra_checkpoint = {
# "checkpoint_path": model_dir,
# "scopes": dict({
# "Dummy/": "Dummy/"
# }),
# "trainable_variables": []
# }
# # LOGISTS
# logits_trainer = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=2,
# config=run_config,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=False,
# validation_batch_size=validation_batch_size)
# logits_trainer.train(logits_input_fn, steps=steps)
# # NOW THE FUCKING SIAMESE
# trainer = Siamese(
# model_dir=model_dir_adapted,
# architecture=dummy_adapted,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# config=run_config,
# loss_op=contrastive_loss,
# validation_batch_size=validation_batch_size,
# extra_checkpoint=extra_checkpoint)
# run_siamesetrainer(trainer)
# finally:
# try:
# shutil.rmtree(model_dir, ignore_errors=True)
# shutil.rmtree(model_dir_adapted, ignore_errors=True)
# except Exception:
# pass
# @attr('slow')
# def test_siamesetrainer_transfer_extraparams():
# def logits_input_fn():
# return single_batch(
# filenames,
# labels,
# data_shape,
# data_type,
# batch_size,
# epochs=epochs,
# output_shape=output_shape)
# # Trainer logits first than siamese
# try:
# extra_checkpoint = {
# "checkpoint_path": model_dir,
# "scopes": dict({
# "Dummy/": "Dummy/"
# }),
# "trainable_variables": ["Dummy"]
# }
# # Setting seed
# session_config, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(save_checkpoints_steps=500)
# # LOGISTS
# logits_trainer = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=2,
# config=run_config,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=False,
# validation_batch_size=validation_batch_size)
# logits_trainer.train(logits_input_fn, steps=steps)
# # NOW THE FUCKING SIAMESE
# trainer = Siamese(
# model_dir=model_dir_adapted,
# architecture=dummy_adapted,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# loss_op=contrastive_loss,
# config=run_config,
# validation_batch_size=validation_batch_size,
# extra_checkpoint=extra_checkpoint)
# run_siamesetrainer(trainer)
# finally:
# try:
# shutil.rmtree(model_dir, ignore_errors=True)
# shutil.rmtree(model_dir_adapted, ignore_errors=True)
# except Exception:
# pass
def run_siamesetrainer(trainer):
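The disabled transfer tests first train a Logits estimator, then warm-start a Siamese estimator from its checkpoint through the extra_checkpoint argument. A hedged sketch of that dictionary: the keys are copied from the tests above, the comments describe the intent as exercised there rather than a documented contract, and model_dir is the temporary directory defined elsewhere in the test module:

# Sketch of the extra_checkpoint dict used by the disabled transfer tests.
extra_checkpoint = {
    # Directory holding the checkpoint written by the first (Logits) run.
    "checkpoint_path": model_dir,
    # Mapping of variable scopes in the checkpoint to scopes in the new graph.
    "scopes": {"Dummy/": "Dummy/"},
    # Scopes that stay trainable after loading: an empty list freezes the
    # restored variables, while ["Dummy"] keeps fine-tuning them.
    "trainable_variables": [],
}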
@@ -42,7 +42,7 @@ def dummy_adapted(inputs,
Parameters
----------
inputs:
reuse:
mode:
@@ -81,101 +81,101 @@ def dummy_adapted(inputs,
return graph, end_points
@attr('slow')
def test_logitstrainer():
# Trainer logits
try:
_, run_config, _, _, _ = reproducible.set_seed()
embedding_validation = False
trainer = Logits(
model_dir=model_dir,
architecture=dummy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=10,
loss_op=mean_cross_entropy_loss,
embedding_validation=embedding_validation,
validation_batch_size=validation_batch_size,
config=run_config)
run_logitstrainer_mnist(trainer, augmentation=True)
del trainer
## Again
extra_checkpoint = {
"checkpoint_path": "./temp",
"scopes": dict({
"Dummy/": "Dummy/"
}),
"trainable_variables": []
}
trainer = Logits(
model_dir=model_dir_adapted,
architecture=dummy_adapted,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=10,
loss_op=mean_cross_entropy_loss,
embedding_validation=embedding_validation,
validation_batch_size=validation_batch_size,
extra_checkpoint=extra_checkpoint,
config=run_config)
run_logitstrainer_mnist(trainer, augmentation=True)
finally:
try:
os.unlink(tfrecord_train)
os.unlink(tfrecord_validation)
shutil.rmtree(model_dir, ignore_errors=True)
shutil.rmtree(model_dir_adapted, ignore_errors=True)
pass
except Exception:
pass
@attr('slow')
def test_logitstrainer_center_loss():
# Trainer logits
try:
embedding_validation = False
_, run_config, _, _, _ = reproducible.set_seed()
trainer = LogitsCenterLoss(
model_dir=model_dir,
architecture=dummy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=10,
embedding_validation=embedding_validation,
validation_batch_size=validation_batch_size,
apply_moving_averages=False,
config=run_config)
run_logitstrainer_mnist(trainer, augmentation=True)
del trainer
## Again
extra_checkpoint = {
"checkpoint_path": "./temp",
"scopes": dict({
"Dummy/": "Dummy/"
}),
"trainable_variables": ["Dummy"]
}
trainer = LogitsCenterLoss(
model_dir=model_dir_adapted,
architecture=dummy_adapted,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),
n_classes=10,
embedding_validation=embedding_validation,
validation_batch_size=validation_batch_size,
extra_checkpoint=extra_checkpoint,
apply_moving_averages=False,
config=run_config)
run_logitstrainer_mnist(trainer, augmentation=True)
finally:
try:
os.unlink(tfrecord_train)
os.unlink(tfrecord_validation)
shutil.rmtree(model_dir, ignore_errors=True)
shutil.rmtree(model_dir_adapted, ignore_errors=True)
except Exception:
pass
# @attr('slow')
# def test_logitstrainer():
# # Trainer logits
# try:
# _, run_config, _, _, _ = reproducible.set_seed()
# embedding_validation = False
# trainer = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# config=run_config)
# run_logitstrainer_mnist(trainer, augmentation=True)
# del trainer
# ## Again
# extra_checkpoint = {
# "checkpoint_path": "./temp",
# "scopes": dict({
# "Dummy/": "Dummy/"
# }),
# "trainable_variables": []
# }
# trainer = Logits(
# model_dir=model_dir_adapted,
# architecture=dummy_adapted,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# extra_checkpoint=extra_checkpoint,
# config=run_config)
# run_logitstrainer_mnist(trainer, augmentation=True)
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# shutil.rmtree(model_dir_adapted, ignore_errors=True)
# pass
# except Exception:
# pass
# @attr('slow')
# def test_logitstrainer_center_loss():
# # Trainer logits
# try:
# embedding_validation = False
# _, run_config, _, _, _ = reproducible.set_seed()
# trainer = LogitsCenterLoss(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# apply_moving_averages=False,
# config=run_config)
# run_logitstrainer_mnist(trainer, augmentation=True)
# del trainer
# ## Again
# extra_checkpoint = {
# "checkpoint_path": "./temp",
# "scopes": dict({
# "Dummy/": "Dummy/"
# }),
# "trainable_variables": ["Dummy"]
# }
# trainer = LogitsCenterLoss(
# model_dir=model_dir_adapted,
# architecture=dummy_adapted,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# extra_checkpoint=extra_checkpoint,
# apply_moving_averages=False,
# config=run_config)
# run_logitstrainer_mnist(trainer, augmentation=True)
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# shutil.rmtree(model_dir_adapted, ignore_errors=True)
# except Exception:
# pass
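Each disabled trainer test wraps its run in try/finally so the temporary model directories and tfrecord files are removed even when an assertion fails. A small sketch of that cleanup pattern; run_and_cleanup is a made-up helper, and the file and directory names are the module-level ones used by the tests above:

import os
import shutil

def run_and_cleanup(run_test):
    # Run the (slow) training test, then always remove its artifacts,
    # mirroring the try/finally blocks in the disabled tests above.
    try:
        run_test()
    finally:
        try:
            os.unlink(tfrecord_train)
            os.unlink(tfrecord_validation)
            shutil.rmtree(model_dir, ignore_errors=True)
            shutil.rmtree(model_dir_adapted, ignore_errors=True)
        except Exception:
            pass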
@@ -77,73 +77,73 @@ filenames = [
]
labels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
@attr('slow')
def test_triplet_estimator():
# Trainer logits
try:
trainer = Triplet(
model_dir=model_dir,
architecture=dummy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate),