Commit a836dbe5 authored by Amir MOHAMMADI

Remove tests of removed components

parent cbdc2d10
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
from nose.plugins.attrib import attr
import tensorflow as tf
from bob.learn.tensorflow.network import inception_resnet_v2, inception_resnet_v2_batch_norm, \
    inception_resnet_v1, inception_resnet_v1_batch_norm, \
    vgg_19, vgg_16, mlp_with_batchnorm_and_dropout
# @attr('slow')
# def test_inceptionv2():
# tf.reset_default_graph()
# # Testing WITHOUT batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v2(inputs)
# assert len(tf.trainable_variables()) == 490
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# # Testing WITH batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v2_batch_norm(inputs)
# assert len(tf.trainable_variables()) == 490, len(tf.trainable_variables())
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv2_adaptation():
# tf.reset_default_graph()
# for n, trainable_variables in [
# (490, None),
# (0, []),
# (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
# (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
# 'Conv2d_2a_3x3_BN']),
# (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
# 'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# (1, ['Conv2d_1a_3x3_BN']),
# (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
# (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# ]:
# input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# net, end_points = inception_resnet_v2_batch_norm(
# input, trainable_variables=trainable_variables)
# l = len(tf.trainable_variables())
# assert l == n, (l, n)
# tf.reset_default_graph()
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv1():
# tf.reset_default_graph()
# # Testing WITHOUT batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v1(inputs)
# assert len(tf.trainable_variables()) == 266
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# # Testing WITH batch norm
# inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# graph, _ = inception_resnet_v1_batch_norm(inputs)
# assert len(tf.trainable_variables()) == 266
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_inceptionv1_adaptation():
# tf.reset_default_graph()
# for n, trainable_variables in [
# (266, None),
# (0, []),
# (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
# (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
# 'Conv2d_2a_3x3_BN']),
# (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
# 'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# (1, ['Conv2d_1a_3x3_BN']),
# (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
# (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
# ]:
# input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
# net, end_points = inception_resnet_v1_batch_norm(
# input, trainable_variables=trainable_variables)
# l = len(tf.trainable_variables())
# assert l == n, (l, n)
# tf.reset_default_graph()
# tf.reset_default_graph()
# assert len(tf.global_variables()) == 0
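# test_vgg below checks the network constructors that are still part of the
# package: it builds VGG-19 and VGG-16 in training and prediction mode and
# verifies the expected number of trainable variables (prediction mode should
# create none).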
def test_vgg():
    tf.reset_default_graph()

    # Testing VGG19 training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_19(inputs)
    assert len(tf.trainable_variables()) == 38
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG19 predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_19(inputs, mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG16 training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_16(inputs)
    assert len(tf.trainable_variables()) == 30
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG16 predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_16(inputs, mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0
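# test_mlp builds the small MLP helper with two hidden layers ([6, 5]) and
# checks that training mode creates the expected trainable variables while
# prediction mode creates none.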
def test_mlp():
    tf.reset_default_graph()

    # Testing MLP training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
    graph, _ = mlp_with_batchnorm_and_dropout(inputs, [6, 5])
    assert len(tf.trainable_variables()) == 4
    tf.reset_default_graph()

    # Testing MLP predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
    graph, _ = mlp_with_batchnorm_and_dropout(
        inputs, [6, 5], mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
from nose.plugins.attrib import attr
import tensorflow as tf
from bob.learn.tensorflow.network import dummy
from bob.learn.tensorflow.estimators import Logits, LogitsCenterLoss
from bob.learn.tensorflow.dataset.tfrecords import shuffle_data_and_labels, batch_data_and_labels, \
    shuffle_data_and_labels_image_augmentation
from bob.learn.tensorflow.utils import load_mnist, create_mnist_tfrecord
from bob.learn.tensorflow.utils.hooks import LoggerHookEstimator
from bob.learn.tensorflow.loss import mean_cross_entropy_loss
from bob.learn.tensorflow.utils import reproducible
import numpy
import shutil
import os
# Work around the duplicate OpenMP runtime crash on macOS
# (see https://github.com/dmlc/xgboost/issues/1715)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
tfrecord_train = "./train_mnist.tfrecord"
tfrecord_validation = "./validation_mnist.tfrecord"
model_dir = "./temp"
learning_rate = 0.1
data_shape = (28, 28, 1)  # size of mnist images
data_type = tf.float32
batch_size = 32
validation_batch_size = 250
epochs = 6
steps = 5000
reproducible.set_seed()
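# reproducible.set_seed() presumably fixes the random seeds for repeatable
# runs; the disabled estimator tests below also unpack its return value as
# (session_config, run_config, ...) to obtain a deterministic RunConfig.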
# @attr('slow')
# def test_logitstrainer():
# # Trainer logits
# try:
# embedding_validation = False
# _, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(
# keep_checkpoint_max=10, save_checkpoints_steps=100, save_checkpoints_secs=None)
# trainer = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# config=run_config)
# run_logitstrainer_mnist(trainer, augmentation=True)
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
# @attr('slow')
# def test_logitstrainer_embedding():
# try:
# embedding_validation = True
# _, run_config, _, _, _ = reproducible.set_seed()
# trainer = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# config=run_config)
# run_logitstrainer_mnist(trainer)
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
# @attr('slow')
# def test_logitstrainer_centerloss():
# try:
# embedding_validation = False
# _, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(save_checkpoints_steps=1000)
# trainer = LogitsCenterLoss(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# factor=0.01,
# config=run_config)
# run_logitstrainer_mnist(trainer)
# # Checking if the centers were updated
# sess = tf.Session()
# checkpoint_path = tf.train.get_checkpoint_state(
# model_dir).model_checkpoint_path
# saver = tf.train.import_meta_graph(
# checkpoint_path + ".meta", clear_devices=True)
# saver.restore(sess, tf.train.latest_checkpoint(model_dir))
# centers = tf.get_collection(
# tf.GraphKeys.GLOBAL_VARIABLES, scope="center_loss/centers:0")[0]
# assert numpy.sum(numpy.abs(centers.eval(sess))) > 0.0
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
# @attr('slow')
# def test_logitstrainer_centerloss_embedding():
# try:
# embedding_validation = True
# _, run_config, _, _, _ = reproducible.set_seed()
# trainer = LogitsCenterLoss(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# embedding_validation=embedding_validation,
# validation_batch_size=validation_batch_size,
# factor=0.01,
# config=run_config)
# run_logitstrainer_mnist(trainer)
# # Checking if the centers were updated
# sess = tf.Session()
# checkpoint_path = tf.train.get_checkpoint_state(
# model_dir).model_checkpoint_path
# saver = tf.train.import_meta_graph(
# checkpoint_path + ".meta", clear_devices=True)
# saver.restore(sess, tf.train.latest_checkpoint(model_dir))
# centers = tf.get_collection(
# tf.GraphKeys.GLOBAL_VARIABLES, scope="center_loss/centers:0")[0]
# assert numpy.sum(numpy.abs(centers.eval(sess))) > 0.0
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
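# Shared helper for the (now disabled) Logits/LogitsCenterLoss tests above:
# it writes small MNIST tfrecords, trains the given estimator for `steps`
# steps, and checks that evaluation accuracy is above chance (> 0.10 for ten
# classes) before cleaning up the default graph.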
def run_logitstrainer_mnist(trainer, augmentation=False):
    # Cleaning up
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Creating tf records for mnist
    train_data, train_labels, validation_data, validation_labels = load_mnist()
    create_mnist_tfrecord(
        tfrecord_train, train_data, train_labels, n_samples=6000)
    create_mnist_tfrecord(
        tfrecord_validation,
        validation_data,
        validation_labels,
        n_samples=validation_batch_size)

    def input_fn():
        if augmentation:
            return shuffle_data_and_labels_image_augmentation(
                tfrecord_train,
                data_shape,
                data_type,
                batch_size,
                random_flip=True,
                random_rotate=False,
                epochs=epochs)
        else:
            return shuffle_data_and_labels(
                tfrecord_train,
                data_shape,
                data_type,
                batch_size,
                epochs=epochs)

    def input_fn_validation():
        return batch_data_and_labels(
            tfrecord_validation,
            data_shape,
            data_type,
            validation_batch_size,
            epochs=1000)

    hooks = [
        LoggerHookEstimator(trainer, 16, 300),
        tf.train.SummarySaverHook(
            save_steps=1000,
            output_dir=model_dir,
            scaffold=tf.train.Scaffold(),
            summary_writer=tf.summary.FileWriter(model_dir))
    ]

    trainer.train(input_fn, steps=steps, hooks=hooks)

    if not trainer.embedding_validation:
        acc = trainer.evaluate(input_fn_validation)
        assert acc['accuracy'] > 0.10
    else:
        acc = trainer.evaluate(input_fn_validation)
        assert acc['accuracy'] > 0.10

    # Cleaning up
    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0
# @attr('slow')
# def test_moving_average_trainer():
# # define a fixed input data
# # train the same network with the same initialization
# # evaluate it
# # train and evaluate it again with moving average
# # Accuracy should be lower when moving average is on
# try:
# # Creating tf records for mnist
# train_data, train_labels, validation_data, validation_labels = load_mnist()
# create_mnist_tfrecord(
# tfrecord_train, train_data, train_labels, n_samples=6000)
# create_mnist_tfrecord(
# tfrecord_validation,
# validation_data,
# validation_labels,
# n_samples=validation_batch_size)
# def input_fn():
# return batch_data_and_labels(
# tfrecord_train,
# data_shape,
# data_type,
# batch_size,
# epochs=1)
# def input_fn_validation():
# return batch_data_and_labels(
# tfrecord_validation,
# data_shape,
# data_type,
# validation_batch_size,
# epochs=1)
# from bob.learn.tensorflow.network.Dummy import dummy as architecture
# run_config = reproducible.set_seed(183, 183)[1]
# run_config = run_config.replace(save_checkpoints_steps=2000)
# def _estimator(apply_moving_averages):
# return Logits(
# architecture,
# tf.train.GradientDescentOptimizer(1e-1),
# tf.losses.sparse_softmax_cross_entropy,
# 10,
# model_dir=model_dir,
# config=run_config,
# apply_moving_averages=apply_moving_averages,
# )
# def _evaluate(estimator, delete=True):
# try:
# estimator.train(input_fn)
# evaluations = estimator.evaluate(input_fn_validation)
# finally:
# if delete:
# shutil.rmtree(estimator.model_dir, ignore_errors=True)
# return evaluations
# estimator = _estimator(False)
# evaluations = _evaluate(estimator, delete=True)
# no_moving_average_acc = evaluations['accuracy']
# # same as above with moving average
# estimator = _estimator(True)
# evaluations = _evaluate(estimator, delete=False)
# with_moving_average_acc = evaluations['accuracy']
# assert no_moving_average_acc > with_moving_average_acc, \
# (no_moving_average_acc, with_moving_average_acc)
# # Can it resume training?
# del estimator
# tf.reset_default_graph()
# estimator = _estimator(True)
# _evaluate(estimator, delete=True)
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
# @attr('slow')
# def test_saver_with_moving_average():
# try:
# _, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(
# keep_checkpoint_max=10, save_checkpoints_steps=100,
# save_checkpoints_secs=None)
# estimator = Logits(
# model_dir=model_dir,
# architecture=dummy,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# n_classes=10,
# loss_op=mean_cross_entropy_loss,
# embedding_validation=False,
# validation_batch_size=validation_batch_size,
# config=run_config)
# run_logitstrainer_mnist(estimator, augmentation=True)
# ckpt = tf.train.get_checkpoint_state(estimator.model_dir)
# assert ckpt, "Failed to get any checkpoint!"
# assert len(
# ckpt.all_model_checkpoint_paths) == 10, ckpt.all_model_checkpoint_paths
# finally:
# try:
# os.unlink(tfrecord_train)
# os.unlink(tfrecord_validation)
# shutil.rmtree(model_dir, ignore_errors=True)
# except Exception:
# pass
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
from nose.plugins.attrib import attr
import tensorflow as tf
from bob.learn.tensorflow.network import dummy
from bob.learn.tensorflow.estimators import Siamese, Logits
from bob.learn.tensorflow.dataset.siamese_image import shuffle_data_and_labels_image_augmentation as siamese_batch
from bob.learn.tensorflow.dataset.image import shuffle_data_and_labels_image_augmentation as single_batch
from bob.learn.tensorflow.loss import contrastive_loss, mean_cross_entropy_loss
from bob.learn.tensorflow.utils.hooks import LoggerHookEstimator
from .test_estimator_transfer import dummy_adapted
from bob.learn.tensorflow.utils import reproducible
import pkg_resources
import shutil
# Work around the duplicate OpenMP runtime crash on macOS
# (see https://github.com/dmlc/xgboost/issues/1715)
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
tfrecord_train = "./train_mnist.tfrecord"
tfrecord_validation = "./validation_mnist.tfrecord"
model_dir = "./temp"
model_dir_adapted = "./temp2"
learning_rate = 0.0001
data_shape = (250, 250, 3)  # size of the dummy image database images
output_shape = (50, 50)
data_type = tf.float32
batch_size = 4
validation_batch_size = 2
epochs = 1
steps = 5000
# Data
filenames = [
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p02_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p02_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p02_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p02_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m301_01_p02_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_02_f12_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_02_f12_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_02_f12_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_01_p01_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_02_f12_i0_0.png'),
pkg_resources.resource_filename(
__name__, 'data/dummy_image_database/m304_02_f12_i0_0.png'),
]
labels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
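# Nine images of subject m301 (label 0) and nine of subject m304 (label 1);
# the disabled siamese tests below presumably use these to form genuine and
# impostor pairs via the siamese_batch input function.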
# @attr('slow')
# def test_siamesetrainer():
# # Trainer logits
# try:
# # Setting seed
# session_config, run_config, _, _, _ = reproducible.set_seed()
# run_config = run_config.replace(save_checkpoints_steps=500)
# trainer = Siamese(
# model_dir=model_dir,
# architecture=dummy,
# config=run_config,
# optimizer=tf.train.GradientDescentOptimizer(learning_rate),
# loss_op=contrastive_loss,
# validation_batch_size=validation_batch_size)
# run_siamesetrainer(trainer)
# finally:
# try:
# shutil.rmtree(model_dir, ignore_errors=True)
# # pass
# except Exception:
# pass
# @attr('slow')
# def test_siamesetrainer_transfer():
# def logits_input_fn():
# return single_batch(
# filenames,
# labels,
# data_shape,
# data_type,
# batch_size,
# epochs=epochs,
# output_shape=output_shape)
# # Trainer logits first than siamese
# try:
# # Setting seed
# session_config, run_config, _, _, _ = reproducible.set_seed()