Commit bdcb031a authored by Amir MOHAMMADI

remove slim references

parent a836dbe5
1 merge request: !85 Porting to TF2
@@ -6,8 +6,6 @@ import logging
import tensorflow as tf
logger = logging.getLogger(__name__)
slim = tf.contrib.slim
def mean_cross_entropy_loss(logits, labels, add_regularization_losses=True):
    """
...
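The hunk above only shows the signature and the start of the docstring of ``mean_cross_entropy_loss``. For orientation, a slim-free version of such a loss could look like the sketch below; this is a hypothetical illustration assuming sparse integer labels and the standard ``tf.losses`` regularization collection, not necessarily the code in this commit::

   import tensorflow as tf

   def mean_cross_entropy_loss(logits, labels, add_regularization_losses=True):
       # Mean softmax cross-entropy over the batch (sparse integer labels assumed).
       loss = tf.reduce_mean(
           tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
       )
       if add_regularization_losses:
           # Fold in any regularization losses registered in the graph,
           # e.g. via kernel_regularizer arguments.
           regularization_losses = tf.losses.get_regularization_losses()
           loss = tf.add_n([loss] + list(regularization_losses), name="total_loss")
       return loss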
import tensorflow as tf

model_dir = "%(model_dir)s"
learning_rate = 0.00001


def architecture(images):
    images = tf.cast(images, tf.float32)
    logits = tf.reshape(images, [-1, 92 * 112])
    logits = tf.contrib.slim.fully_connected(inputs=logits, num_outputs=20)
    return logits


def model_fn(features, labels, mode, config):
    key = features['key']
    features = features['data']

    logits = architecture(features)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
        "key": key,
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=labels)

    accuracy = tf.metrics.accuracy(
        labels=labels, predictions=predictions["classes"])
    metrics = {'accuracy': accuracy}

    # Configure the training op
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=learning_rate)
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_or_create_global_step())
        # Log accuracy and loss
        with tf.name_scope('train_metrics'):
            tf.summary.scalar('accuracy', accuracy[1])
            tf.summary.scalar('loss', loss)
    else:
        train_op = None

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)


estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
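Since the goal of this merge request is to drop ``tf.contrib.slim``, the single fully connected layer in ``architecture`` above has an almost direct, slim-free equivalent. The sketch below is only an illustration using ``tf.layers.dense`` (the actual port may use ``tf.keras`` layers instead); note that ``slim.fully_connected`` applies a ReLU activation by default, so it is spelled out explicitly here::

   import tensorflow as tf

   def architecture(images):
       images = tf.cast(images, tf.float32)
       logits = tf.reshape(images, [-1, 92 * 112])
       # slim.fully_connected(num_outputs=20) with its default ReLU activation
       logits = tf.layers.dense(inputs=logits, units=20, activation=tf.nn.relu)
       return logits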
import tensorflow as tf
import tensorflow.contrib.slim as slim


def append_logits(
    graph,
    n_classes,
    reuse=False,
    l2_regularizer=5e-05,
    weights_std=0.1,
    trainable_variables=None,
    name="Logits",
):
    trainable = is_trainable(name, trainable_variables)
    return slim.fully_connected(
        graph,
        n_classes,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=weights_std),
        weights_regularizer=slim.l2_regularizer(l2_regularizer),
        scope=name,
        reuse=reuse,
        trainable=trainable,
    )


def is_trainable(name, trainable_variables, mode=tf.estimator.ModeKeys.TRAIN):
    """Check if a variable is trainable or not.

    Parameters
    ----------
    name : str
        Layer name.
    trainable_variables : list
        List containing the variables or scopes to be trained.
        If None, the variable/scope is trained.
    """
    # If the mode is not TRAIN, nothing is trainable.
    if mode != tf.estimator.ModeKeys.TRAIN:
        return False

    # If None, we train everything by default.
    if trainable_variables is None:
        return True

    # Otherwise, the whole scope is enabled or disabled as one unit.
    return name in trainable_variables
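For reference, ``append_logits`` can also be written without slim. The sketch below uses ``tf.layers.dense``; it is an assumption about how the port could look, not the code introduced here, and ``tf.keras.regularizers.l2`` is not numerically identical to ``slim.l2_regularizer``::

   import tensorflow as tf

   def append_logits(
       graph,
       n_classes,
       reuse=False,
       l2_regularizer=5e-05,
       weights_std=0.1,
       trainable_variables=None,
       name="Logits",
   ):
       trainable = is_trainable(name, trainable_variables)
       return tf.layers.dense(
           graph,
           n_classes,
           activation=None,
           kernel_initializer=tf.truncated_normal_initializer(stddev=weights_std),
           # Note: keras l2 is scale * sum(w**2), while slim.l2_regularizer is
           # scale * tf.nn.l2_loss(w) = scale * sum(w**2) / 2.
           kernel_regularizer=tf.keras.regularizers.l2(l2_regularizer),
           trainable=trainable,
           name=name,
           reuse=reuse,
       )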
@@ -16,9 +16,6 @@ Users Guide
   :maxdepth: 2

   user_guide
   regression
   style_transfer
   transfer_learning

Reference Manual
================
...
@@ -24,19 +24,6 @@ Estimators
Architectures
=============

.. autosummary::

   bob.learn.tensorflow.network.chopra
   bob.learn.tensorflow.network.light_cnn9
   bob.learn.tensorflow.network.dummy
   bob.learn.tensorflow.network.mlp
   bob.learn.tensorflow.network.mlp_with_batchnorm_and_dropout
   bob.learn.tensorflow.network.inception_resnet_v2
   bob.learn.tensorflow.network.inception_resnet_v1
   bob.learn.tensorflow.network.inception_resnet_v2_batch_norm
   bob.learn.tensorflow.network.inception_resnet_v1_batch_norm
   bob.learn.tensorflow.network.SimpleCNN.slim_architecture
   bob.learn.tensorflow.network.vgg_19
   bob.learn.tensorflow.network.vgg_16

Data
...
.. vim: set fileencoding=utf-8 :
===========
Regression
===========
A flexible estimator for regression problems is implemented in
:py:class:`bob.learn.tensorflow.estimators.Regressor`. You can use this
estimator for various regression problems. The guide below (taken from
https://www.tensorflow.org/tutorials/keras/basic_regression) outlines a basic
regression example using the API of this package.
The Boston Housing Prices dataset
=================================
.. testsetup::

   import tempfile
   model_dir = tempfile.mkdtemp()
1. Let's do some imports:
*************************
.. doctest::

   >>> import tensorflow as tf
   >>> from tensorflow import keras
   >>> import tensorflow.contrib.slim as slim
   >>> from bob.learn.tensorflow.estimators import Regressor
2. Download the dataset:
************************
.. doctest::

   >>> boston_housing = keras.datasets.boston_housing
   >>> print("Downloading"); (train_data, train_labels), (test_data, test_labels) = boston_housing.load_data() # doctest: +ELLIPSIS
   Downloading...
   >>> print("Training set: {}".format(train_data.shape))
   Training set: (404, 13)
   >>> print("Testing set: {}".format(test_data.shape))
   Testing set: (102, 13)
3. Normalize features
*********************
.. doctest::

   >>> # Test data is *not* used when calculating the mean and std.
   >>>
   >>> mean = train_data.mean(axis=0)
   >>> std = train_data.std(axis=0)
   >>> train_data = (train_data - mean) / std
   >>> test_data = (test_data - mean) / std
4. Define the input functions
*****************************
.. doctest::

   >>> EPOCH = 2
   >>> def input_fn(mode):
   ...     if mode == tf.estimator.ModeKeys.TRAIN:
   ...         features, labels = train_data, train_labels
   ...     else:
   ...         features, labels = test_data, test_labels
   ...     dataset = tf.data.Dataset.from_tensor_slices((features, labels, [str(x) for x in labels]))
   ...     dataset = dataset.batch(1)
   ...     if mode == tf.estimator.ModeKeys.TRAIN:
   ...         dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(len(labels), EPOCH))
   ...     data, label, key = dataset.make_one_shot_iterator().get_next()
   ...     # key is a unique string identifier of each sample.
   ...     # Here we just use the string version of labels.
   ...     return {'data': data, 'key': key}, label
   ...
   >>> def train_input_fn():
   ...     return input_fn(tf.estimator.ModeKeys.TRAIN)
   ...
   >>> def eval_input_fn():
   ...     return input_fn(tf.estimator.ModeKeys.EVAL)
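``tf.contrib.data.shuffle_and_repeat`` lives in ``tf.contrib`` and goes away in TF2. If you want a contrib-free variant of the training branch above, chaining ``shuffle`` and ``repeat`` is a close (though not bit-identical) replacement. This is only a sketch and not part of the tested guide::

   def input_fn(mode):
       if mode == tf.estimator.ModeKeys.TRAIN:
           features, labels = train_data, train_labels
       else:
           features, labels = test_data, test_labels
       dataset = tf.data.Dataset.from_tensor_slices(
           (features, labels, [str(x) for x in labels]))
       dataset = dataset.batch(1)
       if mode == tf.estimator.ModeKeys.TRAIN:
           # shuffle() + repeat() instead of tf.contrib.data.shuffle_and_repeat
           dataset = dataset.shuffle(len(labels)).repeat(EPOCH)
       data, label, key = dataset.make_one_shot_iterator().get_next()
       return {'data': data, 'key': key}, label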
5. Create the estimator
***********************
.. doctest::

   >>> def architecture(data, mode, **kwargs):
   ...     endpoints = {}
   ...
   ...     with tf.variable_scope('DNN'):
   ...
   ...         name = 'fc1'
   ...         net = slim.fully_connected(data, 64, scope=name)
   ...         endpoints[name] = net
   ...
   ...         name = 'fc2'
   ...         net = slim.fully_connected(net, 64, scope=name)
   ...         endpoints[name] = net
   ...
   ...     return net, endpoints
   ...
   >>> estimator = Regressor(architecture, model_dir=model_dir)
6. Train and evaluate the model
*******************************
.. doctest::

   >>> estimator.train(train_input_fn) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +SKIP
   <bob.learn.tensorflow.estimators.Regressor ...
   >>> 'rmse' in estimator.evaluate(eval_input_fn) # doctest: +SKIP
   True
   >>> list(estimator.predict(eval_input_fn)) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +SKIP
   [...
.. testcleanup::

   import shutil
   shutil.rmtree(model_dir, True)
.. vim: set fileencoding=utf-8 :
===============
Style Transfer
===============
We have implemented the style transfer strategy from::

   Gatys, Leon A., Alexander S. Ecker, and Matthias Bethge. "A neural algorithm of artistic style." arXiv preprint arXiv:1508.06576 (2015).

Check the usage possibilities with the command::

   $ bob tf style_transfer --help

Here is an example of how to do style transfer using a VGG 19 model trained on ImageNet.
.. doctest::

   >>> from bob.learn.tensorflow.network import vgg_19
   >>> # --architecture
   >>> architecture = vgg_19
   >>> import numpy
   >>> # You can download the checkpoints from
   >>> # https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models
   >>> checkpoint_dir = "[DOWNLOAD_YOUR_MODEL]"
   >>> # --style-end-points and --content-end-points
   >>> content_end_points = ['vgg_19/conv4/conv4_2', 'vgg_19/conv5/conv5_2']
   >>> style_end_points = ['vgg_19/conv1/conv1_2',
   ...                     'vgg_19/conv2/conv2_1',
   ...                     'vgg_19/conv3/conv3_1',
   ...                     'vgg_19/conv4/conv4_1',
   ...                     'vgg_19/conv5/conv5_1']
   >>> # Transferring variables
   >>> scopes = {"vgg_19/": "vgg_19/"}
   >>> # Style images to use
   >>> style_image_paths = ["vincent_van_gogh.jpg"]
   >>> # Functions used to preprocess and un-preprocess the input signal
   >>> # --preprocess-fn and --un-preprocess-fn
   >>> # Taken from VGG19
   >>> def mean_norm(tensor):
   ...     return tensor - numpy.array([123.68, 116.779, 103.939])
   >>> def un_mean_norm(tensor):
   ...     return tensor + numpy.array([123.68, 116.779, 103.939])
   >>> preprocess_fn = mean_norm
   >>> un_preprocess_fn = un_mean_norm
Here we use an image of Angelina Jolie and the Van Gogh style as an example::

   $ bob tf style_transfer angelina.jpg angelina_output.jpg vgg19_example.py -i 1000.
.. figure:: img/angelina.jpg
   :width: 35%

   Source (content) image

.. figure:: img/vincent_van_gogh.jpg
   :width: 27%

   Style image

.. figure:: img/angelina_output.jpg
   :width: 35%

   Generated image
.. vim: set fileencoding=utf-8 :
==================
Transfer Learning
==================
To be done...
@@ -45,7 +45,6 @@ the data pipeline in more detail.
   >>> from bob.learn.tensorflow.estimators import Logits
   >>> import bob.db.atnt
   >>> import tensorflow as tf
   >>> import tensorflow.contrib.slim as slim

2. Define the inputs:
*********************

@@ -124,44 +123,7 @@ the data pipeline in more detail.
.. doctest::

   >>> def architecture(data, mode, **kwargs):
   ...     endpoints = {}                                            ...     pass
   ...     training = mode == tf.estimator.ModeKeys.TRAIN
   ...
   ...     with tf.variable_scope('CNN'):
   ...
   ...         name = 'conv'
   ...         net = slim.conv2d(data, 32, kernel_size=(
   ...             5, 5), stride=2, padding='SAME', activation_fn=tf.nn.relu, scope=name)
   ...         endpoints[name] = net
   ...
   ...         name = 'pool'
   ...         net = slim.max_pool2d(net, (2, 2),
   ...                               stride=1, padding='SAME', scope=name)
   ...         endpoints[name] = net
   ...
   ...         name = 'pool-flat'
   ...         net = slim.flatten(net, scope=name)
   ...         endpoints[name] = net
   ...
   ...         name = 'dense'
   ...         net = slim.fully_connected(net, 128, scope=name)
   ...         endpoints[name] = net
   ...
   ...         name = 'dropout'
   ...         net = slim.dropout(
   ...             inputs=net, keep_prob=0.4, is_training=training)
   ...         endpoints[name] = net
   ...
   ...     return net, endpoints
.. important::

   Practical advice: use ``tf.contrib.slim`` to craft your CNNs. Although
   TensorFlow's documentation recommends the usage of ``tf.layers`` and
   ``tf.keras``, in our experience ``slim`` has better defaults and is more
   integrated with TensorFlow's framework (compared to ``tf.keras``),
   probably because it is used more often internally at Google.
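The removed example above builds the CNN with slim layers. As an orientation for readers following the TF2 port, a roughly equivalent network can be written with the ``tf.layers`` API; the sketch below is an assumption about one possible translation (the ported guide may use ``tf.keras`` instead) and is not the code added by this commit::

   import tensorflow as tf

   def architecture(data, mode, **kwargs):
       endpoints = {}
       training = mode == tf.estimator.ModeKeys.TRAIN

       with tf.variable_scope('CNN'):
           net = tf.layers.conv2d(data, 32, kernel_size=(5, 5), strides=2,
                                  padding='same', activation=tf.nn.relu,
                                  name='conv')
           endpoints['conv'] = net

           net = tf.layers.max_pooling2d(net, pool_size=(2, 2), strides=1,
                                         padding='same', name='pool')
           endpoints['pool'] = net

           net = tf.layers.flatten(net, name='pool-flat')
           endpoints['pool-flat'] = net

           # slim.fully_connected applies ReLU by default
           net = tf.layers.dense(net, 128, activation=tf.nn.relu, name='dense')
           endpoints['dense'] = net

           # slim.dropout takes keep_prob; tf.layers.dropout takes rate = 1 - keep_prob
           net = tf.layers.dropout(net, rate=0.6, training=training, name='dropout')
           endpoints['dropout'] = net

       return net, endpoints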
4. Estimator:
...