diff --git a/bob/learn/tensorflow/examples/mnist/mnist_config.py b/bob/learn/tensorflow/examples/mnist/mnist_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..6227fcd462822776320db3c4e24a646f3ef2c721
--- /dev/null
+++ b/bob/learn/tensorflow/examples/mnist/mnist_config.py
@@ -0,0 +1,204 @@
+#  Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from bob.learn.tensorflow.utils.reproducible import session_conf
+import tensorflow as tf
+
+model_dir = '/tmp/mnist_model'
+train_tfrecords = ['/tmp/mnist_data/train.tfrecords']
+eval_tfrecords = ['/tmp/mnist_data/test.tfrecords']
+
+# by default create reproducible nets:
+run_config = tf.estimator.RunConfig()
+run_config = run_config.replace(session_config=session_conf)
+run_config = run_config.replace(keep_checkpoint_max=10**3)
+run_config = run_config.replace(save_checkpoints_secs=60)
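+# (``session_conf`` is assumed to be a tf.ConfigProto from the
+# ``reproducible`` module that pins random seeds and threading so that
+# runs are repeatable.)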
+
+
+def input_fn(mode, batch_size=1):
+    """A simple input_fn using the contrib.data input pipeline."""
+
+    def example_parser(serialized_example):
+        """Parses a single tf.Example into image and label tensors."""
+        features = tf.parse_single_example(
+            serialized_example,
+            features={
+                'image_raw': tf.FixedLenFeature([], tf.string),
+                'label': tf.FixedLenFeature([], tf.int64),
+            })
+        image = tf.decode_raw(features['image_raw'], tf.uint8)
+        image.set_shape([28 * 28])
+
+        # Normalize the values of the image from the range
+        # [0, 255] to [-0.5, 0.5]
+        image = tf.cast(image, tf.float32) / 255 - 0.5
+        label = tf.cast(features['label'], tf.int32)
+        return image, tf.one_hot(label, 10)
+
+    if mode == tf.estimator.ModeKeys.TRAIN:
+        tfrecords_files = train_tfrecords
+    else:
+        assert mode == tf.estimator.ModeKeys.EVAL, 'invalid mode'
+        tfrecords_files = eval_tfrecords
+
+    for tfrecords_file in tfrecords_files:
+        assert tf.gfile.Exists(tfrecords_file), (
+            'Run the convert_to_records.py script from the '
+            'tensorflow/models repository (official/mnist) first to '
+            'convert the MNIST data to TFRecord file format.')
+
+    dataset = tf.contrib.data.TFRecordDataset(tfrecords_files)
+
+    # For training, repeat the dataset forever
+    if mode == tf.estimator.ModeKeys.TRAIN:
+        dataset = dataset.repeat()
+
+    # Map example_parser over dataset, and batch results by up to batch_size
+    dataset = dataset.map(
+        example_parser, num_threads=1, output_buffer_size=batch_size)
+    dataset = dataset.batch(batch_size)
+    images, labels = dataset.make_one_shot_iterator().get_next()
+
+    return images, labels
+
+
+def train_input_fn():
+    return input_fn(tf.estimator.ModeKeys.TRAIN)
+
+
+def eval_input_fn():
+    return input_fn(tf.estimator.ModeKeys.EVAL)
+
+
+def mnist_model(inputs, mode):
+    """Takes the MNIST inputs and mode and outputs a tensor of logits."""
+    # Input Layer
+    # Reshape X to 4-D tensor: [batch_size, height, width, channels]
+    # MNIST images are 28x28 pixels, and have one color channel
+    inputs = tf.reshape(inputs, [-1, 28, 28, 1])
+    data_format = 'channels_last'
+
+    if tf.test.is_built_with_cuda():
+        # When running on GPU, transpose the data from channels_last (NHWC) to
+        # channels_first (NCHW) to improve performance. See
+        # https://www.tensorflow.org/performance/performance_guide#data_formats
+        data_format = 'channels_first'
+        inputs = tf.transpose(inputs, [0, 3, 1, 2])
+
+    # Convolutional Layer #1
+    # Computes 32 features using a 5x5 filter with ReLU activation.
+    # Padding is added to preserve width and height.
+    # Input Tensor Shape: [batch_size, 28, 28, 1]
+    # Output Tensor Shape: [batch_size, 28, 28, 32]
+    conv1 = tf.layers.conv2d(
+        inputs=inputs,
+        filters=32,
+        kernel_size=[5, 5],
+        padding='same',
+        activation=tf.nn.relu,
+        data_format=data_format)
+
+    # Pooling Layer #1
+    # First max pooling layer with a 2x2 filter and stride of 2
+    # Input Tensor Shape: [batch_size, 28, 28, 32]
+    # Output Tensor Shape: [batch_size, 14, 14, 32]
+    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,
+                                    data_format=data_format)
+
+    # Convolutional Layer #2
+    # Computes 64 features using a 5x5 filter.
+    # Padding is added to preserve width and height.
+    # Input Tensor Shape: [batch_size, 14, 14, 32]
+    # Output Tensor Shape: [batch_size, 14, 14, 64]
+    conv2 = tf.layers.conv2d(
+        inputs=pool1,
+        filters=64,
+        kernel_size=[5, 5],
+        padding='same',
+        activation=tf.nn.relu,
+        data_format=data_format)
+
+    # Pooling Layer #2
+    # Second max pooling layer with a 2x2 filter and stride of 2
+    # Input Tensor Shape: [batch_size, 14, 14, 64]
+    # Output Tensor Shape: [batch_size, 7, 7, 64]
+    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,
+                                    data_format=data_format)
+
+    # Flatten tensor into a batch of vectors
+    # Input Tensor Shape: [batch_size, 7, 7, 64]
+    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
+    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
+
+    # Dense Layer
+    # Densely connected layer with 1024 neurons
+    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
+    # Output Tensor Shape: [batch_size, 1024]
+    dense = tf.layers.dense(inputs=pool2_flat, units=1024,
+                            activation=tf.nn.relu)
+
+    # Add dropout operation; 0.6 probability that element will be kept
+    dropout = tf.layers.dropout(
+        inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
+
+    # Logits layer
+    # Input Tensor Shape: [batch_size, 1024]
+    # Output Tensor Shape: [batch_size, 10]
+    logits = tf.layers.dense(inputs=dropout, units=10)
+    return logits
+
+
+def model_fn(features, labels, mode):
+    """Model function for MNIST."""
+    logits = mnist_model(features, mode)
+
+    predictions = {
+        'classes': tf.argmax(input=logits, axis=1),
+        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
+    }
+
+    if mode == tf.estimator.ModeKeys.PREDICT:
+        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+
+    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
+
+    # Configure the training op
+    if mode == tf.estimator.ModeKeys.TRAIN:
+        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
+        train_op = optimizer.minimize(
+            loss, tf.train.get_or_create_global_step())
+    else:
+        train_op = None
+
+    accuracy = tf.metrics.accuracy(
+        tf.argmax(labels, axis=1), predictions['classes'])
+    metrics = {'accuracy': accuracy}
+
+    with tf.name_scope('train_metrics'):
+        # Create a tensor named train_accuracy for logging purposes
+        tf.summary.scalar('train_accuracy', accuracy[1])
+
+        tf.summary.scalar('train_loss', loss)
+
+    return tf.estimator.EstimatorSpec(
+        mode=mode,
+        predictions=predictions,
+        loss=loss,
+        train_op=train_op,
+        eval_metric_ops=metrics)
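+
+
+# A minimal usage sketch (hypothetical; in practice the generic train/eval
+# scripts load this module as their configuration and build the estimator
+# themselves, as in bob.learn.tensorflow.script.train_generic):
+#
+#     estimator = tf.estimator.Estimator(
+#         model_fn=model_fn, model_dir=model_dir, config=run_config)
+#     estimator.train(input_fn=train_input_fn)
+#     print(estimator.evaluate(input_fn=eval_input_fn))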
diff --git a/bob/learn/tensorflow/network/SimpleCNN.py b/bob/learn/tensorflow/network/SimpleCNN.py
new file mode 100644
index 0000000000000000000000000000000000000000..01bec65179bc7c5f96365afb4b2e403bcaff0782
--- /dev/null
+++ b/bob/learn/tensorflow/network/SimpleCNN.py
@@ -0,0 +1,107 @@
+import tensorflow as tf
+
+
+def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN):
+    # TODO: figure out a way to accept different input sizes
+
+    # Convolutional Layer #1
+    # Computes 32 features using a 5x5 filter with ReLU activation.
+    # Padding is added to preserve width and height.
+    # Input Tensor Shape: [batch_size, 50, 1024, 1]
+    # Output Tensor Shape: [batch_size, 50, 1024, 32]
+    conv1 = tf.layers.conv2d(
+        inputs=input_layer,
+        filters=32,
+        kernel_size=[5, 5],
+        padding="same",
+        activation=tf.nn.relu)
+
+    # Pooling Layer #1
+    # First max pooling layer with a 2x2 filter and stride of 2
+    # Input Tensor Shape: [batch_size, 50, 1024, 32]
+    # Output Tensor Shape: [batch_size, 25, 512, 32]
+    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
+
+    # Convolutional Layer #2
+    # Computes 64 features using a 5x5 filter.
+    # Padding is added to preserve width and height.
+    # Input Tensor Shape: [batch_size, 25, 512, 32]
+    # Output Tensor Shape: [batch_size, 25, 512, 64]
+    conv2 = tf.layers.conv2d(
+        inputs=pool1,
+        filters=64,
+        kernel_size=[5, 5],
+        padding="same",
+        activation=tf.nn.relu)
+
+    # Pooling Layer #2
+    # Second max pooling layer with a 2x2 filter and stride of 2
+    # Input Tensor Shape: [batch_size, 25, 512, 64]
+    # Output Tensor Shape: [batch_size, 12, 256, 64]
+    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
+
+    # Flatten tensor into a batch of vectors
+    # Input Tensor Shape: [batch_size, 12, 256, 64]
+    # Output Tensor Shape: [batch_size, 12 * 256 * 64]
+    pool2_flat = tf.reshape(pool2, [-1, 12 * 256 * 64])
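+    # The flatten size above is hard-coded for 50x1024 inputs.  A sketch of
+    # deriving it from the static shape instead (one possible answer to the
+    # TODO above; assumes the spatial dimensions are statically known):
+    #
+    #     shape = pool2.get_shape().as_list()
+    #     pool2_flat = tf.reshape(pool2, [-1, shape[1] * shape[2] * shape[3]])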
+
+    # Dense Layer
+    # Densely connected layer with 1024 neurons
+    # Input Tensor Shape: [batch_size, 12 * 256 * 64]
+    # Output Tensor Shape: [batch_size, 1024]
+    dense = tf.layers.dense(
+        inputs=pool2_flat, units=1024, activation=tf.nn.relu)
+
+    # Add dropout operation; 0.6 probability that element will be kept
+    dropout = tf.layers.dropout(
+        inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
+
+    # Logits layer
+    # Input Tensor Shape: [batch_size, 1024]
+    # Output Tensor Shape: [batch_size, 2]
+    logits = tf.layers.dense(inputs=dropout, units=2)
+
+    return logits
+
+
+def model_fn(features, labels, mode, params, config):
+    """Model function for CNN."""
+    params = params or {}
+    learning_rate = params.get('learning_rate', 0.00001)
+
+    logits = architecture(features, mode)
+
+    predictions = {
+        # Generate predictions (for PREDICT and EVAL mode)
+        "classes": tf.argmax(input=logits, axis=1),
+        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
+        # `logging_hook`.
+        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
+    }
+    if mode == tf.estimator.ModeKeys.PREDICT:
+        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+
+    # Calculate Loss (for both TRAIN and EVAL modes)
+    loss = tf.losses.sparse_softmax_cross_entropy(
+        logits=logits, labels=labels)
+
+    with tf.name_scope('train_metrics'):
+        # Create a tensor named train_loss for logging purposes
+        tf.summary.scalar('train_loss', loss)
+
+    # Configure the Training Op (for TRAIN mode)
+    if mode == tf.estimator.ModeKeys.TRAIN:
+        optimizer = tf.train.GradientDescentOptimizer(
+            learning_rate=learning_rate)
+        train_op = optimizer.minimize(
+            loss=loss,
+            global_step=tf.train.get_global_step())
+        return tf.estimator.EstimatorSpec(
+            mode=mode, loss=loss, train_op=train_op)
+
+    # Add evaluation metrics (for EVAL mode)
+    eval_metric_ops = {
+        "accuracy": tf.metrics.accuracy(
+            labels=labels, predictions=predictions["classes"])}
+    return tf.estimator.EstimatorSpec(
+        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
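+
+
+# Hypothetical instantiation showing how ``learning_rate`` reaches ``params``
+# (the model_dir and value here are illustrative only):
+#
+#     estimator = tf.estimator.Estimator(
+#         model_fn=model_fn, model_dir='/tmp/simple_cnn_model',
+#         params={'learning_rate': 1e-4})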
diff --git a/bob/learn/tensorflow/network/__init__.py b/bob/learn/tensorflow/network/__init__.py
index cc09a91ef4f7f3d3830c3e018bc165b05df144bc..3226aa31d2fd87ca5d7f6011d3502e62431fde56 100755
--- a/bob/learn/tensorflow/network/__init__.py
+++ b/bob/learn/tensorflow/network/__init__.py
@@ -5,21 +5,24 @@ from .MLP import mlp
 from .Embedding import Embedding
 from .InceptionResnetV2 import inception_resnet_v2
 from .InceptionResnetV1 import inception_resnet_v1
+from . import SimpleCNN
 
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-  """Says object was actually declared here, an not on the import module.
+    """Says object was actually declared here, an not on the import module.
 
-  Parameters:
+    Parameters:
 
-    *args: An iterable of objects to modify
+        *args: An iterable of objects to modify
 
-  Resolves `Sphinx referencing issues
-  <https://github.com/sphinx-doc/sphinx/issues/3048>`
-  """
+    Resolves `Sphinx referencing issues
+    <https://github.com/sphinx-doc/sphinx/issues/3048>`
+    """
+
+    for obj in args:
+        obj.__module__ = __name__
 
-  for obj in args: obj.__module__ = __name__
 
 __appropriate__(
     chopra,
@@ -28,5 +31,5 @@ __appropriate__(
     Embedding,
     mlp,
     )
-__all__ = [_ for _ in dir() if not _.startswith('_')]
 
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/learn/tensorflow/script/eval_generic.py b/bob/learn/tensorflow/script/eval_generic.py
index e4eb7f3f0bf9e7c2a9a2aa2a8ad0fec6681eb673..f29f756707c3c643711fbb6de9062dd3adb60aba 100644
--- a/bob/learn/tensorflow/script/eval_generic.py
+++ b/bob/learn/tensorflow/script/eval_generic.py
@@ -34,63 +34,8 @@ The configuration files should have the following objects totally:
   hooks
   name
 
-Example configuration::
-
-    import tensorflow as tf
-    from bob.learn.tensorflow.utils.tfrecords import batch_data_and_labels
-
-    model_dir = 'model_dir'
-    tfrecord_filenames = ['tfrecord_filenames']
-    data_shape = (1, 112, 92)  # size of atnt images
-    data_type = tf.uint8
-    batch_size = 2
-    epochs = 1
-    run_once = True
-
-    def eval_input_fn():
-        return batch_data_and_labels(tfrecord_filenames, data_shape, data_type,
-                                     batch_size, epochs=epochs)
-
-    def architecture(images):
-        images = tf.cast(images, tf.float32)
-        logits = tf.reshape(images, [-1, 92 * 112])
-        logits = tf.layers.dense(inputs=logits, units=20,
-                                 activation=tf.nn.relu)
-        return logits
-
-    def model_fn(features, labels, mode, params, config):
-        logits = architecture(features)
-
-        predictions = {
-            # Generate predictions (for PREDICT and EVAL mode)
-            "classes": tf.argmax(input=logits, axis=1),
-            # Add `softmax_tensor` to the graph. It is used for PREDICT and by
-            # the `logging_hook`.
-            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
-        }
-        if mode == tf.estimator.ModeKeys.PREDICT:
-            return tf.estimator.EstimatorSpec(mode=mode,
-                                              predictions=predictions)
-
-        # Calculate Loss (for both TRAIN and EVAL modes)
-        predictor = tf.nn.sparse_softmax_cross_entropy_with_logits(
-            logits=logits, labels=labels)
-        loss = tf.reduce_mean(predictor)
-
-        # Configure the Training Op (for TRAIN mode)
-        if mode == tf.estimator.ModeKeys.TRAIN:
-            global_step = tf.contrib.framework.get_or_create_global_step()
-            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-            train_op = optimizer.minimize(loss, global_step=global_step)
-            return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
-                                              train_op=train_op)
-
-        # Add evaluation metrics (for EVAL mode)
-        eval_metric_ops = {
-            "accuracy": tf.metrics.accuracy(
-                labels=labels, predictions=predictions["classes"])}
-        return tf.estimator.EstimatorSpec(
-            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
+For an example configuration, please see
+bob/learn/tensorflow/examples/mnist/mnist_config.py in this repository.
 """
 from __future__ import absolute_import
 from __future__ import division
@@ -129,8 +74,11 @@ def main(argv=None):
     # Instantiate Estimator
     nn = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
                                 params=model_params, config=run_config)
-
-    evaluated_file = os.path.join(nn.model_dir, name or 'eval', 'evaluated')
+    if name:
+        real_name = name + '_eval'
+    else:
+        real_name = 'eval'
+    evaluated_file = os.path.join(nn.model_dir, real_name, 'evaluated')
     while True:
         evaluated_steps = []
         if os.path.exists(evaluated_file):
diff --git a/bob/learn/tensorflow/script/train_generic.py b/bob/learn/tensorflow/script/train_generic.py
index c543d9bd4615d154a82ba560c1a4e30d45267581..11f7d18a421b8c5ef48196ca254116722f8c5138 100644
--- a/bob/learn/tensorflow/script/train_generic.py
+++ b/bob/learn/tensorflow/script/train_generic.py
@@ -32,63 +32,8 @@ The configuration files should have the following objects totally:
   steps
   max_steps
 
-Example configuration::
-
-    import tensorflow as tf
-    from bob.learn.tensorflow.utils.tfrecords import shuffle_data_and_labels
-
-    model_dir = 'model_dir'
-    tfrecord_filenames = ['tfrecord_filenames']
-    data_shape = (1, 112, 92)  # size of atnt images
-    data_type = tf.uint8
-    batch_size = 2
-    epochs = 1
-    learning_rate = 0.00001
-
-    def train_input_fn():
-        return shuffle_data_and_labels(tfrecord_filenames, data_shape,
-                                       data_type, batch_size, epochs=epochs)
-
-    def architecture(images):
-        images = tf.cast(images, tf.float32)
-        logits = tf.reshape(images, [-1, 92 * 112])
-        logits = tf.layers.dense(inputs=logits, units=20,
-                                 activation=tf.nn.relu)
-        return logits
-
-    def model_fn(features, labels, mode, params, config):
-        logits = architecture(features)
-
-        predictions = {
-            # Generate predictions (for PREDICT and EVAL mode)
-            "classes": tf.argmax(input=logits, axis=1),
-            # Add `softmax_tensor` to the graph. It is used for PREDICT and by
-            # the `logging_hook`.
-            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
-        }
-        if mode == tf.estimator.ModeKeys.PREDICT:
-            return tf.estimator.EstimatorSpec(mode=mode,
-                                              predictions=predictions)
-
-        # Calculate Loss (for both TRAIN and EVAL modes)
-        predictor = tf.nn.sparse_softmax_cross_entropy_with_logits(
-            logits=logits, labels=labels)
-        loss = tf.reduce_mean(predictor)
-
-        # Configure the Training Op (for TRAIN mode)
-        if mode == tf.estimator.ModeKeys.TRAIN:
-            global_step = tf.contrib.framework.get_or_create_global_step()
-            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-            train_op = optimizer.minimize(loss, global_step=global_step)
-            return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
-                                              train_op=train_op)
-
-        # Add evaluation metrics (for EVAL mode)
-        eval_metric_ops = {
-            "accuracy": tf.metrics.accuracy(
-                labels=labels, predictions=predictions["classes"])}
-        return tf.estimator.EstimatorSpec(
-            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
+For an example configuration, please see
+bob/learn/tensorflow/examples/mnist/mnist_config.py in this repository.
 """
 from __future__ import absolute_import
 from __future__ import division
diff --git a/bob/learn/tensorflow/utils/eval.py b/bob/learn/tensorflow/utils/eval.py
index 0fe3c11767526ba70a4f02b1f2e571f0e1a8262f..cf836f6e0d1f6742b10fee3d5fa9390d255f1dab 100644
--- a/bob/learn/tensorflow/utils/eval.py
+++ b/bob/learn/tensorflow/utils/eval.py
@@ -1,6 +1,7 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
+import tensorflow as tf
 
 
 def get_global_step(path):
@@ -18,7 +19,5 @@ def get_global_step(path):
     global_step : int
         The global step number.
     """
-    from tensorflow.python.estimator.estimator import \
-        _load_global_step_from_checkpoint_dir
-    global_step = _load_global_step_from_checkpoint_dir(path)
-    return global_step
+    # ``path`` may be a checkpoint directory; resolve to its latest prefix.
+    checkpoint = tf.train.latest_checkpoint(path) or path
+    reader = tf.train.NewCheckpointReader(checkpoint)
+    return reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
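+
+# Usage sketch (the directory is illustrative): after some training steps,
+#     get_global_step('/tmp/mnist_model')
+# returns the step number stored in the latest checkpoint.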