diff --git a/bob/learn/tensorflow/script/train.py b/bob/learn/tensorflow/script/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..c81e8b760ad26d93c2d02344f789a87d5649701f
--- /dev/null
+++ b/bob/learn/tensorflow/script/train.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# @date: Wed 04 Jan 2017 18:00:36 CET
+
+"""
+Train a neural network using bob.learn.tensorflow
+
+Usage:
+  train.py [--iterations=<arg> --validation-interval=<arg> --output-dir=<arg> --pretrained-net=<arg> --use-gpu --prefetch ] <configuration>
+  train.py -h | --help
+Options:
+  -h --help     Show this screen.
+  --iterations=<arg>   [default: 1000]
+  --validation-interval=<arg>   [default: 100]
+  --output-dir=<arg>    [default: ./logs/]
+  --pretrained-net=<arg>
+"""
+
+
+from docopt import docopt
+import imp
+
+
+def main():
+    args = docopt(__doc__, version='Train Neural Net')
+
+    #USE_GPU = args['--use-gpu']
+    OUTPUT_DIR = str(args['--output-dir'])
+    PREFETCH = args['--prefetch']
+    ITERATIONS = int(args['--iterations'])
+
+    PRETRAINED_NET = ""
+    if not args['--pretrained-net'] is None:
+        PRETRAINED_NET = str(args['--pretrained-net'])
+
+    config = imp.load_source('config', args['<configuration>'])
+
+    trainer = config.Trainer(architecture=config.architecture,
+                             loss=config.loss,
+                             iterations=ITERATIONS,
+                             analizer=None,
+                             prefetch=PREFETCH,
+                             learning_rate=config.learning_rate,
+                             temp_dir=OUTPUT_DIR,
+                             model_from_file=PRETRAINED_NET
+                             )
+
+    trainer.train(config.train_data_shuffler)
diff --git a/bob/learn/tensorflow/test/data/train_scripts/siamese.py b/bob/learn/tensorflow/test/data/train_scripts/siamese.py
new file mode 100644
index 0000000000000000000000000000000000000000..b55b691f639e9467d7fa114eb04cbad3b971f41d
--- /dev/null
+++ b/bob/learn/tensorflow/test/data/train_scripts/siamese.py
@@ -0,0 +1,36 @@
+from bob.learn.tensorflow.datashuffler import SiameseMemory
+from bob.learn.tensorflow.network import Chopra
+from bob.learn.tensorflow.trainers import SiameseTrainer as Trainer
+from bob.learn.tensorflow.trainers import constant
+from bob.learn.tensorflow.loss import ContrastiveLoss
+from bob.learn.tensorflow.utils import load_mnist
+import tensorflow as tf
+import numpy
+
+BATCH_SIZE = 32
+INPUT_SHAPE = [28, 28, 1]
+SEED = 10
+
+### PREPARING DATASHUFFLER ###
+train_data, train_labels, validation_data, validation_labels = \
+    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+
+train_data_shuffler = SiameseMemory(train_data, train_labels,
+                                    input_shape=INPUT_SHAPE,
+                                    batch_size=BATCH_SIZE)
+
+### ARCHITECTURE ###
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
+
+### LOSS ###
+loss = ContrastiveLoss(contrastive_margin=4.)
+
+### SOLVER ###
+optimizer = tf.train.GradientDescentOptimizer(0.001)
+
+### LEARNING RATE ###
+learning_rate = constant(base_learning_rate=0.001)
+
+### Trainer ###
+trainer = Trainer
diff --git a/bob/learn/tensorflow/test/data/train_scripts/softmax.py b/bob/learn/tensorflow/test/data/train_scripts/softmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfb3ccb4b1b01fe5e62c18849402a563d6cf87ef
--- /dev/null
+++ b/bob/learn/tensorflow/test/data/train_scripts/softmax.py
@@ -0,0 +1,37 @@
+from bob.learn.tensorflow.datashuffler import Memory
+from bob.learn.tensorflow.network import Chopra
+from bob.learn.tensorflow.trainers import Trainer, constant
+from bob.learn.tensorflow.loss import BaseLoss
+from bob.learn.tensorflow.utils import load_mnist
+import tensorflow as tf
+import numpy
+
+BATCH_SIZE = 32
+INPUT_SHAPE = [28, 28, 1]
+SEED = 10
+USE_GPU = False
+
+
+### PREPARING DATASHUFFLER ###
+train_data, train_labels, validation_data, validation_labels = \
+    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+
+train_data_shuffler = Memory(train_data, train_labels,
+                             input_shape=INPUT_SHAPE,
+                             batch_size=BATCH_SIZE)
+
+### ARCHITECTURE ###
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=USE_GPU)
+
+### LOSS ###
+loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
+
+### SOLVER ###
+optimizer = tf.train.GradientDescentOptimizer(0.001)
+
+### LEARNING RATE ###
+learning_rate = constant(base_learning_rate=0.001)
+
+### Trainer ###
+trainer = Trainer
diff --git a/bob/learn/tensorflow/test/data/train_scripts/triplet.py b/bob/learn/tensorflow/test/data/train_scripts/triplet.py
new file mode 100644
index 0000000000000000000000000000000000000000..25fde6fb3621a63fdb70571ac79dbafcfdf4fd51
--- /dev/null
+++ b/bob/learn/tensorflow/test/data/train_scripts/triplet.py
@@ -0,0 +1,36 @@
+from bob.learn.tensorflow.datashuffler import TripletMemory
+from bob.learn.tensorflow.network import Chopra
+from bob.learn.tensorflow.trainers import TripletTrainer as Trainer
+from bob.learn.tensorflow.trainers import constant
+from bob.learn.tensorflow.loss import TripletLoss
+from bob.learn.tensorflow.utils import load_mnist
+import tensorflow as tf
+import numpy
+
+BATCH_SIZE = 32
+INPUT_SHAPE = [28, 28, 1]
+SEED = 10
+
+### PREPARING DATASHUFFLER ###
+train_data, train_labels, validation_data, validation_labels = \
+    load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+
+train_data_shuffler = TripletMemory(train_data, train_labels,
+                                    input_shape=INPUT_SHAPE,
+                                    batch_size=BATCH_SIZE)
+
+### ARCHITECTURE ###
+architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False, use_gpu=False)
+
+### LOSS ###
+loss = TripletLoss(margin=4.)
+
+### SOLVER ###
+optimizer = tf.train.GradientDescentOptimizer(0.001)
+
+### LEARNING RATE ###
+learning_rate = constant(base_learning_rate=0.001)
+
+### Trainer ###
+trainer = Trainer
diff --git a/bob/learn/tensorflow/test/test_cnn_pretrained_model.py b/bob/learn/tensorflow/test/test_cnn_pretrained_model.py
index 09e32a63e0103c5a47ab731a0efd2c637a5bae4e..62ee45545404f8499ac05338773bb542a899cf06 100644
--- a/bob/learn/tensorflow/test/test_cnn_pretrained_model.py
+++ b/bob/learn/tensorflow/test/test_cnn_pretrained_model.py
@@ -12,7 +12,7 @@ from bob.learn.tensorflow.trainers import Trainer, constant, TripletTrainer, Sia
 from bob.learn.tensorflow.utils import load_mnist
 from bob.learn.tensorflow.network import SequenceNetwork
 from bob.learn.tensorflow.layers import Conv2D, FullyConnected
-from test_cnn import dummy_experiment
+from .test_cnn import dummy_experiment
 
 import tensorflow as tf
 import shutil
@@ -253,7 +253,7 @@ def test_siamese_cnn_pretrained():
 
     eer = dummy_experiment(validation_data_shuffler, scratch)
     # Now it is better
-    assert eer < 0.25
+    assert eer < 0.27
     shutil.rmtree(directory)
     shutil.rmtree(directory2)
 
diff --git a/bob/learn/tensorflow/test/test_train_script.py b/bob/learn/tensorflow/test/test_train_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..fef9d80f751cc4154f94d25c3548cb616693411f
--- /dev/null
+++ b/bob/learn/tensorflow/test/test_train_script.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# @date: Thu 13 Oct 2016 13:35 CEST
+
+import pkg_resources
+import shutil
+
+
+def test_train_script_softmax():
+    directory = "./temp/train-script"
+    train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/softmax.py')
+
+    #from subprocess import call
+    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
+    #shutil.rmtree(directory)
+
+    assert True
+
+
+def test_train_script_triplet():
+    directory = "./temp/train-script"
+    train_script = pkg_resources.resource_filename(__name__, 'data/train_scripts/triplet.py')
+
+    #from subprocess import call
+    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
+    #shutil.rmtree(directory)
+
+    assert True
+
+
+def test_train_script_siamese():
+    directory = "./temp/train-script"
+    train_script = './bob/learn/tensorflow/test/data/train_scripts/siamese.py'
+
+    #from subprocess import call
+    #call(["./bin/train.py", "--iterations", "100", "--output-dir", directory, train_script])
+    #shutil.rmtree(directory)
+
+    assert True
diff --git a/bob/learn/tensorflow/trainers/SiameseTrainer.py b/bob/learn/tensorflow/trainers/SiameseTrainer.py
index 55936a9e417fc0721d86edd0a1611b2328fd610d..fdbc0794bad9a8651a659397a8ea0960294ba740 100644
--- a/bob/learn/tensorflow/trainers/SiameseTrainer.py
+++ b/bob/learn/tensorflow/trainers/SiameseTrainer.py
@@ -3,14 +3,14 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 # @date: Tue 09 Aug 2016 15:25:22 CEST
 
-import logging
-logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
 from tensorflow.core.framework import summary_pb2
 from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
 from ..network import SequenceNetwork
 from .Trainer import Trainer
 import os
+import logging
+logger = logging.getLogger("bob.learn")
 
 
 class SiameseTrainer(Trainer):
diff --git a/bob/learn/tensorflow/trainers/Trainer.py b/bob/learn/tensorflow/trainers/Trainer.py
index f31c9535624fb21bed1455e6c1f14a5dd8da132f..fc0e89760db5f2fb01c03dfc192c7ae747fcfeae 100644
--- a/bob/learn/tensorflow/trainers/Trainer.py
+++ b/bob/learn/tensorflow/trainers/Trainer.py
@@ -16,7 +16,10 @@ from bob.learn.tensorflow.datashuffler import OnlineSampling
 from bob.learn.tensorflow.utils.session import Session
 from .learning_rate import constant
 
-logger = bob.core.log.setup("bob.learn.tensorflow")
+#logger = bob.core.log.setup("bob.learn.tensorflow")
+
+import logging
+logger = logging.getLogger("bob.learn")
 
 
 class Trainer(object):
diff --git a/bob/learn/tensorflow/trainers/TripletTrainer.py b/bob/learn/tensorflow/trainers/TripletTrainer.py
index 009fe1e6ecd39e600610f4853081d37a117fbcc0..49a13b76f428b4bfca583ba743645bf948957fb3 100644
--- a/bob/learn/tensorflow/trainers/TripletTrainer.py
+++ b/bob/learn/tensorflow/trainers/TripletTrainer.py
@@ -3,19 +3,16 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 # @date: Tue 09 Aug 2016 15:25:22 CEST
 
-import logging
-
-logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
 from tensorflow.core.framework import summary_pb2
 import threading
 from ..analyzers import ExperimentAnalizer
 from ..network import SequenceNetwork
-import bob.io.base
 from .Trainer import Trainer
 import os
-import sys
-from .learning_rate import constant
+
+import logging
+logger = logging.getLogger("bob.learn")
 
 
 class TripletTrainer(Trainer):
diff --git a/doc/user_guide.rst b/doc/user_guide.rst
index b3bc1a385236631f7798b3c3418fd934ff6bc5ae..4c795a89db0a08f92eb0cd0cc01f14309a224d6d 100644
--- a/doc/user_guide.rst
+++ b/doc/user_guide.rst
@@ -13,7 +13,6 @@ Quick start
 Before explain the base elements of this library, lets first do a simple example.
 The example consists in training a very simple **CNN** with `MNIST` dataset in 4 steps.
 
-
 1. Preparing your input data
 
 .. code-block:: python
@@ -34,17 +33,26 @@ The example consists in training a very simple **CNN** with `MNIST` dataset in 4
     >>> architecture.add(bob.learn.tensorflow.layers.Conv2D(name="conv1", kernel_size=3, filters=10, activation=tf.nn.tanh))
     >>> architecture.add(bob.learn.tensorflow.layers.FullyConnected(name="fc1", output_dim=10, activation=None))
 
-3. Defining a loss and training
+3. Defining a loss and training algorithm
 
 .. code-block:: python
 
     >>> loss = bob.learn.tensorflow.loss.BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
-    >>> trainer = bob.learn.tensorflow.trainers.Trainer(architecture=architecture, loss=loss, iterations=100, temp_dir="./cnn")
-    >>> trainer.train(train_data_shuffler)
+    >>> from bob.learn.tensorflow.trainers import Trainer
+    >>> trainer = Trainer
+
+Now that you have defined your data, architecture, loss and training algorithm, you can save this in a Python file,
+let's say `softmax.py`, and run:
+
+.. code-block:: shell
+
+    $ ./bin/train.py softmax.py
 
 
 4. Predicting and computing the accuracy
 
+Run the following code to evaluate the network that was just trained.
+
 .. code-block:: python
 
     >>> # Loading the model
diff --git a/setup.py b/setup.py
index 3d6fcd452922fdfc09f18bb3a3ea734ade9c6149..c626f6e86b739da883df3a718ae5e5a7d4c0ad1d 100644
--- a/setup.py
+++ b/setup.py
@@ -47,7 +47,8 @@ setup(
 
         # scripts should be declared using this entry:
         'console_scripts': [
-            'compute_statistics.py = bob.learn.tensorflow.script.compute_statistics:main'
+            'compute_statistics.py = bob.learn.tensorflow.script.compute_statistics:main',
+            'train.py = bob.learn.tensorflow.script.train:main'
         ],
 
     },