diff --git a/bob/learn/tensorflow/analyzers/ExperimentAnalizer.py b/bob/learn/tensorflow/analyzers/ExperimentAnalizer.py
index c70a95bbf4f2f32006aa2aca3a198ead4ec3475a..8a838bcb7a73b2da0610889a57b1ab290362ade2 100644
--- a/bob/learn/tensorflow/analyzers/ExperimentAnalizer.py
+++ b/bob/learn/tensorflow/analyzers/ExperimentAnalizer.py
@@ -23,7 +23,7 @@ class ExperimentAnalizer:
 
     """
 
-    def __init__(self, data_shuffler, machine, session, convergence_threshold=0.01, convergence_reference='eer'):
+    def __init__(self, convergence_threshold=0.01, convergence_reference='eer'):
         """
         Use the CNN as feature extractor for a n-class classification
 
@@ -38,9 +38,9 @@ class ExperimentAnalizer:
 
         """
 
-        self.data_shuffler = data_shuffler
-        self.machine = machine
-        self.session = session
+        self.data_shuffler = None
+        self.network = None
+        self.session = None
 
         # Statistics
         self.eer = []
@@ -48,16 +48,21 @@ class ExperimentAnalizer:
         self.far100 = []
         self.far1000 = []
 
-    def __call__(self):
+    def __call__(self, data_shuffler, network, session):
+
+        if self.data_shuffler is None:
+            self.data_shuffler = data_shuffler
+            self.network = network
+            self.session = session
 
         # Extracting features for enrollment
         enroll_data, enroll_labels = self.data_shuffler.get_batch()
-        enroll_features = self.machine(enroll_data, session=self.session)
+        enroll_features = self.network(enroll_data, session=self.session)
         del enroll_data
 
         # Extracting features for probing
         probe_data, probe_labels = self.data_shuffler.get_batch()
-        probe_features = self.machine(probe_data, session=self.session)
+        probe_features = self.network(probe_data, session=self.session)
         del probe_data
 
         # Creating models
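
Both analizers now share the same lazy-binding call contract: the trainer passes the data shuffler, the network and the session on every call, and the analizer captures them on first use. A minimal sketch of the convention (`validation_shuffler`, `network` and `step` are illustrative names, not part of this diff):

    analizer = ExperimentAnalizer(convergence_threshold=0.01, convergence_reference='eer')
    summary = analizer(validation_shuffler, network, session)  # dependencies bind on first call
    validation_writer.add_summary(summary, step)
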
diff --git a/bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py b/bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py
index f87cb0a60354cd8553a286622cc305ec8af973ab..b7b4452f59740ebb004a8b2b6ca1a178cfa24304 100644
--- a/bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py
+++ b/bob/learn/tensorflow/analyzers/SoftmaxAnalizer.py
@@ -31,66 +31,20 @@ class SoftmaxAnalizer(object):
         """
 
         self.data_shuffler = None
-        self.trainer = None
+        self.network = None
         self.session = None
 
-    def __call__(self, data_shuffler, trainer, session):
+    def __call__(self, data_shuffler, network, session):
 
         if self.data_shuffler is None:
             self.data_shuffler = data_shuffler
-            self.trainer = trainer
+            self.network = network
             self.session = session
 
         # Creating the graph
         feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
         data, labels = self.data_shuffler.get_batch()
-        graph = self.trainer.architecture.compute_graph(feature_batch)
-
-        predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
-        accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
-
-        summaries = []
-        summaries.append(summary_pb2.Summary.Value(tag="accuracy_validation", simple_value=float(accuracy)))
-        return summary_pb2.Summary(value=summaries)
-
-
-class SoftmaxSiameseAnalizer(object):
-    """
-    Analizer.
-    """
-
-    def __init__(self):
-        """
-        Softmax analizer
-
-        ** Parameters **
-
-          data_shuffler:
-          graph:
-          session:
-          convergence_threshold:
-          convergence_reference: References to analize the convergence. Possible values are `eer`, `far10`, `far10`
-
-
-        """
-
-        self.data_shuffler = None
-        self.trainer = None
-        self.session = None
-
-    def __call__(self, data_shuffler, machine, session):
-
-        if self.data_shuffler is None:
-            self.data_shuffler = data_shuffler
-            self.trainer = trainer
-            self.session = session
-
-        # Creating the graph
-        #feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
-        feature_left_batch, feature_right_batch label_batch = self.data_shuffler.get_placeholders_pair(name="validation_accuracy")
-
-        batch_left, batch_right, labels = self.data_shuffler.get_batch()
-        left = self.machine.compute_graph(feature_batch)
+        graph = self.network.compute_graph(feature_batch)
 
         predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
         accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
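
With the trainer dependency gone, `SoftmaxAnalizer` builds its graph straight from the network. What one call computes, restated as a standalone sketch (assuming `data` and `labels` come from `data_shuffler.get_batch()`):

    graph = network.compute_graph(feature_batch)
    predictions = numpy.argmax(session.run(graph, feed_dict={feature_batch: data}), 1)
    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
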
diff --git a/bob/learn/tensorflow/data/BaseDataShuffler.py b/bob/learn/tensorflow/data/BaseDataShuffler.py
index 16a40b2aa12d7a508217325b2a5f2b049e0c280c..f766555e53001ed27a187e253633a084944f6123 100644
--- a/bob/learn/tensorflow/data/BaseDataShuffler.py
+++ b/bob/learn/tensorflow/data/BaseDataShuffler.py
@@ -72,11 +72,26 @@ class BaseDataShuffler(object):
         if self.data2_placeholder is None:
             self.data2_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
 
-        if self.label_placeholder:
+        if self.label_placeholder is None:
             self.label_placeholder = tf.placeholder(tf.int64, shape=[None, ])
 
         return self.data_placeholder, self.data2_placeholder, self.label_placeholder
 
+    def get_placeholders_triplet_forprefetch(self, name=""):
+        """
+        Returns the anchor, positive and negative placeholders with an open batch dimension, ready for prefetching
+        """
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.data3_placeholder is None:
+            self.data3_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        return self.data_placeholder, self.data2_placeholder, self.data3_placeholder
+
     def get_placeholders(self, name=""):
         """
         Returns a place holder with the size of your batch
@@ -102,10 +117,26 @@ class BaseDataShuffler(object):
             self.data2_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_left")
 
         if self.label_placeholder is None:
-            self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0], name="label")
+            self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0], name=name+"_label")
 
         return self.data_placeholder, self.data2_placeholder, self.label_placeholder
 
+    def get_placeholders_triplet(self, name=""):
+        """
+        Returns the anchor, positive and negative placeholders sized to your batch
+        """
+
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_anchor")
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_positive")
+
+        if self.data3_placeholder is None:
+            self.data3_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_negative")
+
+        return self.data_placeholder, self.data2_placeholder, self.data3_placeholder
+
     def get_genuine_or_not(self, input_data, input_labels, genuine=True):
 
         if genuine:
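
The two triplet getters mirror the existing pair ones: `get_placeholders_triplet` returns placeholders with the full batch shape, while the `_forprefetch` variant leaves the batch dimension open (`[None] + shape[1:]`) so that `queue.enqueue_many` can accept batches of any size. Note that both rely on a `self.data3_placeholder` attribute being initialized to `None` in `__init__`, which is outside this hunk. A usage sketch (`train_op` is an illustrative name):

    anchor_ph, positive_ph, negative_ph = shuffler.get_placeholders_triplet(name="train")
    feed_dict = {anchor_ph: batch_anchor,
                 positive_ph: batch_positive,
                 negative_ph: batch_negative}
    session.run(train_op, feed_dict=feed_dict)
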
diff --git a/bob/learn/tensorflow/loss/ContrastiveLoss.py b/bob/learn/tensorflow/loss/ContrastiveLoss.py
index 686b97a7e1717783b6e0de76a3ccbc963e99acbd..76065f347462e85a70e10f6301ca067804eb59bb 100644
--- a/bob/learn/tensorflow/loss/ContrastiveLoss.py
+++ b/bob/learn/tensorflow/loss/ContrastiveLoss.py
@@ -44,4 +44,3 @@ class ContrastiveLoss(BaseLoss):
             loss = 0.5 * (within_class + between_class)
 
             return tf.reduce_mean(loss), tf.reduce_mean(between_class), tf.reduce_mean(within_class)
-            #return loss, between_class, within_class, label, left_feature, right_feature, d
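
For reference, the terms combined in `loss = 0.5 * (within_class + between_class)` follow the usual contrastive formulation (a hedged restatement, since only the final combination is visible in this hunk; `d` is the Euclidean distance between the two embeddings, `m` the margin, and `y = 1` marks a genuine pair under the usual convention, though the label convention is set by the data shuffler):

    \mathcal{L} = \tfrac{1}{2}\big[\, y\, d^2 + (1 - y)\, \max(m - d,\ 0)^2 \,\big]

The within-class term pulls genuine pairs together; the between-class term pushes impostor pairs at least `m` apart. Returning the two means separately lets them be tracked in the summaries.
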
diff --git a/bob/learn/tensorflow/loss/TripletLoss.py b/bob/learn/tensorflow/loss/TripletLoss.py
index 2806b1ecef8778970912272f4452c6508ed5259f..cacc1e313d7754210681dfbafc747602c9736650 100644
--- a/bob/learn/tensorflow/loss/TripletLoss.py
+++ b/bob/learn/tensorflow/loss/TripletLoss.py
@@ -41,5 +41,4 @@ class TripletLoss(BaseLoss):
             d_negative = tf.square(compute_euclidean_distance(anchor_feature, negative_feature))
 
             loss = tf.maximum(0., d_positive - d_negative + self.margin)
-            return tf.reduce_mean(loss), tf.reduce_mean(d_positive), tf.reduce_mean(d_negative)
-            #return loss, d_positive, d_negative
+            return tf.reduce_mean(loss), tf.reduce_mean(d_negative), tf.reduce_mean(d_positive)
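
The reordered return lines up with how `TripletTrainer.compute_graph` unpacks it later in this diff:

    graph, between_class_graph, within_class_graph = self.loss(train_anchor_graph,
                                                               train_positive_graph,
                                                               train_negative_graph)
    # between_class_graph: mean squared anchor-negative distance (should grow)
    # within_class_graph:  mean squared anchor-positive distance (should shrink)
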
diff --git a/bob/learn/tensorflow/script/train_mnist.py b/bob/learn/tensorflow/script/train_mnist.py
index 945f9a34b48357e17cbc77f7e5df627173523ec9..b5c2826781ad1e9240f2e3f46a99d616d74bd1c1 100644
--- a/bob/learn/tensorflow/script/train_mnist.py
+++ b/bob/learn/tensorflow/script/train_mnist.py
@@ -26,6 +26,7 @@ from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
 from bob.learn.tensorflow.network import Lenet, MLP, Dummy, Chopra
 from bob.learn.tensorflow.trainers import Trainer
 from bob.learn.tensorflow.loss import BaseLoss
+from bob.learn.tensorflow.analyzers import ExperimentAnalizer, SoftmaxAnalizer
 
 import numpy
 
@@ -93,7 +94,11 @@ def main():
         #architecture = Lenet(seed=SEED)
         #architecture = Dummy(seed=SEED)
         loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
-        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS, prefetch=False, temp_dir="./LOGS/cnn")
+        trainer = Trainer(architecture=architecture,
+                          loss=loss,
+                          iterations=ITERATIONS,
+                          analizer=ExperimentAnalizer(),
+                          prefetch=True,
+                          temp_dir="./LOGS/cnn")
         trainer.train(train_data_shuffler, validation_data_shuffler)
         #trainer.train(train_data_shuffler)
     else:
diff --git a/bob/learn/tensorflow/script/train_mnist_siamese.py b/bob/learn/tensorflow/script/train_mnist_siamese.py
index b63fa2f2dd8bb2cf53d87ceeaa5ff0bfa0b1a948..65d1980ddb8b4b132fb893ac8f5b34c38f01b305 100644
--- a/bob/learn/tensorflow/script/train_mnist_siamese.py
+++ b/bob/learn/tensorflow/script/train_mnist_siamese.py
@@ -132,7 +132,8 @@ def main():
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST,
                                  optimizer=optimizer,
-                                 temp_dir="./LOGS/siamese-cnn")
+                                 prefetch=True,
+                                 temp_dir="./LOGS/siamese-cnn-prefetch")
 
         #import ipdb; ipdb.set_trace();
         trainer.train(train_data_shuffler, validation_data_shuffler)
diff --git a/bob/learn/tensorflow/script/train_mnist_triplet.py b/bob/learn/tensorflow/script/train_mnist_triplet.py
index c46eb1c5e5f1816c238e9125cebb2ef91c43cbfd..4e2f4d0b32b5dc00eac11e545b3a3e9dc2f7bde8 100644
--- a/bob/learn/tensorflow/script/train_mnist_triplet.py
+++ b/bob/learn/tensorflow/script/train_mnist_triplet.py
@@ -39,89 +39,30 @@ def main():
     perc_train = 0.9
 
     # Loading data
-    mnist = False
+    train_data, train_labels, validation_data, validation_labels = \
+        util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
+    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
+    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
 
-    if mnist:
-        train_data, train_labels, validation_data, validation_labels = \
-            util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
-        train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
-        validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
+    train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
+                                             input_shape=[28, 28, 1],
+                                             scale=True,
+                                             batch_size=BATCH_SIZE)
 
-        train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
-                                                 input_shape=[28, 28, 1],
-                                                 scale=True,
-                                                 batch_size=BATCH_SIZE)
-
-        validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
-                                                      input_shape=[28, 28, 1],
-                                                      scale=True,
-                                                      batch_size=VALIDATION_BATCH_SIZE)
-
-    else:
-        import bob.db.mobio
-        db_mobio = bob.db.mobio.Database()
-
-        import bob.db.casia_webface
-        db_casia = bob.db.casia_webface.Database()
-
-        # Preparing train set
-        train_objects = db_casia.objects(groups="world")
-        #train_objects = db.objects(groups="world")
-        train_labels = [int(o.client_id) for o in train_objects]
-        directory = "/idiap/resource/database/CASIA-WebFace/CASIA-WebFace"
-
-        train_file_names = [o.make_path(
-            directory=directory,
-            extension="")
-                            for o in train_objects]
-        #import ipdb;
-        #ipdb.set_trace();
-
-        #train_file_names = [o.make_path(
-        #    directory="/idiap/group/biometric/databases/orl",
-        #    extension=".pgm")
-        #                    for o in train_objects]
-
-        train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
-                                               input_shape=[250, 250, 3],
-                                               batch_size=BATCH_SIZE)
-
-        #train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
-        #                                       input_shape=[56, 46, 1],
-        #                                       batch_size=BATCH_SIZE)
-
-        # Preparing train set
-        directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA/preprocessed"
-        validation_objects = db_mobio.objects(protocol="male", groups="dev")
-        validation_labels = [o.client_id for o in validation_objects]
-        #validation_file_names = [o.make_path(
-        #    directory="/idiap/group/biometric/databases/orl",
-        #    extension=".pgm")
-        #                         for o in validation_objects]
-
-        validation_file_names = [o.make_path(
-            directory=directory,
-            extension=".hdf5")
-                                 for o in validation_objects]
-
-        validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
-                                                    input_shape=[250, 250, 3],
-                                                    batch_size=VALIDATION_BATCH_SIZE)
-        #validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
-        #                                            input_shape=[56, 46, 1],
-        #                                            batch_size=VALIDATION_BATCH_SIZE)
+    validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
+                                                  input_shape=[28, 28, 1],
+                                                  scale=True,
+                                                  batch_size=VALIDATION_BATCH_SIZE)
 
     # Preparing the architecture
     n_classes = len(train_data_shuffler.possible_labels)
     #n_classes = 200
     cnn = True
     if cnn:
-
-        #architecture = Chopra(default_feature_layer="fc7")
-        architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16,use_gpu=USE_GPU)
+        architecture = Chopra(seed=SEED, fc1_output=n_classes)
+        #architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16,use_gpu=USE_GPU)
         #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
         #architecture = Dummy(seed=SEED)
-
         #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)
 
         loss = TripletLoss()
@@ -130,6 +71,8 @@ def main():
                                  loss=loss,
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST,
+                                 temp_dir="cnn-triplet",
+                                 prefetch=True,
                                  optimizer=optimizer
                                  )
         trainer.train(train_data_shuffler, validation_data_shuffler)
@@ -140,6 +83,7 @@ def main():
         loss = TripletLoss()
         trainer = TripletTrainer(architecture=mlp,
                                  loss=loss,
+                                 temp_dir="dnn-triplet",
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST)
         trainer.train(train_data_shuffler, validation_data_shuffler)
diff --git a/bob/learn/tensorflow/script/train_mobio.py b/bob/learn/tensorflow/script/train_mobio.py
new file mode 100644
index 0000000000000000000000000000000000000000..14532667bba46bb5d9cc4623716f9d1b23fe0cfb
--- /dev/null
+++ b/bob/learn/tensorflow/script/train_mobio.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# @date: Wed 11 May 2016 09:39:36 CEST 
+
+
+"""
+Simple script that trains a network on the MOBIO database
+
+Usage:
+  train_mobio.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
+  train_mobio.py -h | --help
+Options:
+  -h --help     Show this screen.
+  --batch-size=<arg>  [default: 1]
+  --validation-batch-size=<arg>   [default: 128]
+  --iterations=<arg>  [default: 30000]
+  --validation-interval=<arg>  [default: 100]
+"""
+
+from docopt import docopt
+import tensorflow as tf
+from .. import util
+SEED = 10
+from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
+from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy
+from bob.learn.tensorflow.trainers import SiameseTrainer, Trainer, TripletTrainer
+from bob.learn.tensorflow.loss import ContrastiveLoss, BaseLoss, TripletLoss
+import numpy
+
+
+def main():
+    args = docopt(__doc__, version='MOBIO training with TensorFlow')
+
+    BATCH_SIZE = int(args['--batch-size'])
+    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
+    ITERATIONS = int(args['--iterations'])
+    VALIDATION_TEST = int(args['--validation-interval'])
+    USE_GPU = args['--use-gpu']
+    perc_train = 0.9
+
+    import bob.db.mobio
+    db_mobio = bob.db.mobio.Database()
+    directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA/preprocessed"
+
+    # Preparing train set
+    #train_objects = db_mobio.objects(protocol="male", groups="world")
+    train_objects = db_mobio.objects(protocol="male", groups="dev")
+    train_labels = [int(o.client_id) for o in train_objects]
+    n_classes = len(set(train_labels))
+
+    train_file_names = [o.make_path(
+        directory=directory,
+        extension=".hdf5")
+                        for o in train_objects]
+    train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
+                                           input_shape=[125, 125, 3],
+                                           batch_size=BATCH_SIZE)
+
+    # Preparing the validation set
+    validation_objects = db_mobio.objects(protocol="male", groups="dev")
+    #validation_objects = db_mobio.objects(protocol="male", groups="world")
+    validation_labels = [o.client_id for o in validation_objects]
+
+    validation_file_names = [o.make_path(
+        directory=directory,
+        extension=".hdf5")
+                             for o in validation_objects]
+    validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
+                                                input_shape=[125, 125, 3],
+                                                batch_size=VALIDATION_BATCH_SIZE)
+    # Preparing the architecture
+    #architecture = Chopra(seed=SEED, fc1_output=n_classes)
+    architecture = Chopra(seed=SEED)
+    optimizer = tf.train.GradientDescentOptimizer(0.00000001)
+
+
+    #loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
+    #trainer = Trainer(architecture=architecture, loss=loss,
+    #                  iterations=ITERATIONS,
+    #                  prefetch=False,
+    #                  optimizer=optimizer,
+    #                  temp_dir="./LOGS/cnn")
+
+    #loss = ContrastiveLoss(contrastive_margin=4.)
+    #trainer = SiameseTrainer(architecture=architecture, loss=loss,
+    #                         iterations=ITERATIONS,
+    #                         prefetch=True,
+    #                         optimizer=optimizer,
+    #                         temp_dir="./LOGS_MOBIO/siamese-cnn-prefetch")
+
+    loss = TripletLoss(margin=4.)
+    trainer = TripletTrainer(architecture=architecture, loss=loss,
+                             iterations=ITERATIONS,
+                             prefetch=True,
+                             optimizer=optimizer,
+                             temp_dir="./LOGS_MOBIO/triplet-cnn-prefetch")
+
+    trainer.train(train_data_shuffler, validation_data_shuffler)
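
One detail worth knowing about the usage string above: docopt only recognizes defaults written exactly as `[default: value]`, space included. A pattern like `[default:128]` is silently ignored, the option resolves to `None` when omitted, and `int(None)` raises a `TypeError`. A sketch of how the options above resolve:

    args = docopt(__doc__)
    BATCH_SIZE = int(args['--batch-size'])                        # "1"   -> 1
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])  # "128" -> 128
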
diff --git a/bob/learn/tensorflow/trainers/SiameseTrainer.py b/bob/learn/tensorflow/trainers/SiameseTrainer.py
index 38499ee52f1b5d9fbcdfda23ce756741a8cbe33f..29dc1413e4e31cec52ac2551e1c3ed2a02ed2af3 100644
--- a/bob/learn/tensorflow/trainers/SiameseTrainer.py
+++ b/bob/learn/tensorflow/trainers/SiameseTrainer.py
@@ -58,7 +58,7 @@ class SiameseTrainer(Trainer):
                  prefetch=False,
 
                  ## Analizer
-                 analizer=SoftmaxAnalizer(),
+                 analizer=ExperimentAnalizer(),
 
                  verbosity_level=2):
 
@@ -85,10 +85,13 @@ class SiameseTrainer(Trainer):
             verbosity_level=verbosity_level
         )
 
-        self.between_class_graph = None
-        self.within_class_graph = None
+        self.between_class_graph_train = None
+        self.within_class_graph_train = None
 
-    def compute_graph(self, data_shuffler, prefetch=False, name=""):
+        self.between_class_graph_validation = None
+        self.within_class_graph_validation = None
+
+    def compute_graph(self, data_shuffler, prefetch=False, name="", train=True):
         """
         Computes the graph for the trainer.
 
@@ -102,13 +105,7 @@ class SiameseTrainer(Trainer):
 
         # Defining place holders
         if prefetch:
-            placeholder_left_data, placeholder_right_data, placeholder_labels = data_shuffler.get_placeholders_pair_forprefetch(name="train")
-
-            # Creating two graphs
-            #placeholder_left_data, placeholder_labels = data_shuffler. \
-            #    get_placeholders_forprefetch(name="train_left")
-            #placeholder_right_data, _ = data_shuffler.get_placeholders(name="train_right")
-            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name="train_")
+            placeholder_left_data, placeholder_right_data, placeholder_labels = data_shuffler.get_placeholders_pair_forprefetch(name=name)
 
             # Defining a placeholder queue for prefetching
             queue = tf.FIFOQueue(capacity=100,
@@ -126,9 +123,7 @@ class SiameseTrainer(Trainer):
                 raise ValueError("The variable `architecture` must be an instance of "
                                  "`bob.learn.tensorflow.network.SequenceNetwork`")
         else:
-            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name="train_")
-            #feature_left_batch, label_batch = data_shuffler.get_placeholders(name="train_left")
-            #feature_right_batch, _ = data_shuffler.get_placeholders(name="train_right")
+            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name=name)
 
         # Creating the siamese graph
         train_left_graph = self.architecture.compute_graph(feature_left_batch)
@@ -138,8 +133,12 @@ class SiameseTrainer(Trainer):
                                                                    train_left_graph,
                                                                    train_right_graph)
 
-        self.between_class_graph = between_class_graph
-        self.within_class_graph = within_class_graph
+        if train:
+            self.between_class_graph_train = between_class_graph
+            self.within_class_graph_train = within_class_graph
+        else:
+            self.between_class_graph_validation = between_class_graph
+            self.within_class_graph_validation = within_class_graph
 
         return graph
 
@@ -153,7 +152,7 @@ class SiameseTrainer(Trainer):
         """
 
         batch_left, batch_right, labels = data_shuffler.get_pair()
-        placeholder_left_data, placeholder_right_data, placeholder_label = data_shuffler.get_placeholders_pair(name="train")
+        placeholder_left_data, placeholder_right_data, placeholder_label = data_shuffler.get_placeholders_pair()
 
         feed_dict = {placeholder_left_data: batch_left,
                      placeholder_right_data: batch_right,
@@ -172,12 +171,12 @@ class SiameseTrainer(Trainer):
         """
         if self.prefetch:
             _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                             self.training_graph, self.between_class_graph, self.within_class_graph,
+                                             self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
                                              self.learning_rate, self.summaries_train])
         else:
             feed_dict = self.get_feed_dict(self.train_data_shuffler)
             _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                             self.training_graph, self.between_class_graph, self.within_class_graph,
+                                             self.training_graph, self.between_class_graph_train, self.within_class_graph_train,
                                              self.learning_rate, self.summaries_train], feed_dict=feed_dict)
 
         logger.info("Loss training set step={0} = {1}".format(step, l))
@@ -197,12 +196,16 @@ class SiameseTrainer(Trainer):
         if self.validation_summary_writter is None:
             self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
 
-        self.validation_graph = self.compute_graph(data_shuffler, name="validation")
+        self.validation_graph = self.compute_graph(data_shuffler, name="validation", train=False)
         feed_dict = self.get_feed_dict(data_shuffler)
-        l = session.run(self.validation_graph, feed_dict=feed_dict)
+        l, bt_class, wt_class = session.run([self.validation_graph,
+                                             self.between_class_graph_validation, self.within_class_graph_validation],
+                                             feed_dict=feed_dict)
 
         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
+        summaries.append(summary_pb2.Summary.Value(tag="between_class_loss", simple_value=float(bt_class)))
+        summaries.append(summary_pb2.Summary.Value(tag="within_class_loss", simple_value=float(wt_class)))
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
         logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))
 
@@ -213,8 +216,8 @@ class SiameseTrainer(Trainer):
 
         # Train summary
         tf.scalar_summary('loss', self.training_graph, name="train")
-        tf.scalar_summary('between_class_loss', self.between_class_graph, name="train")
-        tf.scalar_summary('within_class_loss', self.within_class_graph, name="train")
+        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
+        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
         tf.scalar_summary('lr', self.learning_rate, name="train")
         return tf.merge_all_summaries()
 
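The prefetch branch of `compute_graph` follows the usual TF 0.x queue pattern: placeholders with an open batch dimension feed `queue.enqueue_many`, the graph trains on `queue.dequeue_many(batch_size)`, and a daemon thread keeps the queue full. A condensed sketch (the dtypes and shapes here are assumptions, in the spirit of the triplet version below):

    queue = tf.FIFOQueue(capacity=100,
                         dtypes=[tf.float32, tf.float32, tf.int64],
                         shapes=[image_shape, image_shape, []])
    enqueue_op = queue.enqueue_many([left_ph, right_ph, label_ph])
    left_batch, right_batch, label_batch = queue.dequeue_many(batch_size)
    # a daemon thread repeatedly runs: session.run(enqueue_op, feed_dict=...)
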
diff --git a/bob/learn/tensorflow/trainers/Trainer.py b/bob/learn/tensorflow/trainers/Trainer.py
index 1043dd165e2f1c885f457ff3edc8a8a628763b8e..4f8c648c7e07d404a42d7b3db877c9db4976446d 100644
--- a/bob/learn/tensorflow/trainers/Trainer.py
+++ b/bob/learn/tensorflow/trainers/Trainer.py
@@ -185,14 +185,14 @@ class Trainer(object):
             step: Iteration number
 
         """
-
-        if self.validation_summary_writter is None:
-            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
-
+        # Build the validation graph before creating the summary writer
         self.validation_graph = self.compute_graph(data_shuffler, name="validation")
         feed_dict = self.get_feed_dict(data_shuffler)
         l = session.run(self.validation_graph, feed_dict=feed_dict)
 
+        if self.validation_summary_writter is None:
+            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
+
         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
         self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
@@ -283,7 +283,6 @@ class Trainer(object):
 
             # TENSOR BOARD SUMMARY
             self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
-
             for step in range(self.iterations):
 
                 start = time.time()
@@ -297,7 +296,7 @@ class Trainer(object):
 
                     if self.analizer is not None:
                         self.validation_summary_writter.add_summary(self.analizer(
-                             validation_data_shuffler, self, session), step)
+                             validation_data_shuffler, self.architecture, session), step)
 
             logger.info("Training finally finished")
 
diff --git a/bob/learn/tensorflow/trainers/TripletTrainer.py b/bob/learn/tensorflow/trainers/TripletTrainer.py
index fcb2c2c1c3c7b98daae8eef449b7e924c2dc8a4b..bfa91265e19e921f885326763129740db59508cd 100644
--- a/bob/learn/tensorflow/trainers/TripletTrainer.py
+++ b/bob/learn/tensorflow/trainers/TripletTrainer.py
@@ -6,6 +6,7 @@
 import logging
 logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
+from tensorflow.core.framework import summary_pb2
 import threading
 from ..analyzers import ExperimentAnalizer
 from ..network import SequenceNetwork
@@ -17,6 +18,28 @@ import sys
 
 class TripletTrainer(Trainer):
 
+    """
+    Trainer for triplet networks.
+
+    **Parameters**
+      architecture: The architecture that you want to run. Should be a :py:class:`bob.learn.tensorflow.network.SequenceNetwork`
+      optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
+      use_gpu: Use GPUs for training
+      loss: Loss
+      temp_dir: The output directory
+
+      base_learning_rate: Initial learning rate
+      weight_decay:
+      convergence_threshold:
+
+      iterations: Maximum number of iterations
+      snapshot: Will take a snapshot of the network every `n` iterations
+      prefetch: Use extra threads to deal with the I/O
+      analizer: Neural network analizer :py:mod:`bob.learn.tensorflow.analyzers`
+      verbosity_level:
+
+    """
+
     def __init__(self,
                  architecture,
                  optimizer=tf.train.AdamOptimizer(),
@@ -31,7 +54,13 @@ class TripletTrainer(Trainer):
                  ###### training options ##########
                  convergence_threshold=0.01,
                  iterations=5000,
-                 snapshot=100):
+                 snapshot=100,
+                 prefetch=False,
+
+                 ## Analizer
+                 analizer=ExperimentAnalizer(),
+
+                 verbosity_level=2):
 
         super(TripletTrainer, self).__init__(
             architecture=architecture,
@@ -39,159 +68,183 @@ class TripletTrainer(Trainer):
             use_gpu=use_gpu,
             loss=loss,
             temp_dir=temp_dir,
+
+            # Learning rate
             base_learning_rate=base_learning_rate,
             weight_decay=weight_decay,
+
+            ###### training options ##########
             convergence_threshold=convergence_threshold,
             iterations=iterations,
-            snapshot=snapshot
+            snapshot=snapshot,
+            prefetch=prefetch,
+
+            ## Analizer
+            analizer=analizer,
+
+            verbosity_level=verbosity_level
         )
 
-    def train(self, train_data_shuffler, validation_data_shuffler=None):
-        """
-        Do the loop forward --> backward --|
-                      ^--------------------|
+        self.between_class_graph_train = None
+        self.within_class_graph_train = None
+
+        self.between_class_graph_validation = None
+        self.within_class_graph_validation = None
+
+
+    def compute_graph(self, data_shuffler, prefetch=False, name="", train=True):
         """
+        Computes the graph for the trainer.
 
-        def start_thread():
-            threads = []
-            for n in range(1):
-                t = threading.Thread(target=load_and_enqueue)
-                t.daemon = True  # thread will close when parent quits
-                t.start()
-                threads.append(t)
-            return threads
-
-        def load_and_enqueue():
-            """
-            Injecting data in the place holder queue
-            """
-            #for i in range(self.iterations+5):
-            while not thread_pool.should_stop():
-                batch_anchor, batch_positive, batch_negative = train_data_shuffler.get_random_triplet()
-
-                feed_dict = {train_placeholder_anchor_data: batch_anchor,
-                             train_placeholder_positive_data: batch_positive,
-                             train_placeholder_negative_data: batch_negative}
-
-                session.run(enqueue_op, feed_dict=feed_dict)
-
-        # TODO: find an elegant way to provide this as a parameter of the trainer
-        learning_rate = tf.train.exponential_decay(
-            self.base_learning_rate,  # Learning rate
-            train_data_shuffler.batch_size,
-            train_data_shuffler.n_samples,
-            self.weight_decay  # Decay step
-        )
 
-        # Creating directory
-        bob.io.base.create_directories_safe(self.temp_dir)
-
-        # Creating two graphs
-        train_placeholder_anchor_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_anchor")
-        train_placeholder_positive_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_positive")
-        train_placeholder_negative_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_negative")
-
-        # Defining a placeholder queue for prefetching
-        queue = tf.FIFOQueue(capacity=100,
-                             dtypes=[tf.float32, tf.float32, tf.float32],
-                             shapes=[train_placeholder_anchor_data.get_shape().as_list()[1:],
-                                     train_placeholder_positive_data.get_shape().as_list()[1:],
-                                     train_placeholder_negative_data.get_shape().as_list()[1:]])
-
-        # Fetching the place holders from the queue
-        enqueue_op = queue.enqueue_many([train_placeholder_anchor_data,
-                                         train_placeholder_positive_data,
-                                         train_placeholder_negative_data])
-        train_anchor_feature_batch, train_positive_feature_batch, train_negative_feature_batch = \
-            queue.dequeue_many(train_data_shuffler.batch_size)
-
-        # Creating the architecture for train and validation
-        if not isinstance(self.architecture, SequenceNetwork):
-            raise ValueError("The variable `architecture` must be an instance of "
-                             "`bob.learn.tensorflow.network.SequenceNetwork`")
+        ** Parameters **
+
+            data_shuffler: Data shuffler
+            prefetch: If `True`, wire the graph through a prefetching FIFO queue
+            name: Name of the graph
+            train: If `True`, store the between/within class graphs for training; otherwise for validation
+        """
+
+        # Defining place holders
+        if prefetch:
+            placeholder_anchor_data, placeholder_positive_data, placeholder_negative_data = \
+                data_shuffler.get_placeholders_triplet_forprefetch(name=name)
+
+            # Defining a placeholder queue for prefetching
+            queue = tf.FIFOQueue(capacity=100,
+                                 dtypes=[tf.float32, tf.float32, tf.float32],
+                                 shapes=[placeholder_anchor_data.get_shape().as_list()[1:],
+                                         placeholder_positive_data.get_shape().as_list()[1:],
+                                         placeholder_negative_data.get_shape().as_list()[1:]
+                                         ])
+
+            # Fetching the place holders from the queue
+            self.enqueue_op = queue.enqueue_many([placeholder_anchor_data, placeholder_positive_data,
+                                                  placeholder_negative_data])
+            feature_anchor_batch, feature_positive_batch, feature_negative_batch = \
+                queue.dequeue_many(data_shuffler.batch_size)
+
+            # Creating the architecture for train and validation
+            if not isinstance(self.architecture, SequenceNetwork):
+                raise ValueError("The variable `architecture` must be an instance of "
+                                 "`bob.learn.tensorflow.network.SequenceNetwork`")
+        else:
+            feature_anchor_batch, feature_positive_batch, feature_negative_batch = \
+                data_shuffler.get_placeholders_triplet(name=name)
 
-        # Creating the siamese graph
+        # Creating the triplet graph
-        #import ipdb; ipdb.set_trace();
-        train_anchor_graph = self.architecture.compute_graph(train_anchor_feature_batch)
-        train_positive_graph = self.architecture.compute_graph(train_positive_feature_batch)
-        train_negative_graph = self.architecture.compute_graph(train_negative_feature_batch)
+        train_anchor_graph = self.architecture.compute_graph(feature_anchor_batch)
+        train_positive_graph = self.architecture.compute_graph(feature_positive_batch)
+        train_negative_graph = self.architecture.compute_graph(feature_negative_batch)
 
-        loss_train, within_class, between_class = self.loss(train_anchor_graph,
-                                                            train_positive_graph,
-                                                            train_negative_graph)
+        graph, between_class_graph, within_class_graph = self.loss(train_anchor_graph,
+                                                                   train_positive_graph,
+                                                                   train_negative_graph)
 
-        # Preparing the optimizer
-        step = tf.Variable(0)
-        self.optimizer._learning_rate = learning_rate
-        optimizer = self.optimizer.minimize(loss_train, global_step=step)
-        #optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.99, use_locking=False,
-        #                                       name='Momentum').minimize(loss_train, global_step=step)
+        if train:
+            self.between_class_graph_train = between_class_graph
+            self.within_class_graph_train = within_class_graph
+        else:
+            self.between_class_graph_validation = between_class_graph
+            self.within_class_graph_validation = within_class_graph
 
-        print("Initializing !!")
-        # Training
-        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')
+        return graph
 
-        with tf.Session() as session:
-            if validation_data_shuffler is not None:
-                analizer = ExperimentAnalizer(validation_data_shuffler, self.architecture, session)
+    def get_feed_dict(self, data_shuffler):
+        """
+        Given a data shuffler, prepares the dictionary to be injected in the graph
 
-            tf.initialize_all_variables().run()
+        ** Parameters **
+            data_shuffler: The data shuffler from which the triplet batch is drawn
 
-            # Start a thread to enqueue data asynchronously, and hide I/O latency.
-            thread_pool = tf.train.Coordinator()
-            tf.train.start_queue_runners(coord=thread_pool)
-            threads = start_thread()
+        """
 
-            # TENSOR BOARD SUMMARY
-            train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'LOGS'), session.graph)
+        batch_anchor, batch_positive, batch_negative = data_shuffler.get_random_triplet()
+        placeholder_anchor_data, placeholder_positive_data, placeholder_negative_data = \
+            data_shuffler.get_placeholders_triplet()
 
-            # Siamese specific summary
-            tf.scalar_summary('loss', loss_train)
-            tf.scalar_summary('between_class', between_class)
-            tf.scalar_summary('within_class', within_class)
-            tf.scalar_summary('lr', learning_rate)
-            merged = tf.merge_all_summaries()
+        feed_dict = {placeholder_anchor_data: batch_anchor,
+                     placeholder_positive_data: batch_positive,
+                     placeholder_negative_data: batch_negative}
 
-            # Architecture summary
-            self.architecture.generate_summaries()
-            merged_validation = tf.merge_all_summaries()
+        return feed_dict
 
-            for step in range(self.iterations):
+    def fit(self, session, step):
+        """
+        Run one iteration (`forward` and `backward`)
 
-                #batch_anchor, batch_positive, batch_negative = train_data_shuffler.get_random_triplet()
+        ** Parameters **
+            session: Tensorflow session
+            step: Iteration number
 
-                #feed_dict = {train_anchor_feature_batch: batch_anchor,
-                #             train_positive_feature_batch: batch_positive,
-                #             train_negative_feature_batch: batch_negative}
+        """
+        if self.prefetch:
+            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+                                             self.training_graph, self.between_class_graph_train,
+                                             self.within_class_graph_train, self.learning_rate, self.summaries_train])
+        else:
+            feed_dict = self.get_feed_dict(self.train_data_shuffler)
+            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+                                             self.training_graph, self.between_class_graph_train,
+                                             self.within_class_graph_train,
+                                             self.learning_rate, self.summaries_train], feed_dict=feed_dict)
+
+        logger.info("Loss training set step={0} = {1}".format(step, l))
+        self.train_summary_writter.add_summary(summary, step)
+
+    def compute_validation(self, session, data_shuffler, step):
+        """
+        Computes the loss in the validation set
 
+        ** Parameters **
+            session: Tensorflow session
+            data_shuffler: The data shuffler to be used
+            step: Iteration number
 
-                #_, l, lr, summary, pos, neg = session.run([optimizer, loss_train, learning_rate, merged, within_class, between_class], feed_dict=feed_dict)
+        """
+
+        if self.validation_summary_writter is None:
+            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
 
-                #_, l, lr, pos, neg, f_anchor, f_positive, f_negative = session.run(
-                #    [optimizer, loss_train, learning_rate, within_class, between_class, train_anchor_feature_batch, train_positive_feature_batch, train_negative_feature_batch], feed_dict=feed_dict)
+        self.validation_graph = self.compute_graph(data_shuffler, name="validation", train=False)
+        feed_dict = self.get_feed_dict(data_shuffler)
+        l, bt_class, wt_class = session.run([self.validation_graph,
+                                             self.between_class_graph_validation, self.within_class_graph_validation],
+                                             feed_dict=feed_dict)
 
-                #import ipdb; ipdb.set_trace();
+        summaries = []
+        summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
+        summaries.append(summary_pb2.Summary.Value(tag="between_class_loss", simple_value=float(bt_class)))
+        summaries.append(summary_pb2.Summary.Value(tag="within_class_loss", simple_value=float(wt_class)))
+        self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
+        logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))
 
-                _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged])
-                train_writer.add_summary(summary, step)
-                #print str(step) + " -- loss: " + str(l)
-                #print str(step) + " -- loss: {0}; pos: {1}; neg: {2}".format(l, pos, neg)
-                sys.stdout.flush()
+    def create_general_summary(self):
+        """
+        Creates a simple tensorboard summary with the value of the loss and learning rate
+        """
 
-                if validation_data_shuffler is not None and step % self.snapshot == 0:
+        # Train summary
+        tf.scalar_summary('loss', self.training_graph, name="train")
+        tf.scalar_summary('between_class_loss', self.between_class_graph_train, name="train")
+        tf.scalar_summary('within_class_loss', self.within_class_graph_train, name="train")
+        tf.scalar_summary('lr', self.learning_rate, name="train")
+        return tf.merge_all_summaries()
 
-                    #summary = session.run(merged_validation)
-                    #train_writer.add_summary(summary, step)
+    def load_and_enqueue(self, session):
+        """
+        Injects data into the placeholder queue
+
+        **Parameters**
+          session: Tensorflow session
+        """
 
-                    summary = analizer()
-                    train_writer.add_summary(summary, step)
-                    print str(step)
-                sys.stdout.flush()
+        while not self.thread_pool.should_stop():
+            batch_anchor, batch_positive, batch_negative = self.train_data_shuffler.get_random_triplet()
+            placeholder_anchor_data, placeholder_positive_data, placeholder_negative_data = \
+                self.train_data_shuffler.get_placeholders_triplet()
 
-            print("#######DONE##########")
-            self.architecture.save(hdf5)
-            del hdf5
-            train_writer.close()
+            feed_dict = {placeholder_anchor_data: batch_anchor,
+                         placeholder_positive_data: batch_positive,
+                         placeholder_negative_data: batch_negative}
 
-            #thread_pool.request_stop()
-            #thread_pool.join(threads)
+            session.run(self.enqueue_op, feed_dict=feed_dict)
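
The net effect of this rewrite is that `TripletTrainer` now fits the base `Trainer` template instead of owning its own loop: `compute_graph` builds (and, with `prefetch=True`, queues) the three-tower graph, `fit` runs one optimizer step, `compute_validation` logs the three losses, and `load_and_enqueue` is the thread body that feeds the queue. End-to-end usage, as exercised by the updated scripts in this diff:

    loss = TripletLoss()
    trainer = TripletTrainer(architecture=architecture,
                             loss=loss,
                             iterations=ITERATIONS,
                             prefetch=True,
                             temp_dir="cnn-triplet")
    trainer.train(train_data_shuffler, validation_data_shuffler)
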
diff --git a/doc/extra-intersphinx.txt b/doc/extra-intersphinx.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0f57144081205a052b60bc93ad35e847eb8691e0
--- /dev/null
+++ b/doc/extra-intersphinx.txt
@@ -0,0 +1 @@
+tensorflow