diff --git a/bob/learn/tensorflow/datashuffler/TFRecord.py b/bob/learn/tensorflow/datashuffler/TFRecord.py
index b4450dbc175a28ab4b0d6c2f602101c48a464bde..3043c9a281e63b090f936e31272a9c7c2dd2f65d 100644
--- a/bob/learn/tensorflow/datashuffler/TFRecord.py
+++ b/bob/learn/tensorflow/datashuffler/TFRecord.py
@@ -91,7 +91,7 @@ class TFRecord(object):
 
         # Define a reader and read the next record
         reader = tf.TFRecordReader()
-        _, serialized_example = reader.read(self.filename_queue)        
+        _, serialized_example = reader.read(self.filename_queue)
         
         # Decode the record read by the reader
         features = tf.parse_single_example(serialized_example, features=feature)
@@ -103,9 +103,9 @@ class TFRecord(object):
         label = tf.cast(features['train/label'], tf.int64)
         
         # Reshape image data into the original shape
-        image = tf.reshape(image, self.input_shape[1:])
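+        # NOTE (assumption): list() makes the target shape an explicit list of
+        # ints, the form tf.reshape always accepts.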
+        image = tf.reshape(image, list(self.input_shape[1:]))
                 
-        images, labels = tf.train.shuffle_batch([image, label], batch_size=32, capacity=1000, num_threads=1, min_after_dequeue=1)
+        images, labels = tf.train.shuffle_batch([image, label], batch_size=32, capacity=1000, num_threads=1, min_after_dequeue=1, name="XUXA1")
         self.data_ph = images
         self.label_ph = labels
 
@@ -113,7 +113,6 @@ class TFRecord(object):
         self.label_ph_from_queue = self.label_ph
 
 
-
     def __call__(self, element, from_queue=False):
         """
         Return the necessary placeholder
diff --git a/bob/learn/tensorflow/test/test_cnn_scratch.py b/bob/learn/tensorflow/test/test_cnn_scratch.py
index d283a1f15fe3d82bb7a1d537c2465e06cf911107..ae7ed25f08ef87fd7e54735aabc29a65a3f89c50 100644
--- a/bob/learn/tensorflow/test/test_cnn_scratch.py
+++ b/bob/learn/tensorflow/test/test_cnn_scratch.py
@@ -103,11 +103,13 @@ def test_cnn_trainer_scratch():
 def test_cnn_trainer_scratch_tfrecord():
     tf.reset_default_graph()
 
     #train_data, train_labels, validation_data, validation_labels = load_mnist()
     #train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
 
     tfrecords_filename = "/idiap/user/tpereira/gitlab/workspace_HTFace/mnist_train.tfrecords"
-    filename_queue = tf.train.string_input_producer([tfrecords_filename], num_epochs=1)
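+    # num_epochs makes string_input_producer create a local variable, which
+    # must be initialized with tf.local_variables_initializer().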
+    filename_queue = tf.train.string_input_producer([tfrecords_filename], num_epochs=1, name="XUXA")
     train_data_shuffler  = TFRecord(filename_queue=filename_queue,
                                     batch_size=batch_size)
 
@@ -125,10 +127,11 @@ def test_cnn_trainer_scratch_tfrecord():
                       analizer=None,
                       temp_dir=directory)
 
+    learning_rate = constant(0.01, name="regular_lr")
     trainer.create_network_from_scratch(graph=graph,
                                         loss=loss,
-                                        learning_rate=constant(0.01, name="regular_lr"),
-                                        optimizer=tf.train.GradientDescentOptimizer(0.01),
+                                        learning_rate=learning_rate,
+                                        optimizer=tf.train.GradientDescentOptimizer(learning_rate),
                                         )
 
     trainer.train()
@@ -136,3 +139,141 @@ def test_cnn_trainer_scratch_tfrecord():
     #assert accuracy > 70
     #shutil.rmtree(directory)
     #del trainer    
+
+
+def test_xuxa():
+    tfrecords_filename = '/idiap/user/tpereira/gitlab/workspace_HTFace/mnist_train.tfrecords'
+    def read_and_decode(filename_queue):
+
+        feature = {'train/image': tf.FixedLenFeature([], tf.string),
+                   'train/label': tf.FixedLenFeature([], tf.int64)}
+
+        # Define a reader and read the next record
+        reader = tf.TFRecordReader()
+        _, serialized_example = reader.read(filename_queue)
+
+        # Decode the record read by the reader
+        features = tf.parse_single_example(serialized_example, features=feature)
+
+        # Convert the image data from string back to the numbers
+        image = tf.decode_raw(features['train/image'], tf.float32)
+
+        # Cast label data into int64
+        label = tf.cast(features['train/label'], tf.int64)
+
+        # Reshape image data into the original shape
+        image = tf.reshape(image, [28, 28, 1])
+
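+        # min_after_dequeue=1 gives essentially no shuffling; enough for a
+        # quick functional test.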
+        images, labels = tf.train.shuffle_batch([image, label], batch_size=32, capacity=1000, num_threads=1, min_after_dequeue=1, name="XUXA1")
+
+        return images, labels
+
+    slim = tf.contrib.slim
+
+    def scratch_network(inputs, reuse=False):
+
+        # Creating a random network
+        initializer = tf.contrib.layers.xavier_initializer(seed=10)
+        graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1, scope='conv1',
+                            weights_initializer=initializer, reuse=reuse)
+        graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
+        graph = slim.flatten(graph, scope='flatten1')
+        graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1',
+                                     weights_initializer=initializer, reuse=reuse)
+
+        return graph
+
+    def create_general_summary(predictor):
+        """
+        Creates a simple tensorboard summary with the value of the loss
+        """
+
+        # Train summary
+        tf.summary.scalar('loss', predictor)
+        return tf.summary.merge_all()
+
+    # Create a list of filenames and pass it to a queue
+    filename_queue = tf.train.string_input_producer([tfrecords_filename], num_epochs=5, name="XUXA")
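+    # The num_epochs argument adds a local epoch counter; it is initialized
+    # below with tf.local_variables_initializer() before the queue runs.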
+
+    images, labels = read_and_decode(filename_queue)
+    graph = scratch_network(images)
+    predictor = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=graph, labels=labels)
+    loss = tf.reduce_mean(predictor)
+
+    global_step = tf.contrib.framework.get_or_create_global_step()
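+    # Passing global_step lets minimize() increment it on every update.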
+    optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss, global_step=global_step)
+
+    print("Batching")
+    sess = tf.Session()
+
+    sess.run(tf.local_variables_initializer())
+    sess.run(tf.global_variables_initializer())
+
+    saver = tf.train.Saver(var_list=tf.global_variables() + tf.local_variables())
+
+    train_summary_writer = tf.summary.FileWriter('./tf-record/train', sess.graph)
+    summary_op = create_general_summary(loss)
+
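+    # Start the queue-runner threads; without them, sess.run() on the
+    # batching ops would block forever.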
+    coord = tf.train.Coordinator()
+    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
+
+    for i in range(10):
+        _, l, summary = sess.run([optimizer, loss, summary_op])
+        print(l)
+
+        train_summary_writer.add_summary(summary, i)
+
+    # Stop the threads
+    coord.request_stop()
+
+    # Wait for threads to stop
+    coord.join(threads)
+    train_summary_writer.close()
+    saver.save(sess, "xuxa.ckp")
diff --git a/bob/learn/tensorflow/trainers/Trainer.py b/bob/learn/tensorflow/trainers/Trainer.py
index e289e4b3f5943102410fdf8bd488a1ce89456192..350c58097d145cf8889a13116bce8cbbfdeb05ca 100644
--- a/bob/learn/tensorflow/trainers/Trainer.py
+++ b/bob/learn/tensorflow/trainers/Trainer.py
@@ -134,7 +134,14 @@ class Trainer(object):
         self.label_ph = self.train_data_shuffler("label", from_queue=True)
         self.graph = graph
         self.loss = loss
-        self.predictor = self.loss(self.graph, self.label_ph)
+
+        # TODO: DEBUG
+        #self.predictor = self.loss(self.graph, self.label_ph)
+
+        self.predictor = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.graph,
+                                                                        labels=self.label_ph)
+        self.loss = tf.reduce_mean(self.predictor)
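+        # NOTE: this hard-wires sparse softmax cross-entropy while the
+        # TFRecord integration is debugged; the loss callback above is bypassed.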
+
 
         self.optimizer_class = optimizer
         self.learning_rate = learning_rate
@@ -142,7 +149,7 @@ class Trainer(object):
         self.global_step = tf.contrib.framework.get_or_create_global_step()
 
         # Saving all the variables
-        self.saver = tf.train.Saver(var_list=tf.global_variables())
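+        # Also save local variables, e.g. the epoch counter created when
+        # string_input_producer is given num_epochs.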
+        self.saver = tf.train.Saver(var_list=tf.global_variables() + tf.local_variables())
 
         tf.add_to_collection("global_step", self.global_step)
 
@@ -165,7 +172,7 @@ class Trainer(object):
         self.summaries_validation = tf.add_to_collection("summaries_validation", self.summaries_validation)
 
         # Creating the variables
-        self.session.run(tf.local_variables_initializer())
+        # NOTE: for the TFRecord path, local variables are now initialized in
+        # train(), just before the queue runners start.
         tf.global_variables_initializer().run(session=self.session)
 
     def create_network_from_file(self, file_name, clear_devices=True):
@@ -332,9 +339,9 @@ class Trainer(object):
         # TODO: JUST FOR TESTING THE INTEGRATION
         #import ipdb; ipdb.set_trace();
         if isinstance(self.train_data_shuffler, TFRecord):
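+            # Local variables (the queue's epoch counter) must be initialized
+            # before the queue runners start, or dequeueing fails.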
+            tf.local_variables_initializer().run(session=self.session)
             self.thread_pool = tf.train.Coordinator()
             threads = tf.train.start_queue_runners(coord=self.thread_pool, sess=self.session)
- 
         
 
         # TENSOR BOARD SUMMARY