diff --git a/cnn_training/._centerloss_mixed_precision.py b/cnn_training/._centerloss_mixed_precision.py
deleted file mode 100644
index 881da051d61cfeb4e02036b9ea9c9df85b2fefdf..0000000000000000000000000000000000000000
Binary files a/cnn_training/._centerloss_mixed_precision.py and /dev/null differ
diff --git a/cnn_training/arcface.py b/cnn_training/arcface.py
index 712cf6dca83019dae9adea274a5622b491a295b9..17b9f508e3283c8f8b4c311c659da27c3b12d460 100644
--- a/cnn_training/arcface.py
+++ b/cnn_training/arcface.py
@@ -52,23 +52,24 @@ validation-tf-record-path: "/path/lfw_pairs.tfrecord"
 
 
 Usage:
-    arcface.py <config-yaml> <checkpoint_path> [--pre-train]
+    arcface.py <config-yaml> <checkpoint_path> [--pre-train --pre-train-epochs=<kn>]
     arcface.py -h | --help
 
 Options:
-  -h --help             Show this screen.
-  --pre-train           If set pretrains the CNN with the crossentropy softmax for 2 epochs  
+  -h --help                        Show this screen.
+  --pre-train                      If set, pretrains the CNN with the cross-entropy softmax loss for --pre-train-epochs epochs
+  --pre-train-epochs=<kn>          Number of epochs to pretrain [default: 40]
   arcface.py arcface -h | help
 
 """
 
 import os
 from functools import partial
-
+import numpy as np
 import pkg_resources
 import tensorflow as tf
 from bob.learn.tensorflow.models.inception_resnet_v2 import InceptionResNetV2
-from bob.learn.tensorflow.models import resnet50v1
+from bob.learn.tensorflow.models import resnet50_modified, resnet101_modified
 from bob.learn.tensorflow.metrics import predict_using_tensors
 from tensorflow.keras import layers
 from bob.learn.tensorflow.callbacks import add_backup_callback
@@ -100,11 +101,11 @@ BACKBONES = dict()
 BACKBONES["inception-resnet-v2"] = InceptionResNetV2
 BACKBONES["efficientnet-B0"] = tf.keras.applications.EfficientNetB0
 BACKBONES["resnet50"] = tf.keras.applications.ResNet50
+BACKBONES["resnet50_modified"] = resnet50_modified
+BACKBONES["resnet101_modified"] = resnet101_modified
 BACKBONES["mobilenet-v2"] = tf.keras.applications.MobileNetV2
 # from bob.learn.tensorflow.models.lenet5 import LeNet5_simplified
 
-BACKBONES["resnet50v1"] = resnet50v1
-
 ##############################
 # SOLVER SPECIFICATIONS
 ##############################
@@ -134,7 +135,8 @@ DATA_SHAPES = dict()
 
 # Inputs with 182x182 are cropped to 160x160
 DATA_SHAPES[182] = 160
-DATA_SHAPES[112] = 98
+DATA_SHAPES[112] = 112
+# DATA_SHAPES[112] = 98
 DATA_SHAPES[126] = 112
 
 
@@ -157,7 +159,6 @@ VALIDATION_BATCH_SIZE = 38
 def create_model(
     n_classes, model_spec, backbone, bottleneck, dropout_rate, input_shape, pre_train
 ):
-
     if backbone == "inception-resnet-v2":
         pre_model = BACKBONES[backbone](
             include_top=False, bottleneck=False, input_shape=input_shape,
@@ -177,12 +178,9 @@ def create_model(
 
     if pre_train:
         # pre_model = add_top(pre_model, n_classes=n_classes)
-        logits_premodel = ArcFaceLayer(
-            n_classes,
-            s=model_spec["arcface"]["s"],
-            m=model_spec["arcface"]["m"],
-            arc=False,
-        )(embeddings, None)
+        logits_premodel = ArcFaceLayer(n_classes, s=0, m=0, arc=False)(
+            embeddings, None
+        )
 
         # Wrapping the embedding validation
         # logits_premodel = pre_model.get_layer("logits").output
@@ -284,6 +282,7 @@ def train_and_evaluate(
     validation_path,
     lerning_rate_schedule,
     pre_train=False,
+    pre_train_epochs=30,
 ):
 
     # number of training steps to do before validating a model. This also defines an epoch
@@ -343,12 +342,15 @@ def train_and_evaluate(
         # Tracking in the tensorboard
         tf.summary.scalar("learning rate", data=lr, step=epoch)
 
-        if epoch in range(200):
+        if epoch in range(40):
             return 1 * lr
-        elif epoch < 1000:
-            return lr * np.exp(-0.005)
-        else:
+        elif epoch < 300:
+            # return lr * np.exp(-0.005)
+            return 0.01
+        elif epoch < 1200:
             return 0.0001
+        else:
+            return 0.00001
 
     if lerning_rate_schedule == "cosine-decay-restarts":
         decay_steps = 50
@@ -381,7 +383,7 @@ def train_and_evaluate(
         # STEPS_PER_EPOCH
         pre_model.fit(
             train_ds,
-            epochs=20,
+            epochs=int(pre_train_epochs),
             validation_data=val_ds,
             steps_per_epoch=STEPS_PER_EPOCH,
             validation_steps=VALIDATION_SAMPLES // VALIDATION_BATCH_SIZE,
@@ -449,5 +451,6 @@ if __name__ == "__main__":
         if "lerning-rate-schedule" in config
         else None,
         pre_train=args["--pre-train"],
+        pre_train_epochs=args["--pre-train-epochs"],
     )
 
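For readers checking the new schedule, here is a minimal standalone sketch of the stepwise learning-rate policy introduced in the hunk above, assuming it is attached through `tf.keras.callbacks.LearningRateScheduler`; the `model.fit` wiring is illustrative, only the 40/300/1200 epoch boundaries and the rate values come from the patch.

```python
import tensorflow as tf

def stepwise_schedule(epoch, lr):
    # epochs 0-39: keep the incoming learning rate unchanged
    if epoch < 40:
        return lr
    # epochs 40-299: fixed rate of 1e-2
    elif epoch < 300:
        return 0.01
    # epochs 300-1199: drop to 1e-4
    elif epoch < 1200:
        return 0.0001
    # epoch 1200 onwards: 1e-5
    return 0.00001

lr_callback = tf.keras.callbacks.LearningRateScheduler(stepwise_schedule)
# model.fit(train_ds, epochs=..., callbacks=[lr_callback])  # illustrative wiring
```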
diff --git a/cnn_training/msceleb_to_tfrecord.py b/cnn_training/msceleb_to_tfrecord.py
new file mode 100644
index 0000000000000000000000000000000000000000..7138d7cce60407689e724a7c850666fcb170e44b
--- /dev/null
+++ b/cnn_training/msceleb_to_tfrecord.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Converts the MSCeleb annotated version to TF RECORD
+
+Usage:
+    msceleb_to_tfrecord.py <msceleb-path> <output-path> [--keys=<arg> --image-size=<arg> --use-eyes]
+    msceleb_to_tfrecord.py -h | --help
+
+Options:
+  -h --help             Show this screen.    
+  --keys=<arg>          Pickle with the keys
+  --image-size=<arg>    Final image size [default: 126]  
+  --use-eyes            Use eyes annotations. If not set, it will use the face crop only
+
+"""
+
+
+from docopt import docopt
+import numpy as np
+import os
+import bob.io.image
+import bob.io.base
+import tensorflow as tf
+import sys
+from datetime import datetime
+import pickle
+import numpy
+from bob.bio.face.preprocessor import FaceCrop
+
+
+def _bytes_feature(value):
+    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _int64_feature(value):
+    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
+
+
+def detect_mtcnn_margin_face_crop(annotations, image, margin=44, final_size=126):
+    """
+    Face crop using bounding box
+    """
+
+    annotations["topleft"] = [annotations["topleft"][0], annotations["topleft"][1]]
+    annotations["bottomright"] = [
+        annotations["bottomright"][0],
+        annotations["bottomright"][1],
+    ]
+
+    annotations["topleft"][0] = numpy.uint(
+        numpy.maximum(annotations["topleft"][0] - margin / 2, 0)
+    )
+    annotations["topleft"][1] = numpy.uint(
+        numpy.maximum(annotations["topleft"][1] - margin / 2, 0)
+    )
+
+    annotations["bottomright"][0] = numpy.uint(
+        numpy.minimum(annotations["bottomright"][0] + margin / 2, image.shape[1])
+    )
+    annotations["bottomright"][1] = numpy.uint(
+        numpy.minimum(annotations["bottomright"][1] + margin / 2, image.shape[2])
+    )
+
+    cropped_positions = {"topleft": (0, 0), "bottomright": (final_size, final_size)}
+    cropper = FaceCrop(
+        cropped_image_size=(final_size, final_size),
+        cropped_positions=cropped_positions,
+        color_channel="rgb",
+        fixed_positions=None,
+        annotator=None,
+    )
+
+    detected = cropper.transform([image], [annotations])[0]
+
+    return detected.astype("uint8")
+
+
+def detect_mtcnn_margin_eyes(annotations, image, margin=44, final_size=126):
+
+    # final image position w.r.t the image size
+    RIGHT_EYE_POS = (final_size / 3.44, final_size / 3.02)
+    LEFT_EYE_POS = (final_size / 3.44, final_size / 1.49)
+
+    # RIGHT_EYE_POS = (final_size / 3.34,
+    #                 final_size / 3.02)
+    # LEFT_EYE_POS = (final_size / 3.44,
+    #                final_size / 1.59)
+
+    cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+
+    cropper = FaceCrop(
+        cropped_image_size=(final_size, final_size),
+        cropped_positions=cropped_positions,
+        color_channel="rgb",
+        fixed_positions=None,
+        annotator=None,
+    )
+
+    detected = cropper.transform([image], [annotations])[0]
+
+    return detected.astype("uint8")
+
+
+def generate_tfrecord(
+    chunk_path,
+    output_tf_record_path,
+    detector,
+    keys,
+    final_size=126,
+    margin=44,
+    use_eyes=False,
+):
+    def write_single_line_tfrecord(writer, image, offset, user_id):
+
+        # Serializing
+        serialized_img = image.tobytes()
+
+        # Writing
+        feature = {
+            "data": _bytes_feature(serialized_img),
+            "label": _int64_feature(offset),
+            "key": _bytes_feature(str.encode(user_id)),
+        }
+
+        example = tf.train.Example(features=tf.train.Features(feature=feature))
+        writer.write(example.SerializeToString())
+
+    os.makedirs(os.path.dirname(output_tf_record_path), exist_ok=True)
+
+    with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
+
+        for identity in os.listdir(chunk_path):
+            # Discarding the ones we've discarded already
+            if identity not in keys:
+                continue
+
+            identity_path = os.path.join(chunk_path, identity)
+            if not os.path.isdir(identity_path):
+                continue
+
+            sys.stdout.write(f"Processing {identity} \n")
+            sys.stdout.flush()
+
+            for image_path in os.listdir(identity_path):
+                image_path = os.path.join(identity_path, image_path)
+                if os.path.splitext(image_path)[-1] != ".png":
+                    continue
+                image = bob.io.image.load(image_path)
+                annotations = detector(image)
+
+                if len(annotations) == 0:
+                    continue
+                else:
+                    # Getting the first annotation
+                    annotations = annotations[0]
+
+                if use_eyes:
+                    detected_image = detect_mtcnn_margin_eyes(
+                        annotations, image, margin=margin, final_size=final_size
+                    )
+                else:
+
+                    detected_image = detect_mtcnn_margin_face_crop(
+                        annotations, image, margin=margin, final_size=final_size
+                    )
+                # Converting H x W x C
+                detected_image = bob.io.image.to_matplotlib(detected_image)
+
+                write_single_line_tfrecord(
+                    tf_writer, detected_image, keys[identity], identity
+                )
+
+
+def get_keys(base_path, all_chunks):
+    """
+    Read the file structure from `annotations.csv` to get the samples properly annotated
+    """
+
+    def decide(annotations_path):
+        """
+        Decide if we should consider an identity or not.
+        The annotation has the following format.
+
+        ```
+        0,3.png,4.png,1
+        1,4.png,40.png,1
+        2,40.png,46.png,1
+        3,46.png,47.png,1
+        4,47.png,55.png,1
+        5,55.png,56.png,1
+        6,56.png,71.png,1
+        7,71.png,79.png,1
+        8,79.png,99.png,1
+        9,99.png,100.png,1
+        10,100.png,3.png,1
+        ```
+
+        The last column can assume the values:
+          - `0`: The pair of images are NOT from the same identity
+          - `1`: The pair of images ARE from the same identity
+          - `2`: The annotator doesn't know what to say
+
+
+        Here I'm keeping an identity only if fewer than 50% of its pairs are labeled `0` (i.e., more than 50% are `1` or `2`).
+
+        """
+
+        with open(annotations_path) as f:
+            lines = 0
+            lines_with_zero = 0
+            for l in f.readlines():
+                lines += 1
+                if l.split(",")[-1] == "\n":
+                    lines_with_zero += 1
+                    continue
+
+                decision = int(l.split(",")[-1])
+                if decision == 0:
+                    lines_with_zero += 1
+
+        # Discarding identities where 50% or more of the pairs were not
+        # considered to be from the same identity
+        # This is the first cut
+        return lines_with_zero / lines < 0.50
+
+    offset = 0
+    keys = dict()
+    for chunk in all_chunks:
+        path = os.path.join(base_path, chunk)
+        for identity in os.listdir(path):
+            if not os.path.isdir(os.path.join(path, identity)):
+                continue
+
+            statistics = os.path.join(path, identity, "annotations.csv")
+
+            if decide(statistics):
+                keys[identity] = offset
+                offset += 1
+            else:
+                print(f"Rejected {identity}")
+    return keys
+
+
+if __name__ == "__main__":
+    args = docopt(__doc__)
+
+    MSCELEB_PATH = args["<msceleb-path>"]
+    output_path = args["<output-path>"]
+    image_size = int(args["--image-size"])
+    use_eyes = args["--use-eyes"]
+    margin = 0
+
+    if "SGE_TASK_LAST" in os.environ:
+        TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
+        CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
+    else:
+        TOTAL_CHUNKS = 1
+        CURRENT_CHUNK = 0
+
+    all_chunks = [f"chunk_{i}" for i in range(43)]
+
+    if args["--keys"] is None:
+        keys = get_keys(MSCELEB_PATH, all_chunks)
+        with open("keys.pickle", "wb") as f:
+            f.write(pickle.dumps(keys))
+    else:
+        with open(args["--keys"], "rb") as f:
+            keys = pickle.load(f)
+
+    chunk_id = all_chunks[CURRENT_CHUNK]
+
+    from bob.ip.facedetect.mtcnn import MTCNN
+
+    detector = MTCNN()
+
+    output_tf_record_path = os.path.join(output_path, chunk_id + ".tfrecords")
+
+    generate_tfrecord(
+        os.path.join(MSCELEB_PATH, chunk_id),
+        output_tf_record_path,
+        detector,
+        keys,
+        final_size=image_size,
+        margin=margin,
+        use_eyes=use_eyes,
+    )
+
+    sys.stdout.write("Done \n")
+    sys.stdout.flush()
+
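For completeness, a minimal sketch of how the records written above can be read back; the feature names (`data`, `label`, `key`) and the raw-uint8 H x W x C layout follow the writer, while the image size (the default `--image-size` of 126), the file name, and the batch size are illustrative assumptions.

```python
import tensorflow as tf

IMAGE_SIZE = 126  # must match the --image-size used when the records were written

features = {
    "data": tf.io.FixedLenFeature([], tf.string),
    "label": tf.io.FixedLenFeature([], tf.int64),
    "key": tf.io.FixedLenFeature([], tf.string),
}

def parse(serialized):
    example = tf.io.parse_single_example(serialized, features)
    # "data" holds the raw uint8 buffer produced by image.tobytes() (H x W x C)
    image = tf.io.decode_raw(example["data"], tf.uint8)
    image = tf.reshape(image, (IMAGE_SIZE, IMAGE_SIZE, 3))
    return image, example["label"]

dataset = (
    tf.data.TFRecordDataset("chunk_0.tfrecords")  # illustrative path
    .map(parse)
    .batch(32)
)
```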
diff --git a/cnn_training/vgg2_2_tfrecords.py b/cnn_training/vgg2_2_tfrecords.py
new file mode 100644
index 0000000000000000000000000000000000000000..8beeb95a1067635afa738dcd7d3703e7812949e3
--- /dev/null
+++ b/cnn_training/vgg2_2_tfrecords.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Converts the VGG2 dataset to TF RECORD, aligning the faces with the eye landmarks
+
+Usage:
+    vgg2_2_tfrecords.py <vgg-path> <output-path> 
+    vgg2_2_tfrecords.py -h | --help
+
+Options:
+  -h --help             Show this screen.  
+
+"""
+
+
+from docopt import docopt
+import numpy as np
+import os
+import bob.io.image
+import bob.io.base
+import tensorflow as tf
+import sys
+from datetime import datetime
+
+
+def _bytes_feature(value):
+    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _int64_feature(value):
+    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
+
+
+def search_landmark(landmark_path, img_path):
+    with open(landmark_path) as f:
+        next(f)
+        for line in f:
+            line = line.split(",")
+            if img_path in line[0]:
+                return np.array(
+                    [[float(line[i + 1]), float(line[i + 2])] for i in [0, 2, 4, 6, 8]]
+                )
+        else:
+            return None
+
+
+from bob.bio.face.preprocessor import FaceCrop
+
+
+def align(image, annotations, cropped_image_size=(126, 126)):
+
+    cropped_image_height, cropped_image_width = cropped_image_size
+
+    # RIGHT_EYE_POS = (40, 46)
+    # LEFT_EYE_POS = (40, 80)
+    # cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+    # cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+    cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
+
+    cropper = FaceCrop(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        color_channel="rgb",
+        fixed_positions=None,
+        annotator=None,
+    )
+    return bob.io.image.to_matplotlib(
+        cropper.transform([image], [annotations])[0].astype("uint8")
+    )
+
+
+def get_id_by_line(line):
+    return line.split("/")[0]
+
+
+def generate_tfrecord(
+    base_path, landmark_path, file_list, output_tf_record_path, indexes
+):
+    def write_single_line_tfrecord(writer, image, offset, user_id):
+
+        # Serializing
+        serialized_img = image.tobytes()
+
+        # Writing
+        feature = {
+            "data": _bytes_feature(serialized_img),
+            "label": _int64_feature(offset),
+            "key": _bytes_feature(str.encode(user_id)),
+        }
+
+        example = tf.train.Example(features=tf.train.Features(feature=feature))
+        writer.write(example.SerializeToString())
+
+    with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
+
+        current_id = None
+        with open(file_list) as f:
+            for file_name in f.readlines():
+
+                user_id = get_id_by_line(file_name)
+                if user_id in indexes:
+
+                    img = bob.io.base.load(
+                        os.path.join(base_path, file_name).rstrip("\n")
+                    )
+                    l_name = file_name.rstrip(".jpg\n")
+
+                    if current_id != user_id:
+                        current_id = user_id
+                        sys.stdout.write(
+                            f"Writing user {current_id}. {str(datetime.now())} \n"
+                        )
+                        sys.stdout.flush()
+
+                    landmarks = search_landmark(landmark_path, l_name)
+                    if landmarks is None:
+                        raise ValueError(f"Landmark for {file_name} not found!")
+                    # landmarks are (x, y); "reye" gets the point with the smaller x
+                    if landmarks[0][0] > landmarks[1][0]:
+                        annotations = {
+                            "reye": (landmarks[1][1], landmarks[1][0]),
+                            "leye": (landmarks[0][1], landmarks[0][0]),
+                        }
+                    else:
+                        annotations = {
+                            "reye": (landmarks[0][1], landmarks[0][0]),
+                            "leye": (landmarks[1][1], landmarks[1][0]),
+                        }
+
+                    aligned_image = align(img, annotations)
+                    write_single_line_tfrecord(
+                        tf_writer, aligned_image, int(indexes[user_id]), user_id
+                    )
+
+
+def map_indexes(image_path, n_chunks):
+    """
+    Create a list of dictionaries mapping each VGG2 identity to an integer label,
+    split into `n_chunks` chunks, like:
+
+    {'n000001': 0,
+     'n000002': 1}
+
+    """
+
+    indexes = sorted(list(set([l.split("/")[0] for l in open(image_path).readlines()])))
+
+    identities_map = {indexes[i]: i for i in range(len(indexes))}
+
+    # SPLIT THE DICTIONARY IN TOTAL_CHUNKS
+    indexes_as_list = list(identities_map.items())
+    dict_as_list = np.array_split(indexes_as_list, n_chunks)
+    dicts = [dict(d) for d in dict_as_list]
+
+    return dicts
+
+
+if __name__ == "__main__":
+    args = docopt(__doc__)
+
+    VGG2_PATH = args["<vgg-path>"]
+    LANDMARK_PATH = os.path.join(VGG2_PATH, "bb_landmark", "loose_landmark_train.csv")
+
+    if "SGE_TASK_LAST" in os.environ:
+        TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
+        CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
+    else:
+        TOTAL_CHUNKS = 1
+        CURRENT_CHUNK = 0
+
+    # TOTAL_CHUNKS = 140
+    # CURRENT_CHUNK = 0
+
+    TRAINING_LIST = os.path.join(VGG2_PATH, "train_list.txt")
+    # TEST_LIST = os.path.join(VGG2_PATH, "test_list.txt")
+
+    # MAP ALL INDEXES
+
+    indexes = map_indexes(TRAINING_LIST, TOTAL_CHUNKS)
+
+    generate_tfrecord(
+        os.path.join(VGG2_PATH, "train"),
+        LANDMARK_PATH,
+        TRAINING_LIST,
+        os.path.join(
+            args["<output-path>"], f"train_vgg2_chunk{CURRENT_CHUNK}.tfrecords"
+        ),
+        indexes[CURRENT_CHUNK],
+    )
+
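As a pointer for anyone inspecting the landmark file, a small sketch of the row layout `search_landmark` assumes and of how the two eye points become bob-style `(y, x)` annotations; the CSV header and the sample coordinates are assumptions about the standard VGGFace2 `loose_landmark_train.csv`, not something this script checks.

```python
import numpy as np

# Assumed layout: NAME_ID,P1X,P1Y,P2X,P2Y,P3X,P3Y,P4X,P4Y,P5X,P5Y
# with P1/P2 the eyes, P3 the nose tip, P4/P5 the mouth corners.
row = "n000001/0001_01,75.0,110.0,130.0,108.0,102.0,140.0,80.0,170.0,125.0,168.0"

line = row.split(",")
# search_landmark keeps the five (x, y) points from columns 1..10
landmarks = np.array(
    [[float(line[i + 1]), float(line[i + 2])] for i in [0, 2, 4, 6, 8]]
)

# generate_tfrecord then keeps only the eyes (points 0 and 1) and stores them
# as (y, x) annotations, "reye" being the eye with the smaller x coordinate
p0, p1 = landmarks[0], landmarks[1]
if p0[0] > p1[0]:
    annotations = {"reye": (p1[1], p1[0]), "leye": (p0[1], p0[0])}
else:
    annotations = {"reye": (p0[1], p0[0]), "leye": (p1[1], p1[0])}
```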
diff --git a/cnn_training/vgg2_2_tfrecords_loose.py b/cnn_training/vgg2_2_tfrecords_loose.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fb8d6c86a06f3921ed4211efa01058d1716dc23
--- /dev/null
+++ b/cnn_training/vgg2_2_tfrecords_loose.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Crop VGG2 with loose crop based on bounding box
+
+Usage:
+    vgg2_2_tfrecords_loose.py <vgg-path> <output-path>  [--factor=<kn>]
+    vgg2_2_tfrecords_loose.py -h | --help
+
+Options:
+  -h --help             Show this screen.  
+  --factor=<kn>         Crop Factor [default: 0.3]
+
+"""
+
+
+from docopt import docopt
+import numpy as np
+import os
+import bob.io.image
+import bob.io.base
+import tensorflow as tf
+import sys
+from datetime import datetime
+
+
+def _bytes_feature(value):
+    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _int64_feature(value):
+    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
+
+
+def search_landmark(landmark_path, img_path):
+    with open(landmark_path) as f:
+        next(f)
+        for line in f:
+            line = line.split(",")
+            if img_path in line[0]:
+                landmarks = np.array([float(line[i]) for i in [1, 2, 3, 4]])
+                return {
+                    "topleft": (landmarks[1], landmarks[0]),
+                    "dimensions": (landmarks[3], landmarks[2]),
+                    "bottomright": (
+                        landmarks[1] + landmarks[3],
+                        landmarks[0] + landmarks[2],
+                    ),
+                }
+
+        else:
+            return None
+
+
+def extend_annotations(annotations, img_bottom_right, factor=0.3):
+    width = annotations["dimensions"][1]
+    height = annotations["dimensions"][0]
+
+    new_annotations = {"topleft": [0, 0], "bottomright": [0, 0]}
+
+    new_annotations["topleft"][0] = max(0, annotations["topleft"][0] - height * factor)
+    new_annotations["topleft"][1] = max(0, annotations["topleft"][1] - width * factor)
+
+    # img_bottom_right is (height, width): index 0 clips y, index 1 clips x
+    new_annotations["bottomright"][0] = min(
+        img_bottom_right[0], annotations["bottomright"][0] + height * factor
+    )
+    new_annotations["bottomright"][1] = min(
+        img_bottom_right[1], annotations["bottomright"][1] + width * factor
+    )
+
+    return new_annotations
+
+
+from bob.bio.face.preprocessor import FaceCrop
+
+
+def align(image, annotations, cropped_image_size=(126, 126), factor=0.3):
+
+    cropped_image_height, cropped_image_width = cropped_image_size
+
+    img_bottom_right = (image.shape[1], image.shape[2])
+    new_annotations = extend_annotations(annotations, img_bottom_right, factor=factor)
+
+    cropped_positions = {"topleft": (0, 0), "bottomright": cropped_image_size}
+    cropper = FaceCrop(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        color_channel="rgb",
+        fixed_positions=None,
+        annotator=None,
+    )
+    return bob.io.image.to_matplotlib(
+        cropper.transform([image], [new_annotations])[0]
+    ).astype("uint8")
+
+
+def get_id_by_line(line):
+    return line.split("/")[0]
+
+
+def generate_tfrecord(
+    base_path, landmark_path, file_list, output_tf_record_path, indexes, factor=0.3
+):
+    def write_single_line_tfrecord(writer, image, offset, user_id):
+
+        # Serializing
+        serialized_img = image.tobytes()
+
+        # Writing
+        feature = {
+            "data": _bytes_feature(serialized_img),
+            "label": _int64_feature(offset),
+            "key": _bytes_feature(str.encode(user_id)),
+        }
+
+        example = tf.train.Example(features=tf.train.Features(feature=feature))
+        writer.write(example.SerializeToString())
+
+    with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
+
+        current_id = None
+        with open(file_list) as f:
+            for file_name in f.readlines():
+
+                user_id = get_id_by_line(file_name)
+                if user_id in indexes:
+
+                    img = bob.io.base.load(
+                        os.path.join(base_path, file_name).rstrip("\n")
+                    )
+                    l_name = file_name.rstrip(".jpg\n")
+
+                    if current_id != user_id:
+                        current_id = user_id
+                        sys.stdout.write(
+                            f"Writing user {current_id}. {str(datetime.now())} \n"
+                        )
+                        sys.stdout.flush()
+
+                    landmarks = search_landmark(landmark_path, l_name)
+                    if landmarks is None:
+                        raise ValueError(f"Landmark for {file_name} not found!")
+
+                    # aligned_image = align(img, annotations)
+                    aligned_image = align(
+                        img, landmarks, factor=factor, cropped_image_size=(126, 126)
+                    )
+                    # debug leftover, disabled: bob.io.base.save(bob.io.image.to_bob(aligned_image), "xuucu.png")
+                    write_single_line_tfrecord(
+                        tf_writer, aligned_image, int(indexes[user_id]), user_id
+                    )
+
+
+def map_indexes(image_path, n_chunks):
+    """
+    Create a list of dictionaries mapping each VGG2 identity to an integer label,
+    split into `n_chunks` chunks, like:
+
+    {'n000001': 0,
+     'n000002': 1}
+
+    """
+
+    indexes = sorted(list(set([l.split("/")[0] for l in open(image_path).readlines()])))
+
+    identities_map = {indexes[i]: i for i in range(len(indexes))}
+
+    # SPLIT THE DICTIONARY IN TOTAL_CHUNKS
+    indexes_as_list = list(identities_map.items())
+    dict_as_list = np.array_split(indexes_as_list, n_chunks)
+    dicts = [dict(d) for d in dict_as_list]
+
+    return dicts
+
+
+if __name__ == "__main__":
+    args = docopt(__doc__)
+
+    VGG2_PATH = args["<vgg-path>"]
+    LANDMARK_PATH = os.path.join(VGG2_PATH, "bb_landmark", "loose_bb_train.csv")
+
+    if "SGE_TASK_LAST" in os.environ:
+        TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
+        CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
+    else:
+        TOTAL_CHUNKS = 1
+        CURRENT_CHUNK = 0
+
+    # TOTAL_CHUNKS = 140
+    # CURRENT_CHUNK = 0
+
+    TRAINING_LIST = os.path.join(VGG2_PATH, "train_list.txt")
+    # TEST_LIST = os.path.join(VGG2_PATH, "test_list.txt")
+
+    # MAP ALL INDEXES
+
+    indexes = map_indexes(TRAINING_LIST, TOTAL_CHUNKS)
+
+    generate_tfrecord(
+        os.path.join(VGG2_PATH, "train"),
+        LANDMARK_PATH,
+        TRAINING_LIST,
+        os.path.join(
+            args["<output-path>"], f"train_vgg2_chunk{CURRENT_CHUNK}.tfrecords"
+        ),
+        indexes[CURRENT_CHUNK],
+        factor=float(args["--factor"]),
+    )
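A short worked sketch of the loose-crop geometry implemented by `extend_annotations`: the bounding box is padded on each side by `factor` times its own height/width and clipped to the image borders before `FaceCrop` maps it onto the fixed output window. All numbers below are made up for illustration.

```python
# Bounding box in bob's (y, x) convention: 100 px tall, 80 px wide
annotations = {
    "topleft": (50.0, 30.0),
    "dimensions": (100.0, 80.0),
    "bottomright": (150.0, 110.0),
}
factor = 0.3
img_bottom_right = (250, 250)  # (height, width) of the source image

height, width = annotations["dimensions"]
new_topleft = (
    max(0, annotations["topleft"][0] - height * factor),  # 50 - 30 = 20
    max(0, annotations["topleft"][1] - width * factor),   # 30 - 24 = 6
)
new_bottomright = (
    min(img_bottom_right[0], annotations["bottomright"][0] + height * factor),  # 150 + 30 = 180
    min(img_bottom_right[1], annotations["bottomright"][1] + width * factor),   # 110 + 24 = 134
)
# FaceCrop then resizes this enlarged box to the (126, 126) output image.
```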
+
diff --git a/cnn_training/webface360_to_tfrecord.py b/cnn_training/webface360_to_tfrecord.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0c40a1ddfa656f54e4231d35f43de435a860a12
--- /dev/null
+++ b/cnn_training/webface360_to_tfrecord.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+"""
+Converts WEB360 to TF RECORD
+
+Usage:
+    webface360_to_tfrecord.py <web360-path> <output-path> 
+    webface360_to_tfrecord.py -h | --help
+
+Options:
+  -h --help             Show this screen.  
+
+"""
+
+
+from docopt import docopt
+import numpy as np
+import os
+import bob.io.image
+import bob.io.base
+import tensorflow as tf
+import sys
+from datetime import datetime
+
+
+def _bytes_feature(value):
+    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _int64_feature(value):
+    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
+
+
+def get_web360dirs():
+    """
+    Here I'm hardcoding the paths so we get consistent tfrecords,
+    just in case IT decides to restructure the web360 directory
+    """
+
+    return [
+        "0_0_000",
+        "0_0_001",
+        "0_0_002",
+        "0_1_003",
+        "0_1_004",
+        "0_1_005",
+        "0_2_006",
+        "0_2_007",
+        "0_2_008",
+        "0_3_009",
+        "0_3_010",
+        "0_3_011",
+        "0_4_012",
+        "0_4_013",
+        "0_4_014",
+        "0_5_015",
+        "0_5_016",
+        "0_5_017",
+        "0_6_018",
+        "0_6_019",
+        "0_6_020",
+        "1_0_000",
+        "1_0_001",
+        "1_0_002",
+        "1_1_003",
+        "1_1_004",
+        "1_1_005",
+        "1_2_006",
+        "1_2_007",
+        "1_2_008",
+        "1_3_009",
+        "1_3_010",
+        "1_3_011",
+        "1_4_012",
+        "1_4_013",
+        "1_4_014",
+        "1_5_015",
+        "1_5_016",
+        "1_5_017",
+        "1_6_018",
+        "1_6_019",
+        "1_6_020",
+        "2_0_000",
+        "2_0_001",
+        "2_0_002",
+        "2_1_003",
+        "2_1_004",
+        "2_1_005",
+        "2_2_006",
+        "2_2_007",
+        "2_2_008",
+        "2_3_009",
+        "2_3_010",
+        "2_3_011",
+        "2_4_012",
+        "2_4_013",
+        "2_4_014",
+        "2_5_015",
+        "2_5_016",
+        "2_5_017",
+        "2_6_018",
+        "2_6_019",
+        "2_6_020",
+    ]
+
+
+def get_keys(base_path):
+
+    root_dirs = get_web360dirs()
+    keys = dict()
+    offset = 0
+    for r in root_dirs:
+
+        identities_dir = os.path.join(base_path, r)
+        for i in os.listdir(identities_dir):
+            id_dir = os.path.join(identities_dir, i)
+            if os.path.isdir(id_dir):
+                keys[i] = offset
+                offset += 1
+    return keys
+
+
+def generate_tfrecord(
+    chunk_path, output_tf_record_path, keys,
+):
+    def write_single_line_tfrecord(writer, image, offset, user_id):
+
+        # Serializing
+        serialized_img = image.tobytes()
+
+        # Writing
+        feature = {
+            "data": _bytes_feature(serialized_img),
+            "label": _int64_feature(offset),
+            "key": _bytes_feature(str.encode(user_id)),
+        }
+
+        example = tf.train.Example(features=tf.train.Features(feature=feature))
+        writer.write(example.SerializeToString())
+
+    os.makedirs(os.path.dirname(output_tf_record_path), exist_ok=True)
+
+    with tf.io.TFRecordWriter(output_tf_record_path) as tf_writer:
+
+        for identity in os.listdir(chunk_path):
+            # Discarding the ones we've discarded already
+            if identity not in keys:
+                continue
+
+            identity_path = os.path.join(chunk_path, identity)
+            if not os.path.isdir(identity_path):
+                continue
+
+            sys.stdout.write(f"Processing {identity} \n")
+            sys.stdout.flush()
+
+            for image_path in os.listdir(identity_path):
+                image_path = os.path.join(identity_path, image_path)
+                if os.path.splitext(image_path)[-1] != ".jpg":
+                    continue
+                image = bob.io.image.to_matplotlib(bob.io.image.load(image_path))
+
+                write_single_line_tfrecord(tf_writer, image, keys[identity], identity)
+
+
+if __name__ == "__main__":
+    args = docopt(__doc__)
+
+    WEB360_PATH = args["<web360-path>"]
+    output_path = args["<output-path>"]
+
+    if "SGE_TASK_LAST" in os.environ:
+        TOTAL_CHUNKS = int(os.environ["SGE_TASK_LAST"])
+        CURRENT_CHUNK = int(os.environ["SGE_TASK_ID"]) - 1
+    else:
+        TOTAL_CHUNKS = 1
+        CURRENT_CHUNK = 0
+
+    # The label map is pre-computed once with get_keys(WEB360_PATH) and stored
+    # in a pickle, so that every chunk job uses the same identity -> label mapping
+    import pickle
+
+    with open("keys-web360.pickle", "rb") as f:
+        keys = pickle.load(f)
+
+    root_dirs = get_web360dirs()
+    output_tf_record_path = os.path.join(output_path, f"chunk_{CURRENT_CHUNK}.tfrecord")
+    chunk_path = os.path.join(WEB360_PATH, root_dirs[CURRENT_CHUNK])
+
+    generate_tfrecord(chunk_path, output_tf_record_path, keys)
+
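Because the chunk jobs above load a pre-computed `keys-web360.pickle` (the `get_keys` call is commented out), here is a minimal sketch of how that file could be produced once before launching the grid array; the module import and the path are assumptions for illustration.

```python
# One-off generation of the identity -> label map shared by all chunk jobs.
import pickle

from webface360_to_tfrecord import get_keys  # assumes the script is importable

WEB360_PATH = "/path/to/webface360"  # placeholder
keys = get_keys(WEB360_PATH)         # {identity: integer label}

with open("keys-web360.pickle", "wb") as f:
    pickle.dump(keys, f)
```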
diff --git a/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.ipynb b/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..8ab56222451c7530c2d398163c0be122023c4b46
--- /dev/null
+++ b/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.ipynb
@@ -0,0 +1,70 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## How to crop a face"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "CROPPED_IMAGE_HEIGHT = 128\n",
+    "CROPPED_IMAGE_WIDTH = 128\n",
+    "\n",
+    "EYE_CENTER_POS = (40, 64)\n",
+    "MOUTH_CENTER_POS = (88, 64)\n",
+    "\n",
+    "\n",
+    "mouth_center=(int((annotations['mouthleft'][0]+annotations['mouthright'][0])/2.0), int((annotations['mouthleft'][1]+annotations['mouthright'][1])/2.0))\n",
+    "\n",
+    "eye_center=(int((annotations['leye'][0]+annotations['reye'][0])/2.0), int((annotations['leye'][1]+annotations['reye'][1])/2.0))\n",
+    "\n",
+    "annotations['eye_center'] =eye_center\n",
+    "\n",
+    "annotations['mouth_center']=mouth_center\n",
+    "\n",
+    "light_cnn_face_cropper=bob.bio.face.preprocessor.FaceCrop(\n",
+    "    cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),\n",
+    "    cropped_positions={'eye_center': EYE_CENTER_POS, 'mouth_center': MOUTH_CENTER_POS})\n",
+    "\n",
+    "\n",
+    "normalized_image = light_cnn_face_cropper.crop_face( image, annotations=annotations)"
+   ]
+  }
+ ],
+ "metadata": {
+  "jupytext": {
+   "formats": "ipynb,py:light",
+   "text_representation": {
+    "extension": ".py",
+    "format_name": "light",
+    "format_version": "1.5",
+    "jupytext_version": "1.7.1"
+   }
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.py b/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac6b78f43fc4d4f5eb5ac85bb9db10f89db51c81
--- /dev/null
+++ b/doc/faq/.ipynb_checkpoints/facecrop-checkpoint.py
@@ -0,0 +1,54 @@
+# ---
+# jupyter:
+#   jupytext:
+#     formats: ipynb,py:light
+#     text_representation:
+#       extension: .py
+#       format_name: light
+#       format_version: '1.5'
+#       jupytext_version: 1.11.1
+#   kernelspec:
+#     display_name: Python 3
+#     language: python
+#     name: python3
+# ---
+
+# ## How to crop a face
+
+# +
+import bob.bio.face
+import bob.io.image
+
+# Loading Ada's images
+image = bob.io.image.load("./img/838_ada.jpg")
+
+# Setting Ada's eyes
+annotations = dict()
+annotations['reye'] = (265, 203)
+annotations['leye'] = (278, 294)
+
+# Final cropped size
+cropped_image_size = (224, 224)
+
+# Defining where we want the eyes to be located after the crop
+cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
+
+
+face_cropper=bob.bio.face.preprocessor.FaceCrop(
+    cropped_image_size=cropped_image_size,
+    cropped_positions=cropped_positions,
+    color_channel="rgb")
+
+# Crops always a batch of images
+cropped_image = face_cropper.transform([image], annotations=[annotations])
+
+
+# +
+import matplotlib.pyplot as plt
+
+figure = plt.figure()
+plt.subplot(121)
+bob.io.image.imshow(image)
+plt.subplot(122)
+bob.io.image.imshow(cropped_image[0].astype("uint8"))
+figure.show()
diff --git a/doc/faq/facecrop.ipynb b/doc/faq/facecrop.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..03044580b12b5036b22b697a8f57eae6d64d502c
--- /dev/null
+++ b/doc/faq/facecrop.ipynb
@@ -0,0 +1,109 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## How to crop a face"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "lines_to_next_cell": 2
+   },
+   "outputs": [],
+   "source": [
+    "import bob.bio.face\n",
+    "import bob.io.image\n",
+    "\n",
+    "# Loading Ada's images\n",
+    "image = bob.io.image.load(\"./img/838_ada.jpg\")\n",
+    "\n",
+    "# Setting Ada's eyes\n",
+    "annotations = dict()\n",
+    "annotations['reye'] = (265, 203)\n",
+    "annotations['leye'] = (278, 294)\n",
+    "\n",
+    "# Final cropped size\n",
+    "cropped_image_size = (224, 224)\n",
+    "\n",
+    "# Defining where we want the eyes to be located after the crop\n",
+    "cropped_positions = {\"leye\": (65, 150), \"reye\": (65, 77)}\n",
+    "\n",
+    "\n",
+    "face_cropper=bob.bio.face.preprocessor.FaceCrop(\n",
+    "    cropped_image_size=cropped_image_size,\n",
+    "    cropped_positions=cropped_positions,\n",
+    "    color_channel=\"rgb\")\n",
+    "\n",
+    "# Crops always a batch of images\n",
+    "cropped_image = face_cropper.transform([image], annotations=[annotations])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "62f5e3a70d3247e4beeed7318775d33a",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "%matplotlib widget\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "figure = plt.figure()\n",
+    "plt.subplot(121)\n",
+    "bob.io.image.imshow(image)\n",
+    "plt.subplot(122)\n",
+    "bob.io.image.imshow(cropped_image[0].astype(\"uint8\"))\n",
+    "figure.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "jupytext": {
+   "formats": "ipynb,py:light"
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}