diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 08ae74117cd54402e260369ed4cbf761e09cc283..89b2f412388a6867e31cf47eb2e9eacdcea1c416 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,16 +2,16 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/timothycrosley/isort
-    rev: 4.3.21-2
+    rev: 5.6.4
     hooks:
     - id: isort
-      args: [-sl]
+      args: [--sl, --line-length, "88"]
   - repo: https://github.com/psf/black
-    rev: stable
+    rev: 20.8b1
     hooks:
       - id: black
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v2.0.0
+    rev: v3.3.0
     hooks:
       - id: check-ast
       - id: check-case-conflict
@@ -19,22 +19,8 @@ repos:
       - id: end-of-file-fixer
       - id: debug-statements
       - id: check-added-large-files
-      - id: flake8
-  - repo: local
+  - repo: https://gitlab.com/pycqa/flake8
+    rev: 3.8.4
     hooks:
-      - id: sphinx-build
-        name: sphinx build
-        entry: python -m sphinx.cmd.build
-        args: [-a, -E, -W, doc, sphinx]
-        language: system
-        files: ^doc/
-        types: [file]
-        pass_filenames: false
-      - id: sphinx-doctest
-        name: sphinx doctest
-        entry: python -m sphinx.cmd.build
-        args: [-a, -E, -b, doctest, doc, sphinx]
-        language: system
-        files: ^doc/
-        types: [file]
-        pass_filenames: false
+      - id: flake8
+        args: [--ignore, "E203,W503,E501,E302,E111,E114,E121,E402"]
diff --git a/bob/learn/tensorflow/callbacks.py b/bob/learn/tensorflow/callbacks.py
index efd97e09bc8ddb67495f6f7f69d82b80feaf2afc..c39dc9e40daec26b30794c2bc2e930b81d5c4795 100644
--- a/bob/learn/tensorflow/callbacks.py
+++ b/bob/learn/tensorflow/callbacks.py
@@ -2,7 +2,6 @@ import json
 import os
 
 import tensorflow as tf
-from tensorflow.keras import callbacks
 
 
 class CustomBackupAndRestore(tf.keras.callbacks.experimental.BackupAndRestore):
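Note: for anyone reaching for these callbacks after the cleanup, a minimal sketch of wiring them up via ``add_backup_callback``, mirroring the usage in the example script removed at the end of this diff (checkpoint paths are hypothetical):

    import tensorflow as tf

    from bob.learn.tensorflow.callbacks import add_backup_callback

    # assemble the usual Keras callbacks in a dict ...
    callbacks = {
        "latest": tf.keras.callbacks.ModelCheckpoint("ckpt/latest", verbose=1),
        "nan": tf.keras.callbacks.TerminateOnNaN(),
    }
    # ... and let add_backup_callback attach the backup/restore logic on top
    callbacks = add_backup_callback(callbacks=callbacks, backup_dir="ckpt/backup")
    # model.fit(..., callbacks=callbacks)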
diff --git a/bob/learn/tensorflow/data/__init__.py b/bob/learn/tensorflow/data/__init__.py
index 3d31417512b35396459e77db40c3448d4372a34e..17b458ece1d90eefd22ac72f7a0bbebec008a9a2 100644
--- a/bob/learn/tensorflow/data/__init__.py
+++ b/bob/learn/tensorflow/data/__init__.py
@@ -1,5 +1,9 @@
-from .generator import Generator, dataset_using_generator
-from .tfrecords import dataset_to_tfrecord, dataset_from_tfrecord, TFRECORDS_EXT
+from .generator import Generator
+from .generator import dataset_using_generator  # noqa: F401
+from .tfrecords import TFRECORDS_EXT  # noqa: F401
+from .tfrecords import dataset_from_tfrecord  # noqa: F401
+from .tfrecords import dataset_to_tfrecord  # noqa: F401
+
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
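Note: the single-name-per-line imports above (isort ``--sl`` style) leave the re-exported API unchanged; a quick sanity check of what stays importable:

    from bob.learn.tensorflow.data import (
        TFRECORDS_EXT,
        Generator,
        dataset_from_tfrecord,
        dataset_to_tfrecord,
        dataset_using_generator,
    )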
diff --git a/bob/learn/tensorflow/data/generator.py b/bob/learn/tensorflow/data/generator.py
index 7e68c0246eb227862f6d79214c1000caaa6a8561..764996367e664e456620de151b60fa1f4a4eee04 100644
--- a/bob/learn/tensorflow/data/generator.py
+++ b/bob/learn/tensorflow/data/generator.py
@@ -14,21 +14,21 @@ class Generator:
     ----------
     epoch : int
         The number of epochs that have been passed so far.
+
     multiple_samples : :obj:`bool`, optional
         If true, it assumes that the bio database's samples actually contain
         multiple samples. This is useful when, for example, you want to treat
         video databases as image databases.
+
     reader : :obj:`object`, optional
         A callable with the signature of ``data, label, key = reader(sample)``
         which takes a sample and loads it.
+
     samples : [:obj:`object`]
         A list of samples to be given to ``reader`` to load the data.
+
     shuffle_on_epoch_end : :obj:`bool`, optional
         If True, it shuffles the samples at the end of each epoch.
-    output_types : (object, object, object)
-        The types of the returned samples.
-    output_shapes : ``(tf.TensorShape, tf.TensorShape, tf.TensorShape)``
-        The shapes of the returned samples.
     """
 
     def __init__(
@@ -76,10 +76,12 @@ class Generator:
 
     @property
     def output_types(self):
+        "The types of the returned samples"
         return self._output_types
 
     @property
     def output_shapes(self):
+        "The shapes of the returned samples"
         return self._output_shapes
 
     def __call__(self):
@@ -87,8 +89,8 @@ class Generator:
 
         Yields
         ------
-        (data, label, key) : tuple
-            A tuple containing the data, label, and the key.
+        object
+            The samples, one by one.
         """
         for sample in self.samples:
             dlk = self.reader(sample)
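Note: a minimal sketch of the reader contract documented above — the generator simply yields whatever ``reader`` returns, so the ``(data, label, key)`` triple below is the documented convention rather than a hard requirement (the sample objects are made up):

    import numpy as np

    from bob.learn.tensorflow.data import dataset_using_generator

    samples = [("key_0", 0), ("key_1", 1)]  # hypothetical sample objects

    def reader(sample):
        # data, label, key = reader(sample), as in the docstring
        key, label = sample
        data = np.zeros((112, 92), dtype="uint8")  # stand-in for a loaded image
        return data, label, key

    dataset = dataset_using_generator(samples, reader)
    for data, label, key in dataset.take(1):
        print(data.shape, label.numpy(), key.numpy())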
diff --git a/bob/learn/tensorflow/data/tfrecords.py b/bob/learn/tensorflow/data/tfrecords.py
index bacf49cf8c10bbecd8f613a52033c104c212290a..954680848dc625a5eabeeecadfd567775fdc0490 100644
--- a/bob/learn/tensorflow/data/tfrecords.py
+++ b/bob/learn/tensorflow/data/tfrecords.py
@@ -8,7 +8,6 @@ import json
 
 import tensorflow as tf
 
-
 TFRECORDS_EXT = ".tfrecords"
 
 
@@ -102,7 +101,7 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
         A dataset that contains the data from the TFRecord file.
     """
     # these imports are needed so that eval can work
-    from tensorflow import TensorShape
+    from tensorflow import TensorShape  # noqa: F401
 
     if isinstance(tfrecord, str):
         tfrecord = [tfrecord]
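Note: a round-trip sketch with the two helpers touched here, matching the doctest output added to doc/user_guide.rst below (the path is hypothetical):

    import tensorflow as tf

    from bob.learn.tensorflow.data import dataset_from_tfrecord
    from bob.learn.tensorflow.data import dataset_to_tfrecord

    dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3], [4, 5, 6]))
    path = "/tmp/example.tfrecords"  # hypothetical location
    dataset_to_tfrecord(dataset, path)  # serialize the dataset to disk
    restored = dataset_from_tfrecord(path)  # rebuild an equivalent dataset
    for sample in restored.take(2):
        print(sample)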
diff --git a/bob/learn/tensorflow/losses/__init__.py b/bob/learn/tensorflow/losses/__init__.py
index 65cdcab2e9e6b5ba4090b269eb5fc68e5544797c..267da85903a0c59f6ef199240ba30e3c047cbda7 100644
--- a/bob/learn/tensorflow/losses/__init__.py
+++ b/bob/learn/tensorflow/losses/__init__.py
@@ -1,4 +1,6 @@
-from .center_loss import CenterLoss, CenterLossLayer
+from .center_loss import CenterLoss
+from .center_loss import CenterLossLayer
+
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -16,8 +18,5 @@ def __appropriate__(*args):
         obj.__module__ = __name__
 
 
-__appropriate__(
-    CenterLoss,
-    CenterLossLayer
-)
+__appropriate__(CenterLoss, CenterLossLayer)
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/learn/tensorflow/losses/center_loss.py b/bob/learn/tensorflow/losses/center_loss.py
index 894a461200639042094c6b9e29e1721eee1478cb..19196df105d6ff52114c42439cb09228c887b9e3 100644
--- a/bob/learn/tensorflow/losses/center_loss.py
+++ b/bob/learn/tensorflow/losses/center_loss.py
@@ -6,10 +6,12 @@ class CenterLossLayer(tf.keras.layers.Layer):
 
     Attributes
     ----------
-    centers : tf.Variable
+    centers
         The variable that keeps track of centers.
+
     n_classes : int
         Number of classes of the task.
+
     n_features : int
         The size of prelogits.
     """
@@ -49,12 +51,17 @@ class CenterLoss(tf.keras.losses.Loss):
 
     Attributes
     ----------
-    alpha : float
+    alpha : float
         The moving average coefficient for updating centers in each batch.
-    centers : tf.Variable
+
+    centers
         The variable that keeps track of centers.
+
     centers_layer
         The layer that keeps track of centers.
+
+    update_centers : bool
+        If True, the centers are updated during training.
     """
 
     def __init__(
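Note: a usage sketch, lifted from the MSCeleba example script removed at the end of this diff — the layer owns the ``centers`` variable and the loss is handed that same layer (dimensions here are hypothetical):

    import tensorflow as tf

    from bob.learn.tensorflow.losses import CenterLoss
    from bob.learn.tensorflow.losses import CenterLossLayer

    N_CLASSES, N_FEATURES = 10, 128  # hypothetical task dimensions

    # the layer tracks the per-class centers of the prelogits ...
    inputs = tf.keras.Input((N_FEATURES,))
    prelogits = CenterLossLayer(
        n_classes=N_CLASSES, n_features=N_FEATURES, name="centers"
    )(inputs)
    model = tf.keras.Model(inputs, prelogits)

    # ... and the loss reads (and, during training, updates) those centers
    center_loss = CenterLoss(
        centers_layer=model.get_layer("centers"),
        alpha=0.9,
        reduction=tf.keras.losses.Reduction.NONE,
    )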
diff --git a/bob/learn/tensorflow/metrics/__init__.py b/bob/learn/tensorflow/metrics/__init__.py
index 72ee7b5fce68569d2359176ed78f076030030d47..c85e2397852389036e4b5da10c1cfdbb02009820 100644
--- a/bob/learn/tensorflow/metrics/__init__.py
+++ b/bob/learn/tensorflow/metrics/__init__.py
@@ -1,4 +1,6 @@
-from .embedding_accuracy import EmbeddingAccuracy, predict_using_tensors
+from .embedding_accuracy import EmbeddingAccuracy
+from .embedding_accuracy import predict_using_tensors  # noqa: F401
+
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
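Note: ``predict_using_tensors`` is how the removed example script scored embeddings; a sketch of that pattern, copied from the deleted MSCeleba script with the import moved to its new ``metrics`` home:

    import tensorflow as tf

    from bob.learn.tensorflow.metrics import predict_using_tensors

    def accuracy_from_embeddings(labels, prelogits):
        # nearest-neighbour prediction among the batch embeddings
        labels = tf.reshape(labels, (-1,))
        embeddings = tf.nn.l2_normalize(prelogits, 1)
        predictions = predict_using_tensors(embeddings, labels)
        return tf.math.equal(labels, predictions)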
diff --git a/bob/learn/tensorflow/models/__init__.py b/bob/learn/tensorflow/models/__init__.py
index 48804ec2338b14d47fe6e4e5c332b561d56daed9..6e89a96e2210d7cf9e5409f5654703d9e990ee6c 100644
--- a/bob/learn/tensorflow/models/__init__.py
+++ b/bob/learn/tensorflow/models/__init__.py
@@ -2,6 +2,7 @@ from .alexnet import AlexNet_simplified
 from .densenet import DenseNet
 from .mine import MineModel
 
+
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
     """Says object was actually declared here, an not on the import module.
@@ -18,9 +19,5 @@ def __appropriate__(*args):
         obj.__module__ = __name__
 
 
-__appropriate__(
-    AlexNet_simplified,
-    DenseNet,
-    MineModel
-)
+__appropriate__(AlexNet_simplified, DenseNet, MineModel)
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/learn/tensorflow/models/alexnet.py b/bob/learn/tensorflow/models/alexnet.py
index 1ff0907775faa81c6ce5777f0065bf1ad49b3e42..f49332018af25b1d0d5d92b141ac1079637e26bc 100644
--- a/bob/learn/tensorflow/models/alexnet.py
+++ b/bob/learn/tensorflow/models/alexnet.py
@@ -61,7 +61,7 @@ def AlexNet_simplified(name="AlexNet", **kwargs):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
 
     from bob.learn.tensorflow.utils import model_summary
 
diff --git a/bob/learn/tensorflow/models/autoencoder_face.py b/bob/learn/tensorflow/models/autoencoder_face.py
index bb92f92c6eb0658e31e6881e38a5794fc9a5a581..919752460afcdbc40da7cea7f625d065f068e79b 100644
--- a/bob/learn/tensorflow/models/autoencoder_face.py
+++ b/bob/learn/tensorflow/models/autoencoder_face.py
@@ -113,7 +113,7 @@ def autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh"):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
     from bob.learn.tensorflow.utils import model_summary
diff --git a/bob/learn/tensorflow/models/densenet.py b/bob/learn/tensorflow/models/densenet.py
index f17f5b860b2d3b92f06cd2b8586d5b590e0378e3..e09a82ba865c184a92a3dbf4fed81a04b92a9f54 100644
--- a/bob/learn/tensorflow/models/densenet.py
+++ b/bob/learn/tensorflow/models/densenet.py
@@ -446,7 +446,7 @@ class DeepPixBiS(tf.keras.Model):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
     from bob.learn.tensorflow.utils import model_summary
diff --git a/bob/learn/tensorflow/models/inception.py b/bob/learn/tensorflow/models/inception.py
index 5a8314aa9ce82c1ddadcab47be0a16a16b5a0739..c1b37c2af87b7a8c5c6962df4a9278fd41e44815 100644
--- a/bob/learn/tensorflow/models/inception.py
+++ b/bob/learn/tensorflow/models/inception.py
@@ -142,7 +142,7 @@ def GoogLeNet(*, num_classes=1000, name="GoogLeNet", **kwargs):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
     from bob.learn.tensorflow.utils import model_summary
diff --git a/bob/learn/tensorflow/models/inception_resnet_v1.py b/bob/learn/tensorflow/models/inception_resnet_v1.py
index 21ba09571652855e283f98a6d1578bf231d7d8fa..bfab04e16273ec410fc98326ec5360470ac5d8eb 100644
--- a/bob/learn/tensorflow/models/inception_resnet_v1.py
+++ b/bob/learn/tensorflow/models/inception_resnet_v1.py
@@ -6,7 +6,6 @@ import logging
 import tensorflow as tf
 from tensorflow.keras import backend as K
 from tensorflow.keras.layers import Activation
-from tensorflow.keras.layers import AvgPool2D
 from tensorflow.keras.layers import BatchNormalization
 from tensorflow.keras.layers import Concatenate
 from tensorflow.keras.layers import Conv2D
@@ -14,9 +13,7 @@ from tensorflow.keras.layers import Dense
 from tensorflow.keras.layers import Dropout
 from tensorflow.keras.layers import GlobalAvgPool2D
 from tensorflow.keras.layers import GlobalMaxPool2D
-from tensorflow.keras.layers import Input
 from tensorflow.keras.layers import MaxPool2D
-from tensorflow.keras.models import Model
 from tensorflow.keras.models import Sequential
 
 from bob.learn.tensorflow.utils import SequentialLayer
@@ -240,7 +237,7 @@ class ReductionA(tf.keras.layers.Layer):
         self,
         padding,
         k=256,
-        l=256,
+        l=256,  # noqa: E741
         m=384,
         n=384,
         use_atrous=False,
@@ -250,7 +247,7 @@ class ReductionA(tf.keras.layers.Layer):
         super().__init__(name=name, **kwargs)
         self.padding = padding
         self.k = k
-        self.l = l
+        self.l = l  # noqa: E741
         self.m = m
         self.n = n
         self.use_atrous = use_atrous
@@ -448,7 +445,6 @@ def InceptionResNetV1(
         Conv2D_BN(80, 1, padding="valid", name="Conv2d_3b_1x1"),
         Conv2D_BN(192, 3, padding="valid", name="Conv2d_4a_3x3"),
         Conv2D_BN(256, 3, strides=2, padding="valid", name="Conv2d_4b_3x3"),
-        
     ]
 
     # 5x block35 (Inception-ResNet-A block): 35 x 35 x 320
@@ -503,7 +499,6 @@ def InceptionResNetV1(
         )
     )
 
-    
     # 5x block8 (Inception-ResNet-C block): 8 x 8 x 2080
     for block_idx in range(1, 5):
         layers.append(
@@ -515,7 +510,7 @@ def InceptionResNetV1(
                 name=f"block8_{block_idx}",
             )
         )
-    
+
     layers.append(
         InceptionResnetBlock(
             n_channels=1792,
@@ -523,10 +518,10 @@ def InceptionResNetV1(
             activation=None,
             block_type="block8",
             block_idx=5,
-            name=f"block8_5",
+            name="block8_5",
         )
     )
-    
+
     if (include_top and pooling is None) or (bottleneck):
         pooling = "avg"
 
@@ -545,7 +540,7 @@ def InceptionResNetV1(
     # Classification block
     if include_top:
         layers.append(Dense(classes, name="logits"))
-    
+
     # Create model and call it on input to create its variables.
     model = Sequential(layers, name=name, **kwargs)
     model(img_input)
@@ -554,10 +549,11 @@ def InceptionResNetV1(
 
 
 if __name__ == "__main__":
-    import pkg_resources
-    from bob.learn.tensorflow.utils import model_summary
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
+    from bob.learn.tensorflow.utils import model_summary
+
     def print_model(inputs, outputs, name=None):
         print("")
         print("===============")
@@ -568,7 +564,9 @@ if __name__ == "__main__":
         del rows[-2]
         print(tabulate(rows, headers="firstrow", tablefmt="latex"))
 
-    model = InceptionResNetV1(input_shape=(160, 160, 3), bottleneck=True, include_top=False)
+    model = InceptionResNetV1(
+        input_shape=(160, 160, 3), bottleneck=True, include_top=False
+    )
     inputs = tf.keras.Input((160, 160, 3))
     outputs = model.call(inputs)
-    model.summary()
\ No newline at end of file
+    model.summary()
diff --git a/bob/learn/tensorflow/models/inception_resnet_v2.py b/bob/learn/tensorflow/models/inception_resnet_v2.py
index ad5b8629f52b27496aa5c92aeaa2e3793965042a..f8b146f373aba5bf2ef0dc61b557904099c9afac 100644
--- a/bob/learn/tensorflow/models/inception_resnet_v2.py
+++ b/bob/learn/tensorflow/models/inception_resnet_v2.py
@@ -744,10 +744,11 @@ def MultiScaleInceptionResNetV2(
 
 
 if __name__ == "__main__":
-    import pkg_resources
-    from bob.learn.tensorflow.utils import model_summary
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
+    from bob.learn.tensorflow.utils import model_summary
+
     def print_model(inputs, outputs, name=None):
         print("")
         print("===============")
diff --git a/bob/learn/tensorflow/models/lenet5.py b/bob/learn/tensorflow/models/lenet5.py
index 425b337725b60b6dc471c572c80df1f83eded74e..24ebf6e074c89f19176ca9ab57164947cd770cdf 100644
--- a/bob/learn/tensorflow/models/lenet5.py
+++ b/bob/learn/tensorflow/models/lenet5.py
@@ -31,7 +31,7 @@ def LeNet5_simplified(name="LeNet5", **kwargs):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
 
     from bob.learn.tensorflow.utils import model_summary
 
diff --git a/bob/learn/tensorflow/models/mine.py b/bob/learn/tensorflow/models/mine.py
index 4f766236f0cf33f6b1d73ffd14a2eafec636b1ad..176eb9d17cb236e3bf34ae5d3d094770587e66c5 100644
--- a/bob/learn/tensorflow/models/mine.py
+++ b/bob/learn/tensorflow/models/mine.py
@@ -7,6 +7,7 @@ Mutual Information Neural Estimation (https://arxiv.org/pdf/1801.04062.pdf)
 
 import tensorflow as tf
 
+
 class MineModel(tf.keras.Model):
     """
 
@@ -21,7 +22,7 @@ class MineModel(tf.keras.Model):
         super().__init__(name=name, **kwargs)
         self.units = units
         self.is_mine_f = is_mine_f
-        
+
         self.transformer_x = tf.keras.layers.Dense(self.units)
         self.transformer_z = tf.keras.layers.Dense(self.units)
         self.transformer_xz = tf.keras.layers.Dense(self.units)
@@ -32,19 +33,21 @@ class MineModel(tf.keras.Model):
             h1_x = self.transformer_x(x)
             h1_z = self.transformer_z(z)
             h1 = tf.keras.layers.ReLU()(h1_x + h1_z)
-            h2 = self.transformer_output(tf.keras.layers.ReLU()(self.transformer_xz(h1)))
+            h2 = self.transformer_output(
+                tf.keras.layers.ReLU()(self.transformer_xz(h1))
+            )
 
             return h2
 
         def compute_lower_bound(x, z):
-            t_xz = compute(x,z)
+            t_xz = compute(x, z)
             z_shuffle = tf.random.shuffle(z)
             t_x_z = compute(x, z_shuffle)
 
             if self.is_mine_f:
                 lb = -(
                     tf.reduce_mean(t_xz, axis=0)
-                    - tf.reduce_mean(tf.math.exp(t_x_z-1))
+                    - tf.reduce_mean(tf.math.exp(t_x_z - 1))
                 )
             else:
                 lb = -(
@@ -60,9 +63,7 @@ class MineModel(tf.keras.Model):
 
         return compute_lower_bound(x, z)
 
-
     def get_config(self):
         config = super().get_config()
         config.update({"units": self.units})
         return config
-
diff --git a/bob/learn/tensorflow/models/msu_patch.py b/bob/learn/tensorflow/models/msu_patch.py
index 21ad7bf98ca0adc7bf2efab14177ff82b2c3531f..cb50bdb0182d2df5c3326e63d9b98339227bcd80 100644
--- a/bob/learn/tensorflow/models/msu_patch.py
+++ b/bob/learn/tensorflow/models/msu_patch.py
@@ -65,7 +65,7 @@ def MSUPatch(name="MSUPatch", **kwargs):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
     from bob.learn.tensorflow.utils import model_summary
diff --git a/bob/learn/tensorflow/models/simple_cnn.py b/bob/learn/tensorflow/models/simple_cnn.py
index f4c4ec56f71dbaecec83b0ddf23a0010fb62539a..a435feaac2341892d304b5a03302c034868fe797 100644
--- a/bob/learn/tensorflow/models/simple_cnn.py
+++ b/bob/learn/tensorflow/models/simple_cnn.py
@@ -41,7 +41,7 @@ def SimpleCNN(input_shape=(28, 28, 3), inputs=None, name="SimpleCNN", **kwargs):
 
 
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
 
     from bob.learn.tensorflow.utils import model_summary
diff --git a/bob/learn/tensorflow/scripts/datasets_to_tfrecords.py b/bob/learn/tensorflow/scripts/datasets_to_tfrecords.py
index 96f16e7ae1e0e93752e0c383a4c769665de3e6ad..65ebe9a2af58b5821a6db47f2bd30284bae62e1c 100644
--- a/bob/learn/tensorflow/scripts/datasets_to_tfrecords.py
+++ b/bob/learn/tensorflow/scripts/datasets_to_tfrecords.py
@@ -12,7 +12,6 @@ from bob.extension.scripts.click_helper import ConfigCommand
 from bob.extension.scripts.click_helper import ResourceOption
 from bob.extension.scripts.click_helper import verbosity_option
 
-
 logger = logging.getLogger(__name__)
 
 
@@ -45,8 +44,9 @@ def datasets_to_tfrecords(dataset, output, force, **kwargs):
     To use this script with SGE, modify your dataset (e.g., shard it) and output a part
     of the dataset based on the SGE_TASK_ID environment variable in your config file.
     """
-    from bob.extension.scripts.click_helper import log_parameters
     import os
+
+    from bob.extension.scripts.click_helper import log_parameters
     from bob.learn.tensorflow.data.tfrecords import dataset_to_tfrecord
     from bob.learn.tensorflow.data.tfrecords import tfrecord_name_and_json_name
 
diff --git a/bob/learn/tensorflow/tests/data/db_to_tfrecords_config.py b/bob/learn/tensorflow/tests/data/db_to_tfrecords_config.py
index 52799ddd8bf9cd2c1cbf2f52bab0419c90678d62..e5eb76ec81bdc32ad900d2ec78a55107158886bf 100644
--- a/bob/learn/tensorflow/tests/data/db_to_tfrecords_config.py
+++ b/bob/learn/tensorflow/tests/data/db_to_tfrecords_config.py
@@ -1,10 +1,12 @@
 import tensorflow as tf
+
 from bob.learn.tensorflow.data import dataset_using_generator
 
 mnist = tf.keras.datasets.mnist
 
 (x_train, y_train), (_, _) = mnist.load_data()
-samples = (tf.keras.backend.arange(len(x_train)), x_train, y_train)
+x_train, y_train = x_train[:10], y_train[:10]
+samples = zip(tf.keras.backend.arange(len(x_train)), x_train, y_train)
 
 
 def reader(sample):
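Note: the switch from a tuple of whole arrays to ``zip`` matters because the generator iterates samples one at a time; a sketch of the difference:

    import numpy as np

    keys = np.arange(3)
    images = np.zeros((3, 28, 28), dtype="uint8")
    labels = np.array([0, 1, 2])

    # a tuple (keys, images, labels) is a single three-element "sample";
    # zip(...) instead yields one (key, image, label) triple per sample
    for key, image, label in zip(keys, images, labels):
        print(int(key), image.shape, int(label))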
diff --git a/bob/learn/tensorflow/tests/test_datasets_to_tfrecords.py b/bob/learn/tensorflow/tests/test_datasets_to_tfrecords.py
index 4b6548abab4d14e5b308cb73a96fe7ddacc89d25..42318f4142ec7f3ced7ea6d98c30d1737a7a7162 100644
--- a/bob/learn/tensorflow/tests/test_datasets_to_tfrecords.py
+++ b/bob/learn/tensorflow/tests/test_datasets_to_tfrecords.py
@@ -1,10 +1,11 @@
 import pkg_resources
 import tensorflow as tf
+from click.testing import CliRunner
+
 from bob.extension.config import load
 from bob.extension.scripts.click_helper import assert_click_runner_result
 from bob.learn.tensorflow.data.tfrecords import dataset_from_tfrecord
 from bob.learn.tensorflow.scripts.datasets_to_tfrecords import datasets_to_tfrecords
-from click.testing import CliRunner
 
 regenerate_reference = False
 
diff --git a/bob/learn/tensorflow/tests/test_mine.py b/bob/learn/tensorflow/tests/test_mine.py
index 89cf943b858bd87e7c61707a9616c7894254c1fd..e0be0ac2620aed99f68209d9adb3c20f4f179c08 100644
--- a/bob/learn/tensorflow/tests/test_mine.py
+++ b/bob/learn/tensorflow/tests/test_mine.py
@@ -1,33 +1,34 @@
 import numpy as np
 import tensorflow as tf
+
 from bob.learn.tensorflow.models import MineModel
 
+
 def run_mine(is_mine_f):
     np.random.seed(10)
     N = 20000
     d = 1
-    EPOCHS = 100
-
-    X = np.sign(np.random.normal(0.,1.,[N, d]))
-    Z = X + np.random.normal(0.,np.sqrt(0.2),[N, d])
+    EPOCHS = 10
 
+    X = np.sign(np.random.normal(0.0, 1.0, [N, d]))
+    Z = X + np.random.normal(0.0, np.sqrt(0.2), [N, d])
 
     from sklearn.feature_selection import mutual_info_regression
+
     mi_numerical = mutual_info_regression(X.reshape(-1, 1), Z.ravel())[0]
 
     model = MineModel(is_mine_f=is_mine_f)
     model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
 
-    callback = model.fit(
-        x=[X, Z], epochs=EPOCHS, verbose=1, batch_size=100
-    )
+    callback = model.fit(x=[X, Z], epochs=EPOCHS, verbose=1, batch_size=100)
     mine = -np.array(callback.history["loss"])[-1]
 
-    np.allclose(mine,mi_numerical, atol=0.01)
+    np.allclose(mine, mi_numerical, atol=0.01)  # result not asserted; smoke run only
 
 
 def test_mine():
     run_mine(False)
 
+
 def test_mine_f():
-    run_mine(True)
\ No newline at end of file
+    run_mine(True)
diff --git a/bob/learn/tensorflow/utils/__init__.py b/bob/learn/tensorflow/utils/__init__.py
index 444a481681f2f06bb9bb06377db7c492e3f201fc..a5c56fe7275b95e45ebdbcfc763042d7422b38bc 100644
--- a/bob/learn/tensorflow/utils/__init__.py
+++ b/bob/learn/tensorflow/utils/__init__.py
@@ -1,3 +1,3 @@
-from .keras import *
-from .math import *
-from .image import *
+from .image import *  # noqa: F401,F403
+from .keras import *  # noqa: F401,F403
+from .math import *  # noqa: F401,F403
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 87eacc443e98438c71d6ce9d2ed316569d7a71a0..9ee0693b163280e8784070ddead2e12966c34759 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -6,6 +6,7 @@ package:
   version: {{ environ.get('BOB_PACKAGE_VERSION', '0.0.1') }}
 
 build:
+  skip: true  # [not linux]
   number: {{ environ.get('BOB_BUILD_NUMBER', 0) }}
   run_exports:
     - {{ pin_subpackage(name) }}
@@ -21,33 +22,29 @@ requirements:
     - python {{ python }}
     - setuptools {{ setuptools }}
     - bob.extension
-    - bob.io.base
-    - bob.io.image
-    - bob.measure
+    - numpy {{ numpy }}
+    - scipy {{ scipy }}
     - click {{ click }}
     - click-plugins {{ click_plugins }}
+    - scikit-learn {{ scikit_learn }}
+    - tensorflow {{ tensorflow }}  # [linux]
   run:
     - python
     - setuptools
-    - numpy
-    - scipy
+    - {{ pin_compatible('numpy') }}
+    - {{ pin_compatible('scipy') }}
     - {{ pin_compatible('click') }}
     - {{ pin_compatible('click-plugins') }}
-    - tensorflow >=1.4
+    - {{ pin_compatible('tensorflow') }}  # [linux]
+  run_constrained:
+    - {{ pin_compatible('scikit-learn') }}
 
 test:
   imports:
     - {{ name }}
   commands:
     - bob tf --help
-    - bob tf compute-statistics --help
-    - bob tf db-to-tfrecords --help
-    - bob tf eval --help
-    - bob tf predict-bio --help
-    - bob tf style-transfer --help
-    - bob tf train --help
-    - bob tf train-and-evaluate --help
-    - bob tf trim --help
+    - bob tf datasets-to-tfrecords --help
     - nosetests --with-coverage --cover-package={{ name }} -sv {{ name }}
     - sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx
     - sphinx-build -aEb doctest {{ project_dir }}/doc sphinx
@@ -59,10 +56,9 @@ test:
     - coverage
     - sphinx
     - sphinx_rtd_theme
-    - bob.io.image
     - bob.db.atnt
     - matplotlib
-    - gridtk
+    - scikit-learn
 
 about:
   home: https://www.idiap.ch/software/bob/
diff --git a/develop.cfg b/develop.cfg
index 1e13ea43d333158f85a596194e38c606816cb052..ceea826680420b22eab9e26106af006122b0a9c4 100644
--- a/develop.cfg
+++ b/develop.cfg
@@ -4,19 +4,15 @@
 
 [buildout]
 parts = scripts
-eggs = bob.learn.tensorflow
-       bob.db.casia_webface
-       bob.db.mobio
-       gridtk
+eggs = bob.extension
+       bob.db.atnt
+       bob.learn.tensorflow
 
 extensions = bob.buildout
              mr.developer
 auto-checkout = *
-develop = src/bob.db.mnist
-          src/gridtk
-          src/bob.db.casia_webface
-          src/bob.db.mobio
-          src/bob.db.lfw
+develop = src/bob.extension
+          src/bob.db.atnt
           .
 
 ; options for bob.buildout
@@ -26,12 +22,8 @@ newest = false
 
 
 [sources]
-bob.db.mnist = git git@gitlab.idiap.ch:bob/bob.db.mnist.git
-bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base.git
-bob.db.mobio = git git@gitlab.idiap.ch:bob/bob.db.mobio.git
-bob.db.lfw = git git@gitlab.idiap.ch:bob/bob.db.lfw.git
-bob.db.casia_webface = git git@gitlab.idiap.ch:bob/bob.db.casia_webface.git
-gridtk = git git@gitlab.idiap.ch:bob/gridtk
+bob.extension = git git@gitlab.idiap.ch:bob/bob.extension.git
+bob.db.atnt = git git@gitlab.idiap.ch:bob/bob.db.atnt.git
 
 
 [scripts]
diff --git a/doc/conf.py b/doc/conf.py
index a6c97e9a724dfb84560f81b42e393b344de7cf33..a7a68a78b4ccf2560fe7a3cf2a10aa91cf40808e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,15 +1,10 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
 
-import glob
 import os
-import sys
 import time
 
 import pkg_resources
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
 import sphinx_rtd_theme
 
 # For inter-documentation mapping:
@@ -234,9 +229,9 @@ rst_epilog = """
 autoclass_content = "class"
 autodoc_member_order = "bysource"
 autodoc_default_options = {
-  "members": True,
-  "undoc-members": True,
-  "show-inheritance": True,
+    "members": True,
+    "undoc-members": True,
+    "show-inheritance": True,
 }
 
 
@@ -247,5 +242,3 @@ if os.path.exists(sphinx_requirements):
     )
 else:
     intersphinx_mapping = link_documentation()
-
-
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
index 3589e5c2a88cea6c0c41831c2c5339eefd073bce..4d519b031adaa9f7512abf3328a24be22319d739 100644
--- a/doc/nitpick-exceptions.txt
+++ b/doc/nitpick-exceptions.txt
@@ -2,3 +2,5 @@ py:class list
 py:exc ValueError
 py:class tensorflow.python.estimator.estimator.Estimator
 py:class tensorflow_estimator.python.estimator.estimator.Estimator
+py:class tensorflow.python.keras.losses.Loss
+py:class tensorflow.python.keras.engine.base_layer.Layer
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 2fec97b9c55bcd617d16a2cef044b327c3007560..6cf7d2859bb0123af75c09d651f27455c1c6c360 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -7,63 +7,27 @@
 ============
 
 
-Estimators
-==========
-
-.. autosummary::
-    bob.learn.tensorflow.estimators.Logits
-    bob.learn.tensorflow.estimators.LogitsCenterLoss
-    bob.learn.tensorflow.estimators.Triplet
-    bob.learn.tensorflow.estimators.Siamese
-    bob.learn.tensorflow.estimators.Regressor
-    bob.learn.tensorflow.estimators.MovingAverageOptimizer
-    bob.learn.tensorflow.estimators.learning_rate_decay_fn
-
-
-
-Architectures
-=============
-
+Models
+======
 
+.. todo::
+  Summarize the models
 
 Data
 ====
 
 .. autosummary::
-    bob.learn.tensorflow.dataset.bio.BioGenerator
-    bob.learn.tensorflow.dataset.image.shuffle_data_and_labels_image_augmentation
-    bob.learn.tensorflow.dataset.siamese_image.shuffle_data_and_labels_image_augmentation
-    bob.learn.tensorflow.dataset.triplet_image.shuffle_data_and_labels_image_augmentation
-    bob.learn.tensorflow.dataset.tfrecords.shuffle_data_and_labels_image_augmentation
-    bob.learn.tensorflow.dataset.tfrecords.shuffle_data_and_labels
-    bob.learn.tensorflow.dataset.generator.dataset_using_generator
-    bob.learn.tensorflow.utils.util.to_channels_last
-    bob.learn.tensorflow.utils.util.to_channels_first
-
-
-Style Transfer
-==============
-
-.. autosummary::
-    bob.learn.tensorflow.style_transfer.do_style_transfer
+    bob.learn.tensorflow.data.dataset_using_generator
+    bob.learn.tensorflow.data.dataset_to_tfrecord
+    bob.learn.tensorflow.data.dataset_from_tfrecord
 
 
 Losses
 ======
 
 .. autosummary::
-    bob.learn.tensorflow.loss.mean_cross_entropy_loss
-    bob.learn.tensorflow.loss.mean_cross_entropy_center_loss
-    bob.learn.tensorflow.loss.contrastive_loss
-    bob.learn.tensorflow.loss.triplet_loss
-    bob.learn.tensorflow.loss.triplet_average_loss
-    bob.learn.tensorflow.loss.triplet_fisher_loss
-    bob.learn.tensorflow.loss.linear_gram_style_loss
-    bob.learn.tensorflow.loss.content_loss
-    bob.learn.tensorflow.loss.denoising_loss
-    bob.learn.tensorflow.loss.balanced_softmax_cross_entropy_loss_weights
-    bob.learn.tensorflow.loss.balanced_sigmoid_cross_entropy_loss_weights
-
+    bob.learn.tensorflow.losses.CenterLossLayer
+    bob.learn.tensorflow.losses.CenterLoss
 
 
 
@@ -71,17 +35,6 @@ Detailed Information
 ====================
 
 .. automodule:: bob.learn.tensorflow
-.. automodule:: bob.learn.tensorflow.estimators
-.. automodule:: bob.learn.tensorflow.dataset
-.. automodule:: bob.learn.tensorflow.dataset.generator
-.. automodule:: bob.learn.tensorflow.dataset.bio
-.. automodule:: bob.learn.tensorflow.dataset.image
-.. automodule:: bob.learn.tensorflow.dataset.siamese_image
-.. automodule:: bob.learn.tensorflow.dataset.triplet_image
-.. automodule:: bob.learn.tensorflow.dataset.tfrecords
-.. automodule:: bob.learn.tensorflow.network
-.. automodule:: bob.learn.tensorflow.network.SimpleCNN
+.. automodule:: bob.learn.tensorflow.data
 .. automodule:: bob.learn.tensorflow.utils
-.. automodule:: bob.learn.tensorflow.utils.util
-.. automodule:: bob.learn.tensorflow.style_transfer
-.. automodule:: bob.learn.tensorflow.loss
+.. automodule:: bob.learn.tensorflow.losses
diff --git a/doc/user_guide.rst b/doc/user_guide.rst
index 52b530db3726d5594aa7435a1cad72a1343c5a17..b36953da60e98298d01e9a280d28a565a90bd998 100644
--- a/doc/user_guide.rst
+++ b/doc/user_guide.rst
@@ -76,6 +76,7 @@ If you are working with Bob databases, below is an example of converting them to
 
     >>> dataset = dataset_using_generator(samples, reader)
     >>> dataset
+    <FlatMapDataset shapes: ((112, 92), ()), types: (tf.uint8, tf.int32)>
 
 Create TFRecords from tf.data.Datasets
 ======================================
@@ -91,6 +92,7 @@ TFRecords:
     >>> dataset_to_tfrecord(dataset, path)
     >>> dataset = dataset_from_tfrecord(path)
     >>> dataset
+    <MapDataset shapes: ((112, 92), ()), types: (tf.uint8, tf.int32)>
 
 There is also a script called ``bob tf datasets-to-tfrecords`` that wraps the
 :any:`bob.learn.tensorflow.data.dataset_to_tfrecord` for easy Grid job
diff --git a/examples/MSCeleba_centerloss_mixed_precision_multi_worker.py b/examples/MSCeleba_centerloss_mixed_precision_multi_worker.py
deleted file mode 100644
index bd4b7e53b2fd2e6c71361784be926b542a2633c1..0000000000000000000000000000000000000000
--- a/examples/MSCeleba_centerloss_mixed_precision_multi_worker.py
+++ /dev/null
@@ -1,322 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import pickle
-from functools import partial
-from multiprocessing import cpu_count
-
-import pkg_resources
-import tensorflow as tf
-from bob.extension import rc
-from bob.learn.tensorflow.callbacks import add_backup_callback
-from bob.learn.tensorflow.losses import CenterLoss
-from bob.learn.tensorflow.losses import CenterLossLayer
-from bob.learn.tensorflow.models.inception_resnet_v2 import InceptionResNetV2
-from bob.learn.tensorflow.utils import predict_using_tensors
-from tensorflow.keras import layers
-from tensorflow.keras.mixed_precision import experimental as mixed_precision
-
-policy = mixed_precision.Policy("mixed_float16")
-mixed_precision.set_policy(policy)
-
-
-TRAIN_TF_RECORD_PATHS = (
-    f"{rc['htface']}/databases/tfrecords/msceleba/"
-    "tfrecord_182x_hand_prunned_44/*.tfrecord"
-)
-VALIDATION_TF_RECORD_PATHS = (
-    f"{rc['htface']}/databases/tfrecords/lfw/182x/RGB/*.tfrecord"
-)
-# there are 2812 samples in the validation set
-VALIDATION_SAMPLES = 2812
-
-CHECKPOINT = f"{rc['temp']}/models/inception_v2_batchnorm_rgb_msceleba_mixed_precision"
-
-AUTOTUNE = tf.data.experimental.AUTOTUNE
-TFRECORD_PARALLEL_READ = cpu_count()
-N_CLASSES = 87662
-DATA_SHAPE = (182, 182, 3)  # size of faces
-DATA_TYPE = tf.uint8
-OUTPUT_SHAPE = (160, 160)
-
-SHUFFLE_BUFFER = int(2e4)
-
-LEARNING_RATE = 0.1
-BATCH_SIZE = 90 * 2  # should be a multiple of 8
-# we want to run 35 epochs of tfrecords. There are 959083 samples in train tfrecords,
-# depending on batch size, steps per epoch, and keras epoch multiplier should change
-EPOCHS = 35
-# number of training steps to do before validating a model. This also defines an epoch
-# for keras which is not really true. We want to evaluate every 180000 (90 * 2000)
-# samples
-STEPS_PER_EPOCH = 180000 // BATCH_SIZE
-# np.ceil(959083/180000=5.33)
-KERAS_EPOCH_MULTIPLIER = 6
-
-VALIDATION_BATCH_SIZE = 38  # should be a multiple of 8
-
-
-FEATURES = {
-    "data": tf.io.FixedLenFeature([], tf.string),
-    "label": tf.io.FixedLenFeature([], tf.int64),
-    "key": tf.io.FixedLenFeature([], tf.string),
-}
-LOSS_WEIGHTS = {"cross_entropy": 1.0, "center_loss": 0.01}
-
-
-def decode_tfrecords(x):
-    features = tf.io.parse_single_example(x, FEATURES)
-    image = tf.io.decode_raw(features["data"], DATA_TYPE)
-    image = tf.reshape(image, DATA_SHAPE)
-    features["data"] = image
-    return features
-
-
-def get_preprocessor():
-    preprocessor = tf.keras.Sequential(
-        [
-            # rotate before cropping
-            # 5 random degree rotation
-            layers.experimental.preprocessing.RandomRotation(5 / 360),
-            layers.experimental.preprocessing.RandomCrop(
-                height=OUTPUT_SHAPE[0], width=OUTPUT_SHAPE[1]
-            ),
-            layers.experimental.preprocessing.RandomFlip("horizontal"),
-            # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
-            # [-0.99609375, 0.99609375]
-            layers.experimental.preprocessing.Rescaling(
-                scale=1 / 128, offset=-127.5 / 128
-            ),
-        ]
-    )
-    return preprocessor
-
-
-def preprocess(preprocessor, features, augment=False):
-    image = features["data"]
-    label = features["label"]
-    image = preprocessor(image, training=augment)
-    return image, label
-
-
-def prepare_dataset(tf_record_paths, batch_size, shuffle=False, augment=False):
-    ds = tf.data.Dataset.list_files(tf_record_paths, shuffle=shuffle)
-    ds = tf.data.TFRecordDataset(ds, num_parallel_reads=TFRECORD_PARALLEL_READ)
-    if shuffle:
-        # ignore order and read files as soon as they come in
-        ignore_order = tf.data.Options()
-        ignore_order.experimental_deterministic = False
-        ds = ds.with_options(ignore_order)
-    ds = ds.map(decode_tfrecords).prefetch(buffer_size=AUTOTUNE)
-    if shuffle:
-        ds = ds.shuffle(SHUFFLE_BUFFER).repeat(EPOCHS)
-    preprocessor = get_preprocessor()
-    ds = ds.batch(batch_size).map(
-        partial(preprocess, preprocessor, augment=augment),
-        num_parallel_calls=AUTOTUNE,
-    )
-
-    # Use buffered prefecting on all datasets
-    return ds.prefetch(buffer_size=AUTOTUNE)
-    # return ds.apply(tf.data.experimental.prefetch_to_device(
-    #         device, buffer_size=AUTOTUNE))
-
-
-def accuracy_from_embeddings(labels, prelogits):
-    labels = tf.reshape(labels, (-1,))
-    embeddings = tf.nn.l2_normalize(prelogits, 1)
-    predictions = predict_using_tensors(embeddings, labels)
-    return tf.math.equal(labels, predictions)
-
-
-class CustomModel(tf.keras.Model):
-    def compile(
-        self,
-        cross_entropy,
-        center_loss,
-        loss_weights,
-        train_loss,
-        train_cross_entropy,
-        train_center_loss,
-        test_acc,
-        global_batch_size,
-        **kwargs,
-    ):
-        super().compile(**kwargs)
-        self.cross_entropy = cross_entropy
-        self.center_loss = center_loss
-        self.loss_weights = loss_weights
-        self.train_loss = train_loss
-        self.train_cross_entropy = train_cross_entropy
-        self.train_center_loss = train_center_loss
-        self.test_acc = test_acc
-        self.global_batch_size = global_batch_size
-
-    def train_step(self, data):
-        images, labels = data
-        with tf.GradientTape() as tape:
-            logits, prelogits = self(images, training=True)
-            loss_cross = self.cross_entropy(labels, logits)
-            loss_center = self.center_loss(labels, prelogits)
-            loss = (
-                loss_cross * self.loss_weights[self.cross_entropy.name]
-                + loss_center * self.loss_weights[self.center_loss.name]
-            )
-            unscaled_loss = tf.nn.compute_average_loss(
-                loss, global_batch_size=self.global_batch_size
-            )
-            loss = self.optimizer.get_scaled_loss(unscaled_loss)
-
-        trainable_vars = self.trainable_variables
-        gradients = tape.gradient(loss, trainable_vars)
-        gradients = self.optimizer.get_unscaled_gradients(gradients)
-        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
-
-        self.train_loss(unscaled_loss)
-        self.train_cross_entropy(loss_cross)
-        self.train_center_loss(loss_center)
-        return {
-            m.name: m.result()
-            for m in [self.train_loss, self.train_cross_entropy, self.train_center_loss]
-        }
-
-    def test_step(self, data):
-        images, labels = data
-        logits, prelogits = self(images, training=False)
-        self.test_acc(accuracy_from_embeddings(labels, prelogits))
-        return {m.name: m.result() for m in [self.test_acc]}
-
-
-def create_model():
-
-    model = InceptionResNetV2(
-        include_top=True,
-        classes=N_CLASSES,
-        bottleneck=True,
-        input_shape=OUTPUT_SHAPE + (3,),
-    )
-    float32_layer = layers.Activation("linear", dtype="float32")
-
-    prelogits = model.get_layer("Bottleneck/BatchNorm").output
-    prelogits = CenterLossLayer(
-        n_classes=N_CLASSES, n_features=prelogits.shape[-1], name="centers"
-    )(prelogits)
-    prelogits = float32_layer(prelogits)
-    logits = float32_layer(model.get_layer("logits").output)
-    model = CustomModel(
-        inputs=model.input, outputs=[logits, prelogits], name=model.name
-    )
-    return model
-
-
-def build_and_compile_model(global_batch_size):
-    model = create_model()
-
-    cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
-        from_logits=True, name="cross_entropy", reduction=tf.keras.losses.Reduction.NONE
-    )
-    center_loss = CenterLoss(
-        centers_layer=model.get_layer("centers"),
-        alpha=0.9,
-        name="center_loss",
-        reduction=tf.keras.losses.Reduction.NONE,
-    )
-
-    optimizer = tf.keras.optimizers.RMSprop(
-        learning_rate=LEARNING_RATE, rho=0.9, momentum=0.9, epsilon=1.0
-    )
-    optimizer = mixed_precision.LossScaleOptimizer(optimizer, loss_scale="dynamic")
-
-    train_loss = tf.keras.metrics.Mean(name="loss")
-    train_cross_entropy = tf.keras.metrics.Mean(name="cross_entropy")
-    train_center_loss = tf.keras.metrics.Mean(name="center_loss")
-
-    test_acc = tf.keras.metrics.Mean(name="accuracy")
-
-    model.compile(
-        optimizer=optimizer,
-        cross_entropy=cross_entropy,
-        center_loss=center_loss,
-        loss_weights=LOSS_WEIGHTS,
-        train_loss=train_loss,
-        train_cross_entropy=train_cross_entropy,
-        train_center_loss=train_center_loss,
-        test_acc=test_acc,
-        global_batch_size=global_batch_size,
-    )
-    return model
-
-
-def train_and_evaluate(tf_config):
-    os.environ["TF_CONFIG"] = json.dumps(tf_config)
-
-    per_worker_batch_size = BATCH_SIZE
-    num_workers = len(tf_config["cluster"]["worker"])
-
-    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
-
-    global_batch_size = per_worker_batch_size * num_workers
-    val_global_batch_size = VALIDATION_BATCH_SIZE * num_workers
-
-    train_ds = prepare_dataset(
-        TRAIN_TF_RECORD_PATHS, batch_size=global_batch_size, shuffle=True, augment=True
-    )
-
-    val_ds = prepare_dataset(
-        VALIDATION_TF_RECORD_PATHS,
-        batch_size=val_global_batch_size,
-        shuffle=False,
-        augment=False,
-    )
-
-    with strategy.scope():
-        model = build_and_compile_model(global_batch_size=global_batch_size)
-
-    val_metric_name = "val_accuracy"
-
-    def scheduler(epoch, lr):
-        # 20 epochs at 0.1, 10 at 0.01 and 5 0.001
-        # The epoch number here is Keras's which is different from actual epoch number
-        epoch = epoch // KERAS_EPOCH_MULTIPLIER
-        if epoch in range(20):
-            return 0.1
-        elif epoch in range(20, 30):
-            return 0.01
-        else:
-            return 0.001
-
-    callbacks = {
-        "latest": tf.keras.callbacks.ModelCheckpoint(f"{CHECKPOINT}/latest", verbose=1),
-        "best": tf.keras.callbacks.ModelCheckpoint(
-            f"{CHECKPOINT}/best",
-            monitor=val_metric_name,
-            save_best_only=True,
-            mode="max",
-            verbose=1,
-        ),
-        "tensorboard": tf.keras.callbacks.TensorBoard(
-            log_dir=f"{CHECKPOINT}/logs", update_freq=15, profile_batch="10,50"
-        ),
-        "lr": tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1),
-        # "lr": tf.keras.callbacks.ReduceLROnPlateau(
-        #     monitor=val_metric_name, factor=0.2, patience=5, min_lr=0.001
-        # ),
-        "nan": tf.keras.callbacks.TerminateOnNaN(),
-    }
-    callbacks = add_backup_callback(
-        callbacks=callbacks, backup_dir=f"{CHECKPOINT}/backup"
-    )
-
-    model.fit(
-        train_ds,
-        validation_data=val_ds,
-        epochs=EPOCHS * KERAS_EPOCH_MULTIPLIER,
-        steps_per_epoch=STEPS_PER_EPOCH,
-        validation_steps=VALIDATION_SAMPLES // VALIDATION_BATCH_SIZE,
-        callbacks=callbacks,
-        verbose=2 if os.environ.get("SGE_TASK_ID") else 1,
-    )
-
-
-if __name__ == "__main__":
-    train_and_evaluate({})
diff --git a/requirements.txt b/requirements.txt
index d4073f98022ae3c0d249fe10c5072dca5c20739d..c92ac746dd6e31c039ccc5354ef6c7e79a89e103 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,6 @@
 setuptools
 bob.extension
-bob.io.base
-bob.io.image
-bob.measure
 numpy
+scipy
 click >= 7
-scipy
\ No newline at end of file
+click-plugins
diff --git a/setup.py b/setup.py
index a2ce901099acc2d2c5f608b0f317b5bf22a6d17f..8e00780b0c80252e84e0785b7142d72c6bb7b04e 100644
--- a/setup.py
+++ b/setup.py
@@ -43,29 +43,16 @@ setup(
     entry_points={
         # main entry for bob tf cli
         "bob.cli": [
-            "tf = bob.learn.tensorflow.script.tf:tf",
-            "keras = bob.learn.tensorflow.script.keras:keras",
+            "tf = bob.learn.tensorflow.scripts.tf:tf",
+            "keras = bob.learn.tensorflow.scripts.keras:keras",
         ],
         # bob tf scripts
         "bob.learn.tensorflow.cli": [
-            "cache-dataset = bob.learn.tensorflow.script.cache_dataset:cache_dataset",
-            "compute-statistics = bob.learn.tensorflow.script.compute_statistics:compute_statistics",
-            "dataset-to-hdf5 = bob.learn.tensorflow.script.db_to_tfrecords:dataset_to_hdf5",
-            "datasets-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:datasets_to_tfrecords",
-            "db-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:db_to_tfrecords",
-            "describe-tfrecord = bob.learn.tensorflow.script.db_to_tfrecords:describe_tfrecord",
-            "distance-matrix = bob.learn.tensorflow.script.cgm:distance_matrix",
-            "eval = bob.learn.tensorflow.script.eval:eval",
-            "predict = bob.learn.tensorflow.script.predict_bio:predict",
-            "predict-bio = bob.learn.tensorflow.script.predict_bio:predict_bio",
-            "style-transfer = bob.learn.tensorflow.script.style_transfer:style_transfer",
-            "train = bob.learn.tensorflow.script.train:train",
-            "train-and-evaluate = bob.learn.tensorflow.script.train_and_evaluate:train_and_evaluate",
-            "trim = bob.learn.tensorflow.script.trim:trim",
+            "datasets-to-tfrecords = bob.learn.tensorflow.scripts.datasets_to_tfrecords:datasets_to_tfrecords",
         ],
         # bob keras scripts
         "bob.learn.tensorflow.keras_cli": [
-            "fit = bob.learn.tensorflow.script.fit:fit",
+            "fit = bob.learn.tensorflow.scripts.fit:fit",
         ],
     },
     # Classifiers are important if you plan to distribute this package through
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a21395717ede5ab9219ebc022f44fa23199bada4
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,3 @@
+bob.db.atnt
+matplotlib
+scikit-learn
diff --git a/version.txt b/version.txt
index 52f720e0457f2a463ffa96ae433940d7699e03df..33b3a3ef6e67cc7961f819a4ad1caafda4bad3a4 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-1.2.2b0
\ No newline at end of file
+1.2.2b0