diff --git a/bob/__init__.py b/bob/__init__.py
index 2ca5e07cb73f0bdddcb863ef497955964087e301..edbb4090fca046b19d22d3982711084621bff3be 100644
--- a/bob/__init__.py
+++ b/bob/__init__.py
@@ -1,3 +1,4 @@
 # see https://docs.python.org/3/library/pkgutil.html
 from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
\ No newline at end of file
+
+__path__ = extend_path(__path__, __name__)
diff --git a/bob/learn/__init__.py b/bob/learn/__init__.py
index 2ab1e28b150f0549def9963e9e87de3fdd6b2579..edbb4090fca046b19d22d3982711084621bff3be 100644
--- a/bob/learn/__init__.py
+++ b/bob/learn/__init__.py
@@ -1,3 +1,4 @@
 # see https://docs.python.org/3/library/pkgutil.html
 from pkgutil import extend_path
+
 __path__ = extend_path(__path__, __name__)
diff --git a/bob/learn/tensorflow/__init__.py b/bob/learn/tensorflow/__init__.py
index b17348644c602f4d16633e7c392b19a37c59086a..1b0e2db6061b8fcd5e328762aadf66d77a398193 100644
--- a/bob/learn/tensorflow/__init__.py
+++ b/bob/learn/tensorflow/__init__.py
@@ -3,4 +3,5 @@ def get_config():
     Returns a string containing the configuration information.
     """
     import bob.extension
+
     return bob.extension.get_config(__name__)
diff --git a/bob/learn/tensorflow/dataset/generator.py b/bob/learn/tensorflow/dataset/generator.py
index 54d700949e5f72af67c6861a0f1161e310e97136..a8510487eda8a6f6e483a3c4cc298e9f0fc4870c 100644
--- a/bob/learn/tensorflow/dataset/generator.py
+++ b/bob/learn/tensorflow/dataset/generator.py
@@ -30,7 +30,14 @@ class Generator:
         The shapes of the returned samples.
     """
 
-    def __init__(self, samples, reader, multiple_samples=False, shuffle_on_epoch_end=False, **kwargs):
+    def __init__(
+        self,
+        samples,
+        reader,
+        multiple_samples=False,
+        shuffle_on_epoch_end=False,
+        **kwargs
+    ):
         super().__init__(**kwargs)
         self.reader = reader
         self.samples = list(samples)
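
For orientation, a minimal sketch of how this `Generator` is typically wired into `tf.data`. The `reader` and sample descriptors here are hypothetical, and the commented `from_generator` call assumes the class exposes `output_types`/`output_shapes`, as its docstring suggests:

```python
import numpy as np
from bob.learn.tensorflow.dataset.generator import Generator

# Hypothetical reader: maps one sample descriptor to a (data, label) pair.
def reader(sample):
    path, label = sample
    data = np.zeros((112, 112, 3), dtype="float32")  # stand-in for an image load
    return data, label

samples = [("a.png", 0), ("b.png", 1)]
generator = Generator(samples, reader, shuffle_on_epoch_end=True)

# Assumed attributes, per the docstring above:
# dataset = tf.data.Dataset.from_generator(
#     generator, generator.output_types, generator.output_shapes
# )
```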
diff --git a/bob/learn/tensorflow/dataset/image.py b/bob/learn/tensorflow/dataset/image.py
index bc8c315e2ca8729100406e5c7f33af26d838ecb0..239d83c251e856e8e2c527e9d7782390b3a87139 100644
--- a/bob/learn/tensorflow/dataset/image.py
+++ b/bob/learn/tensorflow/dataset/image.py
@@ -7,22 +7,24 @@ from functools import partial
 from . import append_image_augmentation, from_filename_to_tensor
 
 
-def shuffle_data_and_labels_image_augmentation(filenames,
-                                               labels,
-                                               data_shape,
-                                               data_type,
-                                               batch_size,
-                                               epochs=None,
-                                               buffer_size=10**3,
-                                               gray_scale=False,
-                                               output_shape=None,
-                                               random_flip=False,
-                                               random_brightness=False,
-                                               random_contrast=False,
-                                               random_saturation=False,
-                                               random_rotate=False,
-                                               per_image_normalization=True,
-                                               extension=None):
+def shuffle_data_and_labels_image_augmentation(
+    filenames,
+    labels,
+    data_shape,
+    data_type,
+    batch_size,
+    epochs=None,
+    buffer_size=10 ** 3,
+    gray_scale=False,
+    output_shape=None,
+    random_flip=False,
+    random_brightness=False,
+    random_contrast=False,
+    random_saturation=False,
+    random_rotate=False,
+    per_image_normalization=True,
+    extension=None,
+):
     """
     Dump random batches from a list of image paths and labels:
 
@@ -95,7 +97,8 @@ def shuffle_data_and_labels_image_augmentation(filenames,
         random_saturation=random_saturation,
         random_rotate=random_rotate,
         per_image_normalization=per_image_normalization,
-        extension=extension)
+        extension=extension,
+    )
 
     dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
 
@@ -103,19 +106,21 @@ def shuffle_data_and_labels_image_augmentation(filenames,
     return data, labels
 
 
-def create_dataset_from_path_augmentation(filenames,
-                                          labels,
-                                          data_shape,
-                                          data_type,
-                                          gray_scale=False,
-                                          output_shape=None,
-                                          random_flip=False,
-                                          random_brightness=False,
-                                          random_contrast=False,
-                                          random_saturation=False,
-                                          random_rotate=False,
-                                          per_image_normalization=True,
-                                          extension=None):
+def create_dataset_from_path_augmentation(
+    filenames,
+    labels,
+    data_shape,
+    data_type,
+    gray_scale=False,
+    output_shape=None,
+    random_flip=False,
+    random_brightness=False,
+    random_contrast=False,
+    random_saturation=False,
+    random_rotate=False,
+    per_image_normalization=True,
+    extension=None,
+):
     """
     Create a dataset from a list of image file paths and labels
 
@@ -149,26 +154,29 @@ def create_dataset_from_path_augmentation(filenames,
         random_saturation=random_saturation,
         random_rotate=random_rotate,
         per_image_normalization=per_image_normalization,
-        extension=extension)
+        extension=extension,
+    )
 
     dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
     dataset = dataset.map(parser)
     return dataset
 
 
-def image_augmentation_parser(filename,
-                              label,
-                              data_shape,
-                              data_type,
-                              gray_scale=False,
-                              output_shape=None,
-                              random_flip=False,
-                              random_brightness=False,
-                              random_contrast=False,
-                              random_saturation=False,
-                              random_rotate=False,
-                              per_image_normalization=True,
-                              extension=None):
+def image_augmentation_parser(
+    filename,
+    label,
+    data_shape,
+    data_type,
+    gray_scale=False,
+    output_shape=None,
+    random_flip=False,
+    random_brightness=False,
+    random_contrast=False,
+    random_saturation=False,
+    random_rotate=False,
+    per_image_normalization=True,
+    extension=None,
+):
     """
     Parses a single tf.Example into image and label tensors.
     """
@@ -179,7 +187,7 @@ def image_augmentation_parser(filename,
     # Reshape image data into the original shape
     image = tf.reshape(image, data_shape)
 
-    #Applying image augmentation
+    # Applying image augmentation
     image = append_image_augmentation(
         image,
         gray_scale=gray_scale,
@@ -189,12 +197,13 @@ def image_augmentation_parser(filename,
         random_contrast=random_contrast,
         random_saturation=random_saturation,
         random_rotate=random_rotate,
-        per_image_normalization=per_image_normalization)
+        per_image_normalization=per_image_normalization,
+    )
 
     label = tf.cast(label, tf.int64)
     features = dict()
-    features['data'] = image
-    features['key'] = filename
+    features["data"] = image
+    features["key"] = filename
 
     return features, label
 
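As a usage sketch (hypothetical paths and shapes; this API builds TF1-style graph tensors, so `data` and `labels` are meant to be fed to an estimator or evaluated in a session):

```python
import tensorflow as tf
from bob.learn.tensorflow.dataset.image import (
    shuffle_data_and_labels_image_augmentation,
)

# Hypothetical file list; data_shape/data_type describe the decoded images.
data, labels = shuffle_data_and_labels_image_augmentation(
    filenames=["images/0.png", "images/1.png"],
    labels=[0, 1],
    data_shape=(112, 112, 3),
    data_type=tf.uint8,
    batch_size=32,
    epochs=1,
    random_flip=True,
)
```
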
diff --git a/bob/learn/tensorflow/dataset/tfrecords.py b/bob/learn/tensorflow/dataset/tfrecords.py
index 0fbf69e9b88aac01f3925810ff84f9b02e65056f..32f732733eb1d5d34c1a630f0900a24c52a6309e 100644
--- a/bob/learn/tensorflow/dataset/tfrecords.py
+++ b/bob/learn/tensorflow/dataset/tfrecords.py
@@ -227,5 +227,3 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
 #     key = tf.cast(features["key"], tf.string)
 
 #     return image, label, key
-
-
diff --git a/bob/learn/tensorflow/gan/losses.py b/bob/learn/tensorflow/gan/losses.py
index 46be1eaa8c76b3a253968d609a1948fc08a013cd..843f795c10c077c0b3757aadccbedefd70806909 100644
--- a/bob/learn/tensorflow/gan/losses.py
+++ b/bob/learn/tensorflow/gan/losses.py
@@ -14,26 +14,26 @@ def relativistic_discriminator_loss(
 ):
     """Relativistic (average) loss
 
-  Args:
-    discriminator_real_outputs: Discriminator output on real data.
-    discriminator_gen_outputs: Discriminator output on generated data. Expected
-      to be in the range of (-inf, inf).
-    label_smoothing: The amount of smoothing for positive labels. This technique
-      is taken from `Improved Techniques for Training GANs`
-      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
-    real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
-      `real_data`, and must be broadcastable to `real_data` (i.e., all
-      dimensions must be either `1`, or the same as the corresponding
-      dimension).
-    generated_weights: Same as `real_weights`, but for `generated_data`.
-    scope: The scope for the operations performed in computing the loss.
-    loss_collection: collection to which this loss will be added.
-    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
-    add_summaries: Whether or not to add summaries for the loss.
-
-  Returns:
-    A loss Tensor. The shape depends on `reduction`.
-  """
+    Args:
+      discriminator_real_outputs: Discriminator output on real data.
+      discriminator_gen_outputs: Discriminator output on generated data. Expected
+        to be in the range of (-inf, inf).
+      label_smoothing: The amount of smoothing for positive labels. This technique
+        is taken from `Improved Techniques for Training GANs`
+        (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
+      real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
+        `real_data`, and must be broadcastable to `real_data` (i.e., all
+        dimensions must be either `1`, or the same as the corresponding
+        dimension).
+      generated_weights: Same as `real_weights`, but for `generated_data`.
+      scope: The scope for the operations performed in computing the loss.
+      loss_collection: collection to which this loss will be added.
+      reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
+      add_summaries: Whether or not to add summaries for the loss.
+
+    Returns:
+      A loss Tensor. The shape depends on `reduction`.
+    """
     with tf.compat.v1.name_scope(
         scope,
         "discriminator_relativistic_loss",
@@ -75,8 +75,12 @@ def relativistic_discriminator_loss(
         tf.compat.v1.losses.add_loss(loss, loss_collection)
 
         if add_summaries:
-            tf.compat.v1.summary.scalar("discriminator_gen_relativistic_loss", loss_on_generated)
-            tf.compat.v1.summary.scalar("discriminator_real_relativistic_loss", loss_on_real)
+            tf.compat.v1.summary.scalar(
+                "discriminator_gen_relativistic_loss", loss_on_generated
+            )
+            tf.compat.v1.summary.scalar(
+                "discriminator_real_relativistic_loss", loss_on_real
+            )
             tf.compat.v1.summary.scalar("discriminator_relativistic_loss", loss)
 
     return loss
@@ -96,26 +100,26 @@ def relativistic_generator_loss(
 ):
     """Relativistic (average) loss
 
-  Args:
-    discriminator_real_outputs: Discriminator output on real data.
-    discriminator_gen_outputs: Discriminator output on generated data. Expected
-      to be in the range of (-inf, inf).
-    label_smoothing: The amount of smoothing for positive labels. This technique
-      is taken from `Improved Techniques for Training GANs`
-      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
-    real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
-      `real_data`, and must be broadcastable to `real_data` (i.e., all
-      dimensions must be either `1`, or the same as the corresponding
-      dimension).
-    generated_weights: Same as `real_weights`, but for `generated_data`.
-    scope: The scope for the operations performed in computing the loss.
-    loss_collection: collection to which this loss will be added.
-    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
-    add_summaries: Whether or not to add summaries for the loss.
-
-  Returns:
-    A loss Tensor. The shape depends on `reduction`.
-  """
+    Args:
+      discriminator_real_outputs: Discriminator output on real data.
+      discriminator_gen_outputs: Discriminator output on generated data. Expected
+        to be in the range of (-inf, inf).
+      label_smoothing: The amount of smoothing for positive labels. This technique
+        is taken from `Improved Techniques for Training GANs`
+        (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
+      real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
+        `real_data`, and must be broadcastable to `real_data` (i.e., all
+        dimensions must be either `1`, or the same as the corresponding
+        dimension).
+      generated_weights: Same as `real_weights`, but for `generated_data`.
+      scope: The scope for the operations performed in computing the loss.
+      loss_collection: collection to which this loss will be added.
+      reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
+      add_summaries: Whether or not to add summaries for the loss.
+
+    Returns:
+      A loss Tensor. The shape depends on `reduction`.
+    """
     with tf.compat.v1.name_scope(
         scope,
         "generator_relativistic_loss",
@@ -164,8 +168,12 @@ def relativistic_generator_loss(
         tf.compat.v1.losses.add_loss(loss, loss_collection)
 
         if add_summaries:
-            tf.compat.v1.summary.scalar("generator_gen_relativistic_loss", loss_on_generated)
-            tf.compat.v1.summary.scalar("generator_real_relativistic_loss", loss_on_real)
+            tf.compat.v1.summary.scalar(
+                "generator_gen_relativistic_loss", loss_on_generated
+            )
+            tf.compat.v1.summary.scalar(
+                "generator_real_relativistic_loss", loss_on_real
+            )
             tf.compat.v1.summary.scalar("generator_relativistic_loss", loss)
 
     return loss
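
For reference, a NumPy sketch of the relativistic average (RaGAN) formulation these docstrings describe, ignoring the label-smoothing, weighting, and reduction options (the reduction to a plain mean is an assumption):

```python
import numpy as np

def sigmoid_ce(logits, targets):
    # Numerically stable sigmoid cross-entropy, as in
    # tf.nn.sigmoid_cross_entropy_with_logits.
    return np.maximum(logits, 0) - logits * targets + np.log1p(np.exp(-np.abs(logits)))

def relativistic_d_loss(d_real, d_gen):
    # Real samples should score above the average fake score, and fakes
    # below the average real score; the generator loss swaps the targets.
    loss_real = sigmoid_ce(d_real - d_gen.mean(), np.ones_like(d_real))
    loss_gen = sigmoid_ce(d_gen - d_real.mean(), np.zeros_like(d_gen))
    return loss_real.mean() + loss_gen.mean()
```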
diff --git a/bob/learn/tensorflow/gan/spectral_normalization.py b/bob/learn/tensorflow/gan/spectral_normalization.py
index ad2ecfaa4487892592f733858c88f0c24b569912..a2f228f07f66016ac0cc0f01a7d5f2831d4d5be4 100644
--- a/bob/learn/tensorflow/gan/spectral_normalization.py
+++ b/bob/learn/tensorflow/gan/spectral_normalization.py
@@ -38,279 +38,290 @@ from tensorflow.python.ops import variable_scope
 from tensorflow.python.platform import tf_logging as logging
 
 __all__ = [
-    'compute_spectral_norm', 'spectral_normalize', 'spectral_norm_regularizer',
-    'spectral_normalization_custom_getter', 'keras_spectral_normalization'
+    "compute_spectral_norm",
+    "spectral_normalize",
+    "spectral_norm_regularizer",
+    "spectral_normalization_custom_getter",
+    "keras_spectral_normalization",
 ]
 
 # tf.bfloat16 should work, but tf.matmul converts those to tf.float32 which then
 # can't directly be assigned back to the tf.bfloat16 variable.
 _OK_DTYPES_FOR_SPECTRAL_NORM = (dtypes.float16, dtypes.float32, dtypes.float64)
-_PERSISTED_U_VARIABLE_SUFFIX = 'spectral_norm_u'
+_PERSISTED_U_VARIABLE_SUFFIX = "spectral_norm_u"
 
 
 def compute_spectral_norm(w_tensor, power_iteration_rounds=1, name=None):
-  """Estimates the largest singular value in the weight tensor.
-
-  Args:
-    w_tensor: The weight matrix whose spectral norm should be computed.
-    power_iteration_rounds: The number of iterations of the power method to
-      perform. A higher number yields a better approximation.
-    name: An optional scope name.
-
-  Returns:
-    The largest singular value (the spectral norm) of w.
-  """
-  with variable_scope.variable_scope(name, 'spectral_norm'):
-    # The paper says to flatten convnet kernel weights from
-    # (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D
-    # kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to
-    # (KH * KW * C_in, C_out), and similarly for other layers that put output
-    # channels as last dimension.
-    # n.b. this means that w here is equivalent to w.T in the paper.
-    w = array_ops.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))
-
-    # Persisted approximation of first left singular vector of matrix `w`.
-    u_var = variable_scope.get_variable(
-        _PERSISTED_U_VARIABLE_SUFFIX,
-        shape=(w.shape[0], 1),
-        dtype=w.dtype,
-        initializer=init_ops.random_normal_initializer(),
-        trainable=False)
-    u = u_var
-
-    # Use power iteration method to approximate spectral norm.
-    for _ in range(power_iteration_rounds):
-      # `v` approximates the first right singular vector of matrix `w`.
-      v = nn.l2_normalize(math_ops.matmul(array_ops.transpose(w), u))
-      u = nn.l2_normalize(math_ops.matmul(w, v))
-
-    # Update persisted approximation.
-    with ops.control_dependencies([u_var.assign(u, name='update_u')]):
-      u = array_ops.identity(u)
-
-    u = array_ops.stop_gradient(u)
-    v = array_ops.stop_gradient(v)
-
-    # Largest singular value of `w`.
-    spectral_norm = math_ops.matmul(
-        math_ops.matmul(array_ops.transpose(u), w), v)
-    spectral_norm.shape.assert_is_fully_defined()
-    spectral_norm.shape.assert_is_compatible_with([1, 1])
-
-    return spectral_norm[0][0]
+    """Estimates the largest singular value in the weight tensor.
+
+    Args:
+      w_tensor: The weight matrix whose spectral norm should be computed.
+      power_iteration_rounds: The number of iterations of the power method to
+        perform. A higher number yields a better approximation.
+      name: An optional scope name.
+
+    Returns:
+      The largest singular value (the spectral norm) of w.
+    """
+    with variable_scope.variable_scope(name, "spectral_norm"):
+        # The paper says to flatten convnet kernel weights from
+        # (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D
+        # kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to
+        # (KH * KW * C_in, C_out), and similarly for other layers that put output
+        # channels as last dimension.
+        # n.b. this means that w here is equivalent to w.T in the paper.
+        w = array_ops.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))
+
+        # Persisted approximation of first left singular vector of matrix `w`.
+        u_var = variable_scope.get_variable(
+            _PERSISTED_U_VARIABLE_SUFFIX,
+            shape=(w.shape[0], 1),
+            dtype=w.dtype,
+            initializer=init_ops.random_normal_initializer(),
+            trainable=False,
+        )
+        u = u_var
+
+        # Use power iteration method to approximate spectral norm.
+        for _ in range(power_iteration_rounds):
+            # `v` approximates the first right singular vector of matrix `w`.
+            v = nn.l2_normalize(math_ops.matmul(array_ops.transpose(w), u))
+            u = nn.l2_normalize(math_ops.matmul(w, v))
+
+        # Update persisted approximation.
+        with ops.control_dependencies([u_var.assign(u, name="update_u")]):
+            u = array_ops.identity(u)
+
+        u = array_ops.stop_gradient(u)
+        v = array_ops.stop_gradient(v)
+
+        # Largest singular value of `w`.
+        spectral_norm = math_ops.matmul(math_ops.matmul(array_ops.transpose(u), w), v)
+        spectral_norm.shape.assert_is_fully_defined()
+        spectral_norm.shape.assert_is_compatible_with([1, 1])
+
+        return spectral_norm[0][0]
 
 
 def spectral_normalize(w, power_iteration_rounds=1, name=None):
-  """Normalizes a weight matrix by its spectral norm.
+    """Normalizes a weight matrix by its spectral norm.
 
-  Args:
-    w: The weight matrix to be normalized.
-    power_iteration_rounds: The number of iterations of the power method to
-      perform. A higher number yields a better approximation.
-    name: An optional scope name.
+    Args:
+      w: The weight matrix to be normalized.
+      power_iteration_rounds: The number of iterations of the power method to
+        perform. A higher number yields a better approximation.
+      name: An optional scope name.
 
-  Returns:
-    A normalized weight matrix tensor.
-  """
-  with variable_scope.variable_scope(name, 'spectral_normalize'):
-    w_normalized = w / compute_spectral_norm(
-        w, power_iteration_rounds=power_iteration_rounds)
-    return array_ops.reshape(w_normalized, w.get_shape())
+    Returns:
+      A normalized weight matrix tensor.
+    """
+    with variable_scope.variable_scope(name, "spectral_normalize"):
+        w_normalized = w / compute_spectral_norm(
+            w, power_iteration_rounds=power_iteration_rounds
+        )
+        return array_ops.reshape(w_normalized, w.get_shape())
 
 
 def spectral_norm_regularizer(scale, power_iteration_rounds=1, scope=None):
-  """Returns a functions that can be used to apply spectral norm regularization.
-
-  Small spectral norms enforce a small Lipschitz constant, which is necessary
-  for Wasserstein GANs.
-
-  Args:
-    scale: A scalar multiplier. 0.0 disables the regularizer.
-    power_iteration_rounds: The number of iterations of the power method to
-      perform. A higher number yields a better approximation.
-    scope: An optional scope name.
-
-  Returns:
-    A function with the signature `sn(weights)` that applies spectral norm
-    regularization.
-
-  Raises:
-    ValueError: If scale is negative or if scale is not a float.
-  """
-  if isinstance(scale, numbers.Integral):
-    raise ValueError('scale cannot be an integer: %s' % scale)
-  if isinstance(scale, numbers.Real):
-    if scale < 0.0:
-      raise ValueError(
-          'Setting a scale less than 0 on a regularizer: %g' % scale)
-    if scale == 0.0:
-      logging.info('Scale of 0 disables regularizer.')
-      return lambda _: None
-
-  def sn(weights, name=None):
-    """Applies spectral norm regularization to weights."""
-    with ops.name_scope(scope, 'SpectralNormRegularizer', [weights]) as name:
-      scale_t = ops.convert_to_tensor(
-          scale, dtype=weights.dtype.base_dtype, name='scale')
-      return math_ops.multiply(
-          scale_t,
-          compute_spectral_norm(
-              weights, power_iteration_rounds=power_iteration_rounds),
-          name=name)
-
-  return sn
+    """Returns a functions that can be used to apply spectral norm regularization.
+
+    Small spectral norms enforce a small Lipschitz constant, which is necessary
+    for Wasserstein GANs.
+
+    Args:
+      scale: A scalar multiplier. 0.0 disables the regularizer.
+      power_iteration_rounds: The number of iterations of the power method to
+        perform. A higher number yields a better approximation.
+      scope: An optional scope name.
+
+    Returns:
+      A function with the signature `sn(weights)` that applies spectral norm
+      regularization.
+
+    Raises:
+      ValueError: If scale is negative or if scale is not a float.
+    """
+    if isinstance(scale, numbers.Integral):
+        raise ValueError("scale cannot be an integer: %s" % scale)
+    if isinstance(scale, numbers.Real):
+        if scale < 0.0:
+            raise ValueError("Setting a scale less than 0 on a regularizer: %g" % scale)
+        if scale == 0.0:
+            logging.info("Scale of 0 disables regularizer.")
+            return lambda _: None
+
+    def sn(weights, name=None):
+        """Applies spectral norm regularization to weights."""
+        with ops.name_scope(scope, "SpectralNormRegularizer", [weights]) as name:
+            scale_t = ops.convert_to_tensor(
+                scale, dtype=weights.dtype.base_dtype, name="scale"
+            )
+            return math_ops.multiply(
+                scale_t,
+                compute_spectral_norm(
+                    weights, power_iteration_rounds=power_iteration_rounds
+                ),
+                name=name,
+            )
+
+    return sn
 
 
 def _default_name_filter(name):
-  """A filter function to identify common names of weight variables.
-
-  Args:
-    name: The variable name.
-
-  Returns:
-    Whether `name` is a standard name for a weight/kernel variables used in the
-    Keras, tf.layers, tf.contrib.layers or tf.contrib.slim libraries.
-  """
-  match = re.match(r'(.*\/)?(depthwise_|pointwise_)?(weights|kernel)$', name)
-  return match is not None
-
-
-def spectral_normalization_custom_getter(name_filter=_default_name_filter,
-                                         power_iteration_rounds=1):
-  """Custom getter that performs Spectral Normalization on a weight tensor.
-
-  Specifically it divides the weight tensor by its largest singular value. This
-  is intended to stabilize GAN training, by making the discriminator satisfy a
-  local 1-Lipschitz constraint.
-
-  Based on [Spectral Normalization for Generative Adversarial Networks][sn-gan].
-
-  [sn-gan]: https://openreview.net/forum?id=B1QRgziT-
-
-  To reproduce an SN-GAN, apply this custom_getter to every weight tensor of
-  your discriminator. The last dimension of the weight tensor must be the number
-  of output channels.
-
-  Apply this to layers by supplying this as the `custom_getter` of a
-  `tf.compat.v1.variable_scope`. For example:
-
-    with tf.compat.v1.variable_scope('discriminator',
-                           custom_getter=spectral_norm_getter()):
-      net = discriminator_fn(net)
-
-  IMPORTANT: Keras does not respect the custom_getter supplied by the
-  VariableScope, so Keras users should use `keras_spectral_normalization`
-  instead of (or in addition to) this approach.
-
-  It is important to carefully select to which weights you want to apply
-  Spectral Normalization. In general you want to normalize the kernels of
-  convolution and dense layers, but you do not want to normalize biases. You
-  also want to avoid normalizing batch normalization (and similar) variables,
-  but in general such layers play poorly with Spectral Normalization, since the
-  gamma can cancel out the normalization in other layers. By default we supply a
-  filter that matches the kernel variable names of the dense and convolution
-  layers of the tf.layers, tf.contrib.layers, tf.keras and tf.contrib.slim
-  libraries. If you are using anything else you'll need a custom `name_filter`.
-
-  This custom getter internally creates a variable used to compute the spectral
-  norm by power iteration. It will update every time the variable is accessed,
-  which means the normalized discriminator weights may change slightly whilst
-  training the generator. Whilst unusual, this matches how the paper's authors
-  implement it, and in general additional rounds of power iteration can't hurt.
-
-  Args:
-    name_filter: Optionally, a method that takes a Variable name as input and
-      returns whether this Variable should be normalized.
-    power_iteration_rounds: The number of iterations of the power method to
-      perform per step. A higher number yields a better approximation of the
-      true spectral norm.
-
-  Returns:
-    A custom getter function that applies Spectral Normalization to all
-    Variables whose names match `name_filter`.
-
-  Raises:
-    ValueError: If name_filter is not callable.
-  """
-  if not callable(name_filter):
-    raise ValueError('name_filter must be callable')
-
-  def _internal_getter(getter, name, *args, **kwargs):
-    """A custom getter function that applies Spectral Normalization.
+    """A filter function to identify common names of weight variables.
 
     Args:
-      getter: The true getter to call.
-      name: Name of new/existing variable, in the same format as
-        tf.get_variable.
-      *args: Other positional arguments, in the same format as tf.get_variable.
-      **kwargs: Keyword arguments, in the same format as tf.get_variable.
+      name: The variable name.
 
     Returns:
-      The return value of `getter(name, *args, **kwargs)`, spectrally
-      normalized.
+      Whether `name` is a standard name for weight/kernel variables used in the
+      Keras, tf.layers, tf.contrib.layers or tf.contrib.slim libraries.
+    """
+    match = re.match(r"(.*\/)?(depthwise_|pointwise_)?(weights|kernel)$", name)
+    return match is not None
+
+
+def spectral_normalization_custom_getter(
+    name_filter=_default_name_filter, power_iteration_rounds=1
+):
+    """Custom getter that performs Spectral Normalization on a weight tensor.
+
+    Specifically it divides the weight tensor by its largest singular value. This
+    is intended to stabilize GAN training, by making the discriminator satisfy a
+    local 1-Lipschitz constraint.
+
+    Based on [Spectral Normalization for Generative Adversarial Networks][sn-gan].
+
+    [sn-gan]: https://openreview.net/forum?id=B1QRgziT-
+
+    To reproduce an SN-GAN, apply this custom_getter to every weight tensor of
+    your discriminator. The last dimension of the weight tensor must be the number
+    of output channels.
+
+    Apply this to layers by supplying this as the `custom_getter` of a
+    `tf.compat.v1.variable_scope`. For example:
+
+      with tf.compat.v1.variable_scope('discriminator',
+                             custom_getter=spectral_norm_getter()):
+        net = discriminator_fn(net)
+
+    IMPORTANT: Keras does not respect the custom_getter supplied by the
+    VariableScope, so Keras users should use `keras_spectral_normalization`
+    instead of (or in addition to) this approach.
+
+    It is important to carefully select to which weights you want to apply
+    Spectral Normalization. In general you want to normalize the kernels of
+    convolution and dense layers, but you do not want to normalize biases. You
+    also want to avoid normalizing batch normalization (and similar) variables,
+    but in general such layers play poorly with Spectral Normalization, since the
+    gamma can cancel out the normalization in other layers. By default we supply a
+    filter that matches the kernel variable names of the dense and convolution
+    layers of the tf.layers, tf.contrib.layers, tf.keras and tf.contrib.slim
+    libraries. If you are using anything else you'll need a custom `name_filter`.
+
+    This custom getter internally creates a variable used to compute the spectral
+    norm by power iteration. It will update every time the variable is accessed,
+    which means the normalized discriminator weights may change slightly whilst
+    training the generator. Whilst unusual, this matches how the paper's authors
+    implement it, and in general additional rounds of power iteration can't hurt.
+
+    Args:
+      name_filter: Optionally, a method that takes a Variable name as input and
+        returns whether this Variable should be normalized.
+      power_iteration_rounds: The number of iterations of the power method to
+        perform per step. A higher number yields a better approximation of the
+        true spectral norm.
+
+    Returns:
+      A custom getter function that applies Spectral Normalization to all
+      Variables whose names match `name_filter`.
 
     Raises:
-      ValueError: If used incorrectly, or if `dtype` is not supported.
+      ValueError: If name_filter is not callable.
     """
-    if not name_filter(name):
-      return getter(name, *args, **kwargs)
+    if not callable(name_filter):
+        raise ValueError("name_filter must be callable")
 
-    if name.endswith(_PERSISTED_U_VARIABLE_SUFFIX):
-      raise ValueError(
-          'Cannot apply Spectral Normalization to internal variables created '
-          'for Spectral Normalization. Tried to normalized variable [%s]' %
-          name)
+    def _internal_getter(getter, name, *args, **kwargs):
+        """A custom getter function that applies Spectral Normalization.
 
-    if kwargs['dtype'] not in _OK_DTYPES_FOR_SPECTRAL_NORM:
-      raise ValueError('Disallowed data type {}'.format(kwargs['dtype']))
+        Args:
+          getter: The true getter to call.
+          name: Name of new/existing variable, in the same format as
+            tf.get_variable.
+          *args: Other positional arguments, in the same format as tf.get_variable.
+          **kwargs: Keyword arguments, in the same format as tf.get_variable.
 
-    # This layer's weight Variable/PartitionedVariable.
-    w_tensor = getter(name, *args, **kwargs)
+        Returns:
+          The return value of `getter(name, *args, **kwargs)`, spectrally
+          normalized.
 
-    if len(w_tensor.get_shape()) < 2:
-      raise ValueError(
-          'Spectral norm can only be applied to multi-dimensional tensors')
+        Raises:
+          ValueError: If used incorrectly, or if `dtype` is not supported.
+        """
+        if not name_filter(name):
+            return getter(name, *args, **kwargs)
 
-    return spectral_normalize(
-        w_tensor,
-        power_iteration_rounds=power_iteration_rounds,
-        name=(name + '/spectral_normalize'))
+        if name.endswith(_PERSISTED_U_VARIABLE_SUFFIX):
+            raise ValueError(
+                "Cannot apply Spectral Normalization to internal variables created "
+                "for Spectral Normalization. Tried to normalize variable [%s]" % name
+            )
 
-  return _internal_getter
+        if kwargs["dtype"] not in _OK_DTYPES_FOR_SPECTRAL_NORM:
+            raise ValueError("Disallowed data type {}".format(kwargs["dtype"]))
 
+        # This layer's weight Variable/PartitionedVariable.
+        w_tensor = getter(name, *args, **kwargs)
 
-@contextlib.contextmanager
-def keras_spectral_normalization(name_filter=_default_name_filter,
-                                 power_iteration_rounds=1):
-  """A context manager that enables Spectral Normalization for Keras.
+        if len(w_tensor.get_shape()) < 2:
+            raise ValueError(
+                "Spectral norm can only be applied to multi-dimensional tensors"
+            )
 
-  Keras doesn't respect the `custom_getter` in the VariableScope, so this is a
-  bit of a hack to make things work.
+        return spectral_normalize(
+            w_tensor,
+            power_iteration_rounds=power_iteration_rounds,
+            name=(name + "/spectral_normalize"),
+        )
 
-  Usage:
-    with keras_spectral_normalization():
-      net = discriminator_fn(net)
+    return _internal_getter
 
-  Args:
-    name_filter: Optionally, a method that takes a Variable name as input and
-      returns whether this Variable should be normalized.
-    power_iteration_rounds: The number of iterations of the power method to
-      perform per step. A higher number yields a better approximation of the
-      true spectral norm.
 
-  Yields:
-    A context manager that wraps the standard Keras variable creation method
-    with the `spectral_normalization_custom_getter`.
-  """
-  original_make_variable = keras_base_layer_utils.make_variable
-  sn_getter = spectral_normalization_custom_getter(
-      name_filter=name_filter, power_iteration_rounds=power_iteration_rounds)
+@contextlib.contextmanager
+def keras_spectral_normalization(
+    name_filter=_default_name_filter, power_iteration_rounds=1
+):
+    """A context manager that enables Spectral Normalization for Keras.
+
+    Keras doesn't respect the `custom_getter` in the VariableScope, so this is a
+    bit of a hack to make things work.
+
+    Usage:
+      with keras_spectral_normalization():
+        net = discriminator_fn(net)
+
+    Args:
+      name_filter: Optionally, a method that takes a Variable name as input and
+        returns whether this Variable should be normalized.
+      power_iteration_rounds: The number of iterations of the power method to
+        perform per step. A higher number yields a better approximation of the
+        true spectral norm.
+
+    Yields:
+      A context manager that wraps the standard Keras variable creation method
+      with the `spectral_normalization_custom_getter`.
+    """
+    original_make_variable = keras_base_layer_utils.make_variable
+    sn_getter = spectral_normalization_custom_getter(
+        name_filter=name_filter, power_iteration_rounds=power_iteration_rounds
+    )
 
-  def make_variable_wrapper(name, *args, **kwargs):
-    return sn_getter(original_make_variable, name, *args, **kwargs)
+    def make_variable_wrapper(name, *args, **kwargs):
+        return sn_getter(original_make_variable, name, *args, **kwargs)
 
-  keras_base_layer_utils.make_variable = make_variable_wrapper
+    keras_base_layer_utils.make_variable = make_variable_wrapper
 
-  yield
+    yield
 
-  keras_base_layer_utils.make_variable = original_make_variable
+    keras_base_layer_utils.make_variable = original_make_variable
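
The power-iteration estimate in `compute_spectral_norm` can be checked against a plain SVD; a minimal NumPy sketch (one `u` vector as above, `rounds >= 1` required since `v` is set inside the loop):

```python
import numpy as np

def spectral_norm_np(w, rounds=1, rng=np.random.default_rng(0)):
    # Flatten to (everything_else, out_channels), matching the reshape above.
    w2d = w.reshape(-1, w.shape[-1])
    u = rng.standard_normal((w2d.shape[0], 1))
    for _ in range(rounds):
        v = w2d.T @ u
        v /= np.linalg.norm(v)
        u = w2d @ v
        u /= np.linalg.norm(u)
    # u^T W v approximates the largest singular value of W.
    return (u.T @ w2d @ v).item()

w = np.random.randn(3, 3, 16, 32)  # a Conv2D-style (KH, KW, C_in, C_out) kernel
print(spectral_norm_np(w, rounds=50))              # power-iteration estimate
print(np.linalg.svd(w.reshape(-1, 32))[1][0])      # exact value for comparison
```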
diff --git a/bob/learn/tensorflow/image/__init__.py b/bob/learn/tensorflow/image/__init__.py
index 9a8962844d659e6b1e13f6eeeb5e1d11dc698e8d..02ce0f96ec08647cdb3effcdc6b699f8a3260752 100644
--- a/bob/learn/tensorflow/image/__init__.py
+++ b/bob/learn/tensorflow/image/__init__.py
@@ -4,13 +4,13 @@ from .filter import gaussian_kernel, GaussianFilter
 def __appropriate__(*args):
     """Says object was actually declared here, an not on the import module.
 
-  Parameters:
+    Parameters:
 
-    *args: An iterable of objects to modify
+      *args: An iterable of objects to modify
 
-  Resolves `Sphinx referencing issues
-  <https://github.com/sphinx-doc/sphinx/issues/3048>`
-  """
+    Resolves `Sphinx referencing issues
+    <https://github.com/sphinx-doc/sphinx/issues/3048>`
+    """
     for obj in args:
         obj.__module__ = __name__
 
diff --git a/bob/learn/tensorflow/loss/BaseLoss.py b/bob/learn/tensorflow/loss/BaseLoss.py
index 161f653ab290e4dfe505ad0b569f5cc2176dceed..8380ec17efee5fe8c705055accd1642b05fc3f9f 100644
--- a/bob/learn/tensorflow/loss/BaseLoss.py
+++ b/bob/learn/tensorflow/loss/BaseLoss.py
@@ -4,6 +4,7 @@
 
 import logging
 import tensorflow as tf
+
 logger = logging.getLogger(__name__)
 
 
@@ -39,12 +40,9 @@ logger = logging.getLogger(__name__)
 #             return cross_loss
 
 
-def mean_cross_entropy_center_loss(logits,
-                                   prelogits,
-                                   labels,
-                                   n_classes,
-                                   alpha=0.9,
-                                   factor=0.01):
+def mean_cross_entropy_center_loss(
+    logits, prelogits, labels, n_classes, alpha=0.9, factor=0.01
+):
     """
     Implementation of the CrossEntropy + Center Loss from the paper
     "A Discriminative Feature Learning Approach for Deep Face Recognition"(http://ydwen.github.io/papers/WenECCV16.pdf)
@@ -59,44 +57,49 @@ def mean_cross_entropy_center_loss(logits,
 
     """
     # Cross entropy
-    with tf.compat.v1.variable_scope('cross_entropy_loss'):
+    with tf.compat.v1.variable_scope("cross_entropy_loss"):
         cross_loss = tf.reduce_mean(
             input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
-                logits=logits, labels=labels),
-            name="cross_entropy_loss")
+                logits=logits, labels=labels
+            ),
+            name="cross_entropy_loss",
+        )
         tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, cross_loss)
-        tf.compat.v1.summary.scalar('loss_cross_entropy', cross_loss)
+        tf.compat.v1.summary.scalar("loss_cross_entropy", cross_loss)
 
     # Appending center loss
-    with tf.compat.v1.variable_scope('center_loss'):
+    with tf.compat.v1.variable_scope("center_loss"):
         n_features = prelogits.get_shape()[1]
 
         centers = tf.compat.v1.get_variable(
-            'centers', [n_classes, n_features],
+            "centers",
+            [n_classes, n_features],
             dtype=tf.float32,
             initializer=tf.compat.v1.constant_initializer(0),
-            trainable=False)
+            trainable=False,
+        )
 
         # label = tf.reshape(labels, [-1])
         centers_batch = tf.gather(centers, labels)
         diff = (1 - alpha) * (centers_batch - prelogits)
         centers = tf.compat.v1.scatter_sub(centers, labels, diff)
         center_loss = tf.reduce_mean(input_tensor=tf.square(prelogits - centers_batch))
-        tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES,
-                             center_loss * factor)
-        tf.compat.v1.summary.scalar('loss_center', center_loss)
+        tf.compat.v1.add_to_collection(
+            tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, center_loss * factor
+        )
+        tf.compat.v1.summary.scalar("loss_center", center_loss)
 
     # Adding the regularizers in the loss
-    with tf.compat.v1.variable_scope('total_loss'):
+    with tf.compat.v1.variable_scope("total_loss"):
         regularization_losses = tf.compat.v1.get_collection(
-            tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
-        total_loss = tf.add_n(
-            [cross_loss] + regularization_losses, name="total_loss")
+            tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
+        )
+        total_loss = tf.add_n([cross_loss] + regularization_losses, name="total_loss")
         tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, total_loss)
-        tf.compat.v1.summary.scalar('loss_total', total_loss)
+        tf.compat.v1.summary.scalar("loss_total", total_loss)
 
     loss = dict()
-    loss['loss'] = total_loss
-    loss['centers'] = centers
+    loss["loss"] = total_loss
+    loss["centers"] = centers
 
     return loss
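
The center update performed via `tf.compat.v1.scatter_sub` above is an exponential-moving-average pull of each class center toward its samples; a NumPy sketch of one step:

```python
import numpy as np

def center_loss_step(prelogits, labels, centers, alpha=0.9):
    batch_centers = centers[labels]
    # scatter_sub accumulates over duplicate labels in a batch;
    # np.subtract.at reproduces that behavior.
    np.subtract.at(centers, labels, (1 - alpha) * (batch_centers - prelogits))
    # Penalize distance of each sample to its (pre-update) class center.
    return np.mean((prelogits - batch_centers) ** 2)
```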
diff --git a/bob/learn/tensorflow/loss/ContrastiveLoss.py b/bob/learn/tensorflow/loss/ContrastiveLoss.py
index d484b025bb7658403ae106df15444729016595cc..4e1a22eb1f38f10752302ed2594b8313eb5e64c6 100644
--- a/bob/learn/tensorflow/loss/ContrastiveLoss.py
+++ b/bob/learn/tensorflow/loss/ContrastiveLoss.py
@@ -47,16 +47,24 @@ def contrastive_loss(left_embedding, right_embedding, labels, contrastive_margin
         with tf.compat.v1.name_scope("within_class"):
             one = tf.constant(1.0)
             within_class = tf.multiply(one - labels, tf.square(d))  # (1-Y)*(d^2)
-            within_class_loss = tf.reduce_mean(input_tensor=within_class, name="within_class")
-            tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, within_class_loss)
+            within_class_loss = tf.reduce_mean(
+                input_tensor=within_class, name="within_class"
+            )
+            tf.compat.v1.add_to_collection(
+                tf.compat.v1.GraphKeys.LOSSES, within_class_loss
+            )
 
         with tf.compat.v1.name_scope("between_class"):
             max_part = tf.square(tf.maximum(contrastive_margin - d, 0))
             between_class = tf.multiply(
                 labels, max_part
             )  # (Y) * max((margin - d)^2, 0)
-            between_class_loss = tf.reduce_mean(input_tensor=between_class, name="between_class")
-            tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, between_class_loss)
+            between_class_loss = tf.reduce_mean(
+                input_tensor=between_class, name="between_class"
+            )
+            tf.compat.v1.add_to_collection(
+                tf.compat.v1.GraphKeys.LOSSES, between_class_loss
+            )
 
         with tf.compat.v1.name_scope("total_loss"):
             loss = 0.5 * (within_class + between_class)
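
In formula form, with `Y = 0` for genuine pairs and `Y = 1` for impostor pairs, this is `0.5 * ((1 - Y) * d^2 + Y * max(margin - d, 0)^2)`. A NumPy sketch, reduced to a batch mean here (the margin value is illustrative):

```python
import numpy as np

def contrastive_loss_np(left, right, labels, margin=2.0):
    d = np.linalg.norm(left - right, axis=1)
    within = (1 - labels) * d ** 2                     # pull genuine pairs together
    between = labels * np.maximum(margin - d, 0) ** 2  # push impostors past the margin
    return 0.5 * np.mean(within + between)
```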
diff --git a/bob/learn/tensorflow/loss/StyleLoss.py b/bob/learn/tensorflow/loss/StyleLoss.py
index 93d632777a04192a7f4e9697482c53b96019114b..75054a5aced8c124b60cfcb889ef4d5499f58c56 100644
--- a/bob/learn/tensorflow/loss/StyleLoss.py
+++ b/bob/learn/tensorflow/loss/StyleLoss.py
@@ -5,6 +5,7 @@
 import logging
 import tensorflow as tf
 import functools
+
 logger = logging.getLogger(__name__)
 
 
@@ -32,7 +33,7 @@ def content_loss(noises, content_features):
     """
 
     content_losses = []
-    for n,c in zip(noises, content_features):
+    for n, c in zip(noises, content_features):
         content_losses.append((2 * tf.nn.l2_loss(n - c) / c.size))
     return functools.reduce(tf.add, content_losses)
 
@@ -61,13 +62,12 @@ def linear_gram_style_loss(noises, gram_style_features):
     """
 
     style_losses = []
-    for n,s in zip(noises, gram_style_features):
+    for n, s in zip(noises, gram_style_features):
         style_losses.append((2 * tf.nn.l2_loss(n - s)) / s.size)
 
     return functools.reduce(tf.add, style_losses)
 
 
-
 def denoising_loss(noise):
     """
     Computes the denoising loss as in:
@@ -81,16 +81,25 @@ def denoising_loss(noise):
           Input noise
 
     """
+
     def _tensor_size(tensor):
         from operator import mul
+
         return functools.reduce(mul, (d.value for d in tensor.get_shape()), 1)
 
     shape = noise.get_shape().as_list()
 
-    noise_y_size = _tensor_size(noise[:,1:,:,:])
-    noise_x_size = _tensor_size(noise[:,:,1:,:])
-    denoise_loss = 2 * ( (tf.nn.l2_loss(noise[:,1:,:,:] - noise[:,:shape[1]-1,:,:]) / noise_y_size) +
-                    (tf.nn.l2_loss(noise[:,:,1:,:] - noise[:,:,:shape[2]-1,:]) / noise_x_size))
+    noise_y_size = _tensor_size(noise[:, 1:, :, :])
+    noise_x_size = _tensor_size(noise[:, :, 1:, :])
+    denoise_loss = 2 * (
+        (
+            tf.nn.l2_loss(noise[:, 1:, :, :] - noise[:, : shape[1] - 1, :, :])
+            / noise_y_size
+        )
+        + (
+            tf.nn.l2_loss(noise[:, :, 1:, :] - noise[:, :, : shape[2] - 1, :])
+            / noise_x_size
+        )
+    )
 
     return denoise_loss
-
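
Since `tf.nn.l2_loss(t)` is `sum(t**2) / 2`, the expression above reduces to an anisotropic total-variation penalty with each axis term normalized by its element count; a NumPy sketch:

```python
import numpy as np

def total_variation_np(noise):
    # noise: (batch, height, width, channels)
    dy = noise[:, 1:, :, :] - noise[:, :-1, :, :]  # vertical differences
    dx = noise[:, :, 1:, :] - noise[:, :, :-1, :]  # horizontal differences
    return (dy ** 2).sum() / dy.size + (dx ** 2).sum() / dx.size
```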
diff --git a/bob/learn/tensorflow/loss/TripletLoss.py b/bob/learn/tensorflow/loss/TripletLoss.py
index 14dc4e98e0e16db2f4d67d862df7bc89993dab39..fb4b469b821f7730296a13d4516e8b88c60191d9 100644
--- a/bob/learn/tensorflow/loss/TripletLoss.py
+++ b/bob/learn/tensorflow/loss/TripletLoss.py
@@ -3,16 +3,14 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 
 import logging
+
 logger = logging.getLogger(__name__)
 import tensorflow as tf
 
 from bob.learn.tensorflow.utils import compute_euclidean_distance
 
 
-def triplet_loss(anchor_embedding,
-                 positive_embedding,
-                 negative_embedding,
-                 margin=5.0):
+def triplet_loss(anchor_embedding, positive_embedding, negative_embedding, margin=5.0):
     """
     Compute the triplet loss as in
 
@@ -40,55 +38,70 @@ def triplet_loss(anchor_embedding,
 
     with tf.compat.v1.name_scope("triplet_loss"):
         # Normalize
-        anchor_embedding = tf.nn.l2_normalize(
-            anchor_embedding, 1, 1e-10, name="anchor")
+        anchor_embedding = tf.nn.l2_normalize(anchor_embedding, 1, 1e-10, name="anchor")
         positive_embedding = tf.nn.l2_normalize(
-            positive_embedding, 1, 1e-10, name="positive")
+            positive_embedding, 1, 1e-10, name="positive"
+        )
         negative_embedding = tf.nn.l2_normalize(
-            negative_embedding, 1, 1e-10, name="negative")
+            negative_embedding, 1, 1e-10, name="negative"
+        )
 
         d_positive = tf.reduce_sum(
-            input_tensor=tf.square(tf.subtract(anchor_embedding, positive_embedding)), axis=1)
+            input_tensor=tf.square(tf.subtract(anchor_embedding, positive_embedding)),
+            axis=1,
+        )
         d_negative = tf.reduce_sum(
-            input_tensor=tf.square(tf.subtract(anchor_embedding, negative_embedding)), axis=1)
+            input_tensor=tf.square(tf.subtract(anchor_embedding, negative_embedding)),
+            axis=1,
+        )
 
         basic_loss = tf.add(tf.subtract(d_positive, d_negative), margin)
 
         with tf.compat.v1.name_scope("TripletLoss"):
             # Between
             between_class_loss = tf.reduce_mean(input_tensor=d_negative)
-            tf.compat.v1.summary.scalar('loss_between_class', between_class_loss)
-            tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, between_class_loss)
+            tf.compat.v1.summary.scalar("loss_between_class", between_class_loss)
+            tf.compat.v1.add_to_collection(
+                tf.compat.v1.GraphKeys.LOSSES, between_class_loss
+            )
 
             # Within
             within_class_loss = tf.reduce_mean(input_tensor=d_positive)
-            tf.compat.v1.summary.scalar('loss_within_class', within_class_loss)
-            tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, within_class_loss)
+            tf.compat.v1.summary.scalar("loss_within_class", within_class_loss)
+            tf.compat.v1.add_to_collection(
+                tf.compat.v1.GraphKeys.LOSSES, within_class_loss
+            )
 
             # Total loss
             loss = tf.reduce_mean(
-                input_tensor=tf.maximum(basic_loss, 0.0), axis=0, name="total_loss")
+                input_tensor=tf.maximum(basic_loss, 0.0), axis=0, name="total_loss"
+            )
             tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, loss)
-            tf.compat.v1.summary.scalar('loss_triplet', loss)
+            tf.compat.v1.summary.scalar("loss_triplet", loss)
 
         return loss
 
 
-def triplet_fisher_loss(anchor_embedding, positive_embedding,
-                        negative_embedding):
+def triplet_fisher_loss(anchor_embedding, positive_embedding, negative_embedding):
 
     with tf.compat.v1.name_scope("triplet_loss"):
         # Normalize
-        anchor_embedding = tf.nn.l2_normalize(
-            anchor_embedding, 1, 1e-10, name="anchor")
+        anchor_embedding = tf.nn.l2_normalize(anchor_embedding, 1, 1e-10, name="anchor")
         positive_embedding = tf.nn.l2_normalize(
-            positive_embedding, 1, 1e-10, name="positive")
+            positive_embedding, 1, 1e-10, name="positive"
+        )
         negative_embedding = tf.nn.l2_normalize(
-            negative_embedding, 1, 1e-10, name="negative")
+            negative_embedding, 1, 1e-10, name="negative"
+        )
 
         average_class = tf.reduce_mean(input_tensor=anchor_embedding, axis=0)
-        average_total = tf.compat.v1.div(tf.add(tf.reduce_mean(input_tensor=anchor_embedding, axis=0),\
-                        tf.reduce_mean(input_tensor=negative_embedding, axis=0)), 2)
+        average_total = tf.compat.v1.div(
+            tf.add(
+                tf.reduce_mean(input_tensor=anchor_embedding, axis=0),
+                tf.reduce_mean(input_tensor=negative_embedding, axis=0),
+            ),
+            2,
+        )
 
         length = anchor_embedding.get_shape().as_list()[0]
         dim = anchor_embedding.get_shape().as_list()[1]
@@ -101,15 +114,11 @@ def triplet_fisher_loss(anchor_embedding, positive_embedding,
             positive = s[0]
             negative = s[1]
 
-            buffer_sw = tf.reshape(
-                tf.subtract(positive, average_class), shape=(dim, 1))
-            buffer_sw = tf.matmul(buffer_sw,
-                                  tf.reshape(buffer_sw, shape=(1, dim)))
+            buffer_sw = tf.reshape(tf.subtract(positive, average_class), shape=(dim, 1))
+            buffer_sw = tf.matmul(buffer_sw, tf.reshape(buffer_sw, shape=(1, dim)))
 
-            buffer_sb = tf.reshape(
-                tf.subtract(negative, average_total), shape=(dim, 1))
-            buffer_sb = tf.matmul(buffer_sb,
-                                  tf.reshape(buffer_sb, shape=(1, dim)))
+            buffer_sb = tf.reshape(tf.subtract(negative, average_total), shape=(dim, 1))
+            buffer_sb = tf.matmul(buffer_sb, tf.reshape(buffer_sb, shape=(1, dim)))
 
             if Sw is None:
                 Sw = buffer_sw
@@ -120,16 +129,17 @@ def triplet_fisher_loss(anchor_embedding, positive_embedding,
 
         # Sw = tf.trace(Sw)
         # Sb = tf.trace(Sb)
-        #loss = tf.trace(tf.div(Sb, Sw))
-        loss = tf.linalg.trace(tf.compat.v1.div(Sw, Sb), name=tf.compat.v1.GraphKeys.LOSSES)
+        # loss = tf.trace(tf.div(Sb, Sw))
+        loss = tf.linalg.trace(
+            tf.compat.v1.div(Sw, Sb), name=tf.compat.v1.GraphKeys.LOSSES
+        )
 
         return loss, tf.linalg.trace(Sb), tf.linalg.trace(Sw)
 
 
-def triplet_average_loss(anchor_embedding,
-                         positive_embedding,
-                         negative_embedding,
-                         margin=5.0):
+def triplet_average_loss(
+    anchor_embedding, positive_embedding, negative_embedding, margin=5.0
+):
     """
     Compute the triplet loss as in
 
@@ -157,24 +167,32 @@ def triplet_average_loss(anchor_embedding,
 
     with tf.compat.v1.name_scope("triplet_loss"):
         # Normalize
-        anchor_embedding = tf.nn.l2_normalize(
-            anchor_embedding, 1, 1e-10, name="anchor")
+        anchor_embedding = tf.nn.l2_normalize(anchor_embedding, 1, 1e-10, name="anchor")
         positive_embedding = tf.nn.l2_normalize(
-            positive_embedding, 1, 1e-10, name="positive")
+            positive_embedding, 1, 1e-10, name="positive"
+        )
         negative_embedding = tf.nn.l2_normalize(
-            negative_embedding, 1, 1e-10, name="negative")
+            negative_embedding, 1, 1e-10, name="negative"
+        )
 
         anchor_mean = tf.reduce_mean(input_tensor=anchor_embedding, axis=0)
 
         d_positive = tf.reduce_sum(
-            input_tensor=tf.square(tf.subtract(anchor_mean, positive_embedding)), axis=1)
+            input_tensor=tf.square(tf.subtract(anchor_mean, positive_embedding)), axis=1
+        )
         d_negative = tf.reduce_sum(
-            input_tensor=tf.square(tf.subtract(anchor_mean, negative_embedding)), axis=1)
+            input_tensor=tf.square(tf.subtract(anchor_mean, negative_embedding)), axis=1
+        )
 
         basic_loss = tf.add(tf.subtract(d_positive, d_negative), margin)
         loss = tf.reduce_mean(
-            input_tensor=tf.maximum(basic_loss, 0.0), axis=0, name=tf.compat.v1.GraphKeys.LOSSES)
-
-        return loss, tf.reduce_mean(input_tensor=d_negative), tf.reduce_mean(input_tensor=d_positive)
-
-
+            input_tensor=tf.maximum(basic_loss, 0.0),
+            axis=0,
+            name=tf.compat.v1.GraphKeys.LOSSES,
+        )
+
+        return (
+            loss,
+            tf.reduce_mean(input_tensor=d_negative),
+            tf.reduce_mean(input_tensor=d_positive),
+        )
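
A NumPy sketch of `triplet_loss` as reformatted above: L2-normalize each embedding, then hinge on the gap between positive and negative squared distances (the epsilon handling differs slightly from `tf.nn.l2_normalize`):

```python
import numpy as np

def triplet_loss_np(anchor, positive, negative, margin=5.0):
    def l2n(x):
        return x / np.maximum(np.linalg.norm(x, axis=1, keepdims=True), 1e-10)
    a, p, n = l2n(anchor), l2n(positive), l2n(negative)
    d_pos = ((a - p) ** 2).sum(axis=1)  # anchor-positive squared distance
    d_neg = ((a - n) ** 2).sum(axis=1)  # anchor-negative squared distance
    return np.maximum(d_pos - d_neg + margin, 0.0).mean()
```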
diff --git a/bob/learn/tensorflow/loss/__init__.py b/bob/learn/tensorflow/loss/__init__.py
index 6d93dbe48bcc959b19fac29088a8e81f43e3f875..215237f0145e141c39c5ae84e09f5ea6a0cc5031 100644
--- a/bob/learn/tensorflow/loss/__init__.py
+++ b/bob/learn/tensorflow/loss/__init__.py
@@ -14,13 +14,13 @@ from .utils import *
 def __appropriate__(*args):
     """Says object was actually declared here, an not on the import module.
 
-  Parameters:
+    Parameters:
 
-    *args: An iterable of objects to modify
+      *args: An iterable of objects to modify
 
-  Resolves `Sphinx referencing issues
-  <https://github.com/sphinx-doc/sphinx/issues/3048>`
-  """
+    Resolves `Sphinx referencing issues
+    <https://github.com/sphinx-doc/sphinx/issues/3048>`
+    """
 
     for obj in args:
         obj.__module__ = __name__
diff --git a/bob/learn/tensorflow/loss/center_loss.py b/bob/learn/tensorflow/loss/center_loss.py
index 553f01e95926e28a9c44935ee56200eaa0a915d9..d419a20f6ddb999878d479c744eea30cfc854fc1 100644
--- a/bob/learn/tensorflow/loss/center_loss.py
+++ b/bob/learn/tensorflow/loss/center_loss.py
@@ -18,7 +18,7 @@ class CenterLoss:
                 "centers",
                 [n_classes, n_features],
                 dtype=tf.float32,
-                initializer=tf.compat.v1.constant_initializer(0.),
+                initializer=tf.compat.v1.constant_initializer(0.0),
                 trainable=False,
             )
 
@@ -26,8 +26,12 @@ class CenterLoss:
         with tf.compat.v1.name_scope(self.name):
             centers_batch = tf.gather(self.centers, sparse_labels)
             diff = (1 - self.alpha) * (centers_batch - prelogits)
-            self.centers_update_op = tf.compat.v1.scatter_sub(self.centers, sparse_labels, diff)
-            center_loss = tf.reduce_mean(input_tensor=tf.square(prelogits - centers_batch))
+            self.centers_update_op = tf.compat.v1.scatter_sub(
+                self.centers, sparse_labels, diff
+            )
+            center_loss = tf.reduce_mean(
+                input_tensor=tf.square(prelogits - centers_batch)
+            )
         tf.compat.v1.summary.scalar("loss_center", center_loss)
         # Add histogram for all centers
         for i in range(self.n_classes):
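
CenterLoss keeps a non-trainable (n_classes, n_features) matrix of per-class centers. Each step gathers the centers for the batch labels, takes the mean squared distance to the prelogits as the loss, and moves those centers toward the features by a factor (1 - alpha) through scatter_sub, which accumulates over repeated labels. A NumPy sketch of one step, assuming an alpha in (0, 1); np.subtract.at reproduces the accumulating scatter:

    import numpy as np

    def center_loss_step(centers, prelogits, labels, alpha=0.9):
        centers_batch = centers[labels]                   # gather, like tf.gather
        loss = np.mean((prelogits - centers_batch) ** 2)  # squared distance to centers
        diff = (1 - alpha) * (centers_batch - prelogits)
        np.subtract.at(centers, labels, diff)             # in place, like scatter_sub
        return loss

    centers = np.zeros((10, 4))
    rng = np.random.default_rng(0)
    x = rng.normal(size=(8, 4))
    y = rng.integers(0, 10, size=8)
    print(center_loss_step(centers, x, y))
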
diff --git a/bob/learn/tensorflow/loss/mmd.py b/bob/learn/tensorflow/loss/mmd.py
index 2a0efff5fcb68cbda78d8c5e8b1a6d5c4159bf0f..a48aaba224088d0af63a5c3b5555ed16cae826c7 100644
--- a/bob/learn/tensorflow/loss/mmd.py
+++ b/bob/learn/tensorflow/loss/mmd.py
@@ -2,8 +2,7 @@ import tensorflow as tf
 
 
 def compute_kernel(x, y):
-    """Gaussian kernel.
-    """
+    """Gaussian kernel."""
     x_size = tf.shape(input=x)[0]
     y_size = tf.shape(input=y)[0]
     dim = tf.shape(input=x)[1]
@@ -14,7 +13,8 @@ def compute_kernel(x, y):
         tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1])
     )
     return tf.exp(
-        -tf.reduce_mean(input_tensor=tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32)
+        -tf.reduce_mean(input_tensor=tf.square(tiled_x - tiled_y), axis=2)
+        / tf.cast(dim, tf.float32)
     )
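
compute_kernel tiles the two batches into all (x_i, y_j) pairs and applies a Gaussian kernel whose bandwidth scales with the feature dimension. The rest of the file is not shown in this hunk, but the standard biased MMD estimate built on such a kernel combines three kernel means; a hedged sketch using broadcasting instead of tiling (same result):

    import tensorflow as tf

    def gaussian_kernel(x, y):
        # k(x_i, y_j) = exp(-mean((x_i - y_j)^2) / dim), the quantity compute_kernel builds
        diff = tf.expand_dims(x, 1) - tf.expand_dims(y, 0)   # [nx, ny, dim]
        dim = tf.cast(tf.shape(x)[1], tf.float32)
        return tf.exp(-tf.reduce_mean(tf.square(diff), axis=2) / dim)

    def mmd_biased(x, y):
        # biased squared-MMD: E[k(x,x)] + E[k(y,y)] - 2 E[k(x,y)]
        return (
            tf.reduce_mean(gaussian_kernel(x, x))
            + tf.reduce_mean(gaussian_kernel(y, y))
            - 2.0 * tf.reduce_mean(gaussian_kernel(x, y))
        )

    x = tf.random.normal([32, 8])
    y = tf.random.normal([32, 8]) + 1.0
    print(float(mmd_biased(x, y)))   # clearly above zero for shifted distributions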
 
 
diff --git a/bob/learn/tensorflow/loss/pairwise_confusion.py b/bob/learn/tensorflow/loss/pairwise_confusion.py
index b4c319711733e0631cece2bf25a7e75dcbac687e..f5309bfeed2fc240f4f6733c588c7dca087a304c 100644
--- a/bob/learn/tensorflow/loss/pairwise_confusion.py
+++ b/bob/learn/tensorflow/loss/pairwise_confusion.py
@@ -1,11 +1,12 @@
 import tensorflow as tf
 from ..utils import pdist_safe, upper_triangle
 
+
 def total_pairwise_confusion(prelogits, name=None):
     """Total Pairwise Confusion Loss
 
-        [1]X. Tu et al., “Learning Generalizable and Identity-Discriminative
-        Representations for Face Anti-Spoofing,” arXiv preprint arXiv:1901.05602, 2019.
+    [1] X. Tu et al., “Learning Generalizable and Identity-Discriminative
+    Representations for Face Anti-Spoofing,” arXiv preprint arXiv:1901.05602, 2019.
     """
     # compute L2 norm between all prelogits and sum them.
     with tf.compat.v1.name_scope(name, default_name="total_pairwise_confusion"):
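
Only the docstring and the opening name_scope are visible here; pdist_safe and upper_triangle come from ..utils. The quantity the docstring describes is the average squared L2 distance over all distinct prelogit pairs, pushed down so the features carry less identity information. A NumPy sketch of that reduction (the package's exact normalization may differ):

    import numpy as np

    def total_pairwise_confusion(prelogits):
        i, j = np.triu_indices(len(prelogits), k=1)   # distinct pairs only
        diffs = prelogits[i] - prelogits[j]
        return np.mean(np.sum(diffs ** 2, axis=1))    # mean squared pairwise distance

    rng = np.random.default_rng(0)
    print(total_pairwise_confusion(rng.normal(size=(6, 4))))
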
diff --git a/bob/learn/tensorflow/loss/utils.py b/bob/learn/tensorflow/loss/utils.py
index aad477ed5747cfe102015e77742ce93220073cf4..013de65aac1136d903f6714ad0d2dfe5e7291f51 100644
--- a/bob/learn/tensorflow/loss/utils.py
+++ b/bob/learn/tensorflow/loss/utils.py
@@ -135,7 +135,7 @@ def balanced_sigmoid_cross_entropy_loss_weights(labels, dtype="float32"):
     >>> #weights = balanced_sigmoid_cross_entropy_loss_weights(labels, dtype=logits.dtype)
     >>> #loss = tf.losses.sigmoid_cross_entropy(logits=logits, labels=labels, weights=weights)
     """
-    labels = tf.cast(labels, dtype='int32')
+    labels = tf.cast(labels, dtype="int32")
     batch_size = tf.cast(tf.shape(input=labels)[0], dtype=dtype)
     weights = tf.cast(tf.reduce_sum(input_tensor=labels), dtype=dtype)
     weights = tf.convert_to_tensor(value=[batch_size - weights, weights])
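
The visible lines count the positives in a binary label vector and build the two-element tensor [n_negatives, n_positives]; the hunk ends before the final scaling, which presumably turns those counts into per-sample weights so that both classes contribute equally. A worked example with a hypothetical batch_size / (2 * count) scaling step:

    import numpy as np

    labels = np.array([0, 1, 0, 0])
    batch_size = labels.size                        # 4
    n_pos = labels.sum()                            # 1
    counts = np.array([batch_size - n_pos, n_pos])  # [3, 1], as in the hunk
    per_class = batch_size / (2.0 * counts)         # hypothetical balancing step
    sample_weights = per_class[labels]              # [0.67, 2.0, 0.67, 0.67]
    print(sample_weights)
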
diff --git a/bob/learn/tensorflow/loss/vat.py b/bob/learn/tensorflow/loss/vat.py
index d77ecf605bce000841734b2d5dae8e15da7febf4..e51c24f441a4ee7f5a24834ef7d48ae222ceb6b9 100644
--- a/bob/learn/tensorflow/loss/vat.py
+++ b/bob/learn/tensorflow/loss/vat.py
@@ -28,27 +28,44 @@ from functools import partial
 
 
 def get_normalized_vector(d):
-    d /= (1e-12 + tf.reduce_max(input_tensor=tf.abs(d), axis=list(range(1, len(d.get_shape()))), keepdims=True))
-    d /= tf.sqrt(1e-6 + tf.reduce_sum(input_tensor=tf.pow(d, 2.0), axis=list(range(1, len(d.get_shape()))), keepdims=True))
+    d /= 1e-12 + tf.reduce_max(
+        input_tensor=tf.abs(d), axis=list(range(1, len(d.get_shape()))), keepdims=True
+    )
+    d /= tf.sqrt(
+        1e-6
+        + tf.reduce_sum(
+            input_tensor=tf.pow(d, 2.0),
+            axis=list(range(1, len(d.get_shape()))),
+            keepdims=True,
+        )
+    )
     return d
 
 
 def logsoftmax(x):
     xdev = x - tf.reduce_max(input_tensor=x, axis=1, keepdims=True)
-    lsm = xdev - tf.math.log(tf.reduce_sum(input_tensor=tf.exp(xdev), axis=1, keepdims=True))
+    lsm = xdev - tf.math.log(
+        tf.reduce_sum(input_tensor=tf.exp(xdev), axis=1, keepdims=True)
+    )
     return lsm
 
 
 def kl_divergence_with_logit(q_logit, p_logit):
     q = tf.nn.softmax(q_logit)
-    qlogq = tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=q * logsoftmax(q_logit), axis=1))
-    qlogp = tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=q * logsoftmax(p_logit), axis=1))
+    qlogq = tf.reduce_mean(
+        input_tensor=tf.reduce_sum(input_tensor=q * logsoftmax(q_logit), axis=1)
+    )
+    qlogp = tf.reduce_mean(
+        input_tensor=tf.reduce_sum(input_tensor=q * logsoftmax(p_logit), axis=1)
+    )
     return qlogq - qlogp
 
 
 def entropy_y_x(logit):
     p = tf.nn.softmax(logit)
-    return -tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=p * logsoftmax(logit), axis=1))
+    return -tf.reduce_mean(
+        input_tensor=tf.reduce_sum(input_tensor=p * logsoftmax(logit), axis=1)
+    )
 
 
 class VATLoss:
@@ -68,7 +85,9 @@ class VATLoss:
         small constant for finite difference
     """
 
-    def __init__(self, epsilon=8.0, xi=1e-6, num_power_iterations=1, method='vatent', **kwargs):
+    def __init__(
+        self, epsilon=8.0, xi=1e-6, num_power_iterations=1, method="vatent", **kwargs
+    ):
         super(VATLoss, self).__init__(**kwargs)
         self.epsilon = epsilon
         self.xi = xi
@@ -104,15 +123,17 @@ class VATLoss:
             If self.method is not ``vat`` or ``vatent``.
         """
         if mode != tf.estimator.ModeKeys.TRAIN:
-            return 0.
+            return 0.0
         architecture = partial(architecture, reuse=True)
         with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(), reuse=True):
-            vat_loss = self.virtual_adversarial_loss(features, logits, architecture, mode)
+            vat_loss = self.virtual_adversarial_loss(
+                features, logits, architecture, mode
+            )
             tf.compat.v1.summary.scalar("loss_VAT", vat_loss)
             tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, vat_loss)
-            if self.method == 'vat':
+            if self.method == "vat":
                 loss = vat_loss
-            elif self.method == 'vatent':
+            elif self.method == "vatent":
                 ent_loss = entropy_y_x(logits)
                 tf.compat.v1.summary.scalar("loss_entropy", ent_loss)
                 tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, ent_loss)
@@ -121,8 +142,12 @@ class VATLoss:
                 raise ValueError
             return loss
 
-    def virtual_adversarial_loss(self, features, logits, architecture, mode, name="vat_loss_op"):
-        r_vadv = self.generate_virtual_adversarial_perturbation(features, logits, architecture, mode)
+    def virtual_adversarial_loss(
+        self, features, logits, architecture, mode, name="vat_loss_op"
+    ):
+        r_vadv = self.generate_virtual_adversarial_perturbation(
+            features, logits, architecture, mode
+        )
         logit_p = tf.stop_gradient(logits)
         adversarial_input = features + r_vadv
         tf.compat.v1.summary.image("Adversarial_Image", adversarial_input)
@@ -130,7 +155,9 @@ class VATLoss:
         loss = kl_divergence_with_logit(logit_p, logit_m)
         return tf.identity(loss, name=name)
 
-    def generate_virtual_adversarial_perturbation(self, features, logits, architecture, mode):
+    def generate_virtual_adversarial_perturbation(
+        self, features, logits, architecture, mode
+    ):
         d = tf.random.normal(shape=tf.shape(input=features))
 
         for _ in range(self.num_power_iterations):
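
The perturbation generator starts from Gaussian noise and refines it by power iteration: normalize d, nudge the input by xi * d, measure the KL divergence between the clean and nudged logits, and replace d with the gradient of that divergence. The loop body is cut off by the hunk; a hedged TF2-eager re-sketch of the whole procedure (the file itself is TF1/estimator style, and `model` here stands for any callable producing logits):

    import tensorflow as tf

    def normalize(d):
        flat = tf.reshape(d, [tf.shape(d)[0], -1])
        return tf.reshape(tf.math.l2_normalize(flat, axis=1), tf.shape(d))

    def kl_with_logits(q_logit, p_logit):
        q = tf.nn.softmax(q_logit)
        log_q = tf.nn.log_softmax(q_logit)
        log_p = tf.nn.log_softmax(p_logit)
        return tf.reduce_mean(tf.reduce_sum(q * (log_q - log_p), axis=1))

    def vat_perturbation(model, x, logits, xi=1e-6, epsilon=8.0, num_power_iterations=1):
        d = tf.random.normal(shape=tf.shape(x))
        for _ in range(num_power_iterations):
            d = xi * normalize(d)
            with tf.GradientTape() as tape:
                tape.watch(d)
                dist = kl_with_logits(tf.stop_gradient(logits), model(x + d))
            d = tape.gradient(dist, d)   # direction of steepest divergence growth
        return epsilon * normalize(d)
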
diff --git a/bob/learn/tensorflow/models/alexnet.py b/bob/learn/tensorflow/models/alexnet.py
index 202348034fdbbd299b1fb2f64c09e439e822d682..3428aa39635402f31b87b630c994603cd81bc8fd 100644
--- a/bob/learn/tensorflow/models/alexnet.py
+++ b/bob/learn/tensorflow/models/alexnet.py
@@ -9,13 +9,43 @@ def AlexNet_simplified(name="AlexNet", **kwargs):
     model = tf.keras.Sequential(
         [
             tf.keras.Input(shape=(227, 227, 3)),
-            tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, name="C1", activation="relu"),
+            tf.keras.layers.Conv2D(
+                filters=96, kernel_size=11, strides=4, name="C1", activation="relu"
+            ),
             tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P1"),
-            tf.keras.layers.Conv2D(filters=256, kernel_size=5, strides=1, name="C2", activation="relu", padding="same"),
+            tf.keras.layers.Conv2D(
+                filters=256,
+                kernel_size=5,
+                strides=1,
+                name="C2",
+                activation="relu",
+                padding="same",
+            ),
             tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P2"),
-            tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, name="C3", activation="relu", padding="same"),
-            tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, name="C4", activation="relu", padding="same"),
-            tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, name="C5", activation="relu", padding="same"),
+            tf.keras.layers.Conv2D(
+                filters=384,
+                kernel_size=3,
+                strides=1,
+                name="C3",
+                activation="relu",
+                padding="same",
+            ),
+            tf.keras.layers.Conv2D(
+                filters=384,
+                kernel_size=3,
+                strides=1,
+                name="C4",
+                activation="relu",
+                padding="same",
+            ),
+            tf.keras.layers.Conv2D(
+                filters=256,
+                kernel_size=3,
+                strides=1,
+                name="C5",
+                activation="relu",
+                padding="same",
+            ),
             tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name="P5"),
             tf.keras.layers.Flatten(name="FLATTEN"),
             tf.keras.layers.Dropout(rate=0.5, name="D6"),
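
The reformatted constructor fixes the input at 227x227x3, the classic AlexNet geometry; the tail of the Sequential is cut off by the hunk. A quick smoke test, assuming the module path shown in the diff header and an installed package:

    import numpy as np
    from bob.learn.tensorflow.models.alexnet import AlexNet_simplified

    model = AlexNet_simplified()
    model.summary()                                   # layer names C1..P5, FLATTEN, D6, ...
    x = np.random.rand(1, 227, 227, 3).astype("float32")
    print(model(x).shape)                             # forward pass with untrained weights
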
diff --git a/bob/learn/tensorflow/models/inception.py b/bob/learn/tensorflow/models/inception.py
index 3e25a59fa4ae9c6bb9d823ea7d6c20c311f37ca4..a4666b1b331fb8f23532b0d43342483437926175 100644
--- a/bob/learn/tensorflow/models/inception.py
+++ b/bob/learn/tensorflow/models/inception.py
@@ -2,8 +2,7 @@ import tensorflow as tf
 
 
 class LRN(tf.keras.layers.Lambda):
-    """local response normalization with default parameters for GoogLeNet
-    """
+    """local response normalization with default parameters for GoogLeNet"""
 
     def __init__(self, alpha=0.0001, beta=0.75, depth_radius=5, **kwargs):
         self.alpha = alpha
@@ -21,8 +20,8 @@ class LRN(tf.keras.layers.Lambda):
 class InceptionModule(tf.keras.Model):
     """The inception module as it was introduced in:
 
-        C. Szegedy et al., “Going deeper with convolutions,” in Proceedings of the IEEE
-        Conference on Computer Vision and Pattern Recognition, 2015, pp. 1–9.
+    C. Szegedy et al., “Going deeper with convolutions,” in Proceedings of the IEEE
+    Conference on Computer Vision and Pattern Recognition, 2015, pp. 1–9.
     """
 
     def __init__(
@@ -112,7 +111,9 @@ def GoogLeNet(*, num_classes=1000, name="GoogLeNet", **kwargs):
             ),
             tf.keras.layers.MaxPool2D(3, 2, padding="same", name="pool1/3x3_s2"),
             LRN(name="pool1/norm1"),
-            tf.keras.layers.Conv2D(64, 1, padding="same", activation="relu", name="conv2/3x3_reduce"),
+            tf.keras.layers.Conv2D(
+                64, 1, padding="same", activation="relu", name="conv2/3x3_reduce"
+            ),
             tf.keras.layers.Conv2D(
                 192, 3, padding="same", activation="relu", name="conv2/3x3"
             ),
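
The LRN layer is a Keras Lambda around TensorFlow's local response normalization with GoogLeNet's published constants (alpha=0.0001, beta=0.75, depth_radius=5). The equivalent raw op call, with bias left at TF's default of 1:

    import tensorflow as tf

    x = tf.random.normal([1, 56, 56, 64])
    y = tf.nn.local_response_normalization(
        x, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75
    )
    print(y.shape)   # same shape; each activation divided by a local channel-energy term
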
diff --git a/bob/learn/tensorflow/models/inception_resnet_v2.py b/bob/learn/tensorflow/models/inception_resnet_v2.py
index e5711e27ff6e075b271e7f97d7239ea75ac4809b..8f15d0b2cb089c721856a9657f45bf56301187e7 100644
--- a/bob/learn/tensorflow/models/inception_resnet_v2.py
+++ b/bob/learn/tensorflow/models/inception_resnet_v2.py
@@ -98,6 +98,7 @@ class Conv2D_BN(tf.keras.Sequential):
 
 class ScaledResidual(tf.keras.Model):
     """A scaled residual connection layer"""
+
     def __init__(self, scale, name="scaled_residual", **kwargs):
         super().__init__(name=name, **kwargs)
         self.scale = scale
@@ -174,22 +175,14 @@ class InceptionResnetBlock(tf.keras.Model):
         elif block_type == "block17":
             branch_0 = [Conv2D_BN(192 // n, 1, name="branch0_conv1")]
             branch_1 = [Conv2D_BN(128 // n, 1, name="branch1_conv1")]
-            branch_1 += [
-                Conv2D_BN(160 // n, (1, 7), name="branch1_conv2")
-            ]
-            branch_1 += [
-                Conv2D_BN(192 // n, (7, 1), name="branch1_conv3")
-            ]
+            branch_1 += [Conv2D_BN(160 // n, (1, 7), name="branch1_conv2")]
+            branch_1 += [Conv2D_BN(192 // n, (7, 1), name="branch1_conv3")]
             branches = [branch_0, branch_1]
         elif block_type == "block8":
             branch_0 = [Conv2D_BN(192 // n, 1, name="branch0_conv1")]
             branch_1 = [Conv2D_BN(192 // n, 1, name="branch1_conv1")]
-            branch_1 += [
-                Conv2D_BN(224 // n, (1, 3), name="branch1_conv2")
-            ]
-            branch_1 += [
-                Conv2D_BN(256 // n, (3, 1), name="branch1_conv3")
-            ]
+            branch_1 += [Conv2D_BN(224 // n, (1, 3), name="branch1_conv2")]
+            branch_1 += [Conv2D_BN(256 // n, (3, 1), name="branch1_conv3")]
             branches = [branch_0, branch_1]
         else:
             raise ValueError(
@@ -335,31 +328,21 @@ class ReductionB(tf.keras.Model):
 
         branch_1 = [
             Conv2D_BN(n, 1, name="branch1_conv1"),
-            Conv2D_BN(
-                no, 3, strides=2, padding=padding, name="branch1_conv2"
-            ),
+            Conv2D_BN(no, 3, strides=2, padding=padding, name="branch1_conv2"),
         ]
 
         branch_2 = [
             Conv2D_BN(p, 1, name="branch2_conv1"),
-            Conv2D_BN(
-                pq, 3, strides=2, padding=padding, name="branch2_conv2"
-            ),
+            Conv2D_BN(pq, 3, strides=2, padding=padding, name="branch2_conv2"),
         ]
 
         branch_3 = [
             Conv2D_BN(k, 1, name="branch3_conv1"),
             Conv2D_BN(kl, 3, name="branch3_conv2"),
-            Conv2D_BN(
-                km, 3, strides=2, padding=padding, name="branch3_conv3"
-            ),
+            Conv2D_BN(km, 3, strides=2, padding=padding, name="branch3_conv3"),
         ]
 
-        branch_pool = [
-            MaxPool2D(
-                3, strides=2, padding=padding, name=f"branch4_pool1"
-            )
-        ]
+        branch_pool = [MaxPool2D(3, strides=2, padding=padding, name="branch4_pool1")]
         self.branches = [branch_1, branch_2, branch_3, branch_pool]
         channel_axis = 1 if K.image_data_format() == "channels_first" else 3
         self.concat = Concatenate(axis=channel_axis, name=f"{name}/mixed")
@@ -383,17 +366,33 @@ class InceptionA(tf.keras.Model):
         super().__init__(name=name, **kwargs)
         self.pool_filters = pool_filters
 
-        self.branch1x1 = Conv2D_BN(96, kernel_size=1, padding="same", name="branch1_conv1")
+        self.branch1x1 = Conv2D_BN(
+            96, kernel_size=1, padding="same", name="branch1_conv1"
+        )
 
-        self.branch3x3dbl_1 = Conv2D_BN(64, kernel_size=1, padding="same", name="branch2_conv1")
-        self.branch3x3dbl_2 = Conv2D_BN(96, kernel_size=3, padding="same", name="branch2_conv2")
-        self.branch3x3dbl_3 = Conv2D_BN(96, kernel_size=3, padding="same", name="branch2_conv3")
+        self.branch3x3dbl_1 = Conv2D_BN(
+            64, kernel_size=1, padding="same", name="branch2_conv1"
+        )
+        self.branch3x3dbl_2 = Conv2D_BN(
+            96, kernel_size=3, padding="same", name="branch2_conv2"
+        )
+        self.branch3x3dbl_3 = Conv2D_BN(
+            96, kernel_size=3, padding="same", name="branch2_conv3"
+        )
 
-        self.branch5x5_1 = Conv2D_BN(48, kernel_size=1, padding="same", name="branch3_conv1")
-        self.branch5x5_2 = Conv2D_BN(64, kernel_size=5, padding="same", name="branch3_conv2")
+        self.branch5x5_1 = Conv2D_BN(
+            48, kernel_size=1, padding="same", name="branch3_conv1"
+        )
+        self.branch5x5_2 = Conv2D_BN(
+            64, kernel_size=5, padding="same", name="branch3_conv2"
+        )
 
-        self.branch_pool_1 = AvgPool2D(pool_size=3, strides=1, padding="same", name="branch4_pool1")
-        self.branch_pool_2 = Conv2D_BN(pool_filters, kernel_size=1, padding="same", name="branch4_conv1")
+        self.branch_pool_1 = AvgPool2D(
+            pool_size=3, strides=1, padding="same", name="branch4_pool1"
+        )
+        self.branch_pool_2 = Conv2D_BN(
+            pool_filters, kernel_size=1, padding="same", name="branch4_conv1"
+        )
 
         channel_axis = 1 if K.image_data_format() == "channels_first" else 3
         self.concat = Concatenate(axis=channel_axis)
@@ -495,7 +494,10 @@ def InceptionResNetV2(
     # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
     for block_idx in range(1, 11):
         x = InceptionResnetBlock(
-            n_channels=320, scale=0.17, block_type="block35", block_idx=block_idx,
+            n_channels=320,
+            scale=0.17,
+            block_type="block35",
+            block_idx=block_idx,
             name=f"block35_{block_idx}",
         )(x)
 
@@ -505,7 +507,10 @@ def InceptionResNetV2(
     # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
     for block_idx in range(1, 21):
         x = InceptionResnetBlock(
-            n_channels=1088, scale=0.1, block_type="block17", block_idx=block_idx,
+            n_channels=1088,
+            scale=0.1,
+            block_type="block17",
+            block_idx=block_idx,
             name=f"block17_{block_idx}",
         )(x)
 
@@ -517,11 +522,18 @@ def InceptionResNetV2(
     # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
     for block_idx in range(1, 10):
         x = InceptionResnetBlock(
-            n_channels=2080, scale=0.2, block_type="block8", block_idx=block_idx,
+            n_channels=2080,
+            scale=0.2,
+            block_type="block8",
+            block_idx=block_idx,
             name=f"block8_{block_idx}",
         )(x)
     x = InceptionResnetBlock(
-        n_channels=2080, scale=1.0, activation=None, block_type="block8", block_idx=10,
+        n_channels=2080,
+        scale=1.0,
+        activation=None,
+        block_type="block8",
+        block_idx=10,
         name=f"block8_{block_idx+1}",
     )(x)
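
Two details worth noting in this file: ScaledResidual computes shortcut + scale * branch (the usual Inception-ResNet damping of the residual path), and the final `name=f"block8_{block_idx+1}"` deliberately reuses the loop variable, which is 9 after the range(1, 10) loop, so the last block is named block8_10. A toy re-sketch of the scaled residual:

    import tensorflow as tf

    class ScaledResidualSketch(tf.keras.layers.Layer):
        """Toy version of ScaledResidual: output = shortcut + scale * branch."""

        def __init__(self, scale, **kwargs):
            super().__init__(**kwargs)
            self.scale = scale

        def call(self, inputs):
            shortcut, branch = inputs
            return shortcut + self.scale * branch

    layer = ScaledResidualSketch(scale=0.17)
    x = tf.random.normal([2, 35, 35, 320])
    print(layer([x, x]).shape)   # (2, 35, 35, 320)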
 
diff --git a/bob/learn/tensorflow/models/mcae.py b/bob/learn/tensorflow/models/mcae.py
index 07cbb529be878551cfd2efcf2e438f802c52557f..e4241c409f747464e9f0ab8408acfe591658107c 100644
--- a/bob/learn/tensorflow/models/mcae.py
+++ b/bob/learn/tensorflow/models/mcae.py
@@ -67,7 +67,7 @@ class ConvDecoder(tf.keras.Model):
         name="Decoder",
         **kwargs,
     ):
-        super().__init__(name=name, ** kwargs)
+        super().__init__(name=name, **kwargs)
         self.data_format = data_format
         l2_kw = get_l2_kw(weight_decay)
         layers = []
diff --git a/bob/learn/tensorflow/models/mlp.py b/bob/learn/tensorflow/models/mlp.py
index 3804c4e3222b2b0616e59f243bf8498f4f8f151f..71076818853e976063e64be108a3f27e3bd1a3d8 100644
--- a/bob/learn/tensorflow/models/mlp.py
+++ b/bob/learn/tensorflow/models/mlp.py
@@ -22,7 +22,9 @@ class MLP(tf.keras.Model):
         for i, n in enumerate(hidden_layers, start=1):
             sequential_layers.extend(
                 [
-                    tf.keras.layers.Dense(n, use_bias=False, name=f"dense_{i}", **dense_kw),
+                    tf.keras.layers.Dense(
+                        n, use_bias=False, name=f"dense_{i}", **dense_kw
+                    ),
                     tf.keras.layers.BatchNormalization(scale=False, name=f"bn_{i}"),
                     tf.keras.layers.Activation("relu", name=f"relu_{i}"),
                 ]
@@ -77,7 +79,9 @@ class MLPDropout(tf.keras.Model):
         for i, n in enumerate(hidden_layers, start=1):
             sequential_layers.extend(
                 [
-                    tf.keras.layers.Dense(n, use_bias=False, name=f"dense_{i}", **dense_kw),
+                    tf.keras.layers.Dense(
+                        n, use_bias=False, name=f"dense_{i}", **dense_kw
+                    ),
                     tf.keras.layers.Activation("relu", name=f"relu_{i}"),
                     tf.keras.layers.Dropout(rate=drop_rate, name=f"drop_{i}"),
                 ]
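
In the MLP variant each Dense is built with use_bias=False because the BatchNormalization that follows already supplies a learned offset (beta), making a separate bias redundant; scale=False is likewise safe since the following ReLU and next linear layer can absorb the missing gamma. A minimal sketch of that block pattern:

    import tensorflow as tf

    block = tf.keras.Sequential([
        tf.keras.layers.Dense(128, use_bias=False),       # bias would be cancelled by BN
        tf.keras.layers.BatchNormalization(scale=False),  # beta acts as the bias
        tf.keras.layers.Activation("relu"),
    ])
    print(block(tf.zeros([2, 16])).shape)   # (2, 128)
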
diff --git a/bob/learn/tensorflow/models/msu_patch.py b/bob/learn/tensorflow/models/msu_patch.py
index af1ad1da9a287d3cd55b2e616385a0145acc50d5..3cd63ff95facae61a62b83bc7945a93726cad615 100644
--- a/bob/learn/tensorflow/models/msu_patch.py
+++ b/bob/learn/tensorflow/models/msu_patch.py
@@ -13,24 +13,39 @@ def MSUPatch(name="MSUPatch", **kwargs):
     return tf.keras.Sequential(
         [
             tf.keras.layers.Conv2D(
-                50, (5, 5), padding="same", use_bias=False, name="Conv-1", input_shape=(96, 96, 3)
+                50,
+                (5, 5),
+                padding="same",
+                use_bias=False,
+                name="Conv-1",
+                input_shape=(96, 96, 3),
             ),
             tf.keras.layers.BatchNormalization(scale=False, name="BN-1"),
             tf.keras.layers.Activation("relu", name="ReLU-1"),
             tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-1"),
-            tf.keras.layers.Conv2D(100, (3, 3), padding="same", use_bias=False, name="Conv-2"),
+            tf.keras.layers.Conv2D(
+                100, (3, 3), padding="same", use_bias=False, name="Conv-2"
+            ),
             tf.keras.layers.BatchNormalization(scale=False, name="BN-2"),
             tf.keras.layers.Activation("relu", name="ReLU-2"),
             tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-2"),
-            tf.keras.layers.Conv2D(150, (3, 3), padding="same", use_bias=False, name="Conv-3"),
+            tf.keras.layers.Conv2D(
+                150, (3, 3), padding="same", use_bias=False, name="Conv-3"
+            ),
             tf.keras.layers.BatchNormalization(scale=False, name="BN-3"),
             tf.keras.layers.Activation("relu", name="ReLU-3"),
-            tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding="same", name="MaxPool-3"),
-            tf.keras.layers.Conv2D(200, (3, 3), padding="same", use_bias=False, name="Conv-4"),
+            tf.keras.layers.MaxPool2D(
+                pool_size=3, strides=2, padding="same", name="MaxPool-3"
+            ),
+            tf.keras.layers.Conv2D(
+                200, (3, 3), padding="same", use_bias=False, name="Conv-4"
+            ),
             tf.keras.layers.BatchNormalization(scale=False, name="BN-4"),
             tf.keras.layers.Activation("relu", name="ReLU-4"),
             tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-4"),
-            tf.keras.layers.Conv2D(250, (3, 3), padding="same", use_bias=False, name="Conv-5"),
+            tf.keras.layers.Conv2D(
+                250, (3, 3), padding="same", use_bias=False, name="Conv-5"
+            ),
             tf.keras.layers.BatchNormalization(scale=False, name="BN-5"),
             tf.keras.layers.Activation("relu", name="ReLU-5"),
             tf.keras.layers.MaxPool2D(padding="same", name="MaxPool-5"),
diff --git a/bob/learn/tensorflow/script/__init__.py b/bob/learn/tensorflow/script/__init__.py
index b156cdd2398fd5af2b88db8279fd84c85b767b36..435eb712f6b140c26c5402f125616b558e7e223d 100644
--- a/bob/learn/tensorflow/script/__init__.py
+++ b/bob/learn/tensorflow/script/__init__.py
@@ -1,2 +1,2 @@
 # gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/learn/tensorflow/script/cache_dataset.py b/bob/learn/tensorflow/script/cache_dataset.py
index 3fe31750cf6d8abaf47558d36472eead700d7efd..5ec99916e62c1df2feb337f14ef24a618008aa98 100644
--- a/bob/learn/tensorflow/script/cache_dataset.py
+++ b/bob/learn/tensorflow/script/cache_dataset.py
@@ -8,41 +8,50 @@ import logging
 import click
 import tensorflow as tf
 from bob.extension.scripts.click_helper import (
-    verbosity_option, ConfigCommand, ResourceOption, log_parameters)
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+    log_parameters,
+)
 from ..utils import is_argument_available
 
 logger = logging.getLogger(__name__)
 
 
-@click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+@click.command(entry_point_group="bob.learn.tensorflow.config", cls=ConfigCommand)
 @click.option(
-    '--input-fn',
-    '-i',
+    "--input-fn",
+    "-i",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.input_fn',
-    help='The ``input_fn`` that will return the features and labels. '
-         'You should call the dataset.cache(...) yourself in the input '
-         'function. If the ``input_fn`` accepts a ``cache_only`` argument, '
-         'it will be given as True.')
+    entry_point_group="bob.learn.tensorflow.input_fn",
+    help="The ``input_fn`` that will return the features and labels. "
+    "You should call the dataset.cache(...) yourself in the input "
+    "function. If the ``input_fn`` accepts a ``cache_only`` argument, "
+    "it will be given as True.",
+)
 @click.option(
-    '--mode',
+    "--mode",
     cls=ResourceOption,
     default=tf.estimator.ModeKeys.TRAIN,
     show_default=True,
-    type=click.Choice((tf.estimator.ModeKeys.TRAIN,
-                       tf.estimator.ModeKeys.EVAL,
-                       tf.estimator.ModeKeys.PREDICT)),
-    help='mode value to be given to the input_fn.')
+    type=click.Choice(
+        (
+            tf.estimator.ModeKeys.TRAIN,
+            tf.estimator.ModeKeys.EVAL,
+            tf.estimator.ModeKeys.PREDICT,
+        )
+    ),
+    help="mode value to be given to the input_fn.",
+)
 @verbosity_option(cls=ResourceOption)
 def cache_dataset(input_fn, mode, **kwargs):
     """Trains networks using Tensorflow estimators."""
     log_parameters(logger)
 
     kwargs = {}
-    if is_argument_available('cache_only', input_fn):
-        kwargs['cache_only'] = True
+    if is_argument_available("cache_only", input_fn):
+        kwargs["cache_only"] = True
         logger.info("cache_only as True will be passed to input_fn.")
 
     # call the input function manually
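
The command only forwards cache_only=True when the input_fn actually accepts it; is_argument_available comes from ..utils and is not shown here, but a signature check along these lines would behave the same (a hypothetical re-implementation, not the package's code):

    import inspect

    def is_argument_available(argument, func):
        return argument in inspect.signature(func).parameters

    def input_fn(mode, cache_only=False):   # hypothetical input_fn
        ...

    kwargs = {}
    if is_argument_available("cache_only", input_fn):
        kwargs["cache_only"] = True
    print(kwargs)   # {'cache_only': True}
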
diff --git a/bob/learn/tensorflow/script/compute_statistics.py b/bob/learn/tensorflow/script/compute_statistics.py
index b876c5e4ae5be9f3df6f4f18c34aaaf50ef2395e..3638dee11beb94a7a24f24536bfd2c170db2b97c 100644
--- a/bob/learn/tensorflow/script/compute_statistics.py
+++ b/bob/learn/tensorflow/script/compute_statistics.py
@@ -8,75 +8,80 @@ import logging
 import click
 import numpy as np
 from bob.extension.scripts.click_helper import (
-    verbosity_option, ConfigCommand, ResourceOption, log_parameters)
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+    log_parameters,
+)
 from bob.learn.tensorflow.dataset.bio import BioGenerator
 
 logger = logging.getLogger(__name__)
 
 
 @click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand,
+    entry_point_group="bob.learn.tensorflow.config",
+    cls=ConfigCommand,
     epilog="""\b
 An example configuration could be::
     # define the database:
     from bob.bio.base.test.dummy.database import database
     groups = ['dev']
     biofiles = database.all_files(groups)
-"""
+""",
 )
 @click.option(
-    '--database',
-    '-d',
+    "--database",
+    "-d",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.bio.database',
-    help='A bio database. Its original_directory must point to the correct '
-    'path.')
+    entry_point_group="bob.bio.database",
+    help="A bio database. Its original_directory must point to the correct " "path.",
+)
 @click.option(
-    '--biofiles',
+    "--biofiles",
     required=True,
     cls=ResourceOption,
-    help='The list of the bio files. You can only provide this through '
-    'config files.')
+    help="The list of the bio files. You can only provide this through "
+    "config files.",
+)
 @click.option(
-    '--load-data',
+    "--load-data",
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.load_data',
-    help='A callable with the signature of '
-    '``data = load_data(database, biofile)``. '
-    ':any:`bob.bio.base.read_original_data` is used by default.')
+    entry_point_group="bob.learn.tensorflow.load_data",
+    help="A callable with the signature of "
+    "``data = load_data(database, biofile)``. "
+    ":any:`bob.bio.base.read_original_data` is used by default.",
+)
 @click.option(
-    '--multiple-samples',
+    "--multiple-samples",
     is_flag=True,
     cls=ResourceOption,
-    help='If provided, it means that the data provided by reader contains '
-    'multiple samples with same label and path.')
+    help="If provided, it means that the data provided by reader contains "
+    "multiple samples with same label and path.",
+)
 @verbosity_option(cls=ResourceOption)
-def compute_statistics(database, biofiles, load_data, multiple_samples,
-                       **kwargs):
+def compute_statistics(database, biofiles, load_data, multiple_samples, **kwargs):
     """Computes statistics on a BioGenerator.
 
     This script works with bob.bio.base databases. It will load all the samples
     and print their mean.
     """
-    log_parameters(logger, ignore=('biofiles', ))
+    log_parameters(logger, ignore=("biofiles",))
     logger.debug("len(biofiles): %d", len(biofiles))
 
     assert len(biofiles), "biofiles are empty!"
-    logger.info('Calculating the mean for %d files', len(biofiles))
+    logger.info("Calculating the mean for %d files", len(biofiles))
 
     generator = BioGenerator(
-        database,
-        biofiles,
-        load_data=load_data,
-        multiple_samples=multiple_samples)
+        database, biofiles, load_data=load_data, multiple_samples=multiple_samples
+    )
 
     for i, (data, _, _) in enumerate(generator()):
         if i == 0:
-            mean = np.cast['float'](data)
+            mean = np.cast["float"](data)
         else:
             mean += data
 
     mean = mean.reshape(mean.shape[0], -1)
     mean = np.mean(mean, axis=1)
-    click.echo(mean / (i + 1.))
+    click.echo(mean / (i + 1.0))
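
The loop keeps a running sum (mean starts as the first sample, then accumulates), and only at the end reshapes to (leading_axis, -1), averages within each slice, and divides by the sample count i + 1. A NumPy re-sketch of the same arithmetic:

    import numpy as np

    rng = np.random.default_rng(0)
    samples = [rng.random((3, 4, 4)) for _ in range(5)]   # stand-in for the BioGenerator

    for i, data in enumerate(samples):
        total = data.copy() if i == 0 else total + data

    flat = total.reshape(total.shape[0], -1)              # one row per leading slice
    print(flat.mean(axis=1) / (i + 1))                    # per-slice mean over all samples
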
diff --git a/bob/learn/tensorflow/script/eval.py b/bob/learn/tensorflow/script/eval.py
index 0ff48972f05cf70aa80c849ac7f6e85f0c9508b1..003f72f3c6e8235108206f11d801a4bb10cc8dd5 100644
--- a/bob/learn/tensorflow/script/eval.py
+++ b/bob/learn/tensorflow/script/eval.py
@@ -15,14 +15,18 @@ from glob import glob
 from collections import defaultdict, OrderedDict
 from ..utils.eval import get_global_step
 from bob.extension.scripts.click_helper import (
-    verbosity_option, ConfigCommand, ResourceOption, log_parameters)
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+    log_parameters,
+)
 from bob.io.base import create_directories_safe
 
 logger = logging.getLogger(__name__)
 
 
 def copy_one_step(train_dir, global_step, save_dir, fail_on_error=False):
-    for path in glob('{}/model.ckpt-{}.*'.format(train_dir, global_step)):
+    for path in glob("{}/model.ckpt-{}.*".format(train_dir, global_step)):
         dst = os.path.join(save_dir, os.path.basename(path))
         if os.path.isfile(dst):
             continue
@@ -30,38 +34,41 @@ def copy_one_step(train_dir, global_step, save_dir, fail_on_error=False):
             shutil.copy(path, dst)
             logger.info("Copied `%s' over to `%s'", path, dst)
         except OSError:
-            logger.warning(
-                "Failed to copy `%s' over to `%s'", path, dst,
-                exc_info=True)
+            logger.warning("Failed to copy `%s' over to `%s'", path, dst, exc_info=True)
             if fail_on_error:
                 raise
 
 
-def save_n_best_models(train_dir, save_dir, evaluated_file,
-                       keep_n_best_models, sort_by, exceptions=tuple()):
+def save_n_best_models(
+    train_dir, save_dir, evaluated_file, keep_n_best_models, sort_by, exceptions=tuple()
+):
     logger.debug(
         "save_n_best_models was called with %s, %s, %s, %s, %s, %s",
-        train_dir, save_dir, evaluated_file, keep_n_best_models, sort_by,
-        exceptions)
+        train_dir,
+        save_dir,
+        evaluated_file,
+        keep_n_best_models,
+        sort_by,
+        exceptions,
+    )
 
     create_directories_safe(save_dir)
     evaluated = read_evaluated_file(evaluated_file)
 
     def _key(x):
         x = x[1][sort_by]
-        if 'loss' in sort_by:
+        if "loss" in sort_by:
             return x
         else:
             return -x
 
-    best_models = OrderedDict(
-        sorted(evaluated.items(), key=_key)[:keep_n_best_models])
+    best_models = OrderedDict(sorted(evaluated.items(), key=_key)[:keep_n_best_models])
     logger.info("Best models: %s", best_models)
 
     # delete the old saved models that are not in top N best anymore
     saved_models = defaultdict(list)
-    for path in glob('{}/model.ckpt-*'.format(save_dir)):
-        global_step = path.split('model.ckpt-')[1].split('.')[0]
+    for path in glob("{}/model.ckpt-*".format(save_dir)):
+        global_step = path.split("model.ckpt-")[1].split(".")[0]
         saved_models[global_step].append(path)
 
     for global_step, paths in saved_models.items():
@@ -77,37 +84,35 @@ def save_n_best_models(train_dir, save_dir, evaluated_file,
     # create a checkpoint file indicating to the best existing model:
     # 1. filter non-existing models first
     def _filter(x):
-        return len(glob('{}/model.ckpt-{}.*'.format(save_dir, x[0]))) > 0
+        return len(glob("{}/model.ckpt-{}.*".format(save_dir, x[0]))) > 0
 
     best_models = OrderedDict(filter(_filter, best_models.items()))
 
     # 2. create the checkpoint file
-    with open(os.path.join(save_dir, 'checkpoint'), 'wt') as f:
+    with open(os.path.join(save_dir, "checkpoint"), "wt") as f:
         if not len(best_models):
             return
         the_best_global_step = list(best_models)[0]
-        f.write('model_checkpoint_path: "model.ckpt-{}"\n'.format(
-            the_best_global_step))
+        f.write('model_checkpoint_path: "model.ckpt-{}"\n'.format(the_best_global_step))
         # reverse the models before saving since the last ones in checkpoints
         # are usually more important. This aligns with the bob tf trim script.
         for i, global_step in enumerate(reversed(best_models)):
-            f.write('all_model_checkpoint_paths: "model.ckpt-{}"\n'.format(
-                global_step))
+            f.write('all_model_checkpoint_paths: "model.ckpt-{}"\n'.format(global_step))
 
 
 def read_evaluated_file(path):
     evaluated = {}
     with open(path) as f:
         for line in f:
-            global_step, line = line.split(' ', 1)
+            global_step, line = line.split(" ", 1)
             temp = {}
-            for k_v in line.strip().split(', '):
-                k, v = k_v.split(' = ')
+            for k_v in line.strip().split(", "):
+                k, v = k_v.split(" = ")
                 try:
                     v = float(v)
                 except ValueError:  # not all values could be floats
                     pass
-                if 'global_step' in k:
+                if "global_step" in k:
                     v = int(v)
                 temp[k] = v
             evaluated[global_step] = temp
@@ -115,90 +120,105 @@ def read_evaluated_file(path):
 
 
 def append_evaluated_file(path, evaluations):
-    str_evaluations = ', '.join(
-        '%s = %s' % (k, v) for k, v in sorted(evaluations.items()))
-    with open(path, 'a') as f:
-        f.write('{} {}\n'.format(evaluations['global_step'], str_evaluations))
+    str_evaluations = ", ".join(
+        "%s = %s" % (k, v) for k, v in sorted(evaluations.items())
+    )
+    with open(path, "a") as f:
+        f.write("{} {}\n".format(evaluations["global_step"], str_evaluations))
     return str_evaluations
 
 
-@click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+@click.command(entry_point_group="bob.learn.tensorflow.config", cls=ConfigCommand)
 @click.option(
-    '--estimator',
-    '-e',
+    "--estimator",
+    "-e",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.estimator',
-    help='The estimator that will be evaluated.')
+    entry_point_group="bob.learn.tensorflow.estimator",
+    help="The estimator that will be evaluated.",
+)
 @click.option(
-    '--eval-input-fn',
-    '-i',
+    "--eval-input-fn",
+    "-i",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.input_fn',
-    help='The ``input_fn`` that will be given to '
-    ':any:`tf.estimator.Estimator.eval`.')
+    entry_point_group="bob.learn.tensorflow.input_fn",
+    help="The ``input_fn`` that will be given to "
+    ":any:`tf.estimator.Estimator.eval`.",
+)
 @click.option(
-    '--hooks',
+    "--hooks",
     cls=ResourceOption,
     multiple=True,
-    entry_point_group='bob.learn.tensorflow.hook',
-    help='List of SessionRunHook subclass instances. Used for callbacks '
-    'inside the evaluation loop.')
+    entry_point_group="bob.learn.tensorflow.hook",
+    help="List of SessionRunHook subclass instances. Used for callbacks "
+    "inside the evaluation loop.",
+)
 @click.option(
-    '--run-once',
+    "--run-once",
     cls=ResourceOption,
     default=False,
     show_default=True,
     is_flag=True,
-    help='If given, the model will be evaluated only once.')
+    help="If given, the model will be evaluated only once.",
+)
 @click.option(
-    '--eval-interval-secs',
+    "--eval-interval-secs",
     cls=ResourceOption,
     type=click.INT,
     default=60,
     show_default=True,
-    help='The seconds to wait for the next evaluation.')
-@click.option('--name', cls=ResourceOption, help='Name of the evaluation')
+    help="The seconds to wait for the next evaluation.",
+)
+@click.option("--name", cls=ResourceOption, help="Name of the evaluation")
 @click.option(
-    '--keep-n-best-models',
-    '-K',
+    "--keep-n-best-models",
+    "-K",
     type=click.INT,
     cls=ResourceOption,
     default=1,
     show_default=True,
-    help='If more than 0, will keep the best N models in the evaluation folder'
+    help="If more than 0, will keep the best N models in the evaluation folder",
 )
 @click.option(
-    '--sort-by',
+    "--sort-by",
     cls=ResourceOption,
     default="loss",
     show_default=True,
-    help='The metric for sorting the N best models.')
+    help="The metric for sorting the N best models.",
+)
 @click.option(
-    '--max-wait-intervals',
+    "--max-wait-intervals",
     cls=ResourceOption,
     type=click.INT,
     default=-1,
     show_default=True,
-    help='If given, the maximum number of intervals waiting for new training checkpoint.')
+    help="If given, the maximum number of intervals waiting for new training checkpoint.",
+)
 @click.option(
-    '--force-re-run',
-    is_flag=True,
-    default=False,
-    help='A debugging flag. Do not use!')
+    "--force-re-run", is_flag=True, default=False, help="A debugging flag. Do not use!"
+)
 @verbosity_option(cls=ResourceOption)
-def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
-         keep_n_best_models, sort_by, max_wait_intervals, force_re_run,
-         **kwargs):
+def eval(
+    estimator,
+    eval_input_fn,
+    hooks,
+    run_once,
+    eval_interval_secs,
+    name,
+    keep_n_best_models,
+    sort_by,
+    max_wait_intervals,
+    force_re_run,
+    **kwargs
+):
     """Evaluates networks using Tensorflow estimators."""
     log_parameters(logger)
 
-    real_name = 'eval_' + name if name else 'eval'
+    real_name = "eval_" + name if name else "eval"
     eval_dir = os.path.join(estimator.model_dir, real_name)
     os.makedirs(eval_dir, exist_ok=True)
-    evaluated_file = os.path.join(eval_dir, 'evaluated')
+    evaluated_file = os.path.join(eval_dir, "evaluated")
     wait_interval_count = 0
     evaluated_steps_count = 0
     while True:
@@ -233,8 +253,14 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
                         wait_interval_count = 0
 
             # Save the best N models into the eval directory
-            save_n_best_models(estimator.model_dir, eval_dir, evaluated_file,
-                               keep_n_best_models, sort_by, exceptions)
+            save_n_best_models(
+                estimator.model_dir,
+                eval_dir,
+                evaluated_file,
+                keep_n_best_models,
+                sort_by,
+                exceptions,
+            )
 
         if (not ckpt) or (not ckpt.model_checkpoint_path):
             if max_wait_intervals > 0:
@@ -249,8 +275,10 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
                 global_step = str(get_global_step(checkpoint_path))
             except Exception:
                 logger.warning(
-                    'Failed to find global_step for checkpoint_path {}, '
-                    'skipping ...'.format(checkpoint_path), exc_info=True)
+                    "Failed to find global_step for checkpoint_path {}, "
+                    "skipping ...".format(checkpoint_path),
+                    exc_info=True,
+                )
                 continue
             if global_step in evaluated_steps and not force_re_run:
                 continue
@@ -258,13 +286,17 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
             # copy over the checkpoint before evaluating since it might
             # disappear after evaluation.
             try:
-                copy_one_step(estimator.model_dir, global_step, eval_dir, fail_on_error=True)
+                copy_one_step(
+                    estimator.model_dir, global_step, eval_dir, fail_on_error=True
+                )
             except Exception:
                 # skip testing this checkpoint
                 continue
 
             # evaluate based on the just copied checkpoint_path
-            checkpoint_path = checkpoint_path.replace(estimator.model_dir, eval_dir + os.sep)
+            checkpoint_path = checkpoint_path.replace(
+                estimator.model_dir, eval_dir + os.sep
+            )
             checkpoint_path = os.path.abspath(checkpoint_path)
             logger.debug("Evaluating the model from %s", checkpoint_path)
 
@@ -281,14 +313,18 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
                 logger.info("Something went wrong in evaluation.")
                 raise
 
-            str_evaluations = append_evaluated_file(evaluated_file,
-                                                    evaluations)
+            str_evaluations = append_evaluated_file(evaluated_file, evaluations)
             click.echo(str_evaluations)
             sys.stdout.flush()
 
             # Save the best N models into the eval directory
-            save_n_best_models(estimator.model_dir, eval_dir, evaluated_file,
-                               keep_n_best_models, sort_by)
+            save_n_best_models(
+                estimator.model_dir,
+                eval_dir,
+                evaluated_file,
+                keep_n_best_models,
+                sort_by,
+            )
 
         if run_once:
             break
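
append_evaluated_file and read_evaluated_file round-trip a simple line format: the global step, a space, then comma-separated `key = value` pairs sorted by key. A self-contained parse of one such line, matching the split logic above (the real reader also tolerates non-numeric values):

    line = "1000 accuracy = 0.91, global_step = 1000, loss = 0.35"

    global_step, rest = line.split(" ", 1)
    metrics = {}
    for k_v in rest.strip().split(", "):
        k, v = k_v.split(" = ")
        metrics[k] = int(v) if "global_step" in k else float(v)

    print(global_step, metrics)
    # 1000 {'accuracy': 0.91, 'global_step': 1000, 'loss': 0.35}
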
diff --git a/bob/learn/tensorflow/script/fit.py b/bob/learn/tensorflow/script/fit.py
index f19776c5da551fa9dd611d0279eab99d0eee6469..177d2761bad0c483216db353ff47216a76594e8b 100644
--- a/bob/learn/tensorflow/script/fit.py
+++ b/bob/learn/tensorflow/script/fit.py
@@ -10,82 +10,101 @@ import logging
 import os
 import tensorflow as tf
 from bob.extension.scripts.click_helper import (
-    verbosity_option, ConfigCommand, ResourceOption, log_parameters)
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+    log_parameters,
+)
 
 logger = logging.getLogger(__name__)
 
 
-@click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+@click.command(entry_point_group="bob.learn.tensorflow.config", cls=ConfigCommand)
 @click.option(
-    '--model',
-    '-m',
+    "--model",
+    "-m",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.model',
-    help='The keras model that will be trained.')
+    entry_point_group="bob.learn.tensorflow.model",
+    help="The keras model that will be trained.",
+)
 @click.option(
-    '--train-input-fn',
-    '-i',
+    "--train-input-fn",
+    "-i",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.input_fn',
-    help='A function that will return the training data as a tf.data.Dataset '
-    'or tf.data.Iterator. This will be given as `x` to '
-    'tf.keras.Model.fit.')
+    entry_point_group="bob.learn.tensorflow.input_fn",
+    help="A function that will return the training data as a tf.data.Dataset "
+    "or tf.data.Iterator. This will be given as `x` to "
+    "tf.keras.Model.fit.",
+)
 @click.option(
-    '--epochs',
-    '-e',
+    "--epochs",
+    "-e",
     default=1,
     type=click.types.INT,
     cls=ResourceOption,
-    help='Number of epochs to train model. See '
-    'tf.keras.Model.fit.')
+    help="Number of epochs to train model. See " "tf.keras.Model.fit.",
+)
 @click.option(
-    '--callbacks',
+    "--callbacks",
     cls=ResourceOption,
     multiple=True,
-    entry_point_group='bob.learn.tensorflow.callback',
-    help='List of tf.keras.callbacks. Used for callbacks '
-    'inside the training loop.')
+    entry_point_group="bob.learn.tensorflow.callback",
+    help="List of tf.keras.callbacks. Used for callbacks " "inside the training loop.",
+)
 @click.option(
-    '--eval-input-fn',
-    '-i',
+    "--eval-input-fn",
+    "-i",
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.input_fn',
-    help='A function that will return the validation data as a tf.data.Dataset'
-    ' or tf.data.Iterator. This will be given as `validation_data` to '
-    'tf.keras.Model.fit.')
+    entry_point_group="bob.learn.tensorflow.input_fn",
+    help="A function that will return the validation data as a tf.data.Dataset"
+    " or tf.data.Iterator. This will be given as `validation_data` to "
+    "tf.keras.Model.fit.",
+)
 @click.option(
-    '--class-weight',
-    '-c',
-    cls=ResourceOption,
-    help='See tf.keras.Model.fit.')
+    "--class-weight", "-c", cls=ResourceOption, help="See tf.keras.Model.fit."
+)
 @click.option(
-    '--initial-epoch',
+    "--initial-epoch",
     default=0,
     type=click.types.INT,
     cls=ResourceOption,
-    help='See tf.keras.Model.fit.')
+    help="See tf.keras.Model.fit.",
+)
 @click.option(
-    '--steps-per-epoch',
+    "--steps-per-epoch",
     type=click.types.INT,
     cls=ResourceOption,
-    help='See tf.keras.Model.fit.')
+    help="See tf.keras.Model.fit.",
+)
 @click.option(
-    '--validation-steps',
+    "--validation-steps",
     type=click.types.INT,
     cls=ResourceOption,
-    help='See tf.keras.Model.fit.')
+    help="See tf.keras.Model.fit.",
+)
 @verbosity_option(cls=ResourceOption)
-def fit(model, train_input_fn, epochs, verbose, callbacks, eval_input_fn,
-        class_weight, initial_epoch, steps_per_epoch, validation_steps,
-        **kwargs):
+def fit(
+    model,
+    train_input_fn,
+    epochs,
+    verbose,
+    callbacks,
+    eval_input_fn,
+    class_weight,
+    initial_epoch,
+    steps_per_epoch,
+    validation_steps,
+    **kwargs
+):
     """Trains networks using Keras models."""
     log_parameters(logger)
 
     # Train
-    save_callback = [c for c in callbacks if isinstance(c, tf.keras.callbacks.ModelCheckpoint)]
+    save_callback = [
+        c for c in callbacks if isinstance(c, tf.keras.callbacks.ModelCheckpoint)
+    ]
     model_dir = None
     if save_callback:
         model_dir = save_callback[0].filepath
@@ -103,5 +122,5 @@ def fit(model, train_input_fn, epochs, verbose, callbacks, eval_input_fn,
     )
     click.echo(history.history)
     if model_dir is not None:
-        with open(os.path.join(model_dir, 'keras_fit_history.json'), 'w') as f:
+        with open(os.path.join(model_dir, "keras_fit_history.json"), "w") as f:
             json.dump(history.history, f)
diff --git a/bob/learn/tensorflow/script/keras.py b/bob/learn/tensorflow/script/keras.py
index 9f6ee9c24b09f174ceba366b98e4d17cf0ebbdd0..794f4d672836c5b0143e02435073ea92108127e2 100644
--- a/bob/learn/tensorflow/script/keras.py
+++ b/bob/learn/tensorflow/script/keras.py
@@ -7,7 +7,7 @@ from bob.extension.scripts.click_helper import AliasedGroup
 from .utils import eager_execution_option
 
 
-@with_plugins(pkg_resources.iter_entry_points('bob.learn.tensorflow.keras_cli'))
+@with_plugins(pkg_resources.iter_entry_points("bob.learn.tensorflow.keras_cli"))
 @click.group(cls=AliasedGroup)
 @eager_execution_option()
 def keras():
diff --git a/bob/learn/tensorflow/script/predict_bio.py b/bob/learn/tensorflow/script/predict_bio.py
index 54c5bb2c12cacac2c87416f4ded0df7a4140cfdf..31a4d05c94445031c3918536d4b5ecc7c2a04eda 100644
--- a/bob/learn/tensorflow/script/predict_bio.py
+++ b/bob/learn/tensorflow/script/predict_bio.py
@@ -435,7 +435,9 @@ def generic_predict(
         if last_key == key:
             continue
         else:
-            save_predictions(output_dir, last_key, pred_buffer, video_container, remove_nan)
+            save_predictions(
+                output_dir, last_key, pred_buffer, video_container, remove_nan
+            )
             # delete saved data so we don't run out of RAM
             del pred_buffer[last_key]
             # start saving this new key
diff --git a/bob/learn/tensorflow/script/tf.py b/bob/learn/tensorflow/script/tf.py
index f460df4333a100a9809b89d84012dec9ba0e3eb3..859efe27ba390cdf4a8ebb8fe2f1ac099deddf67 100644
--- a/bob/learn/tensorflow/script/tf.py
+++ b/bob/learn/tensorflow/script/tf.py
@@ -7,7 +7,7 @@ from bob.extension.scripts.click_helper import AliasedGroup
 from .utils import eager_execution_option
 
 
-@with_plugins(pkg_resources.iter_entry_points('bob.learn.tensorflow.cli'))
+@with_plugins(pkg_resources.iter_entry_points("bob.learn.tensorflow.cli"))
 @click.group(cls=AliasedGroup)
 @eager_execution_option()
 def tf():
diff --git a/bob/learn/tensorflow/script/train.py b/bob/learn/tensorflow/script/train.py
index d33d5767abb82a3c012805df4117cce227df3bf5..6f7a0c0d5036aca5a4835b5a98e9f6684deceda4 100644
--- a/bob/learn/tensorflow/script/train.py
+++ b/bob/learn/tensorflow/script/train.py
@@ -7,49 +7,57 @@ from __future__ import print_function
 import logging
 import click
 from bob.extension.scripts.click_helper import (
-    verbosity_option, ConfigCommand, ResourceOption, log_parameters)
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+    log_parameters,
+)
 
 logger = logging.getLogger(__name__)
 
 
-@click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+@click.command(entry_point_group="bob.learn.tensorflow.config", cls=ConfigCommand)
 @click.option(
-    '--estimator',
-    '-e',
+    "--estimator",
+    "-e",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.estimator',
-    help='The estimator that will be trained.')
+    entry_point_group="bob.learn.tensorflow.estimator",
+    help="The estimator that will be trained.",
+)
 @click.option(
-    '--train-input-fn',
-    '-i',
+    "--train-input-fn",
+    "-i",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.input_fn',
-    help='The ``input_fn`` that will be given to '
-    ':any:`tf.estimator.Estimator.train`.')
+    entry_point_group="bob.learn.tensorflow.input_fn",
+    help="The ``input_fn`` that will be given to "
+    ":any:`tf.estimator.Estimator.train`.",
+)
 @click.option(
-    '--hooks',
+    "--hooks",
     cls=ResourceOption,
     multiple=True,
-    entry_point_group='bob.learn.tensorflow.hook',
-    help='List of SessionRunHook subclass instances. Used for callbacks '
-    'inside the training loop.')
+    entry_point_group="bob.learn.tensorflow.hook",
+    help="List of SessionRunHook subclass instances. Used for callbacks "
+    "inside the training loop.",
+)
 @click.option(
-    '--steps',
-    '-s',
+    "--steps",
+    "-s",
     cls=ResourceOption,
     type=click.types.INT,
-    help='Number of steps for which to train model. See '
-    ':any:`tf.estimator.Estimator.train`.')
+    help="Number of steps for which to train model. See "
+    ":any:`tf.estimator.Estimator.train`.",
+)
 @click.option(
-    '--max-steps',
-    '-m',
+    "--max-steps",
+    "-m",
     cls=ResourceOption,
     type=click.types.INT,
-    help='Number of total steps for which to train model. See '
-    ':any:`tf.estimator.Estimator.train`.')
+    help="Number of total steps for which to train model. See "
+    ":any:`tf.estimator.Estimator.train`.",
+)
 @verbosity_option(cls=ResourceOption)
 def train(estimator, train_input_fn, hooks, steps, max_steps, **kwargs):
     """Trains networks using Tensorflow estimators."""
@@ -58,4 +66,5 @@ def train(estimator, train_input_fn, hooks, steps, max_steps, **kwargs):
     # Train
     logger.info("Training a model in %s", estimator.model_dir)
     estimator.train(
-        input_fn=train_input_fn, hooks=hooks, steps=steps, max_steps=max_steps)
+        input_fn=train_input_fn, hooks=hooks, steps=steps, max_steps=max_steps
+    )
diff --git a/bob/learn/tensorflow/script/train_and_evaluate.py b/bob/learn/tensorflow/script/train_and_evaluate.py
index 3ba4ebde0c874750ef97ea6ec8a942c1a3d5bc85..1a6cdc840ec0c33b1b33a408a16099c385a15cb0 100644
--- a/bob/learn/tensorflow/script/train_and_evaluate.py
+++ b/bob/learn/tensorflow/script/train_and_evaluate.py
@@ -8,47 +8,52 @@ import tensorflow as tf
 from bob.learn.tensorflow.utils.hooks import EarlyStopException
 import logging
 import click
-from bob.extension.scripts.click_helper import (verbosity_option,
-                                                ConfigCommand, ResourceOption)
+from bob.extension.scripts.click_helper import (
+    verbosity_option,
+    ConfigCommand,
+    ResourceOption,
+)
 
 logger = logging.getLogger(__name__)
 
 
-@click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+@click.command(entry_point_group="bob.learn.tensorflow.config", cls=ConfigCommand)
 @click.option(
-    '--estimator',
-    '-e',
+    "--estimator",
+    "-e",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.estimator',
-    help='The estimator that will be trained and evaluated.')
+    entry_point_group="bob.learn.tensorflow.estimator",
+    help="The estimator that will be trained and evaluated.",
+)
 @click.option(
-    '--train-spec',
-    '-it',
+    "--train-spec",
+    "-it",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.trainspec',
-    help='See :any:`tf.estimator.Estimator.train_and_evaluate`.')
+    entry_point_group="bob.learn.tensorflow.trainspec",
+    help="See :any:`tf.estimator.Estimator.train_and_evaluate`.",
+)
 @click.option(
-    '--eval-spec',
-    '-ie',
+    "--eval-spec",
+    "-ie",
     required=True,
     cls=ResourceOption,
-    entry_point_group='bob.learn.tensorflow.evalspec',
-    help='See :any:`tf.estimator.Estimator.train_and_evaluate`.')
+    entry_point_group="bob.learn.tensorflow.evalspec",
+    help="See :any:`tf.estimator.Estimator.train_and_evaluate`.",
+)
 @click.option(
-    '--exit-ok-exceptions',
+    "--exit-ok-exceptions",
     cls=ResourceOption,
     multiple=True,
-    default=(EarlyStopException, ),
+    default=(EarlyStopException,),
     show_default=True,
-    entry_point_group='bob.learn.tensorflow.exception',
-    help='A list of exceptions to exit properly if they occur. If nothing is '
-    'provided, the EarlyStopException is handled by default.')
+    entry_point_group="bob.learn.tensorflow.exception",
+    help="A list of exceptions to exit properly if they occur. If nothing is "
+    "provided, the EarlyStopException is handled by default.",
+)
 @verbosity_option(cls=ResourceOption)
-def train_and_evaluate(estimator, train_spec, eval_spec, exit_ok_exceptions,
-                       **kwargs):
+def train_and_evaluate(estimator, train_spec, eval_spec, exit_ok_exceptions, **kwargs):
     """Trains and evaluates a network using Tensorflow estimators.
 
     This script calls the estimator.train_and_evaluate function. Please see:
@@ -57,11 +62,11 @@ def train_and_evaluate(estimator, train_spec, eval_spec, exit_ok_exceptions,
     https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec
     for more details.
     """
-    logger.debug('estimator: %s', estimator)
-    logger.debug('train_spec: %s', train_spec)
-    logger.debug('eval_spec: %s', eval_spec)
-    logger.debug('exit_ok_exceptions: %s', exit_ok_exceptions)
-    logger.debug('kwargs: %s', kwargs)
+    logger.debug("estimator: %s", estimator)
+    logger.debug("train_spec: %s", train_spec)
+    logger.debug("eval_spec: %s", eval_spec)
+    logger.debug("exit_ok_exceptions: %s", exit_ok_exceptions)
+    logger.debug("kwargs: %s", kwargs)
 
     # Train and evaluate
     try:
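
The three resources above are plain Python objects, so a configuration file for
this command only needs to define estimator, train_spec and eval_spec. A
self-contained sketch with toy data (names and sizes are illustrative):

    import tensorflow as tf

    data = tf.feature_column.numeric_column("data", shape=[4])
    estimator = tf.estimator.LinearClassifier(feature_columns=[data], n_classes=2)

    def input_fn():
        x = {"data": tf.random.uniform([32, 4])}
        y = tf.zeros([32], dtype=tf.int64)
        return tf.data.Dataset.from_tensor_slices((x, y)).batch(8)

    train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=50)
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn)
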
diff --git a/bob/learn/tensorflow/script/trim.py b/bob/learn/tensorflow/script/trim.py
index ed29b4c28847499a283d9f9908ba427f8604d8b6..7f58ce47ab11ebac534ed294c8fb0f1058773467 100644
--- a/bob/learn/tensorflow/script/trim.py
+++ b/bob/learn/tensorflow/script/trim.py
@@ -20,59 +20,65 @@ def delete_extra_checkpoints(directory, keep_last_n, dry_run):
     except Exception:
         return
     if (not ckpt) or (not ckpt.model_checkpoint_path):
-        logger.debug('Could not find a checkpoint in %s', directory)
+        logger.debug("Could not find a checkpoint in %s", directory)
         return
     for checkpoint_path in ckpt.all_model_checkpoint_paths[:-keep_last_n]:
         if checkpoint_path == ckpt.model_checkpoint_path:
             continue
         if dry_run:
-            click.echo('Would delete {}.*'.format(checkpoint_path))
+            click.echo("Would delete {}.*".format(checkpoint_path))
         else:
-            logger.info('Deleting %s.*', checkpoint_path)
-            for path in glob.glob('{}.*'.format(checkpoint_path)):
+            logger.info("Deleting %s.*", checkpoint_path)
+            for path in glob.glob("{}.*".format(checkpoint_path)):
                 os.remove(path)
 
     def _existing(x):
-        return glob.glob('{}.*'.format(x))
+        return glob.glob("{}.*".format(x))
 
     # update the checkpoint file
     all_paths = filter(_existing, ckpt.all_model_checkpoint_paths)
     all_paths = list(map(os.path.basename, all_paths))
     model_checkpoint_path = os.path.basename(ckpt.model_checkpoint_path)
     tf.compat.v1.train.update_checkpoint_state(
-        directory, model_checkpoint_path, all_paths)
+        directory, model_checkpoint_path, all_paths
+    )
 
 
-@click.command(epilog='''\b
+@click.command(
+    epilog="""\b
 Examples:
 $ bob tf trim -vv ~/my_models/model_dir
 $ bob tf trim -vv ~/my_models/model_dir1 ~/my_models/model_dir2
 $ bob tf trim -vvr ~/my_models
 $ bob tf trim -vvrn ~/my_models
 $ bob tf trim -vvrK 2 ~/my_models
-''')
+"""
+)
 @click.argument(
-    'root_dirs',
+    "root_dirs",
     nargs=-1,
     type=click.Path(exists=True, file_okay=False, dir_okay=True),
 )
 @click.option(
-    '--keep-last-n-models',
-    '-K',
+    "--keep-last-n-models",
+    "-K",
     type=click.INT,
     default=1,
     show_default=True,
-    help='The number of recent checkpoints to keep.')
+    help="The number of recent checkpoints to keep.",
+)
 @click.option(
-    '--recurse',
-    '-r',
+    "--recurse",
+    "-r",
     is_flag=True,
-    help='If given, it will delete checkpoints recursively.')
+    help="If given, it will delete checkpoints recursively.",
+)
 @click.option(
-    '--dry-run',
-    '-n',
+    "--dry-run",
+    "-n",
     is_flag=True,
-    help='If given, will only print what will be deleted.')
+    help="If given, will only print what will be deleted.",
+)
 @verbosity_option()
 def trim(root_dirs, keep_last_n_models, recurse, dry_run, **kwargs):
     """Deletes extra tensorflow checkpoints."""
@@ -81,7 +87,6 @@ def trim(root_dirs, keep_last_n_models, recurse, dry_run, **kwargs):
     for root_dir in root_dirs:
         if recurse:
             for directory, _, _ in os.walk(root_dir):
-                delete_extra_checkpoints(directory, keep_last_n_models,
-                                         dry_run)
+                delete_extra_checkpoints(directory, keep_last_n_models, dry_run)
         else:
             delete_extra_checkpoints(root_dir, keep_last_n_models, dry_run)
diff --git a/bob/learn/tensorflow/script/utils.py b/bob/learn/tensorflow/script/utils.py
index ca5ba4ef219b093b4706729a37bbc55b71bba508..db6efdb4594f8f4e62227dd94c8a4df63906feb8 100644
--- a/bob/learn/tensorflow/script/utils.py
+++ b/bob/learn/tensorflow/script/utils.py
@@ -9,19 +9,29 @@ def eager_execution_option(**kwargs):
      callable
       A decorator to be used for adding this option to click commands
     """
+
     def custom_eager_execution_option(func):
         def callback(ctx, param, value):
             if not value or ctx.resilient_parsing:
                 return
             import tensorflow as tf
+
             tf.compat.v1.enable_eager_execution()
             if not tf.executing_eagerly():
                 raise click.ClickException(
-                    "Could not enable tensorflow eager execution mode!")
+                    "Could not enable tensorflow eager execution mode!"
+                )
             else:
                 click.echo("Executing tensorflow operations eagerly!")
 
         return click.option(
-            '-e', '--eager', is_flag=True, callback=callback,
-            expose_value=False, is_eager=True, **kwargs)(func)
+            "-e",
+            "--eager",
+            is_flag=True,
+            callback=callback,
+            expose_value=False,
+            is_eager=True,
+            **kwargs
+        )(func)
+
     return custom_eager_execution_option
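
Usage-wise, this adds an eager -e/--eager flag whose callback switches
TensorFlow into eager mode before the command body runs. A sketch of attaching
it to a hypothetical command:

    import click
    from bob.learn.tensorflow.script.utils import eager_execution_option

    @click.command()
    @eager_execution_option()
    def inspect():  # hypothetical command name
        click.echo("runs after the eager-execution callback")
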
diff --git a/bob/learn/tensorflow/test/data/input_biogenerator_config.py b/bob/learn/tensorflow/test/data/input_biogenerator_config.py
index 2aca7ccd4cda182bc882f1ca512b65b837a18810..2d03e4b19efe5ede90a5e9bffd4018f036ad3941 100644
--- a/bob/learn/tensorflow/test/data/input_biogenerator_config.py
+++ b/bob/learn/tensorflow/test/data/input_biogenerator_config.py
@@ -10,9 +10,9 @@ def input_fn(mode):
     from bob.bio.base.test.dummy.database import database as db
 
     if mode == tf.estimator.ModeKeys.TRAIN:
-        groups = 'world'
+        groups = "world"
     elif mode == tf.estimator.ModeKeys.EVAL:
-        groups = 'dev'
+        groups = "dev"
 
     files = db.objects(groups=groups)
 
@@ -33,7 +33,8 @@ def input_fn(mode):
     generator = BioGenerator(db, files, load_data, biofile_to_label)
 
     dataset = tf.data.Dataset.from_generator(
-        generator, generator.output_types, generator.output_shapes)
+        generator, generator.output_types, generator.output_shapes
+    )
 
     def transform(image, label, key):
         # convert to channels last
@@ -51,7 +52,7 @@ def input_fn(mode):
     dataset = dataset.batch(batch_size)
 
     data, label, key = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
-    return {'data': data, 'key': key}, label
+    return {"data": data, "key": key}, label
 
 
 def train_input_fn():
diff --git a/bob/learn/tensorflow/test/data/input_predict_bio_config.py b/bob/learn/tensorflow/test/data/input_predict_bio_config.py
index d355768b5a84064730878326f879d8f02dd77f2d..cf4d862ac27da74c2fb001c6d8d2ca4111022e3a 100644
--- a/bob/learn/tensorflow/test/data/input_predict_bio_config.py
+++ b/bob/learn/tensorflow/test/data/input_predict_bio_config.py
@@ -1,17 +1,20 @@
 import tensorflow as tf
 from bob.bio.base.test.dummy.database import database
-biofiles = database.all_files(['dev'])
+
+biofiles = database.all_files(["dev"])
 
 
 def bio_predict_input_fn(generator, output_types, output_shapes):
     def input_fn():
-        dataset = tf.data.Dataset.from_generator(
-            generator, output_types, output_shapes)
+        dataset = tf.data.Dataset.from_generator(generator, output_types, output_shapes)
         # apply all kinds of transformations here, process the data
         # even further if you want.
         dataset = dataset.prefetch(1)
-        dataset = dataset.batch(10**3)
-        images, labels, keys = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
+        dataset = dataset.batch(10 ** 3)
+        images, labels, keys = tf.compat.v1.data.make_one_shot_iterator(
+            dataset
+        ).get_next()
+
+        return {"data": images, "key": keys}, labels
 
-        return {'data': images, 'key': keys}, labels
     return input_fn
diff --git a/bob/learn/tensorflow/test/data/input_tfrecords_config.py b/bob/learn/tensorflow/test/data/input_tfrecords_config.py
index 2c5ec8f02e8f547bc31de2e062f2240ab044d0e5..f6df06c6c9af58d9e376c2d320724c079fe4d064 100644
--- a/bob/learn/tensorflow/test/data/input_tfrecords_config.py
+++ b/bob/learn/tensorflow/test/data/input_tfrecords_config.py
@@ -1,8 +1,10 @@
 import tensorflow as tf
-from bob.learn.tensorflow.dataset.tfrecords import shuffle_data_and_labels, \
-    batch_data_and_labels
+from bob.learn.tensorflow.dataset.tfrecords import (
+    shuffle_data_and_labels,
+    batch_data_and_labels,
+)
 
-tfrecord_filenames = ['%(tfrecord_filenames)s']
+tfrecord_filenames = ["%(tfrecord_filenames)s"]
 data_shape = (1, 112, 92)  # size of atnt images
 data_type = tf.uint8
 batch_size = 2
@@ -10,13 +12,15 @@ epochs = 2
 
 
 def train_input_fn():
-    return shuffle_data_and_labels(tfrecord_filenames, data_shape, data_type,
-                                   batch_size, epochs=epochs)
+    return shuffle_data_and_labels(
+        tfrecord_filenames, data_shape, data_type, batch_size, epochs=epochs
+    )
 
 
 def eval_input_fn():
-    return batch_data_and_labels(tfrecord_filenames, data_shape, data_type,
-                                 batch_size, epochs=1)
+    return batch_data_and_labels(
+        tfrecord_filenames, data_shape, data_type, batch_size, epochs=1
+    )
 
 
 # config for train_and_evaluate
diff --git a/bob/learn/tensorflow/test/data/mnist_estimator.py b/bob/learn/tensorflow/test/data/mnist_estimator.py
index 9957657681b9c46594647c68302eb592ceadadb4..378b52628da8c6f1a1bbbdf29ff8eaacd508216b 100644
--- a/bob/learn/tensorflow/test/data/mnist_estimator.py
+++ b/bob/learn/tensorflow/test/data/mnist_estimator.py
@@ -1,3 +1,6 @@
 import tensorflow as tf
-data = tf.feature_column.numeric_column('data', shape=[784])
-estimator = tf.estimator.LinearClassifier(feature_columns=[data], n_classes=10, loss_reduction=tf.keras.losses.Reduction.SUM)
+
+data = tf.feature_column.numeric_column("data", shape=[784])
+estimator = tf.estimator.LinearClassifier(
+    feature_columns=[data], n_classes=10, loss_reduction=tf.keras.losses.Reduction.SUM
+)
diff --git a/bob/learn/tensorflow/test/data/mnist_input_fn.py b/bob/learn/tensorflow/test/data/mnist_input_fn.py
index 0274f5075d5dede7e5d7a98538e05613089733bc..df1d81e7ff3c8195206ddc26d9ca05d0329d6ab3 100644
--- a/bob/learn/tensorflow/test/data/mnist_input_fn.py
+++ b/bob/learn/tensorflow/test/data/mnist_input_fn.py
@@ -6,23 +6,21 @@ database = Database()
 
 def input_fn(mode):
     if mode == tf.estimator.ModeKeys.TRAIN:
-        groups = 'train'
+        groups = "train"
         num_epochs = None
         shuffle = True
     else:
-        groups = 'test'
+        groups = "test"
         num_epochs = 1
         shuffle = True
     data, labels = database.data(groups=groups)
     return tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x={
-            "data": data.astype('float32'),
-            'key': labels.astype('float32')
-        },
-        y=labels.astype('int32'),
+        x={"data": data.astype("float32"), "key": labels.astype("float32")},
+        y=labels.astype("int32"),
         batch_size=128,
         num_epochs=num_epochs,
-        shuffle=shuffle)
+        shuffle=shuffle,
+    )
 
 
 train_input_fn = input_fn(tf.estimator.ModeKeys.TRAIN)
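
tf.compat.v1.estimator.inputs.numpy_input_fn is the legacy feed-based helper; a
hedged sketch of an equivalent fixture written against tf.data instead (the
helper name is invented for illustration):

    import tensorflow as tf

    def numpy_dataset_input_fn(data, labels, batch_size=128, num_epochs=None, shuffle=True):
        # rough tf.data counterpart of the numpy_input_fn call above
        def input_fn():
            x = {"data": data.astype("float32"), "key": labels.astype("float32")}
            ds = tf.data.Dataset.from_tensor_slices((x, labels.astype("int32")))
            if shuffle:
                ds = ds.shuffle(len(labels))
            # repeat(None) repeats indefinitely, mirroring num_epochs=None above
            return ds.repeat(num_epochs).batch(batch_size)
        return input_fn
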
diff --git a/bob/learn/tensorflow/test/test_db_to_tfrecords.py b/bob/learn/tensorflow/test/test_db_to_tfrecords.py
index 790fceea2980a7ed5d260f3d6763d6bdca051fb4..be07bde07a73b776f3bd2bff3d3fa8beb42c79cb 100644
--- a/bob/learn/tensorflow/test/test_db_to_tfrecords.py
+++ b/bob/learn/tensorflow/test/test_db_to_tfrecords.py
@@ -6,8 +6,7 @@ import tensorflow as tf
 import numpy as np
 from click.testing import CliRunner
 from bob.io.base import create_directories_safe
-from bob.learn.tensorflow.script.db_to_tfrecords import (
-    datasets_to_tfrecords)
+from bob.learn.tensorflow.script.db_to_tfrecords import datasets_to_tfrecords
 from bob.learn.tensorflow.utils import load_mnist, create_mnist_tfrecord
 from bob.extension.scripts.click_helper import assert_click_runner_result
 from bob.extension.config import load
@@ -16,7 +15,8 @@ from bob.learn.tensorflow.dataset.tfrecords import dataset_from_tfrecord
 regenerate_reference = False
 
 dummy_config = pkg_resources.resource_filename(
-    'bob.learn.tensorflow', 'test/data/db_to_tfrecords_config.py')
+    "bob.learn.tensorflow", "test/data/db_to_tfrecords_config.py"
+)
 
 
 def compare_datasets(ds1, ds2, sess=None):
@@ -44,19 +44,17 @@ def compare_datasets(ds1, ds2, sess=None):
     return True
 
 
-
-
 def test_datasets_to_tfrecords():
     runner = CliRunner()
     with runner.isolated_filesystem():
-        output_path = './test'
-        args = (dummy_config, '--output', output_path)
-        result = runner.invoke(
-            datasets_to_tfrecords, args=args, standalone_mode=False)
+        output_path = "./test"
+        args = (dummy_config, "--output", output_path)
+        result = runner.invoke(datasets_to_tfrecords, args=args, standalone_mode=False)
         assert_click_runner_result(result)
-        # read back the tfrecod
+        # read back the tfrecord
         with tf.compat.v1.Session() as sess:
             dataset2 = dataset_from_tfrecord(output_path)
             dataset1 = load(
-                [dummy_config], attribute_name='dataset', entry_point_group='bob')
+                [dummy_config], attribute_name="dataset", entry_point_group="bob"
+            )
             assert compare_datasets(dataset1, dataset2, sess)
diff --git a/bob/learn/tensorflow/test/test_loss.py b/bob/learn/tensorflow/test/test_loss.py
index f54d49e700129856d2517516d5758f6f45b27a70..cccdf600229bcac48a7526827174e465450e15c3 100644
--- a/bob/learn/tensorflow/test/test_loss.py
+++ b/bob/learn/tensorflow/test/test_loss.py
@@ -4,73 +4,175 @@
 
 import tensorflow as tf
 import numpy
-from bob.learn.tensorflow.loss import balanced_softmax_cross_entropy_loss_weights,\
-                                      balanced_sigmoid_cross_entropy_loss_weights
+from bob.learn.tensorflow.loss import (
+    balanced_softmax_cross_entropy_loss_weights,
+    balanced_sigmoid_cross_entropy_loss_weights,
+)
 
 
 def test_balanced_softmax_cross_entropy_loss_weights():
-    labels = numpy.array([[1, 0, 0],
-                          [1, 0, 0],
-                          [0, 0, 1],
-                          [0, 1, 0],
-                          [0, 0, 1],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [0, 0, 1],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [0, 1, 0],
-                          [1, 0, 0],
-                          [0, 1, 0],
-                          [1, 0, 0],
-                          [0, 0, 1],
-                          [0, 0, 1],
-                          [1, 0, 0],
-                          [0, 0, 1],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [0, 1, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [1, 0, 0],
-                          [0, 1, 0],
-                          [1, 0, 0],
-                          [0, 0, 1],
-                          [1, 0, 0]], dtype="int32")
+    labels = numpy.array(
+        [
+            [1, 0, 0],
+            [1, 0, 0],
+            [0, 0, 1],
+            [0, 1, 0],
+            [0, 0, 1],
+            [1, 0, 0],
+            [1, 0, 0],
+            [0, 0, 1],
+            [1, 0, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [0, 1, 0],
+            [1, 0, 0],
+            [0, 1, 0],
+            [1, 0, 0],
+            [0, 0, 1],
+            [0, 0, 1],
+            [1, 0, 0],
+            [0, 0, 1],
+            [1, 0, 0],
+            [1, 0, 0],
+            [0, 1, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [1, 0, 0],
+            [0, 1, 0],
+            [1, 0, 0],
+            [0, 0, 1],
+            [1, 0, 0],
+        ],
+        dtype="int32",
+    )
 
     with tf.compat.v1.Session() as session:
         weights = session.run(balanced_softmax_cross_entropy_loss_weights(labels))
- 
-    expected_weights = numpy.array([0.53333336, 0.53333336, 1.5238096 , 2.1333334,\
-                                    1.5238096 , 0.53333336, 0.53333336, 1.5238096,\
-                                    0.53333336, 0.53333336, 0.53333336, 0.53333336,\
-                                    0.53333336, 0.53333336, 2.1333334 , 0.53333336,\
-                                    2.1333334 , 0.53333336, 1.5238096 , 1.5238096 ,\
-                                    0.53333336, 1.5238096 , 0.53333336, 0.53333336,\
-                                    2.1333334 , 0.53333336, 0.53333336, 0.53333336,\
-                                    2.1333334 , 0.53333336, 1.5238096 , 0.53333336],\
-                                    dtype="float32")
+
+    expected_weights = numpy.array(
+        [
+            0.53333336,
+            0.53333336,
+            1.5238096,
+            2.1333334,
+            1.5238096,
+            0.53333336,
+            0.53333336,
+            1.5238096,
+            0.53333336,
+            0.53333336,
+            0.53333336,
+            0.53333336,
+            0.53333336,
+            0.53333336,
+            2.1333334,
+            0.53333336,
+            2.1333334,
+            0.53333336,
+            1.5238096,
+            1.5238096,
+            0.53333336,
+            1.5238096,
+            0.53333336,
+            0.53333336,
+            2.1333334,
+            0.53333336,
+            0.53333336,
+            0.53333336,
+            2.1333334,
+            0.53333336,
+            1.5238096,
+            0.53333336,
+        ],
+        dtype="float32",
+    )
 
     assert numpy.allclose(weights, expected_weights)
 
 
 def test_balanced_sigmoid_cross_entropy_loss_weights():
-    labels = numpy.array([1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0,
-                          1, 1, 0, 1, 1, 1, 0, 1, 0, 1], dtype="int32")
-    
+    labels = numpy.array(
+        [
+            1,
+            1,
+            0,
+            0,
+            0,
+            1,
+            1,
+            0,
+            1,
+            1,
+            1,
+            1,
+            1,
+            1,
+            0,
+            1,
+            0,
+            1,
+            0,
+            0,
+            1,
+            0,
+            1,
+            1,
+            0,
+            1,
+            1,
+            1,
+            0,
+            1,
+            0,
+            1,
+        ],
+        dtype="int32",
+    )
+
     with tf.compat.v1.Session() as session:
-        weights = session.run(balanced_sigmoid_cross_entropy_loss_weights(labels, dtype='float32'))
-        
-    expected_weights = numpy.array([0.8, 0.8, 1.3333334, 1.3333334, 1.3333334, 0.8,
-                                    0.8, 1.3333334, 0.8, 0.8, 0.8, 0.8,
-                                    0.8, 0.8, 1.3333334, 0.8, 1.3333334, 0.8,
-                                    1.3333334, 1.3333334, 0.8, 1.3333334, 0.8, 0.8,
-                                    1.3333334, 0.8, 0.8, 0.8, 1.3333334, 0.8,
-                                    1.3333334, 0.8], dtype="float32")
+        weights = session.run(
+            balanced_sigmoid_cross_entropy_loss_weights(labels, dtype="float32")
+        )
 
-    assert numpy.allclose(weights, expected_weights)
+    expected_weights = numpy.array(
+        [
+            0.8,
+            0.8,
+            1.3333334,
+            1.3333334,
+            1.3333334,
+            0.8,
+            0.8,
+            1.3333334,
+            0.8,
+            0.8,
+            0.8,
+            0.8,
+            0.8,
+            0.8,
+            1.3333334,
+            0.8,
+            1.3333334,
+            0.8,
+            1.3333334,
+            1.3333334,
+            0.8,
+            1.3333334,
+            0.8,
+            0.8,
+            1.3333334,
+            0.8,
+            0.8,
+            0.8,
+            1.3333334,
+            0.8,
+            1.3333334,
+            0.8,
+        ],
+        dtype="float32",
+    )
 
+    assert numpy.allclose(weights, expected_weights)
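
The expected values in both tests follow the usual balanced-class reweighting
rule, weight(c) = n_samples / (n_classes * n_c), so every class contributes
equally to the loss. A quick check of the arithmetic, with counts read off the
label arrays above:

    import numpy as np

    n = 32
    softmax_counts = np.array([20, 5, 7])    # per-class totals in the softmax test
    print(n / (3 * softmax_counts))          # [0.5333... 2.1333... 1.5238...]

    sigmoid_counts = np.array([12, 20])      # zeros and ones in the sigmoid test
    print(n / (2 * sigmoid_counts))          # [1.3333... 0.8]
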
diff --git a/bob/learn/tensorflow/test/test_utils.py b/bob/learn/tensorflow/test/test_utils.py
index 6edfe4519f74b02b160b343f52943e0d2e608670..29d9f276ae27a6aa7135487f46c562cc10cbf284 100644
--- a/bob/learn/tensorflow/test/test_utils.py
+++ b/bob/learn/tensorflow/test/test_utils.py
@@ -3,10 +3,13 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 
 import numpy
-from bob.learn.tensorflow.utils import compute_embedding_accuracy, \
-     compute_embedding_accuracy_tensors
+from bob.learn.tensorflow.utils import (
+    compute_embedding_accuracy,
+    compute_embedding_accuracy_tensors,
+)
 
 import tensorflow as tf
+
 """
 Some unit tests for the datashuffler
 """
@@ -17,18 +20,16 @@ def test_embedding_accuracy():
     numpy.random.seed(10)
     samples_per_class = 5
 
-    class_a = numpy.random.normal(
-        loc=0, scale=0.1, size=(samples_per_class, 2))
+    class_a = numpy.random.normal(loc=0, scale=0.1, size=(samples_per_class, 2))
     labels_a = numpy.zeros(samples_per_class)
 
-    class_b = numpy.random.normal(
-        loc=10, scale=0.1, size=(samples_per_class, 2))
+    class_b = numpy.random.normal(loc=10, scale=0.1, size=(samples_per_class, 2))
     labels_b = numpy.ones(samples_per_class)
 
     data = numpy.vstack((class_a, class_b))
     labels = numpy.concatenate((labels_a, labels_b))
 
-    assert compute_embedding_accuracy(data, labels) == 1.
+    assert compute_embedding_accuracy(data, labels) == 1.0
 
     # Adding noise
     noise = numpy.random.normal(loc=0, scale=0.1, size=(samples_per_class, 2))
@@ -37,7 +38,7 @@ def test_embedding_accuracy():
     data = numpy.vstack((data, noise))
     labels = numpy.concatenate((labels, noise_labels))
 
-    assert compute_embedding_accuracy(data, labels) == 10 / 15.
+    assert compute_embedding_accuracy(data, labels) == 10 / 15.0
 
 
 def test_embedding_accuracy_tensors():
@@ -45,12 +46,10 @@ def test_embedding_accuracy_tensors():
     numpy.random.seed(10)
     samples_per_class = 5
 
-    class_a = numpy.random.normal(
-        loc=0, scale=0.1, size=(samples_per_class, 2))
+    class_a = numpy.random.normal(loc=0, scale=0.1, size=(samples_per_class, 2))
     labels_a = numpy.zeros(samples_per_class)
 
-    class_b = numpy.random.normal(
-        loc=10, scale=0.1, size=(samples_per_class, 2))
+    class_b = numpy.random.normal(loc=10, scale=0.1, size=(samples_per_class, 2))
     labels_b = numpy.ones(samples_per_class)
 
     data = numpy.vstack((class_a, class_b))
@@ -60,4 +59,4 @@ def test_embedding_accuracy_tensors():
     labels = tf.convert_to_tensor(value=labels.astype("int64"))
 
     accuracy = compute_embedding_accuracy_tensors(data, labels)
-    assert accuracy == 1.
+    assert accuracy == 1.0
diff --git a/bob/learn/tensorflow/utils/graph.py b/bob/learn/tensorflow/utils/graph.py
index 3f551dd7d230d12856773ec451812eda1ded52a2..0a5fd5904180659bd92bf6e71ca5a7ddefd213f9 100644
--- a/bob/learn/tensorflow/utils/graph.py
+++ b/bob/learn/tensorflow/utils/graph.py
@@ -2,12 +2,7 @@ import tensorflow as tf
 
 
 def call_on_frozen_graph(
-    graph_def_path,
-    input,
-    return_elements,
-    input_name,
-    name=None,
-    **kwargs
+    graph_def_path, input, return_elements, input_name, name=None, **kwargs
 ):
     """Loads a frozen graph def file (.pb) and replaces its input with the given input
-    and return the requested output tensors.
+    and returns the requested output tensors.
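
A hedged usage sketch for call_on_frozen_graph (the file name and tensor names
are hypothetical, and the function is assumed to return the requested tensors
wired to the replacement input):

    import tensorflow as tf
    from bob.learn.tensorflow.utils.graph import call_on_frozen_graph

    images = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
    embeddings, = call_on_frozen_graph(
        "frozen_model.pb",                   # hypothetical frozen GraphDef file
        images,                              # tensor substituted for the graph input
        return_elements=["embeddings:0"],    # hypothetical output tensor name
        input_name="input:0",                # hypothetical input tensor name
    )
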
diff --git a/bob/learn/tensorflow/utils/sequences.py b/bob/learn/tensorflow/utils/sequences.py
index c2de433a50c65e13d3cb7353bd1f036a8055ddc6..99cdf5aa47982570d0862a08743e8998970ab0e6 100644
--- a/bob/learn/tensorflow/utils/sequences.py
+++ b/bob/learn/tensorflow/utils/sequences.py
@@ -1,6 +1,7 @@
 from __future__ import division
 import numpy
 from keras.utils import Sequence
+
 # documentation imports
 from bob.dap.base.database import PadDatabase, PadFile
 from bob.bio.base.preprocessor import Preprocessor
@@ -22,8 +23,15 @@ class PadSequence(Sequence):
         The preprocessor to be used to load and process the data.
     """
 
-    def __init__(self, files, labels, batch_size, preprocessor,
-                 original_directory, original_extension):
+    def __init__(
+        self,
+        files,
+        labels,
+        batch_size,
+        preprocessor,
+        original_directory,
+        original_extension,
+    ):
         super(PadSequence, self).__init__()
         self.files = files
         self.labels = labels
@@ -43,8 +51,8 @@ class PadSequence(Sequence):
         return int(numpy.ceil(len(self.files) / self.batch_size))
 
     def __getitem__(self, idx):
-        files = self.files[idx * self.batch_size:(idx + 1) * self.batch_size]
-        labels = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
+        files = self.files[idx * self.batch_size : (idx + 1) * self.batch_size]
+        labels = self.labels[idx * self.batch_size : (idx + 1) * self.batch_size]
         return self.load_batch(files, labels)
 
     def load_batch(self, files, labels):
@@ -65,7 +73,8 @@ class PadSequence(Sequence):
         data, targets = [], []
         for file_object, target in zip(files, labels):
             loaded_data = self.preprocessor.read_original_data(
-                file_object, self.original_directory, self.original_extension)
+                file_object, self.original_directory, self.original_extension
+            )
             preprocessed_data = self.preprocessor(loaded_data)
             data.append(preprocessed_data)
             targets.append(target)
@@ -104,12 +113,14 @@ def get_pad_files_labels(database, groups):
     return files, labels
 
 
-def get_pad_sequences(database,
-                      preprocessor,
-                      batch_size,
-                      groups=('world', 'dev', 'eval'),
-                      shuffle=False,
-                      limit=None):
+def get_pad_sequences(
+    database,
+    preprocessor,
+    batch_size,
+    groups=("world", "dev", "eval"),
+    shuffle=False,
+    limit=None,
+):
     """Returns a list of :any:`Sequence` objects for the database.
 
     Parameters
@@ -138,7 +149,13 @@ def get_pad_sequences(database,
         if limit is not None:
             files, labels = files[:limit], labels[:limit]
         seqs.append(
-            PadSequence(files, labels, batch_size, preprocessor,
-                        database.original_directory,
-                        database.original_extension))
+            PadSequence(
+                files,
+                labels,
+                batch_size,
+                preprocessor,
+                database.original_directory,
+                database.original_extension,
+            )
+        )
     return seqs
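
The slicing in __getitem__ and the ceil in __len__ above are the standard keras
Sequence contract; a tiny runnable stand-in that mirrors the arithmetic:

    import numpy as np
    from keras.utils import Sequence

    class ToySequence(Sequence):  # illustrative stand-in for PadSequence
        def __init__(self, n=10, batch_size=3):
            self.data, self.batch_size = np.arange(n), batch_size

        def __len__(self):
            return int(np.ceil(len(self.data) / self.batch_size))

        def __getitem__(self, idx):
            return self.data[idx * self.batch_size : (idx + 1) * self.batch_size]

    seq = ToySequence()
    print(len(seq), seq[3])  # 4 batches; the final one is short: [9]

Any sequence returned by get_pad_sequences can then be handed to keras training
loops (e.g. model.fit_generator) in the same way.
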
diff --git a/bob/learn/tensorflow/utils/session.py b/bob/learn/tensorflow/utils/session.py
index 67f4856d3047c713d98b4af8ee38c853e914fc7e..34022df16d0f2f834c0fd268d58eb255bba94a71 100644
--- a/bob/learn/tensorflow/utils/session.py
+++ b/bob/learn/tensorflow/utils/session.py
@@ -18,7 +18,8 @@ class Session(object):
         config = tf.compat.v1.ConfigProto(
             log_device_placement=False,
             allow_soft_placement=True,
-            gpu_options=tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.5))
+            gpu_options=tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.5),
+        )
         config.gpu_options.allow_growth = True
-        self.session = tf.compat.v1.Session()
+        self.session = tf.compat.v1.Session(config=config)
         if debug:
diff --git a/bob/learn/tensorflow/utils/singleton.py b/bob/learn/tensorflow/utils/singleton.py
index d91f3c3ab5cfc339936066e3642c4f728edd20da..4e7769d02134cee901c4d49ab29764bdf291fa0b 100644
--- a/bob/learn/tensorflow/utils/singleton.py
+++ b/bob/learn/tensorflow/utils/singleton.py
@@ -45,8 +45,7 @@ class Singleton(object):
         return self._instance
 
     def __call__(self):
-        raise TypeError(
-            'Singletons must be accessed through the `instance()` method.')
+        raise TypeError("Singletons must be accessed through the `instance()` method.")
 
     def __instancecheck__(self, inst):
         return isinstance(inst, self._decorated)
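
For context, the decorator is meant to be used as below (the class name is
hypothetical); direct instantiation is rejected in favour of instance():

    from bob.learn.tensorflow.utils.singleton import Singleton

    @Singleton
    class Configuration:
        def __init__(self):
            self.value = 42

    a = Configuration.instance()
    b = Configuration.instance()
    assert a is b                        # one lazily created instance
    assert isinstance(a, Configuration)  # via __instancecheck__ above
    # Configuration() raises TypeError, per __call__ above
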
diff --git a/bob/learn/tensorflow/utils/util.py b/bob/learn/tensorflow/utils/util.py
index 4710d026c8a87cd1014cb180dcef813473f0e116..a7cd02d39cfd87adc1625d418e8488072e56660a 100644
--- a/bob/learn/tensorflow/utils/util.py
+++ b/bob/learn/tensorflow/utils/util.py
@@ -51,7 +51,8 @@ def cdist(A, B, metric="sqeuclidean"):
     M1, M2 = tf.shape(input=A)[0], tf.shape(input=B)[0]
     # code from https://stackoverflow.com/a/43839605/1286165
     p1 = tf.matmul(
-        tf.expand_dims(tf.reduce_sum(input_tensor=tf.square(A), axis=1), 1), tf.ones(shape=(1, M2))
+        tf.expand_dims(tf.reduce_sum(input_tensor=tf.square(A), axis=1), 1),
+        tf.ones(shape=(1, M2)),
     )
     p2 = tf.transpose(
         a=tf.matmul(
@@ -187,8 +188,7 @@ def compute_accuracy(
 
 
 def debug_embbeding(image, architecture, embbeding_dim=2, feature_layer="fc3"):
-    """
-    """
+    """"""
     import tensorflow as tf
     from bob.learn.tensorflow.utils.session import Session
 
@@ -216,12 +216,16 @@ def pdist(A):
     """
     with tf.compat.v1.name_scope("Pairwisedistance"):
         ones_1 = tf.reshape(tf.cast(tf.ones_like(A), tf.float32)[:, 0], [1, -1])
-        p1 = tf.matmul(tf.expand_dims(tf.reduce_sum(input_tensor=tf.square(A), axis=1), 1), ones_1)
+        p1 = tf.matmul(
+            tf.expand_dims(tf.reduce_sum(input_tensor=tf.square(A), axis=1), 1), ones_1
+        )
 
         ones_2 = tf.reshape(tf.cast(tf.ones_like(A), tf.float32)[:, 0], [-1, 1])
         p2 = tf.transpose(
             a=tf.matmul(
-                tf.reshape(tf.reduce_sum(input_tensor=tf.square(A), axis=1), shape=[-1, 1]),
+                tf.reshape(
+                    tf.reduce_sum(input_tensor=tf.square(A), axis=1), shape=[-1, 1]
+                ),
                 ones_2,
                 transpose_b=True,
             )
@@ -467,6 +471,7 @@ def random_choice_no_replacement(one_dim_input, num_indices_to_drop=3, sort=Fals
     result = tf.gather(one_dim_input, sorted_indices_to_keep)
     return result
 
+
 def is_argument_available(argument, method):
     """
     Check if an argument (or keyword argument) is available in a method
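
The p1/p2 construction in cdist and pdist above is the vectorised form of the
identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b, presumably combined with the
-2*A*B^T cross term elsewhere in the function. A quick numpy check of the
expansion:

    import numpy as np

    A, B = np.random.rand(5, 3), np.random.rand(4, 3)
    expanded = (A ** 2).sum(1)[:, None] + (B ** 2).sum(1)[None, :] - 2 * A @ B.T
    direct = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    assert np.allclose(expanded, direct)  # same pairwise squared distances
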
diff --git a/doc/conf.py b/doc/conf.py
index efb56b77423f80b23671cedadca42b3c3e2ef65d..7959b3ebb4d3be85efe68a80b767051c67e89d89 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -10,24 +10,24 @@ import pkg_resources
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.3'
+needs_sphinx = "1.3"
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    'sphinx.ext.todo',
-    'sphinx.ext.coverage',
-    'sphinx.ext.ifconfig',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.autosummary',
-    'sphinx.ext.doctest',
-    'sphinx.ext.graphviz',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.napoleon',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.mathjax',
-    'matplotlib.sphinxext.plot_directive'
-    ]
+    "sphinx.ext.todo",
+    "sphinx.ext.coverage",
+    "sphinx.ext.ifconfig",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.doctest",
+    "sphinx.ext.graphviz",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.mathjax",
+    "matplotlib.sphinxext.plot_directive",
+]
 
 # Be picky about warnings
 nitpicky = True
@@ -36,13 +36,13 @@ nitpicky = True
 nitpick_ignore = []
 
 # Allows the user to override warnings from a separate file
-if os.path.exists('nitpick-exceptions.txt'):
-    for line in open('nitpick-exceptions.txt'):
+if os.path.exists("nitpick-exceptions.txt"):
+    for line in open("nitpick-exceptions.txt"):
         if line.strip() == "" or line.startswith("#"):
             continue
         dtype, target = line.split(None, 1)
         target = target.strip()
-        try: # python 2.x
+        try:  # python 2.x
             target = unicode(target)
         except NameError:
             pass
@@ -58,25 +58,27 @@ autosummary_generate = True
 numfig = True
 
 # If we are on OSX, the 'dvipng' path maybe different
-dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
-if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
+dvipng_osx = "/opt/local/libexec/texlive/binaries/dvipng"
+if os.path.exists(dvipng_osx):
+    pngmath_dvipng = dvipng_osx
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'bob.learn.tensorflow'
+project = u"bob.learn.tensorflow"
 import time
-copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
+
+copyright = u"%s, Idiap Research Institute" % time.strftime("%Y")
 
 # Grab the setup entry
 distribution = pkg_resources.require(project)[0]
@@ -92,42 +94,42 @@ release = distribution.version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['links.rst']
+exclude_patterns = ["links.rst"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # Some variables which are useful for generated material
-project_variable = project.replace('.', '_')
-short_description = u'bob.learn.tensorflow API'
-owner = [u'Idiap Research Institute']
+project_variable = project.replace(".", "_")
+short_description = u"bob.learn.tensorflow API"
+owner = [u"Idiap Research Institute"]
 
 
 # -- Options for HTML output ---------------------------------------------------
@@ -135,80 +137,81 @@ owner = [u'Idiap Research Institute']
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 import sphinx_rtd_theme
-html_theme = 'sphinx_rtd_theme'
+
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = project_variable
+# html_short_title = project_variable
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = 'img/logo.png'
+html_logo = "img/logo.png"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = 'img/favicon.ico'
+html_favicon = "img/favicon.ico"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+# html_static_path = ['_static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = project_variable + u'_doc'
+htmlhelp_basename = project_variable + u"_doc"
 
 
 # -- Post configuration --------------------------------------------------------
@@ -218,46 +221,49 @@ rst_epilog = """
 .. |project| replace:: Bob
 .. |version| replace:: %s
 .. |current-year| date:: %%Y
-""" % (version,)
+""" % (
+    version,
+)
 
 # Default processing flags for sphinx
-autoclass_content = 'class'
-autodoc_member_order = 'bysource'
+autoclass_content = "class"
+autodoc_member_order = "bysource"
 autodoc_default_flags = [
-  'members',
-  'undoc-members',
-  'show-inheritance',
-  ]
+    "members",
+    "undoc-members",
+    "show-inheritance",
+]
 
 # For inter-documentation mapping:
 from bob.extension.utils import link_documentation, load_requirements
+
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
-  intersphinx_mapping = link_documentation(
-      additional_packages=['python','numpy'] + \
-          load_requirements(sphinx_requirements)
-          )
+    intersphinx_mapping = link_documentation(
+        additional_packages=["python", "numpy"] + load_requirements(sphinx_requirements)
+    )
 else:
-  intersphinx_mapping = link_documentation()
+    intersphinx_mapping = link_documentation()
 
 
 # We want to remove all private (i.e. _. or __.__) members
 # that are not in the list of accepted functions
-accepted_private_functions = ['__array__']
+accepted_private_functions = ["__array__"]
+
 
 def member_function_test(app, what, name, obj, skip, options):
-  # test if we have a private function
-  if len(name) > 1 and name[0] == '_':
-    # test if this private function should be allowed
-    if name not in accepted_private_functions:
-      # omit privat functions that are not in the list of accepted private functions
-      return skip
-    else:
-      # test if the method is documented
-      if not hasattr(obj, '__doc__') or not obj.__doc__:
-        return skip
-  return False
+    # test if we have a private function
+    if len(name) > 1 and name[0] == "_":
+        # test if this private function should be allowed
+        if name not in accepted_private_functions:
+            # omit private functions that are not in the list of accepted private functions
+            return skip
+        else:
+            # test if the method is documented
+            if not hasattr(obj, "__doc__") or not obj.__doc__:
+                return skip
+    return False
 
-def setup(app):
-  app.connect('autodoc-skip-member', member_function_test)
 
+def setup(app):
+    app.connect("autodoc-skip-member", member_function_test)
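
For reference, each non-comment line of the optional nitpick-exceptions.txt
parsed above is a "<type> <target>" pair (the example targets below are
illustrative):

    # lines starting with # and blank lines are skipped
    py:class tf.estimator.Estimator
    py:exc click.ClickException
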
diff --git a/setup.py b/setup.py
index 2b1997fbdcb0bf7d8bbe19fa0280a3f20d01f0e1..c53aa82aa7f5f9cfe2aaacb565bedbceb0384597 100644
--- a/setup.py
+++ b/setup.py
@@ -6,33 +6,31 @@
 from setuptools import setup
 
 from setuptools import setup, dist
-dist.Distribution(dict(setup_requires=['bob.extension']))
+
+dist.Distribution(dict(setup_requires=["bob.extension"]))
 
 from bob.extension.utils import load_requirements, find_packages
+
 install_requires = load_requirements()
 
 # The only thing we do in this file is to call the setup() function with all
 # parameters that define our package.
 setup(
-
     # This is the basic information about your project. Modify all this
     # information before releasing code publicly.
-    name='bob.learn.tensorflow',
+    name="bob.learn.tensorflow",
     version=open("version.txt").read().rstrip(),
-    description='Bob bindings for tensorflow',
-    url='',
-    license='BSD',
-    author='Tiago de Freitas Pereira',
-    author_email='tiago.pereira@idiap.ch',
-    keywords='tensorflow',
-
+    description="Bob bindings for tensorflow",
+    url="",
+    license="BSD",
+    author="Tiago de Freitas Pereira",
+    author_email="tiago.pereira@idiap.ch",
+    keywords="tensorflow",
     # If you have a better, long description of your package, place it on the
     # 'doc' directory and then hook it here
-    long_description=open('README.rst').read(),
-
+    long_description=open("README.rst").read(),
     # This line is required for any distutils based packaging.
     include_package_data=True,
-
     # This line defines which packages should be installed when you "install"
     # this package. All packages that are mentioned here, but are not installed
     # on the current system will be installed locally and only visible to the
@@ -42,46 +40,43 @@ setup(
     packages=find_packages(),
     zip_safe=False,
     entry_points={
-
         # main entry for bob tf cli
-        'bob.cli': [
-            'tf = bob.learn.tensorflow.script.tf:tf',
-            'keras = bob.learn.tensorflow.script.keras:keras',
+        "bob.cli": [
+            "tf = bob.learn.tensorflow.script.tf:tf",
+            "keras = bob.learn.tensorflow.script.keras:keras",
         ],
-
         # bob tf scripts
-        'bob.learn.tensorflow.cli': [
-            'cache-dataset = bob.learn.tensorflow.script.cache_dataset:cache_dataset',
-            'compute-statistics = bob.learn.tensorflow.script.compute_statistics:compute_statistics',
-            'dataset-to-hdf5 = bob.learn.tensorflow.script.db_to_tfrecords:dataset_to_hdf5',
-            'datasets-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:datasets_to_tfrecords',
-            'db-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:db_to_tfrecords',
-            'describe-tfrecord = bob.learn.tensorflow.script.db_to_tfrecords:describe_tfrecord',
-            'distance-matrix = bob.learn.tensorflow.script.cgm:distance_matrix',
-            'eval = bob.learn.tensorflow.script.eval:eval',
-            'predict = bob.learn.tensorflow.script.predict_bio:predict',
-            'predict-bio = bob.learn.tensorflow.script.predict_bio:predict_bio',
-            'style-transfer = bob.learn.tensorflow.script.style_transfer:style_transfer',
-            'train = bob.learn.tensorflow.script.train:train',
-            'train-and-evaluate = bob.learn.tensorflow.script.train_and_evaluate:train_and_evaluate',
-            'trim = bob.learn.tensorflow.script.trim:trim',
+        "bob.learn.tensorflow.cli": [
+            "cache-dataset = bob.learn.tensorflow.script.cache_dataset:cache_dataset",
+            "compute-statistics = bob.learn.tensorflow.script.compute_statistics:compute_statistics",
+            "dataset-to-hdf5 = bob.learn.tensorflow.script.db_to_tfrecords:dataset_to_hdf5",
+            "datasets-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:datasets_to_tfrecords",
+            "db-to-tfrecords = bob.learn.tensorflow.script.db_to_tfrecords:db_to_tfrecords",
+            "describe-tfrecord = bob.learn.tensorflow.script.db_to_tfrecords:describe_tfrecord",
+            "distance-matrix = bob.learn.tensorflow.script.cgm:distance_matrix",
+            "eval = bob.learn.tensorflow.script.eval:eval",
+            "predict = bob.learn.tensorflow.script.predict_bio:predict",
+            "predict-bio = bob.learn.tensorflow.script.predict_bio:predict_bio",
+            "style-transfer = bob.learn.tensorflow.script.style_transfer:style_transfer",
+            "train = bob.learn.tensorflow.script.train:train",
+            "train-and-evaluate = bob.learn.tensorflow.script.train_and_evaluate:train_and_evaluate",
+            "trim = bob.learn.tensorflow.script.trim:trim",
         ],
         # bob keras scripts
-        'bob.learn.tensorflow.keras_cli': [
-            'fit = bob.learn.tensorflow.script.fit:fit',
+        "bob.learn.tensorflow.keras_cli": [
+            "fit = bob.learn.tensorflow.script.fit:fit",
         ],
     },
-
     # Classifiers are important if you plan to distribute this package through
     # PyPI. You can find the complete list of classifiers that are valid and
     # useful here (http://pypi.python.org/pypi?%3Aaction=list_classifiers).
     classifiers=[
-        'Framework :: Bob',
-        'Development Status :: 3 - Alpha',
-        'Intended Audience :: Developers',
-        'License :: OSI Approved :: BSD License',
-        'Natural Language :: English',
-        'Programming Language :: Python',
-        'Topic :: Scientific/Engineering :: Artificial Intelligence',
+        "Framework :: Bob",
+        "Development Status :: 3 - Alpha",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: BSD License",
+        "Natural Language :: English",
+        "Programming Language :: Python",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
     ],
 )
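
Once the package is installed, the bob.cli entry points above register the tf
and keras command groups, so the scripts touched in this diff are reachable as,
for example (config file names are illustrative):

    $ bob tf train -vv my_train_config.py
    $ bob tf trim -vvrK 2 ~/my_models
    $ bob keras fit -vv my_keras_config.py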