diff --git a/bob/learn/tensorflow/losses/__init__.py b/bob/learn/tensorflow/losses/__init__.py
index 2bfcbd152f92206f5d818bc535833673cefa564f..65cdcab2e9e6b5ba4090b269eb5fc68e5544797c 100644
--- a/bob/learn/tensorflow/losses/__init__.py
+++ b/bob/learn/tensorflow/losses/__init__.py
@@ -18,6 +18,6 @@ def __appropriate__(*args):
 
 __appropriate__(
     CenterLoss,
-    CenterLossLayer,
+    CenterLossLayer
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/learn/tensorflow/metrics/__init__.py b/bob/learn/tensorflow/metrics/__init__.py
index 55cec2bd10e2e0778b2ea858ab32b5e14ebe1b27..72ee7b5fce68569d2359176ed78f076030030d47 100644
--- a/bob/learn/tensorflow/metrics/__init__.py
+++ b/bob/learn/tensorflow/metrics/__init__.py
@@ -1,4 +1,4 @@
-from .embedding_accuracy import EmbeddingAccuracy
+from .embedding_accuracy import EmbeddingAccuracy, predict_using_tensors
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
diff --git a/bob/learn/tensorflow/models/__init__.py b/bob/learn/tensorflow/models/__init__.py
index d18ceb93bc0a1be07c5d9ffc52688f8695e83717..48804ec2338b14d47fe6e4e5c332b561d56daed9 100644
--- a/bob/learn/tensorflow/models/__init__.py
+++ b/bob/learn/tensorflow/models/__init__.py
@@ -1,5 +1,6 @@
 from .alexnet import AlexNet_simplified
 from .densenet import DenseNet
+from .mine import MineModel
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -20,5 +21,6 @@ def __appropriate__(*args):
 __appropriate__(
     AlexNet_simplified,
     DenseNet,
+    MineModel
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/learn/tensorflow/models/mine.py b/bob/learn/tensorflow/models/mine.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f766236f0cf33f6b1d73ffd14a2eafec636b1ad
--- /dev/null
+++ b/bob/learn/tensorflow/models/mine.py
@@ -0,0 +1,68 @@
+"""
+Implements the MINE loss from the paper:
+
+Mutual Information Neural Estimation (https://arxiv.org/pdf/1801.04062.pdf)
+
+"""
+
+import tensorflow as tf
+
+class MineModel(tf.keras.Model):
+    """
+
+    Parameters
+    ----------
+
+    is_mine_f : bool
+        If True, implement the MINE-f bound (equation 6 of the paper); otherwise implement the Donsker-Varadhan bound (equation 5).
+    """
+
+    def __init__(self, is_mine_f=False, name="MINE", units=10, **kwargs):
+        super().__init__(name=name, **kwargs)
+        self.units = units
+        self.is_mine_f = is_mine_f
+        
+        self.transformer_x = tf.keras.layers.Dense(self.units)
+        self.transformer_z = tf.keras.layers.Dense(self.units)
+        self.transformer_xz = tf.keras.layers.Dense(self.units)
+        self.transformer_output = tf.keras.layers.Dense(1)
+
+    def call(self, inputs):
+        def compute(x, z):
+            h1_x = self.transformer_x(x)
+            h1_z = self.transformer_z(z)
+            h1 = tf.keras.layers.ReLU()(h1_x + h1_z)
+            h2 = self.transformer_output(tf.keras.layers.ReLU()(self.transformer_xz(h1)))
+
+            return h2
+
+        def compute_lower_bound(x, z):
+            t_xz = compute(x,z)
+            z_shuffle = tf.random.shuffle(z)
+            t_x_z = compute(x, z_shuffle)
+
+            if self.is_mine_f:
+                lb = -(
+                    tf.reduce_mean(t_xz, axis=0)
+                    - tf.reduce_mean(tf.math.exp(t_x_z-1))
+                )
+            else:
+                lb = -(
+                    tf.reduce_mean(t_xz, axis=0)
+                    - tf.math.log(tf.reduce_mean(tf.math.exp(t_x_z)))
+                )
+
+            self.add_loss(lb)
+            return -lb
+
+        x = inputs[0]
+        z = inputs[1]
+
+        return compute_lower_bound(x, z)
+
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({"units": self.units, "is_mine_f": self.is_mine_f})
+        return config
+
diff --git a/bob/learn/tensorflow/tests/test_mine.py b/bob/learn/tensorflow/tests/test_mine.py
new file mode 100644
index 0000000000000000000000000000000000000000..89cf943b858bd87e7c61707a9616c7894254c1fd
--- /dev/null
+++ b/bob/learn/tensorflow/tests/test_mine.py
@@ -0,0 +1,33 @@
+import numpy as np
+import tensorflow as tf
+from bob.learn.tensorflow.models import MineModel
+
+def run_mine(is_mine_f):
+    np.random.seed(10)
+    N = 20000
+    d = 1
+    EPOCHS = 100
+
+    X = np.sign(np.random.normal(0.,1.,[N, d]))
+    Z = X + np.random.normal(0.,np.sqrt(0.2),[N, d])
+
+
+    from sklearn.feature_selection import mutual_info_regression
+    mi_numerical = mutual_info_regression(X.reshape(-1, 1), Z.ravel())[0]
+
+    model = MineModel(is_mine_f=is_mine_f)
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
+
+    callback = model.fit(
+        x=[X, Z], epochs=EPOCHS, verbose=1, batch_size=100
+    )
+    mine = -np.array(callback.history["loss"])[-1]
+
+    assert np.allclose(mine, mi_numerical, atol=0.01)
+
+
+def test_mine():
+    run_mine(False)
+
+def test_mine_f():
+    run_mine(True)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 317ae0d9cd6baaf8623f7b675bdbca41f095d0f3..a6cac7731ef9ce9572f4a03a4d324dc9140f601e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,16 +2,5 @@ setuptools
 bob.extension
 bob.io.base
 bob.io.image
-bob.learn.activation
-bob.learn.em
-bob.learn.linear
 bob.ip.base
-bob.math
-bob.measure
-bob.sp
-bob.db.mnist
-bob.db.atnt
-gridtk
-numpy
-scipy
-click >= 7
+bob.measure
\ No newline at end of file