From 666eb5dbde039715691758308e2a2978917ba755 Mon Sep 17 00:00:00 2001
From: Tiago Freitas Pereira <tiagofrepereira@gmail.com>
Date: Thu, 31 Mar 2022 15:54:55 +0200
Subject: [PATCH] [precommit] Fixes

Drop the stray `from ast import Return` import from factor_analysis.py,
add the enroll call and z_ref assertion to test_ISVTrainAndEnrol, remove
the z_ref array from test_ISVTrainAndEnrolWithNumpy, strip trailing
whitespace from the doc/guide.rst doctests, and regroup the numpy import
in doc/plot/plot_MAP.py with the third-party imports.

---
 bob/learn/em/factor_analysis.py       |  1 -
 bob/learn/em/test/test_jfa_trainer.py | 13 ++-----------
 doc/guide.rst                         | 10 +++++-----
 doc/plot/plot_MAP.py                  |  3 ++-
 4 files changed, 9 insertions(+), 18 deletions(-)
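
Note: the doc/guide.rst hunks below only strip trailing whitespace from the
MAP/ISV/JFA doctests. For reference, a minimal sketch of the MAP-adaptation
flow those doctests exercise (the `data` values here are illustrative; the
calls are only the ones already shown in the guide and assume bob.learn.em
is importable):

    import numpy
    import bob.learn.em

    data = numpy.array(
        [[1.2, 1.4, 1.0], [0.8, 1.0, 1.0], [0.5, -0.5, 0.2]], dtype="float64"
    )
    # Train a prior (UBM) GMM with 2 Gaussians on 3-dimensional data
    prior_gmm = bob.learn.em.GMMMachine(2).fit(data)
    # Adapt it with the Maximum a posteriori (MAP) estimator
    adapted_gmm = bob.learn.em.GMMMachine(2, ubm=prior_gmm, trainer="map").fit(data)
    print(adapted_gmm.means)
    # Accumulate sufficient statistics of the data against the prior GMM
    gmm_stats = prior_gmm.acc_statistics(data)
    # Responsibilities per Gaussian
    print(gmm_stats.n / gmm_stats.t)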

diff --git a/bob/learn/em/factor_analysis.py b/bob/learn/em/factor_analysis.py
index 275a8af..fc11ded 100644
--- a/bob/learn/em/factor_analysis.py
+++ b/bob/learn/em/factor_analysis.py
@@ -2,7 +2,6 @@
 # @author: Tiago de Freitas Pereira
 
 
-from ast import Return
 import logging
 
 import numpy as np
diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py
index 5d41c7c..f2b08d1 100644
--- a/bob/learn/em/test/test_jfa_trainer.py
+++ b/bob/learn/em/test/test_jfa_trainer.py
@@ -378,6 +378,8 @@ def test_ISVTrainAndEnrol():
     gse2.sum_px = Fe[:, 1].reshape(2, 3)
 
     gse = [gse1, gse2]
+    latent_z = it.enroll(gse, 5)
+    assert np.allclose(latent_z, z_ref, eps)
 
 
 def test_ISVTrainAndEnrolWithNumpy():
@@ -406,17 +408,6 @@ def test_ISVTrainAndEnrolWithNumpy():
         ],
         "float64",
     )
-    z_ref = np.array(
-        [
-            -0.079315777443826,
-            0.092702428248543,
-            -0.342488761656616,
-            -0.059922635809136,
-            0.133539981073604,
-            0.213118695516570,
-        ],
-        "float64",
-    )
 
     """
     Calls the train function
diff --git a/doc/guide.rst b/doc/guide.rst
index 51e308d..a94857f 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -203,7 +203,7 @@ Below is a snippet showing how to train a GMM using the MAP estimator.
    ...      [  0.5, -0.5,   0.2 ]])
    >>> prior_gmm.weights = numpy.array([ 0.8,   0.5])
    >>> # Creating the model for the adapted GMM, and setting the `prior_gmm` as the source GMM
-   >>> # note that we have set `trainer="map"`, so we use the Maximum a posteriori estimator 
+   >>> # note that we have set `trainer="map"`, so we use the Maximum a posteriori estimator
    >>> adapted_gmm = bob.learn.em.GMMMachine(2, ubm=prior_gmm, trainer="map")
    >>> # Training
    >>> adapted_gmm = adapted_gmm.fit(data)
@@ -271,9 +271,9 @@ prior GMM.
     ...      [1.2, 1.4, 1],
     ...      [0.8, 1., 1]], dtype='float64')
     >>> # Training a GMM with 2 Gaussians of dimension 3
-    >>> prior_gmm = bob.learn.em.GMMMachine(2).fit(data)    
+    >>> prior_gmm = bob.learn.em.GMMMachine(2).fit(data)
     >>> # Creating the container
-    >>> gmm_stats = prior_gmm.acc_statistics(data)    
+    >>> gmm_stats = prior_gmm.acc_statistics(data)
     >>> # Printing the responsibilities
     >>> print(gmm_stats.n/gmm_stats.t)
      [0.6  0.4]
@@ -352,7 +352,7 @@ The snippet below shows how to:
    >>> model = isv_machine.enroll_with_array(enroll_data)
    >>> print(model)
      [[ 0.54   0.246  0.505  1.617 -0.791  0.746]]
-   
+
    >>> # Probing
    >>> probe_data = np.array([[1.2, 0.1, 1.4], [0.5, 0.2, 0.3]])
    >>> score = isv_machine.score_with_array(model, probe_data)
@@ -414,7 +414,7 @@ such session variability model.
 
    >>> # Finally doing the JFA training with U and V subspaces with dimension of 2
    >>> jfa_machine = bob.learn.em.JFAMachine(ubm, r_U=2, r_V=2).fit(gmm_stats, y)
-   >>> print(jfa_machine.U)      
+   >>> print(jfa_machine.U)
      [[-0.069 -0.029]
      [ 0.079  0.039]
      [ 0.123  0.042]
diff --git a/doc/plot/plot_MAP.py b/doc/plot/plot_MAP.py
index f668c8c..24e12ca 100644
--- a/doc/plot/plot_MAP.py
+++ b/doc/plot/plot_MAP.py
@@ -1,8 +1,9 @@
 import matplotlib.pyplot as plt
+import numpy as np
+
 from sklearn.datasets import load_iris
 
 import bob.learn.em
-import numpy as np
 
 np.random.seed(10)
 
-- 
GitLab