diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b4d1d1ca583eb211cc4b0a5ab9b2243c715bb45e..6148eb3495a0c73554b2cfde6373cd31b71f219b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,20 +2,20 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/timothycrosley/isort
-    rev: 5.10.1
+    rev: 5.12.0
     hooks:
       - id: isort
         args: [--settings-path, "pyproject.toml"]
   - repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 22.12.0
     hooks:
       - id: black
   - repo: https://github.com/pycqa/flake8
-    rev: 3.9.2
+    rev: 6.0.0
     hooks:
       - id: flake8
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.4.0
     hooks:
       - id: check-ast
       - id: check-case-conflict
diff --git a/src/bob/learn/em/factor_analysis.py b/src/bob/learn/em/factor_analysis.py
index 37c3af6d9acda80cc7429982c4900e6b191184e7..d31ba5304f34511d768392f2eb6c4088d9fd9118 100644
--- a/src/bob/learn/em/factor_analysis.py
+++ b/src/bob/learn/em/factor_analysis.py
@@ -15,7 +15,7 @@ from sklearn.base import BaseEstimator
 from sklearn.utils import check_consistent_length
 from sklearn.utils.multiclass import unique_labels
 
-from .gmm import GMMMachine
+from .gmm import GMMMachine, GMMStats
 from .linear_scoring import linear_scoring
 from .utils import check_and_persist_dask_input
 
@@ -148,7 +148,6 @@ class FactorAnalysisBase(BaseEstimator):
     @property
     def feature_dimension(self):
         """Get the UBM Dimension"""
-
         # TODO: Add this on the GMMMachine class
         return self.ubm.means.shape[1]
 
@@ -159,16 +158,12 @@ class FactorAnalysisBase(BaseEstimator):
 
     @property
     def mean_supervector(self):
-        """
-        Returns the mean supervector
-        """
+        """Returns the mean supervector"""
         return self.ubm.means.flatten()
 
     @property
     def variance_supervector(self):
-        """
-        Returns the variance supervector
-        """
+        """Returns the variance supervector"""
         return self.ubm.variances.flatten()
 
     @property
@@ -199,10 +194,7 @@ class FactorAnalysisBase(BaseEstimator):
         self._V = np.array(value)
 
     def estimate_number_of_classes(self, y):
-        """
-        Estimates the number of classes given the labels
-        """
-
+        """Estimates the number of classes given the labels"""
         return len(unique_labels(y))
 
     def initialize_using_array(self, X):
@@ -279,6 +271,7 @@ class FactorAnalysisBase(BaseEstimator):
             D: (n_gaussians*feature_dimension) represents the client offset vector
 
         """
+
         if self.random_state is not None:
             np.random.seed(self.random_state)
 
@@ -319,6 +312,7 @@ class FactorAnalysisBase(BaseEstimator):
                 (n_classes, n_gaussians) representing the accumulated 0th order statistics
 
         """
+
         # 0th order stats
         n_acc = np.zeros((n_classes, self.ubm.n_gaussians), like=X[0].n)
 
@@ -380,12 +374,11 @@ class FactorAnalysisBase(BaseEstimator):
             i: int
                 Class id to return the statistics for
         """
+
         indices = np.where(np.array(y) == i)[0]
         return [X[i] for i in indices]
 
-    """
-    Estimating U and x
-    """
+    #  Estimating U and x  #
 
     def _compute_id_plus_u_prod_ih(self, x_i, UProd):
         """
@@ -430,6 +423,7 @@ class FactorAnalysisBase(BaseEstimator):
                 E[y_i] for class `i`
 
         """
+
         f_i = x_i.sum_px
         n_i = x_i.n
         n_ic = np.repeat(n_i, self.feature_dimension)
@@ -447,9 +441,8 @@ class FactorAnalysisBase(BaseEstimator):
                 self.mean_supervector + self._D * latent_z_i
             )
 
-        """
-        # JFA Part (eq 29)
-        """
+        #  JFA Part (eq 29)  #
+
         V_dot_v = V @ latent_y_i if latent_y_i is not None else 0
         fn_x_ih -= n_ic * V_dot_v if latent_y_i is not None else 0
 
@@ -545,6 +538,7 @@ class FactorAnalysisBase(BaseEstimator):
                 Accumulated statistics for U_A2(n_gaussians* feature_dimension, r_U)
 
         """
+
         # Inverting A1 over the zero axis
         # https://stackoverflow.com/questions/11972102/is-there-a-way-to-efficiently-invert-an-array-of-matrices-with-numpy
         inv_A1 = np.linalg.inv(acc_U_A1)
@@ -568,6 +562,7 @@ class FactorAnalysisBase(BaseEstimator):
 
         https://gitlab.idiap.ch/bob/bob.learn.em/-/blob/da92d0e5799d018f311f1bf5cdd5a80e19e142ca/bob/learn/em/cpp/FABaseTrainer.cpp#L325
         """
+
         # UProd = (self.ubm.n_gaussians, self.r_U, self.r_U)
 
         Uc = self._U.reshape(
@@ -662,9 +657,7 @@ class FactorAnalysisBase(BaseEstimator):
 
         return acc_U_A1, acc_U_A2
 
-    """
-    Estimating D and z
-    """
+    #  Estimating D and z  #
 
     def update_z(self, X, y, latent_x, latent_y, latent_z, n_acc, f_acc):
         """
@@ -771,8 +764,8 @@ class FactorAnalysisBase(BaseEstimator):
         fn_z_i = np.array(fn_z_i, like=X_i[0].n)
 
         # Looping over the sessions
-        for session_id in range(len(X_i)):
-            n_i = X_i[session_id].n
+        for session_id, x_i_s in enumerate(X_i):
+            n_i = x_i_s.n
             tmp_CD = np.repeat(n_i, self.feature_dimension)
             x_i_h = latent_x_i[:, session_id]
 
@@ -878,6 +871,7 @@ class FactorAnalysisBase(BaseEstimator):
         latent_x = (n_classes, r_U, n_sessions)
 
         """
+
         kw = dict(like=like) if isinstance(like, dask.array.core.Array) else {}
 
         # x (Eq. 36)
@@ -897,9 +891,7 @@ class FactorAnalysisBase(BaseEstimator):
 
         return latent_x, latent_y, latent_z
 
-    """
-     Estimating V and y
-    """
+    #  Estimating V and y  #
 
     def update_y(
         self,
@@ -948,6 +940,7 @@ class FactorAnalysisBase(BaseEstimator):
-            Accumulated 1st order statistics for each class (math:`F_{i}`)
+            Accumulated 1st order statistics for each class (:math:`F_{i}`)
 
         """
+
         # V.T / sigma
         VTinvSigma = self._V.T / self.variance_supervector
 
@@ -1019,7 +1012,7 @@ class FactorAnalysisBase(BaseEstimator):
 
         I = np.eye(self.r_V, self.r_V)  # noqa: E741
 
-        # TODO: make the invertion matrix function as a parameter
+        # TODO: make the inversion matrix function as a parameter
         return np.linalg.inv(I + (VProd * n_acc_i[:, None, None]).sum(axis=0))
 
     def _compute_vprod(self):
@@ -1176,17 +1169,15 @@ class FactorAnalysisBase(BaseEstimator):
-        )  # Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i})
+        )  # Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i}))
 
-        # Looping over the sessions of a ;ane;
+        # Looping over the sessions of a class
-        for session_id in range(len(X_i)):
-            n_i = X_i[session_id].n
+        for session_id, x_i_s in enumerate(X_i):
+            n_i = x_i_s.n
             U_dot_x = U @ latent_x_i[:, session_id]
             tmp_CD = np.repeat(n_i, self.feature_dimension)
             fn_y_i -= tmp_CD * U_dot_x
 
         return fn_y_i
 
-    """
-     Scoring
-    """
+    #  Scoring  #
 
     def estimate_x(self, X):
 
@@ -1212,6 +1203,7 @@ class FactorAnalysisBase(BaseEstimator):
             X_i: list of :py:class:`bob.learn.em.GMMStats`
                 List of statistics for a class
         """
+
         I = np.eye(self.r_U, self.r_U)  # noqa: E741
 
         Uc = self._U.reshape(
@@ -1222,7 +1214,9 @@ class FactorAnalysisBase(BaseEstimator):
 
         sigma_c = self.ubm.variances[:, np.newaxis]
 
-        n_i_c = np.expand_dims(X_i.n[:, np.newaxis], axis=2)
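+        # X_i is now a list of GMMStats: accumulate the occupancy counts over it first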
+        n_sum = sum(x_i_s.n for x_i_s in X_i)
+
+        n_i_c = np.expand_dims(n_sum[:, np.newaxis], axis=2)
 
         id_plus_us_prod_inv = I + (((UcT / sigma_c) @ Uc) * n_i_c).sum(axis=0)
         id_plus_us_prod_inv = np.linalg.inv(id_plus_us_prod_inv)
@@ -1241,8 +1235,11 @@ class FactorAnalysisBase(BaseEstimator):
 
         """
 
-        n = X_i.n[:, np.newaxis]
-        f = X_i.sum_px
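+        # Accumulate the zeroth (n) and first (sum_px) order statistics over the probe stats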
+        n_sum = sum(x_i_s.n for x_i_s in X_i)
+        sum_px_sum = sum(x_i_s.sum_px for x_i_s in X_i)
+
+        n = n_sum[:, np.newaxis]
+        f = sum_px_sum
 
         fn_x = f - self.ubm.means * n
 
@@ -1267,7 +1264,7 @@ class FactorAnalysisBase(BaseEstimator):
 
         """
 
-        return self.score(model, self.ubm.acc_stats(data))
+        return self.score(model, [self.ubm.acc_stats(d) for d in data])
 
     def fit_using_array(self, X, y):
         """Fits the model using a numpy array or a dask array as input
@@ -1319,10 +1316,11 @@ class FactorAnalysisBase(BaseEstimator):
             z
 
         """
+
         return self.enroll([self.ubm.acc_stats(X)])
 
     def _prepare_dask_input(self, X, y):
-        """Perpare the input for the fit method"""
+        """Prepare the input for the fit method"""
         logger.info(
             "Rechunking bag of stats to delayed list of stats per class. If your worker runs "
             "out of memory in this training step, you have to use workers with more memory."
@@ -1406,9 +1404,7 @@ class ISVMachine(FactorAnalysisBase):
         )
 
     def e_step(self, X, y, n_samples_per_class, n_acc, f_acc):
-        """
-        E-step of the EM algorithm
-        """
+        """E-step of the EM algorithm."""
         # self.initialize_XYZ(y)
         UProd = self._compute_uprod()
         _, _, latent_z = self.initialize_XYZ(
@@ -1452,6 +1448,7 @@ class ISVMachine(FactorAnalysisBase):
                 Accumulated statistics for U_A2(n_gaussians* feature_dimension, r_U)
 
         """
+
         acc_U_A1 = [acc[0] for acc in acc_U_A1_acc_U_A2_list]
         acc_U_A2 = [acc[1] for acc in acc_U_A1_acc_U_A2_list]
 
@@ -1476,6 +1473,7 @@ class ISVMachine(FactorAnalysisBase):
             Returns self.
 
         """
+
         if isinstance(X, dask.bag.Bag):
             X, y = self._prepare_dask_input(X, y)
 
@@ -1535,6 +1533,7 @@ class ISVMachine(FactorAnalysisBase):
             z
 
         """
+
         iterations = self.enroll_iterations
         # We have only one class for enrollment
         y = list(np.zeros(len(X), dtype=np.int32))
@@ -1584,6 +1583,7 @@ class ISVMachine(FactorAnalysisBase):
             z
 
         """
+
         return self.enroll([self.ubm.acc_stats(X)])
 
     def score(self, latent_z, data):
@@ -1596,7 +1596,7 @@ class ISVMachine(FactorAnalysisBase):
             Latent representation of the client (E[z_i])
 
         data : list of :py:class:`bob.learn.em.GMMStats`
-            List of statistics to be scored
+            List of statistics of one probe template to be scored
 
         Returns
         -------
@@ -1604,6 +1604,7 @@ class ISVMachine(FactorAnalysisBase):
-            The linear scored
+            The linear score
 
         """
+
         x = self.estimate_x(data)
         Ux = self._U @ x
 
@@ -1612,10 +1613,23 @@ class ISVMachine(FactorAnalysisBase):
         # m + Dz
         z = self.D * latent_z + self.mean_supervector
 
+        # Support a probe template and a list of probe templates.
+        if isinstance(data[0], GMMStats) and len(data) > 1:
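+            # One probe template given as several GMMStats objects: merge them into one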
+            data_sum = sum(data[1:], start=data[0])
+        elif not isinstance(data[0], GMMStats):
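+            # A list of probe templates, each itself a list of GMMStats: merge each template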
+            data_sum = []
+            for d in data:
+                if len(d) > 1:
+                    data_sum.append(sum(d[1:], start=d[0]))
+                else:
+                    data_sum.append(d[0])
+        else:
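+            # A single GMMStats in the list: nothing to merge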
+            data_sum = data
+
         return linear_scoring(
             z.reshape((self.ubm.n_gaussians, self.feature_dimension)),
             self.ubm,
-            data,
+            data_sum,
             Ux.reshape((self.ubm.n_gaussians, self.feature_dimension)),
             frame_length_normalization=True,
         )[0]
@@ -1759,6 +1773,7 @@ class JFAMachine(FactorAnalysisBase):
                 Accumulated statistics for V_A2(n_gaussians* feature_dimension, r_V)
 
         """
+
         acc_V_A1 = [acc[0] for acc in acc_V_A1_acc_V_A2_list]
         acc_V_A2 = [acc[1] for acc in acc_V_A1_acc_V_A2_list]
 
@@ -1807,6 +1822,7 @@ class JFAMachine(FactorAnalysisBase):
                 E[y]
 
         """
+
         VProd = self._compute_vprod()
 
         latent_x, latent_y, latent_z = self.initialize_XYZ(
@@ -1856,6 +1872,7 @@ class JFAMachine(FactorAnalysisBase):
                 Accumulated statistics for U_A2(n_gaussians* feature_dimension, r_U)
 
         """
+
         # self.initialize_XYZ(y)
         UProd = self._compute_uprod()
         latent_x, _, latent_z = self.initialize_XYZ(
@@ -1897,6 +1914,7 @@ class JFAMachine(FactorAnalysisBase):
                 Accumulated statistics for V_A2(n_gaussians* feature_dimension, r_V)
 
         """
+
         acc_U_A1 = [acc[0] for acc in acc_U_A1_acc_U_A2_list]
         acc_U_A2 = [acc[1] for acc in acc_U_A1_acc_U_A2_list]
 
@@ -1995,6 +2013,7 @@ class JFAMachine(FactorAnalysisBase):
                 Accumulated statistics for D_A2(n_gaussians* feature_dimension, )
 
         """
+
         _, _, latent_z = self.initialize_XYZ(
             n_samples_per_class=n_samples_per_class
         )
@@ -2030,6 +2049,7 @@ class JFAMachine(FactorAnalysisBase):
                 Accumulated statistics for D_A2(n_gaussians* feature_dimension, )
 
         """
+
         acc_D_A1 = [acc[0] for acc in acc_D_A1_acc_D_A2_list]
         acc_D_A2 = [acc[1] for acc in acc_D_A1_acc_D_A2_list]
 
@@ -2055,6 +2075,7 @@ class JFAMachine(FactorAnalysisBase):
             z, y latent variables
 
         """
+
         iterations = self.enroll_iterations
         # We have only one class for enrollment
         y = list(np.zeros(len(X), dtype=np.int32))
@@ -2118,6 +2139,7 @@ class JFAMachine(FactorAnalysisBase):
             Returns self.
 
         """
+
         if isinstance(X, dask.bag.Bag):
             X, y = self._prepare_dask_input(X, y)
 
@@ -2240,6 +2262,7 @@ class JFAMachine(FactorAnalysisBase):
-            The linear scored
+            The linear score
 
         """
+
         latent_y = model[0]
         latent_z = model[1]
 
@@ -2251,10 +2274,23 @@ class JFAMachine(FactorAnalysisBase):
         # m + Vy + Dz
         zy = self.V @ latent_y + self.D * latent_z + self.mean_supervector
 
+        # Support a probe template and a list of probe templates.
+        if isinstance(data[0], GMMStats) and len(data) > 1:
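+            # One probe template given as several GMMStats objects: merge them into one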
+            data_sum = sum(data[1:], start=data[0])
+        elif not isinstance(data[0], GMMStats):
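+            # A list of probe templates, each itself a list of GMMStats: merge each template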
+            data_sum = []
+            for d in data:
+                if len(d) > 1:
+                    data_sum.append(sum(d[1:], start=d[0]))
+                else:
+                    data_sum.append(d[0])
+        else:
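+            # A single GMMStats in the list: nothing to merge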
+            data_sum = data
+
         return linear_scoring(
             zy.reshape((self.ubm.n_gaussians, self.feature_dimension)),
             self.ubm,
-            data,
+            data_sum,
             Ux.reshape((self.ubm.n_gaussians, self.feature_dimension)),
             frame_length_normalization=True,
         )[0]
diff --git a/src/bob/learn/em/ivector.py b/src/bob/learn/em/ivector.py
index 927e51e25076c6052f2922fd5150127eb727b234..3b0d5b3b0038687e4757f11bf549d0bc1c145513 100644
--- a/src/bob/learn/em/ivector.py
+++ b/src/bob/learn/em/ivector.py
@@ -287,8 +287,13 @@ class IVectorMachine(BaseEstimator):
         )
         self.sigma = copy.deepcopy(self.ubm.variances)
 
+        logger.info("Training I-Vector...")
         for step in range(self.max_iterations):
+            logger.info(
+                f"IVector step {step+1:{len(str(self.max_iterations))}d}/{self.max_iterations}."
+            )
             if chunky:
+                # Compute the IVectorStats of each chunk
                 stats = [
                     dask.delayed(e_step)(
                         machine=self,
@@ -300,27 +305,27 @@ class IVectorMachine(BaseEstimator):
                 # Workaround to prevent memory issues at compute with too many chunks.
                 # This adds pairs of stats together instead of sending all the stats to
                 # one worker.
-                while (l := len(stats)) > 1:
+                while (length := len(stats)) > 1:
                     last = stats[-1]
                     stats = [
-                        dask.delayed(operator.add)(stats[i], stats[l // 2 + i])
-                        for i in range(l // 2)
+                        dask.delayed(operator.add)(
+                            stats[i], stats[length // 2 + i]
+                        )
+                        for i in range(length // 2)
                     ]
-                    if l % 2 != 0:
+                    if length % 2 != 0:
                         stats.append(last)
-
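+                # A single element remains: the sum of the stats of all chunks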
                 stats_sum = stats[0]
+
+                # Update the machine parameters with the aggregated stats
                 new_machine = dask.compute(
                     dask.delayed(m_step)(self, stats_sum)
                 )[0]
                 for attr in ["T", "sigma"]:
                     setattr(self, attr, getattr(new_machine, attr))
-            else:
+            else:  # Working directly on numpy arrays, not dask bags
                 stats = e_step(machine=self, data=X)
                 _ = m_step(self, stats)
-            logger.info(
-                f"IVector step {step+1:{len(str(self.max_iterations))}d}/{self.max_iterations}."
-            )
         logger.info(f"Reached {step+1} steps.")
         return self
 
diff --git a/tests/test_factor_analysis.py b/tests/test_factor_analysis.py
index 8c9f8a343a1d1e4c71cc2a46b36fbeabc2fe280e..25d09626a4df2635a22e06d0b5c505bd4d93a04c 100644
--- a/tests/test_factor_analysis.py
+++ b/tests/test_factor_analysis.py
@@ -114,7 +114,7 @@ M_y = [y1, y2]
 M_x = [x1, x2]
 
 
-def test_JFATrainAndEnrol():
+def test_JFA_train_and_enroll():
     # Train and enroll a JFAMachine
 
     # Calls the train function
@@ -213,7 +213,7 @@ def test_JFATrainAndEnrol():
     np.testing.assert_allclose(latent_z, z_ref, rtol=eps, atol=1e-8)
 
 
-def test_ISVTrainAndEnrol():
+def test_ISV_train_and_enroll():
     # Train and enroll an 'ISVMachine'
 
     eps = 1e-10
@@ -253,9 +253,8 @@ def test_ISVTrainAndEnrol():
         "float64",
     )
 
-    """
-    Calls the train function
-    """
+    # Calls the train function
+
     ubm = GMMMachine(2, 3)
     ubm.means = UBM_MEAN.reshape((2, 3))
     ubm.variances = UBM_VAR.reshape((2, 3))
@@ -274,9 +273,7 @@ def test_ISVTrainAndEnrol():
     np.testing.assert_allclose(it.D, d_ref, rtol=eps, atol=1e-8)
     np.testing.assert_allclose(it.U, u_ref, rtol=eps, atol=1e-8)
 
-    """
-    Calls the enroll function
-    """
+    # Calls the enroll function
 
     Ne = np.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2, 2))
     Fe = np.array(
@@ -307,7 +304,7 @@ def test_ISVTrainAndEnrol():
     np.testing.assert_allclose(latent_z, z_ref, rtol=eps, atol=1e-8)
 
 
-def test_JFATrainInitialize():
+def test_JFA_train_initialize():
     # Check that the initialization is consistent and using the rng (cf. issue #118)
 
     eps = 1e-10
@@ -338,7 +335,7 @@ def test_JFATrainInitialize():
     np.testing.assert_allclose(d1, d2, rtol=eps, atol=1e-8)
 
 
-def test_ISVTrainInitialize():
+def test_ISV_train_initialize():
 
     # Check that the initialization is consistent and using the rng (cf. issue #118)
     eps = 1e-10
@@ -398,14 +395,14 @@ def test_JFAMachine():
     model = [y, z]
 
     score_ref = -2.111577181208289
-    score = m.score(model, gs)
+    score = m.score(model, [gs])
     np.testing.assert_allclose(score, score_ref, atol=eps)
 
     # Scoring with numpy array
     np.random.seed(0)
     X = np.random.normal(loc=0.0, scale=1.0, size=(50, 3))
     score_ref = 2.028009315286946
-    score = m.score_using_array(model, X)
+    score = m.score_using_array(model, [X])
     np.testing.assert_allclose(score, score_ref, atol=eps)
 
 
@@ -437,7 +434,7 @@ def test_ISVMachine():
 
     # Enrolled model
     latent_z = np.array([3, 4, 1, 2, 0, 1], "float64")
-    score = isv_machine.score(latent_z, gs)
+    score = isv_machine.score(latent_z, [gs])
     score_ref = -3.280498193082100
     np.testing.assert_allclose(score, score_ref, atol=eps)
 
@@ -445,7 +442,7 @@ def test_ISVMachine():
     np.random.seed(0)
     X = np.random.normal(loc=0.0, scale=1.0, size=(50, 3))
     score_ref = -1.2343813195374242
-    score = isv_machine.score_using_array(latent_z, X)
+    score = isv_machine.score_using_array(latent_z, [X])
     np.testing.assert_allclose(score, score_ref, atol=eps)
 
 
@@ -547,7 +544,7 @@ def test_ISV_JFA_fit():
             if prior is None:
                 ubm = None
                 # we still provide an initial UBM because KMeans training is not
-                # determenistic depending on inputting numpy or dask arrays
+                # deterministic depending on inputting numpy or dask arrays
                 ubm_kwargs = dict(n_gaussians=2, ubm=_create_ubm_prior(means))
             else:
                 ubm = _create_ubm_prior(means)