diff --git a/bob/learn/em/mixture/__init__.py b/bob/learn/em/mixture/__init__.py
index 476e3a1362912d9fdff71d2f0c2a5cb1dadbb523..229f13e224bae240869b1965f580bccf591fc073 100644
--- a/bob/learn/em/mixture/__init__.py
+++ b/bob/learn/em/mixture/__init__.py
@@ -1,4 +1,3 @@
 from .gmm import GMMMachine
-from .gmm import Gaussians
 from .gmm import GMMStats
 from .linear_scoring import linear_scoring
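With the `Gaussians` structured array gone from the public API, downstream code now
sets the parameter arrays directly on the machine. A minimal migration sketch
(illustrative only, not part of this diff):

    import numpy as np
    from bob.learn.em.mixture import GMMMachine

    machine = GMMMachine(n_gaussians=2)
    # previously: machine.gaussians_ = Gaussians(means=...)
    machine.means = np.array([[0.0, 0.0, 0.0], [8.0, 8.0, 8.0]])
    # variances default to 1 and variance_thresholds to epsilon, as before
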
diff --git a/bob/learn/em/mixture/gmm.py b/bob/learn/em/mixture/gmm.py
index 60bb58e6ad17dba529bbc624839e558da0cd0c10..e3f72ff7987c0e784359b9178f8d5b95873596cf 100644
--- a/bob/learn/em/mixture/gmm.py
+++ b/bob/learn/em/mixture/gmm.py
@@ -5,7 +5,6 @@
 """This module provides classes and functions for the training and usage of GMM."""
 
 import logging
-import numbers
 import copy
 from typing import Union
 
@@ -23,197 +22,6 @@ logger = logging.getLogger(__name__)
 EPSILON = np.finfo(float).eps
 
 
-class Gaussians(np.ndarray):
-    """Represents a set of multi-dimensional Gaussian.
-
-    One Gaussian is represented by three 1D-arrays:
-        - its mean,
-        - its (diagonal) variance,
-        - the variance threshold values.
-
-    Each array can be accessed with the `[]` operator (`my_gaussians["variances"]`).
-
-    Variances thresholds are automatically applied when setting the variances (and when
-    setting the threshold values).
-
-    Usage:
-    >>> my_gaussians = Gaussians(means=np.array([[0,0,0],[1,1,1]]))
-    >>> print(my_gaussians["means"])
-    [[0. 0. 0.]
-     [1. 1. 1.]]
-    >>> print(my_gaussians["variances"])
-    [[1. 1. 1.]
-     [1. 1. 1.]]
-
-    Methods
-    -------
-    log_likelihood(X)
-        Returns the log likelihood of each element of X on each Gaussian.
-    is_similar_to(other, rtol=1e-5, atol=1e-8)
-        Returns True if other is equal to self (with tolerances).
-    """
-
-    def __new__(cls, means, variances=None, variance_thresholds=None):
-        """
-        Creates the backend 'structured numpy array' with initial values.
-
-        Parameters
-        ----------
-        means: array of shape (n_gaussians, n_features)
-            Center of the Gaussian distribution.
-        variances: array of shape (n_gaussians, n_features)
-            Diagonal variance matrix of the Gaussian distribution. Defaults to 1.
-        variance_thresholds: array of shape (n_gaussians, n_features)
-            Threshold values applied to the variances. Defaults to epsilon.
-        """
-
-        means = np.array(means)
-        if means.ndim < 2:
-            means = means.reshape((1, -1))
-        n_features = means.shape[-1]
-        n_gaussians = means.shape[0]
-        if variances is None:
-            variances = np.ones_like(means, dtype=float)
-        if variance_thresholds is None:
-            variance_thresholds = np.full_like(means, fill_value=EPSILON, dtype=float)
-        rec = np.ndarray(
-            shape=(n_gaussians, n_features),
-            dtype=[
-                ("means", float),
-                ("variances", float),
-                ("variance_thresholds", float),
-            ],
-        )
-        rec["means"] = means
-        rec["variance_thresholds"] = variance_thresholds
-        rec["variances"] = np.maximum(variance_thresholds, variances)
-        self = rec.view(cls)
-        self._g_norms = None
-        return self
-
-    @property
-    def g_norms(self):
-        if not hasattr(self, "_g_norms") or self._g_norms is None:
-            n_log_2pi = self.shape[-1] * np.log(2 * np.pi)
-            # Precomputed g_norm for each gaussian [array of shape (n_gaussians,)]
-            self._g_norms = n_log_2pi + np.log(self["variances"]).sum(axis=-1)
-        return self._g_norms
-
-    @classmethod
-    def from_hdf5(cls, hdf5):
-        """Creates a new Gaussians object from an `HDF5File` object."""
-        if isinstance(hdf5, str):
-            hdf5 = HDF5File(hdf5, "r")
-        try:
-            version_major, version_minor = hdf5.get("meta_file_version")[()].split(".")
-            logger.debug(
-                f"Reading a Gaussians HDF5 file of version {version_major}.{version_minor}"
-            )
-        except TypeError:
-            version_major, version_minor = 0, 0
-        if int(version_major) >= 1:
-            if hdf5["meta_writer_class"][()] != str(cls):
-                logger.warning(f"{hdf5['meta_writer_class'][()]} is not {cls}.")
-            self = cls(
-                means=hdf5["means"][()],
-                variances=hdf5["variances"][()],
-                variance_thresholds=hdf5["variance_thresholds"][()],
-            )
-        else:
-            raise RuntimeError("Unsupported Gaussians file version.")
-        return self
-
-    def save(self, hdf5):
-        """Saves the current Gaussians parameters in an `HDF5File` object."""
-        if isinstance(hdf5, str):
-            hdf5 = HDF5File(hdf5, "w")
-        hdf5["meta_file_version"] = "1.0"
-        hdf5["meta_writer_class"] = str(self.__class__)
-        hdf5["means"] = self["means"]
-        hdf5["variances"] = self["variances"]
-        hdf5["variance_thresholds"] = self["variance_thresholds"]
-
-    def load(self, hdf5):
-        """Loads Gaussians parameters from an `HDF5File` object."""
-        if isinstance(hdf5, str):
-            hdf5 = HDF5File(hdf5, "r")
-        new_self = Gaussians.from_hdf5(hdf5)
-        if new_self.shape != self.shape:
-            logger.warning(
-                f"Loaded Gaussians {new_self.shape} are not the same shape as "
-                f"the previous ones {self.shape}. Object shape will change."
-            )
-            raise ValueError(f"Could not load Gaussians of shape {new_self.shape}.")
-        self["means"] = new_self["means"]
-        self["variances"] = new_self["variances"]
-        self["variance_thresholds"] = new_self["variance_thresholds"]
-
-    def log_likelihood(self, data):
-        """Returns the log-likelihood for x on each gaussian.
-
-        Parameters
-        ----------
-        data: array of shape (n_features,) or (n_samples, n_features)
-            The point (or points) to compute the log-likelihood of.
-
-        Returns
-        -------
-        array of shape (n_gaussians, n_samples)
-            The log likelihood of each points in x for each Gaussian.
-        """
-
-        # Compute the likelihood for each data point on each Gaussian
-        z = da.sum(
-            da.power(data[None, ..., :] - self["means"][..., None, :], 2)
-            / self["variances"][..., None, :],
-            axis=-1,
-        )
-        return -0.5 * (self.g_norms[:, None] + z)
-
-    def __setitem__(self, key, value) -> None:
-        """Set values of items (operator `[]`) of this numpy array.
-
-        Applies the threshold on the variances when setting `variances` or
-        `variance_thresholds`.
-        """
-        value = np.array(value)
-
-        if key == "variances":
-            value = np.maximum(self["variance_thresholds"], value)
-        elif key == "variance_thresholds":
-            super().__setitem__("variances", np.maximum(value, self["variances"]))
-
-        retval = super().__setitem__(key, value)
-        if key == "variances" or key == "variance_thresholds":
-            # Recompute g_norm for each gaussian [array of shape (n_gaussians,)]
-            n_log_2pi = self["means"].shape[-1] * np.log(2 * np.pi)
-            self._g_norms = n_log_2pi + np.log(self["variances"]).sum(axis=-1)
-        return retval
-
-    def __getitem__(self, key):
-        """Get values of items (operator `[]`) of this Gaussians as numpy array."""
-        return super().__getitem__(key).view(np.ndarray)
-
-    def __eq__(self, other):
-        return np.array_equal(self, other)
-
-    def __ne__(self, other):
-        return not np.array_equal(self, other)
-
-    def is_similar_to(self, other, rtol=1e-5, atol=1e-8):
-        """Returns True if `other` has the same values (within a tolerance)."""
-        return (
-            np.allclose(self["means"], other["means"], rtol=rtol, atol=atol)
-            and np.allclose(self["variances"], other["variances"], rtol=rtol, atol=atol)
-            and np.allclose(
-                self["variance_thresholds"],
-                other["variance_thresholds"],
-                rtol=rtol,
-                atol=atol,
-            )
-        )
-
-
 class GMMStats:
     """Stores accumulated statistics of a GMM.
 
@@ -236,9 +44,9 @@ class GMMStats:
         self.n_features = n_features
         self.log_likelihood = 0
         self.t = 0
-        self.n = da.zeros(shape=(self.n_gaussians,), dtype=float)
-        self.sum_px = da.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
-        self.sum_pxx = da.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
+        self.n = np.zeros(shape=(self.n_gaussians,), dtype=float)
+        self.sum_px = np.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
+        self.sum_pxx = np.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
 
     def init_fields(self, log_likelihood=0.0, t=0, n=None, sum_px=None, sum_pxx=None):
         """Initializes the statistics values to a defined value, or zero by default."""
@@ -248,17 +56,17 @@ class GMMStats:
         self.t = t
         # For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of
         # P(gaussian_i|x)
-        self.n = da.zeros(shape=(self.n_gaussians,), dtype=float) if n is None else n
+        self.n = np.zeros(shape=(self.n_gaussians,), dtype=float) if n is None else n
         # For each Gaussian, the accumulated sum of responsibility times the sample
         self.sum_px = (
-            da.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
+            np.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
             if sum_px is None
             else sum_px
         )
         # For each Gaussian, the accumulated sum of responsibility times the sample
         # squared
         self.sum_pxx = (
-            da.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
+            np.zeros(shape=(self.n_gaussians, self.n_features), dtype=float)
             if sum_pxx is None
             else sum_pxx
         )
@@ -453,8 +261,6 @@ class GMMMachine(BaseEstimator):
     ----------
     means, variances, variance_thresholds
         Gaussians parameters.
-    gaussians_
-        All Gaussians parameters.
     weights
         Gaussians weights.
     """
@@ -466,8 +272,7 @@ class GMMMachine(BaseEstimator):
         ubm: "Union[GMMMachine, None]" = None,
         convergence_threshold: float = 1e-5,
         max_fitting_steps: Union[int, None] = 200,
-        random_state: Union[int, da.random.RandomState] = 0,
-        initial_gaussians: Union[Gaussians, None] = None,
+        random_state: Union[int, np.random.RandomState] = 0,
         weights: "Union[np.ndarray[('n_gaussians',), float], None]" = None,
         k_means_trainer: Union[KMeansTrainer, None] = None,
         update_means: bool = True,
@@ -493,8 +298,6 @@ class GMMMachine(BaseEstimator):
             the convergence threshold isn't met.
         random_state
             Specifies a RandomState or a seed for reproducibility.
-        initial_gaussians
-            Optional set of values to skip the k-means initialization.
         weights
             The weight of each Gaussian. (defaults to `1/n_gaussians`)
         k_means_trainer
@@ -522,23 +325,27 @@ class GMMMachine(BaseEstimator):
         self.convergence_threshold = convergence_threshold
         self.max_fitting_steps = max_fitting_steps
         self.random_state = random_state
-        self.initial_gaussians = initial_gaussians
-        if weights is None:
-            weights = np.full(
-                shape=(self.n_gaussians,), fill_value=(1 / self.n_gaussians)
-            )
-        self.weights = weights
         self.k_means_trainer = k_means_trainer
         self.update_means = update_means
         self.update_variances = update_variances
         self.update_weights = update_weights
         self.mean_var_update_threshold = mean_var_update_threshold
-        if initial_gaussians is not None:
-            self.gaussians_ = initial_gaussians
-        elif self.ubm is not None:
-            self.gaussians_ = copy.deepcopy(self.ubm.gaussians_)
+        self._means = None
+        self._variances = None
+        self._variance_thresholds = None
+        self._g_norms = None
+
+        if self.ubm is not None:
+            self.means = copy.deepcopy(self.ubm.means)
+            self.variances = copy.deepcopy(self.ubm.variances)
+            self.variance_thresholds = copy.deepcopy(self.ubm.variance_thresholds)
+            self.weights = copy.deepcopy(self.ubm.weights)
         else:
-            self.gaussians_ = None  # Will initialize at `fit` with k-means.
+            self.weights = np.full(
+                (self.n_gaussians,), fill_value=(1 / self.n_gaussians), dtype=float
+            )
+        if weights is not None:
+            self.weights = weights
 
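With `initial_gaussians` removed, a MAP machine takes all of its starting parameters
from the UBM at construction time. A short sketch of the intended flow (names are
illustrative):

    import numpy as np
    from bob.learn.em.mixture import GMMMachine

    prior = GMMMachine(n_gaussians=2)
    prior.means = np.array([[2.0, 2.0, 2.0], [8.0, 8.0, 8.0]])

    adapted = GMMMachine(n_gaussians=2, trainer="map", ubm=prior)
    assert np.array_equal(adapted.means, prior.means)  # deep-copied from the UBM
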
     @property
     def weights(self):
@@ -553,44 +360,66 @@ class GMMMachine(BaseEstimator):
     @property
     def means(self):
         """The means of each Gaussian."""
-        return self.gaussians_["means"]
+        if self._means is None:
+            raise ValueError("GMMMachine means were never set.")
+        return self._means
 
     @means.setter
     def means(self, means: "np.ndarray[('n_gaussians', 'n_features'), float]"):
-        if self.gaussians_ is not None:
-            self.gaussians_["means"] = means
-        else:
-            self.gaussians_ = Gaussians(means=means)
+        if self._means is None:
+            if self._variances is None:
+                self._variances = np.ones_like(means, dtype=float)
+            if self._variance_thresholds is None:
+                self._variance_thresholds = np.full_like(
+                    means, fill_value=EPSILON, dtype=float
+                )
+        self._means = means
 
     @property
     def variances(self):
         """The (diagonal) variances of the gaussians."""
-        return self.gaussians_["variances"]
+        if self._variances is None:
+            raise ValueError("GMMMachine variances were never set.")
+        return self._variances
 
     @variances.setter
     def variances(self, variances: "np.ndarray[('n_gaussians', 'n_features'), float]"):
-        if self.gaussians_ is not None:
-            self.gaussians_["variances"] = variances
-        else:
-            self.gaussians_ = Gaussians(
-                means=np.zeros_like(variances), variances=variances
-            )
+        if self._variances is None:
+            if self._means is None:
+                self._means = np.zeros_like(variances, dtype=float)
+            if self._variance_thresholds is None:
+                self._variance_thresholds = np.full_like(
+                    variances, fill_value=EPSILON, dtype=float
+                )
+        self._variances = np.maximum(self.variance_thresholds, variances)
+        # Recompute g_norm for each gaussian [array of shape (n_gaussians,)]
+        n_log_2pi = self.variances.shape[-1] * np.log(2 * np.pi)
+        self._g_norms = np.array(n_log_2pi + np.log(self._variances).sum(axis=-1))
 
     @property
     def variance_thresholds(self):
         """Threshold below which variances are clamped to prevent precision losses."""
-        return self.gaussians_["variance_thresholds"]
+        if self._variance_thresholds is None:
+            raise ValueError("GMMMachine variance thresholds were never set.")
+        return self._variance_thresholds
 
     @variance_thresholds.setter
     def variance_thresholds(
-        self, threshold: "np.ndarray[('n_gaussians', 'n_features'), float]"
+        self,
+        threshold: "Union[float, np.ndarray[('n_gaussians', 'n_features'), float]]",
     ):
-        if self.gaussians_ is not None:
-            self.gaussians_["variance_thresholds"] = threshold
-        else:
-            self.gaussians_ = Gaussians(
-                means=np.zeros_like(threshold), variance_thresholds=threshold
-            )
+        if not hasattr(threshold, "ndim"):
+            threshold = np.full_like(self.means, fill_value=threshold, dtype=float)
+        elif threshold.ndim == 1:
+            threshold = threshold[None, :].repeat(self.n_gaussians, axis=0)
+        self._variance_thresholds = threshold
+        self.variances = np.maximum(threshold, self.variances)
+
+    @property
+    def g_norms(self):
+        """Precomputed g_norms (depends on variances and feature shape)."""
+        if self._g_norms is None:
+            # Recompute g_norm for each gaussian [array of shape (n_gaussians,)]
+            n_log_2pi = self.variances.shape[-1] * np.log(2 * np.pi)
+            self._g_norms = n_log_2pi + np.log(self._variances).sum(axis=-1)
+        return self._g_norms
 
     @property
     def log_weights(self):
@@ -600,7 +429,7 @@ class GMMMachine(BaseEstimator):
     @property
     def shape(self):
         """Shape of the gaussians in the GMM machine."""
-        return self.gaussians_.shape
+        return self.means.shape
 
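The setters above preserve the clamping invariant of the old `Gaussians.__setitem__`:
variances are floored at the thresholds whenever either array changes, and `g_norms`
is refreshed. A quick sketch of the expected behavior:

    gmm = GMMMachine(n_gaussians=2)
    gmm.means = np.zeros((2, 3))           # variances default to 1, thresholds to EPSILON
    gmm.variance_thresholds = 0.005        # a scalar broadcasts to (n_gaussians, n_features)
    gmm.variances = np.full((2, 3), 1e-4)  # clamped up to the threshold
    assert np.all(gmm.variances == 0.005)  # and g_norms was recomputed
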
     @classmethod
     def from_hdf5(cls, hdf5, ubm=None):
@@ -612,56 +441,52 @@ class GMMMachine(BaseEstimator):
             logger.debug(
                 f"Reading a GMMStats HDF5 file of version {version_major}.{version_minor}"
             )
-        except TypeError:
+        except (TypeError, RuntimeError):
             version_major, version_minor = 0, 0
         if int(version_major) >= 1:
             if hdf5["meta_writer_class"][()] != str(cls):
                 logger.warning(f"{hdf5['meta_writer_class'][()]} is not {cls}.")
             if hdf5["trainer"][()] == "map" and ubm is None:
                 raise ValueError("The UBM is needed when loading a MAP machine.")
-            hdf5.cd("gaussians")
-            gaussians = Gaussians.from_hdf5(hdf5)
-            hdf5.cd("..")
             self = cls(
                 n_gaussians=hdf5["n_gaussians"][()],
                 trainer=hdf5["trainer"][()],
                 ubm=ubm,
                 convergence_threshold=1e-5,
                 max_fitting_steps=hdf5["max_fitting_steps"][()],
-                initial_gaussians=gaussians,
                 weights=hdf5["weights"][()],
                 k_means_trainer=None,
                 update_means=hdf5["update_means"][()],
                 update_variances=hdf5["update_variances"][()],
                 update_weights=hdf5["update_weights"][()],
             )
+            gaussians = hdf5["gaussians"]
+            self.means = gaussians["means"][()]
+            self.variances = gaussians["variances"][()]
+            self.variance_thresholds = gaussians["variance_thresholds"][()]
         else:  # Legacy file version
             logger.info("Loading a legacy HDF5 stats file.")
-            n_gaussians = hdf5["m_n_gaussians"][()]
+            n_gaussians = int(hdf5["m_n_gaussians"][()])
             g_means = []
             g_variances = []
             g_variance_thresholds = []
             for i in range(n_gaussians):
-                hdf5.cd(f"m_gaussians{i}")
-                g_means.append(hdf5["m_mean"][()])
-                g_variances.append(hdf5["m_variance"][()])
-                g_variance_thresholds.append(hdf5["m_variance_thresholds"][()])
-                hdf5.cd("..")
-            gaussians = Gaussians(
-                means=g_means,
-                variances=g_variances,
-                variance_thresholds=g_variance_thresholds,
-            )
+                gaussian_group = hdf5[f"m_gaussians{i}"]
+                g_means.append(gaussian_group["m_mean"][()])
+                g_variances.append(gaussian_group["m_variance"][()])
+                g_variance_thresholds.append(gaussian_group["m_variance_thresholds"][()])
             self = cls(
                 n_gaussians=n_gaussians,
                 ubm=ubm,
-                initial_gaussians=gaussians,
                 weights=hdf5["m_weights"][()],
             )
+            self.means = g_means
+            self.variances = g_variances
+            self.variance_thresholds = g_variance_thresholds
         return self
 
     def save(self, hdf5):
-        """Saves the current statistsics in an `HDF5File` object."""
+        """Saves the current statistics in an `HDF5File` object."""
         if isinstance(hdf5, str):
             hdf5 = HDF5File(hdf5, "w")
         hdf5["meta_file_version"] = "1.0"
@@ -674,30 +499,41 @@ class GMMMachine(BaseEstimator):
         hdf5["update_means"] = self.update_means
         hdf5["update_variances"] = self.update_variances
         hdf5["update_weights"] = self.update_weights
-        hdf5.create_group("gaussians")
-        hdf5.cd("gaussians")
-        self.gaussians_.save(hdf5)
-        hdf5.cd("..")
+        gaussians_group = hdf5.create_group("gaussians")
+        gaussians_group["means"] = self.means
+        gaussians_group["variances"] = self.variances
+        gaussians_group["variance_thresholds"] = self.variance_thresholds
 
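Since the three arrays are now written into a `gaussians` group instead of delegating
to `Gaussians.save`, a round trip looks like this (mirroring the tests; `machine` is
any GMMMachine whose parameters are set):

    from bob.io.base import HDF5File

    machine.save(HDF5File("gmm.hdf5", "w"))
    loaded = GMMMachine.from_hdf5(HDF5File("gmm.hdf5", "r"))
    assert machine == loaded
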
     def __eq__(self, other):
-        return np.array_equal(self.gaussians_, other.gaussians_)
+        return (
+            np.array_equal(self.means, other.means)
+            and np.array_equal(self.variances, other.variances)
+            and np.array_equal(self.variance_thresholds, other.variance_thresholds)
+            and np.array_equal(self.weights, other.weights)
+        )
 
     def is_similar_to(self, other, rtol=1e-5, atol=1e-8):
         """Returns True if `other` has the same gaussians (within a tolerance)."""
-        return self.gaussians_.is_similar_to(
-            other.gaussians_, rtol=rtol, atol=atol
-        ) and np.allclose(self.weights, other.weights, rtol=rtol, atol=atol)
+        return (
+            np.allclose(self.means, other.means, rtol=rtol, atol=atol)
+            and np.allclose(self.variances, other.variances, rtol=rtol, atol=atol)
+            and np.allclose(
+                self.variance_thresholds,
+                other.variance_thresholds,
+                rtol=rtol,
+                atol=atol,
+            )
+            and np.allclose(self.weights, other.weights, rtol=rtol, atol=atol)
+        )
 
     def initialize_gaussians(
         self, data: "Union[np.ndarray[('n_samples', 'n_features'), float], None]" = None
     ):
-        """Populates `gaussians_` with either k-means or the UBM values."""
+        """Populates gaussians parameters with either k-means or the UBM values."""
         if self.trainer == "map":
+            self.means = copy.deepcopy(self.ubm.means)
+            self.variances = copy.deepcopy(self.ubm.variances)
+            self.variance_thresholds = copy.deepcopy(self.ubm.variance_thresholds)
             self.weights = copy.deepcopy(self.ubm.weights)
-            self.gaussians_ = copy.deepcopy(self.ubm.gaussians_)
-            self.gaussians_._g_norms = self.ubm.gaussians_._g_norms
+            self._g_norms = copy.deepcopy(self.ubm.g_norms)
         else:
-            if self.initial_gaussians is None:
+            if self._means is None:
+                logger.debug("GMM means was never set. Initializing with k-means.")
                 if data is None:
                     raise ValueError("Data is required when training with k-means.")
                 logger.info("Initializing GMM with k-means.")
@@ -714,16 +550,11 @@ class GMMMachine(BaseEstimator):
                 ) = kmeans_machine.get_variances_and_weights_for_each_cluster(data)
 
                 # Set the GMM machine's gaussians with the results of k-means
-                self.gaussians_ = Gaussians(
-                    means=kmeans_machine.centroids_,
-                    variances=variances,
-                )
-                self.weights = copy.deepcopy(weights)
+                self.means = np.array(kmeans_machine.centroids_)
+                self.variances = np.array(variances)
+                self.weights = np.array(weights)
             else:
-                logger.info("Initializing GMM with user-provided values.")
-                self.gaussians_ = copy.deepcopy(self.initial_gaussians)
-                self.gaussians_._g_norms = self.initial_gaussians._g_norms
-                self.weights = np.full((self.n_gaussians,), 1 / self.n_gaussians)
+                logger.debug("Initialize GMMMachine: GMM means already set.")
 
     def log_weighted_likelihood(
         self, data: "np.ndarray[('n_samples', 'n_features'), float]"
@@ -740,7 +571,9 @@ class GMMMachine(BaseEstimator):
         array of shape (n_gaussians, n_samples)
             The weighted log likelihood of each sample of each Gaussian.
         """
-        l = self.gaussians_.log_likelihood(data)
+        # Compute the likelihood for each data point on each Gaussian
+        z = (
+            (data[None, ..., :] - self.means[..., None, :]) ** 2
+            / self.variances[..., None, :]
+        ).sum(axis=-1)
+        l = -0.5 * (self.g_norms[:, None] + z)
         log_weighted_likelihood = self.log_weights[:, None] + l
         return log_weighted_likelihood
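The inlined computation is the diagonal-covariance Gaussian log-density,
-0.5 * (g_norm + sum((x - mu)**2 / var)) with g_norm = D*log(2*pi) + sum(log(var)).
A quick numeric sanity check against scipy (assuming scipy is available; it is not a
dependency of this module):

    import numpy as np
    from scipy.stats import multivariate_normal

    x, mean, var = np.ones(3), np.zeros(3), np.ones(3)
    g_norm = 3 * np.log(2 * np.pi) + np.log(var).sum()
    ll = -0.5 * (g_norm + (((x - mean) ** 2) / var).sum())
    assert np.isclose(ll, multivariate_normal(mean=mean, cov=np.diag(var)).logpdf(x))
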
 
@@ -763,22 +596,26 @@ class GMMMachine(BaseEstimator):
         # All likelihoods [array of shape (n_gaussians, n_samples)]
         log_weighted_likelihood = self.log_weighted_likelihood(data)
 
-        def logaddexp_reduce(array, axis, keepdims):
+        def logaddexp_reduce(array, axis=0, keepdims=False):
             return np.logaddexp.reduce(
                 array, axis=axis, keepdims=keepdims, initial=-np.inf
             )
-
-        # Sum along gaussians axis (using logAddExp to prevent underflow)
-        ll_reduced = da.reduction(
-            x=log_weighted_likelihood,
-            chunk=logaddexp_reduce,
-            aggregate=logaddexp_reduce,
-            axis=0,
-            dtype=float,
-            keepdims=False,
-        )
+        if isinstance(log_weighted_likelihood, np.ndarray):
+            ll_reduced = logaddexp_reduce(log_weighted_likelihood)
+        else:
+            # Sum along gaussians axis (using logAddExp to prevent underflow)
+            ll_reduced = da.reduction(
+                x=log_weighted_likelihood,
+                chunk=logaddexp_reduce,
+                aggregate=logaddexp_reduce,
+                axis=0,
+                dtype=float,
+                keepdims=False,
+            )
         return ll_reduced
 
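Both branches reduce over the gaussians axis with log-add-exp rather than a plain
log(sum(exp(...))), which would underflow for the very negative log-likelihoods a
GMM produces. A small illustration of why:

    import numpy as np

    lls = np.array([[-1000.0], [-1001.0]])   # per-gaussian log-likelihoods
    naive = np.log(np.exp(lls).sum(axis=0))  # exp underflows to 0, so log gives -inf
    safe = np.logaddexp.reduce(lls, axis=0, initial=-np.inf)
    assert np.isinf(naive) and np.isfinite(safe)  # safe is about -999.69
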
     def acc_statistics(
         self,
         data: "np.ndarray[('n_samples', 'n_features'), float]",
@@ -786,6 +623,10 @@ class GMMMachine(BaseEstimator):
     ):
         """Accumulates the statistics of GMMStats for a set of data.
 
+        This can be used to compute a GMM step in parallel: each worker/thread applies
+        the e-step of a copy of the same GMM on a slice of the training data, and the
+        resulting `GMMStats` objects are summed before applying the m-step.
+
         Parameters
         ----------
         data:
@@ -808,23 +649,23 @@ class GMMMachine(BaseEstimator):
         # Log likelihood [array of shape (n_samples,)]
         log_likelihood = self.log_likelihood(data)
         # Responsibility P [array of shape (n_gaussians, n_samples)]
-        responsibility = da.exp(log_weighted_likelihoods - log_likelihood[None, :])
+        responsibility = np.exp(log_weighted_likelihoods - log_likelihood[None, :])
 
         # Accumulate
 
         # Total likelihood [float]
-        statistics.log_likelihood += da.sum(log_likelihood)
+        statistics.log_likelihood += log_likelihood.sum()
         # Count of samples [int]
         statistics.t += data.shape[0]
         # Responsibilities [array of shape (n_gaussians,)]
-        statistics.n += da.sum(responsibility, axis=-1)
+        statistics.n += responsibility.sum(axis=-1)
         # p * x [array of shape (n_gaussians, n_samples, n_features)]
-        px = da.multiply(responsibility[:, :, None], data[None, :, :])
+        px = np.multiply(responsibility[:, :, None], data[None, :, :])
         # First order stats [array of shape (n_gaussians, n_features)]
-        statistics.sum_px += da.sum(px, axis=1)
+        statistics.sum_px += px.sum(axis=1)
         # Second order stats [array of shape (n_gaussians, n_features)]
-        pxx = da.multiply(px[:, :, :], data[None, :, :])
-        statistics.sum_pxx += da.sum(pxx, axis=1)
+        pxx = np.multiply(px[:, :, :], data[None, :, :])
+        statistics.sum_pxx += pxx.sum(axis=1)
 
         return statistics
 
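The map-reduce pattern described in the docstring can be spelled out with only the
APIs shown here: run `acc_statistics` on each partition, then fold the partial
statistics into one `GMMStats` via `init_fields`. A minimal sketch, assuming a
`machine` with parameters set and a 2D `data` array (two hypothetical workers):

    from bob.learn.em.mixture import GMMStats

    chunks = np.array_split(data, 2)
    parts = [machine.acc_statistics(chunk) for chunk in chunks]

    total = GMMStats(machine.n_gaussians, data.shape[-1])
    total.init_fields(
        log_likelihood=sum(p.log_likelihood for p in parts),
        t=sum(p.t for p in parts),
        n=sum(p.n for p in parts),
        sum_px=sum(p.sum_px for p in parts),
        sum_pxx=sum(p.sum_pxx for p in parts),
    )
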
@@ -850,7 +691,7 @@ class GMMMachine(BaseEstimator):
 
     def fit(self, X, y=None, **kwargs):
         """Trains the GMM on data until convergence or maximum step is reached."""
-        if self.gaussians_ is None:
+        if self._means is None:
             self.initialize_gaussians(X)
 
         average_output = 0
@@ -895,7 +736,7 @@ class GMMMachine(BaseEstimator):
 
     def fit_partial(self, X, y=None, **kwargs):
         """Applies one iteration of GMM training."""
-        if self.gaussians_ is None:
+        if self._means is None:
             self.initialize_gaussians(X)
 
         stats = self.e_step(X)
@@ -934,7 +775,7 @@ def ml_gmm_m_step(
         machine.weights = statistics.n / statistics.t
 
     # Threshold the low n to prevent divide by zero
-    thresholded_n = da.clip(statistics.n, mean_var_update_threshold, None)
+    thresholded_n = np.clip(statistics.n, mean_var_update_threshold, None)
 
     # Update GMM parameters using the sufficient statistics (m_ss):
 
@@ -952,7 +793,7 @@ def ml_gmm_m_step(
     #      = 1/n * sum (Pxx) - mean^2
     if update_variances:
         logger.debug("Update variances.")
-        machine.variances = statistics.sum_pxx / thresholded_n[:, None] - da.power(
+        machine.variances = statistics.sum_pxx / thresholded_n[:, None] - np.power(
             machine.means, 2
         )
 
@@ -999,13 +840,13 @@ def map_gmm_m_step(
     #   Gaussian Mixture Models", Digital Signal Processing, 2000
     if update_means:
         new_means = (
-            da.multiply(
+            np.multiply(
                 alpha[:, None],
                 (statistics.sum_px / statistics.n[:, None]),
             )
-            + da.multiply((1 - alpha[:, None]), machine.ubm.means)
+            + np.multiply((1 - alpha[:, None]), machine.ubm.means)
         )
-        machine.means = da.where(
+        machine.means = np.where(
             statistics.n[:, None] < mean_var_update_threshold,
             machine.ubm.means,
             new_means,
@@ -1016,15 +857,15 @@ def map_gmm_m_step(
     #   Gaussian Mixture Models", Digital Signal Processing, 2000
     if update_variances:
         # Calculate new variances (equation 13)
-        prior_norm_variances = (machine.ubm.variances + machine.ubm.means) - da.power(
+        prior_norm_variances = (machine.ubm.variances + machine.ubm.means) - np.power(
             machine.means, 2
         )
         new_variances = (
             alpha[:, None] * statistics.sum_pxx / statistics.n[:, None]
             + (1 - alpha[:, None]) * (machine.ubm.variances + machine.ubm.means)
-            - da.power(machine.means, 2)
+            - np.power(machine.means, 2)
         )
-        machine.variances = da.where(
+        machine.variances = np.where(
             statistics.n[:, None] < mean_var_update_threshold,
             prior_norm_variances,
             new_variances,
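For reference, the mean adaptation rule from Reynolds et al., "Speaker Verification
Using Adapted Gaussian Mixture Models" (2000), which the `update_means` branch
implements; here $E_i(x)$ corresponds to `statistics.sum_px / statistics.n`, and
$\alpha_i$ is the adaptation coefficient computed from the relevance factor $r$
earlier in this function (not shown in the diff):

    \alpha_i = \frac{n_i}{n_i + r}, \qquad
    \hat{\mu}_i = \alpha_i \, E_i(x) + (1 - \alpha_i) \, \mu_i^{\mathrm{ubm}}
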
diff --git a/bob/learn/em/mixture/linear_scoring.py b/bob/learn/em/mixture/linear_scoring.py
index d4ba6d62d12225ec66c08b018a1eed7a708e5ae6..1f04ca777c7a4e362103c850d8fb14a427eaea1f 100644
--- a/bob/learn/em/mixture/linear_scoring.py
+++ b/bob/learn/em/mixture/linear_scoring.py
@@ -10,7 +10,6 @@ from typing import Union
 import numpy as np
 
 from bob.learn.em.mixture import GMMMachine
-from bob.learn.em.mixture import Gaussians
 from bob.learn.em.mixture import GMMStats
 
 logger = logging.getLogger(__name__)
@@ -20,7 +19,7 @@ EPSILON = np.finfo(float).eps
 
 def linear_scoring(
     models_means: "Union[list[GMMMachine], np.ndarray[('n_models', 'n_gaussians', 'n_features'), float]]",
-    ubm: Union[GMMMachine, Gaussians],
+    ubm: GMMMachine,
     test_stats: Union["list[GMMStats]", GMMStats],
     test_channel_offsets: "np.ndarray[('n_test_stats', 'n_gaussians'), float]" = 0,
     frame_length_normalization: bool = False,
@@ -37,8 +36,8 @@ def linear_scoring(
         The model(s) to score against. If a list of `GMMMachine` is given, the means
         of each model are considered.
     ubm:
-        The Universal Background Model. Accepts either a `GMMMachine`, or a `Gaussians`
-        object. If the `GMMMachine` uses MAP, it's `ubm` attribute is used.
+        The Universal Background Model. Accepts a `GMMMachine` object. If the
+        `GMMMachine` uses MAP, its `ubm` attribute is used.
     test_stats:
         The instances to score.
     test_channel_offsets
@@ -60,11 +59,8 @@ def linear_scoring(
     if models_means.ndim == 2:
         models_means = models_means[None, :, :]
 
-    if isinstance(ubm, GMMMachine):
-        if ubm.trainer == "ml":
-            ubm = ubm.gaussians_
-        else:
-            ubm = ubm.ubm.gaussians_
+    if ubm.trainer == "map":
+        ubm = ubm.ubm
 
     if isinstance(test_stats, GMMStats):
         test_stats = [test_stats]
@@ -79,10 +75,10 @@ def linear_scoring(
     test_channel_offsets = np.array(test_channel_offsets)
 
     # Compute A [array of shape (n_models, n_gaussians * n_features)]
-    a = (models_means - ubm["means"]) / ubm["variances"]
+    a = (models_means - ubm.means) / ubm.variances
     # Compute B [array of shape (n_gaussians * n_features, n_test_stats)]
     b = sum_px[:, :, :] - (
-        n[:, :, None] * (ubm["means"][None, :, :] + test_channel_offsets)
+        n[:, :, None] * (ubm.means[None, :, :] + test_channel_offsets)
     )
     b = np.transpose(b, axes=(1, 2, 0))
     # Apply normalization if needed.
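Putting the pieces together, a hypothetical end-to-end scoring flow (`enroll_data`
and `test_data` are placeholder arrays):

    import numpy as np
    from bob.learn.em.mixture import GMMMachine, linear_scoring

    prior = GMMMachine(n_gaussians=2)
    prior.means = np.array([[2.0, 2.0, 2.0], [8.0, 8.0, 8.0]])

    model = GMMMachine(n_gaussians=2, trainer="map", ubm=prior)
    model = model.fit(enroll_data)

    test_stats = prior.acc_statistics(test_data)
    scores = linear_scoring([model], prior, test_stats)  # one score per (model, stats) pair
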
diff --git a/bob/learn/em/test/test_gmm.py b/bob/learn/em/test/test_gmm.py
index 0cebdb591432e69ed6d81e870ca61ebc21668a4b..3f8dc37d7a51edb1f9854005c3d837bff9aa9f68 100644
--- a/bob/learn/em/test/test_gmm.py
+++ b/bob/learn/em/test/test_gmm.py
@@ -9,6 +9,7 @@
 """
 
 import numpy as np
+import dask.array as da
 
 import os
 import tempfile
@@ -20,7 +21,6 @@ from bob.io.base.test_utils import datafile
 
 from bob.learn.em.mixture import GMMMachine
 from bob.learn.em.mixture import GMMStats
-from bob.learn.em.mixture import Gaussians
 
 from bob.learn.em.cluster import KMeansTrainer
 
@@ -32,9 +32,9 @@ def test_GMMStats():
   gs = GMMStats(n_gaussians,n_features)
   log_likelihood = -3.
   T = 57
-  n = np.array([4.37, 5.31], 'float64')
-  sumpx = np.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
-  sumpxx = np.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
+  n = np.array([4.37, 5.31], "float64")
+  sumpx = np.array([[1., 2., 3.], [4., 5., 6.]], "float64")
+  sumpxx = np.array([[10., 20., 30.], [40., 50., 60.]], "float64")
   gs.log_likelihood = log_likelihood
   gs.t = T
   gs.n = n
@@ -49,7 +49,7 @@ def test_GMMStats():
 
   # Saves and reads from file using `from_hdf5`
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(HDF5File(filename, 'w'))
+  gs.save(HDF5File(filename, "w"))
   gs_loaded = GMMStats.from_hdf5(HDF5File(filename, "r"))
   assert gs == gs_loaded
   assert (gs != gs_loaded ) is False
@@ -57,7 +57,7 @@ def test_GMMStats():
 
   # Saves and load from file using `load`
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(hdf5=HDF5File(filename, 'w'))
+  gs.save(hdf5=HDF5File(filename, "w"))
   gs_loaded = GMMStats(n_gaussians, n_features)
   gs_loaded.load(HDF5File(filename, "r"))
   assert gs == gs_loaded
@@ -104,14 +104,14 @@ def test_GMMStats():
 def test_GMMMachine_1():
   # Test a GMMMachine basic features
 
-  weights   = np.array([0.5, 0.5], 'float64')
-  weights2   = np.array([0.6, 0.4], 'float64')
-  means     = np.array([[3, 70, 0], [4, 72, 0]], 'float64')
-  means2     = np.array([[3, 7, 0], [4, 72, 0]], 'float64')
-  variances = np.array([[1, 10, 1], [2, 5, 2]], 'float64')
-  variances2 = np.array([[10, 10, 1], [2, 5, 2]], 'float64')
-  varianceThresholds = np.array([[0, 0, 0], [0, 0, 0]], 'float64')
-  varianceThresholds2 = np.array([[0.0005, 0.0005, 0.0005], [0, 0, 0]], 'float64')
+  weights   = np.array([0.5, 0.5], "float64")
+  weights2   = np.array([0.6, 0.4], "float64")
+  means     = np.array([[3, 70, 0], [4, 72, 0]], "float64")
+  means2     = np.array([[3, 7, 0], [4, 72, 0]], "float64")
+  variances = np.array([[1, 10, 1], [2, 5, 2]], "float64")
+  variances2 = np.array([[10, 10, 1], [2, 5, 2]], "float64")
+  varianceThresholds = np.array([[0, 0, 0], [0, 0, 0]], "float64")
+  varianceThresholds2 = np.array([[0.0005, 0.0005, 0.0005], [0, 0, 0]], "float64")
 
   # Initializes a GMMMachine
   gmm = GMMMachine(n_gaussians=2)
@@ -127,11 +127,11 @@ def test_GMMMachine_1():
   np.testing.assert_equal(gmm.variances, variances)
   np.testing.assert_equal(gmm.variance_thresholds, varianceThresholds)
 
-  newMeans = np.array([[3, 70, 2], [4, 72, 2]], 'float64')
-  newVariances = np.array([[1, 1, 1], [2, 2, 2]], 'float64')
+  newMeans = np.array([[3, 70, 2], [4, 72, 2]], "float64")
+  newVariances = np.array([[1, 1, 1], [2, 2, 2]], "float64")
 
   # Checks particular varianceThresholds-related methods
-  varianceThresholds1D = np.array([0.3, 1, 0.5], 'float64')
+  varianceThresholds1D = np.array([0.3, 1, 0.5], "float64")
   gmm.variance_thresholds = varianceThresholds1D
   np.testing.assert_equal(gmm.variance_thresholds[0,:], varianceThresholds1D)
   np.testing.assert_equal(gmm.variance_thresholds[1,:], varianceThresholds1D)
@@ -139,13 +139,10 @@ def test_GMMMachine_1():
   gmm.variance_thresholds = 0.005
   np.testing.assert_equal(gmm.variance_thresholds, np.full((2,3), 0.005))
 
-  # Checks Gaussians access
   gmm.means     = newMeans
   gmm.variances = newVariances
-  np.testing.assert_equal(gmm.gaussians_[0]["means"], newMeans[0,:])
-  np.testing.assert_equal(gmm.gaussians_[1]["means"], newMeans[1,:])
-  np.testing.assert_equal(gmm.gaussians_[0]["variances"], newVariances[0,:])
-  np.testing.assert_equal(gmm.gaussians_[1]["variances"], newVariances[1,:])
+  np.testing.assert_equal(gmm.means, newMeans)
+  np.testing.assert_equal(gmm.variances, newVariances)
 
   # Checks comparison
   gmm2 = deepcopy(gmm)
@@ -192,10 +189,10 @@ def test_GMMMachine_2():
 
   arrayset = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
   gmm = GMMMachine(n_gaussians=2)
-  gmm.weights   = np.array([0.5, 0.5], 'float64')
-  gmm.means     = np.array([[3, 70], [4, 72]], 'float64')
-  gmm.variances = np.array([[1, 10], [2, 5]], 'float64')
-  gmm.variance_thresholds = np.array([[0, 0], [0, 0]], 'float64')
+  gmm.weights   = np.array([0.5, 0.5], "float64")
+  gmm.means     = np.array([[3, 70], [4, 72]], "float64")
+  gmm.variances = np.array([[1, 10], [2, 5]], "float64")
+  gmm.variance_thresholds = np.array([[0, 0], [0, 0]], "float64")
 
   stats = gmm.acc_statistics(arrayset)
 
@@ -213,11 +210,11 @@ def test_GMMMachine_2():
 def test_GMMMachine_3():
   # Test a GMMMachine (log-likelihood computation)
 
-  data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
+  data = bob.io.base.load(datafile("data.hdf5", __name__, path="../data/"))
   gmm = GMMMachine(n_gaussians=2)
-  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
-  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
-  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))
+  gmm.weights   = bob.io.base.load(datafile("weights.hdf5", __name__, path="../data/"))
+  gmm.means     = bob.io.base.load(datafile("means.hdf5", __name__, path="../data/"))
+  gmm.variances = bob.io.base.load(datafile("variances.hdf5", __name__, path="../data/"))
 
   # Compare the log-likelihood with the one obtained using Chris Matlab
   # implementation
@@ -232,9 +229,9 @@ def test_GMMMachine_4():
   data = np.random.rand(100,50) # Doesn't matter if it is random. The average of 1D array (in python) MUST output the same result for the 2D array (in C++)
 
   gmm = GMMMachine(n_gaussians=2)
-  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
-  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
-  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))
+  gmm.weights   = bob.io.base.load(datafile("weights.hdf5", __name__, path="../data/"))
+  gmm.means     = bob.io.base.load(datafile("means.hdf5", __name__, path="../data/"))
+  gmm.variances = bob.io.base.load(datafile("variances.hdf5", __name__, path="../data/"))
 
 
   ll = 0
@@ -253,7 +250,7 @@ def test_GMMStats_2():
     n_features = data.shape[-1]
     machine = GMMMachine(n_gaussians)
 
-    machine.gaussians_ = Gaussians(means=np.array([[0, 0, 0], [8, 8, 8]]))
+    machine.means = np.array([[0, 0, 0], [8, 8, 8]])
 
     # Populate the GMMStats
     stats = machine.acc_statistics(data)
@@ -316,24 +313,18 @@ def test_machine_parameters():
     n_gaussians = 3
     n_features = 2
     machine = GMMMachine(n_gaussians)
-    machine.gaussians_ = Gaussians(
-        means=np.repeat([[0], [1], [-1]], n_features, 1)
-    )
-    np.testing.assert_almost_equal(
-        machine.means, np.repeat([[0], [1], [-1]], n_features, 1)
-    )
-    np.testing.assert_almost_equal(machine.variances, np.ones((n_gaussians, n_features)))
+    machine.means = np.repeat([[0], [1], [-1]], n_features, 1)
+    np.testing.assert_equal(machine.means, np.repeat([[0], [1], [-1]], n_features, 1))
+    np.testing.assert_equal(machine.variances, np.ones((n_gaussians, n_features)))
 
     # Setters
 
     new_means = np.repeat([[1], [2], [3]], n_features, axis=1)
     machine.means = new_means
-    np.testing.assert_almost_equal(machine.gaussians_["means"], new_means)
     assert machine.means.shape == (n_gaussians, n_features)
     np.testing.assert_almost_equal(machine.means, new_means)
     new_variances = np.repeat([[0.2], [1.1], [1]], n_features, axis=1)
     machine.variances = new_variances
-    np.testing.assert_almost_equal(machine.gaussians_["variances"], new_variances)
     assert machine.variances.shape == (n_gaussians, n_features)
     np.testing.assert_almost_equal(machine.variances, new_variances)
 
@@ -364,9 +355,7 @@ def test_likelihood():
     data = np.array([[1, 1, 1], [-1, 0, 0], [0, 0, 1], [2, 2, 2]])
     n_gaussians = 3
     machine = GMMMachine(n_gaussians)
-    machine.gaussians_ = Gaussians(
-        means=np.repeat([[0], [1], [-1]], 3, 1)
-    )
+    machine.means = np.repeat([[0], [1], [-1]], 3, 1)
     log_likelihood = machine.log_likelihood(data)
     expected_ll = np.array(
         [-3.6519900964986527, -3.83151883210222, -3.83151883210222, -5.344374066745753]
@@ -378,14 +367,12 @@ def test_likelihood_variance():
     data = np.array([[1, 1, 1], [-1, 0, 0], [0, 0, 1], [2, 2, 2]])
     n_gaussians = 3
     machine = GMMMachine(n_gaussians)
-    machine.gaussians_ = Gaussians(
-        means=np.repeat([[0], [1], [-1]], 3, 1),
-        variances=np.array([
-            [1.1, 1.2, 0.8],
-            [0.2, 0.4, 0.5],
-            [1, 1, 1],
-        ])
-    )
+    machine.means = np.repeat([[0], [1], [-1]], 3, 1)
+    machine.variances = np.array([
+        [1.1, 1.2, 0.8],
+        [0.2, 0.4, 0.5],
+        [1, 1, 1],
+    ])
     log_likelihood = machine.log_likelihood(data)
     expected_ll = np.array(
         [
@@ -402,9 +389,7 @@ def test_likelihood_weight():
     data = np.array([[1, 1, 1], [-1, 0, 0], [0, 0, 1], [2, 2, 2]])
     n_gaussians = 3
     machine = GMMMachine(n_gaussians)
-    machine.gaussians_ = Gaussians(
-        means=np.repeat([[0], [1], [-1]], 3, 1)
-    )
+    machine.means = np.repeat([[0], [1], [-1]], 3, 1)
     machine.weights = [0.6, 0.1, 0.3]
     log_likelihood = machine.log_likelihood(data)
     expected_ll = np.array(
@@ -441,9 +426,9 @@ def test_ml_em():
     data = np.array([[1, 2, 2], [2, 1, 2], [7, 8, 9], [7, 7, 8], [7, 9, 7]])
     n_gaussians = 2
     n_features = data.shape[-1]
-    gaussians_init = Gaussians(means=np.repeat([[2], [8]], n_features, 1))
 
-    machine = GMMMachine(n_gaussians, initial_gaussians=gaussians_init, update_means=True, update_variances=True, update_weights=True)
+    machine = GMMMachine(n_gaussians, update_means=True, update_variances=True, update_weights=True)
+    machine.means = np.repeat([[2], [8]], n_features, 1)
     machine.initialize_gaussians(None)
 
     stats = machine.e_step( data)
@@ -461,9 +446,7 @@ def test_ml_em():
 def test_map_em():
     n_gaussians = 2
     prior_machine = GMMMachine(n_gaussians)
-    prior_machine.gaussians_ = Gaussians(
-        means=np.array([[2, 2, 2], [8, 8, 8]])
-    )
+    prior_machine.means = np.array([[2, 2, 2], [8, 8, 8]])
     prior_machine.weights = np.array([0.5, 0.5])
 
     machine = GMMMachine(n_gaussians, trainer="map", ubm=prior_machine,  update_means=True, update_variances=True, update_weights=True)
@@ -497,9 +480,9 @@ def test_ml_transformer():
     test_data = np.array([[1, 1, 1], [1, 1, 2], [8, 9, 9], [8, 8, 8]])
     n_gaussians = 2
     n_features = 3
-    gaussians_init = Gaussians(means=np.array([[2, 2, 2], [8, 8, 8]]))
 
-    machine = GMMMachine(n_gaussians, initial_gaussians=gaussians_init, update_means=True, update_variances=True, update_weights=True)
+    machine = GMMMachine(n_gaussians, update_means=True, update_variances=True, update_weights=True)
+    machine.means = np.array([[2, 2, 2], [8, 8, 8]])
 
     machine = machine.fit(data)
 
@@ -530,9 +513,7 @@ def test_map_transformer():
     n_gaussians = 2
     n_features = 3
     prior_machine = GMMMachine(n_gaussians)
-    prior_machine.gaussians_ = Gaussians(
-        means=np.array([[2, 2, 2], [8, 8, 8]])
-    )
+    prior_machine.means = np.array([[2, 2, 2], [8, 8, 8]])
     prior_machine.weights = np.array([0.5, 0.5])
 
     machine = GMMMachine(n_gaussians, trainer="map", ubm=prior_machine,  update_means=True, update_variances=True, update_weights=True)