Commit 74fcd5c5 authored by Yannick DAYER

Remove GMM and Gaussian tests

parent 800ed9ca
Merge requests: !43 "Remove C++ code, tests, dependencies, and build scripts"; !40 "Transition to a pure python implementation"
Pipeline #56567 failed
 import bob.extension
 from . import cluster
+from . import mixture
 def get_config():
 ...
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Thu Feb 16 16:54:45 2012 +0200
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""Tests the Gaussian machine
"""
import os
import tempfile
import numpy
import bob.io.base
from bob.learn.em import Gaussian


def equals(x, y, epsilon):
    return abs(x - y) < epsilon


def test_GaussianNormal():
    # Test the likelihood computation of a simple normal Gaussian
    gaussian = Gaussian(2)
    # By default, initialized with zero mean and unit variance
    logLH = gaussian.log_likelihood(numpy.array([0.4, 0.2], "float64"))
    assert equals(logLH, -1.93787706641, 1e-10)
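    # Where the constant comes from (a sketch, assuming the standard
    # diagonal-Gaussian log-likelihood): with zero mean and unit variance,
    #   log N(x; 0, I) = -0.5 * (D * log(2*pi) + sum(x**2))
    #                  = -(log(2*pi) + 0.1) = -1.93787706641...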


def test_GaussianMachine():
    # Test a GaussianMachine more thoroughly
    # Initializes a Gaussian with zero mean and unit variance
    g = Gaussian(3)
    assert (g.mean == 0.0).all()
    assert (g.variance == 1.0).all()
    assert g.shape == (3,)
    # Set and check mean, variance, variance thresholds
    mean = numpy.array([0, 1, 2], "float64")
    variance = numpy.array([3, 2, 1], "float64")
    g.mean = mean
    g.variance = variance
    g.set_variance_thresholds(0.0005)
    assert (g.mean == mean).all()
    assert (g.variance == variance).all()
    assert (g.variance_thresholds == 0.0005).all()
    # Save to and read from file
    filename = str(tempfile.mkstemp(".hdf5")[1])
    g.save(bob.io.base.HDF5File(filename, "w"))
    g_loaded = Gaussian(bob.io.base.HDF5File(filename))
    assert g == g_loaded
    assert (g != g_loaded) is False
    assert g.is_similar_to(g_loaded)
    # Save to and read from file using the keyword argument
    filename = str(tempfile.mkstemp(".hdf5")[1])
    g.save(hdf5=bob.io.base.HDF5File(filename, "w"))
    g_loaded = Gaussian(hdf5=bob.io.base.HDF5File(filename))
    assert g == g_loaded
    assert (g != g_loaded) is False
    assert g.is_similar_to(g_loaded)
    # Save to file, then load through the load method (positional argument)
    filename = str(tempfile.mkstemp(".hdf5")[1])
    g.save(bob.io.base.HDF5File(filename, "w"))
    g_loaded = bob.learn.em.Gaussian()
    g_loaded.load(bob.io.base.HDF5File(filename))
    assert g == g_loaded
    assert (g != g_loaded) is False
    assert g.is_similar_to(g_loaded)
    # Save to file, then load through the load method (keyword argument)
    filename = str(tempfile.mkstemp(".hdf5")[1])
    g.save(bob.io.base.HDF5File(filename, "w"))
    g_loaded = bob.learn.em.Gaussian()
    g_loaded.load(hdf5=bob.io.base.HDF5File(filename))
    assert g == g_loaded
    assert (g != g_loaded) is False
    assert g.is_similar_to(g_loaded)
    # Make them different
    g_loaded.set_variance_thresholds(0.001)
    assert (g == g_loaded) is False
    assert g != g_loaded
    # Check likelihood computation
    sample1 = numpy.array([0, 1, 2], "float64")
    sample2 = numpy.array([1, 2, 3], "float64")
    sample3 = numpy.array([2, 3, 4], "float64")
    ref1 = -3.652695334228046
    ref2 = -4.569362000894712
    ref3 = -7.319362000894712
    eps = 1e-10
    assert equals(g.log_likelihood(sample1), ref1, eps)
    assert equals(g.log_likelihood(sample2), ref2, eps)
    assert equals(g.log_likelihood(sample3), ref3, eps)
    # Check resize and assignment
    g.resize(5)
    assert g.shape == (5,)
    g2 = Gaussian()
    g2 = g
    assert g == g2
    assert (g != g2) is False
    g3 = Gaussian(g)
    assert g == g3
    assert (g != g3) is False
    # Clean-up
    os.unlink(filename)
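

def _diag_gaussian_log_likelihood(x, mean, variance):
    # A sketch, not part of the original file: the closed form behind the
    # reference values above (ref1..ref3 and -1.93787706641), assuming an
    # independent (diagonal-covariance) Gaussian:
    #   log N(x; mu, s) = -0.5 * (D*log(2*pi) + sum(log(s)) + sum((x - mu)**2 / s))
    # e.g. x=[0, 1, 2] with mu=[0, 1, 2] and s=[3, 2, 1] gives
    # -3.652695334228046, i.e. ref1.
    x = numpy.asarray(x, "float64")
    mean = numpy.asarray(mean, "float64")
    variance = numpy.asarray(variance, "float64")
    return -0.5 * (
        x.size * numpy.log(2 * numpy.pi)
        + numpy.sum(numpy.log(variance))
        + numpy.sum((x - mean) ** 2 / variance)
    )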
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Thu Feb 16 17:57:10 2012 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests the GMM machine and the GMMStats container
"""
import os
import tempfile
import numpy
import bob.io.base
from bob.io.base.test_utils import datafile
from bob.learn.em import GMMMachine
from bob.learn.em import GMMStats


def test_GMMStats():
    # Test a GMMStats
    # Initializes a GMMStats
    gs = GMMStats(2, 3)
    log_likelihood = -3.0
    T = 57
    n = numpy.array([4.37, 5.31], "float64")
    sumpx = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], "float64")
    sumpxx = numpy.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], "float64")
    gs.log_likelihood = log_likelihood
    gs.t = T
    gs.n = n
    gs.sum_px = sumpx
    gs.sum_pxx = sumpxx
    assert gs.log_likelihood == log_likelihood
    assert gs.t == T
    assert (gs.n == n).all()
    assert (gs.sum_px == sumpx).all()
    assert (gs.sum_pxx == sumpxx).all()
    assert gs.shape == (2, 3)
    # Saves to and reads from file
    filename = str(tempfile.mkstemp(".hdf5")[1])
    gs.save(bob.io.base.HDF5File(filename, "w"))
    gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
    assert gs == gs_loaded
    assert (gs != gs_loaded) is False
    assert gs.is_similar_to(gs_loaded)
    # Saves to and reads from file using the keyword argument
    filename = str(tempfile.mkstemp(".hdf5")[1])
    gs.save(hdf5=bob.io.base.HDF5File(filename, "w"))
    gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
    assert gs == gs_loaded
    assert (gs != gs_loaded) is False
    assert gs.is_similar_to(gs_loaded)
    # Saves to file, then loads through the load method (positional argument)
    filename = str(tempfile.mkstemp(".hdf5")[1])
    gs.save(hdf5=bob.io.base.HDF5File(filename, "w"))
    gs_loaded = GMMStats()
    gs_loaded.load(bob.io.base.HDF5File(filename))
    assert gs == gs_loaded
    assert (gs != gs_loaded) is False
    assert gs.is_similar_to(gs_loaded)
    # Saves to file, then loads through the load method (keyword argument)
    filename = str(tempfile.mkstemp(".hdf5")[1])
    gs.save(hdf5=bob.io.base.HDF5File(filename, "w"))
    gs_loaded = GMMStats()
    gs_loaded.load(hdf5=bob.io.base.HDF5File(filename))
    assert gs == gs_loaded
    assert (gs != gs_loaded) is False
    assert gs.is_similar_to(gs_loaded)
    # Makes them different
    gs_loaded.t = 58
    assert (gs == gs_loaded) is False
    assert gs != gs_loaded
    assert (gs.is_similar_to(gs_loaded)) is False
    # Accumulates from another GMMStats
    gs2 = GMMStats(2, 3)
    gs2.log_likelihood = log_likelihood
    gs2.t = T
    gs2.n = n
    gs2.sum_px = sumpx
    gs2.sum_pxx = sumpxx
    gs2 += gs
    eps = 1e-8
    assert gs2.log_likelihood == 2 * log_likelihood
    assert gs2.t == 2 * T
    assert numpy.allclose(gs2.n, 2 * n, eps)
    assert numpy.allclose(gs2.sum_px, 2 * sumpx, eps)
    assert numpy.allclose(gs2.sum_pxx, 2 * sumpxx, eps)
    # Reinitializes and checks for zeros
    gs_loaded.init()
    assert gs_loaded.log_likelihood == 0
    assert gs_loaded.t == 0
    assert (gs_loaded.n == 0).all()
    assert (gs_loaded.sum_px == 0).all()
    assert (gs_loaded.sum_pxx == 0).all()
    # Resizes and checks the size
    assert gs_loaded.shape == (2, 3)
    gs_loaded.resize(4, 5)
    assert gs_loaded.shape == (4, 5)
    assert gs_loaded.sum_px.shape[0] == 4
    assert gs_loaded.sum_px.shape[1] == 5
    # Clean-up
    os.unlink(filename)
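

def _accumulated_stats(responsibilities, data):
    # A sketch, not part of the original file, of what the GMMStats fields
    # hold (assuming the usual EM sufficient statistics): with p[i, c] the
    # responsibility of Gaussian c for sample x[i],
    #   n[c]       = sum_i p[i, c]            (zeroth order)
    #   sum_px[c]  = sum_i p[i, c] * x[i]     (first order)
    #   sum_pxx[c] = sum_i p[i, c] * x[i]**2  (second order)
    # so the `gs2 += gs` above simply adds these statistics element-wise.
    p = numpy.asarray(responsibilities, "float64")  # shape (n_samples, n_gaussians)
    x = numpy.asarray(data, "float64")  # shape (n_samples, dim)
    n = p.sum(axis=0)
    sum_px = p.T @ x
    sum_pxx = p.T @ (x**2)
    return n, sum_px, sum_pxx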


def test_GMMMachine_1():
    # Test a GMMMachine basic features
    weights = numpy.array([0.5, 0.5], "float64")
    weights2 = numpy.array([0.6, 0.4], "float64")
    means = numpy.array([[3, 70, 0], [4, 72, 0]], "float64")
    means2 = numpy.array([[3, 7, 0], [4, 72, 0]], "float64")
    variances = numpy.array([[1, 10, 1], [2, 5, 2]], "float64")
    variances2 = numpy.array([[10, 10, 1], [2, 5, 2]], "float64")
    varianceThresholds = numpy.array([[0, 0, 0], [0, 0, 0]], "float64")
    varianceThresholds2 = numpy.array([[0.0005, 0.0005, 0.0005], [0, 0, 0]], "float64")
    # Initializes a GMMMachine
    gmm = GMMMachine(2, 3)
    # Sets the weights, means, variances and variance thresholds, then
    # checks correctness
    gmm.weights = weights
    gmm.means = means
    gmm.variances = variances
    gmm.variance_thresholds = varianceThresholds
    assert gmm.shape == (2, 3)
    assert (gmm.weights == weights).all()
    assert (gmm.means == means).all()
    assert (gmm.variances == variances).all()
    assert (gmm.variance_thresholds == varianceThresholds).all()
    # Checks supervector-like accesses
    assert (gmm.mean_supervector == means.reshape(means.size)).all()
    assert (gmm.variance_supervector == variances.reshape(variances.size)).all()
    newMeans = numpy.array([[3, 70, 2], [4, 72, 2]], "float64")
    newVariances = numpy.array([[1, 1, 1], [2, 2, 2]], "float64")
    # Checks particular varianceThresholds-related methods
    varianceThresholds1D = numpy.array([0.3, 1, 0.5], "float64")
    gmm.set_variance_thresholds(varianceThresholds1D)
    assert (gmm.variance_thresholds[0, :] == varianceThresholds1D).all()
    assert (gmm.variance_thresholds[1, :] == varianceThresholds1D).all()
    gmm.set_variance_thresholds(0.005)
    assert (gmm.variance_thresholds == 0.005).all()
    # Checks Gaussians access
    gmm.means = newMeans
    gmm.variances = newVariances
    assert (gmm.get_gaussian(0).mean == newMeans[0, :]).all()
    assert (gmm.get_gaussian(1).mean == newMeans[1, :]).all()
    assert (gmm.get_gaussian(0).variance == newVariances[0, :]).all()
    assert (gmm.get_gaussian(1).variance == newVariances[1, :]).all()
    # Checks resize
    gmm.resize(4, 5)
    assert gmm.shape == (4, 5)
    # Checks comparison
    gmm2 = GMMMachine(gmm)
    gmm3 = GMMMachine(2, 3)
    gmm3.weights = weights2
    gmm3.means = means
    gmm3.variances = variances
    # gmm3.varianceThresholds = varianceThresholds
    gmm4 = GMMMachine(2, 3)
    gmm4.weights = weights
    gmm4.means = means2
    gmm4.variances = variances
    # gmm4.varianceThresholds = varianceThresholds
    gmm5 = GMMMachine(2, 3)
    gmm5.weights = weights
    gmm5.means = means
    gmm5.variances = variances2
    # gmm5.varianceThresholds = varianceThresholds
    gmm6 = GMMMachine(2, 3)
    gmm6.weights = weights
    gmm6.means = means
    gmm6.variances = variances
    # gmm6.varianceThresholds = varianceThresholds2
    assert gmm == gmm2
    assert (gmm != gmm2) is False
    assert gmm.is_similar_to(gmm2)
    assert gmm != gmm3
    assert (gmm == gmm3) is False
    assert gmm.is_similar_to(gmm3) is False
    assert gmm != gmm4
    assert (gmm == gmm4) is False
    assert gmm.is_similar_to(gmm4) is False
    assert gmm != gmm5
    assert (gmm == gmm5) is False
    assert gmm.is_similar_to(gmm5) is False
    assert gmm != gmm6
    assert (gmm == gmm6) is False
    assert gmm.is_similar_to(gmm6) is False


def test_GMMMachine_2():
    # Test a GMMMachine (statistics)
    arrayset = bob.io.base.load(
        datafile("faithful.torch3_f64.hdf5", __name__, path="../data/")
    )
    gmm = GMMMachine(2, 2)
    gmm.weights = numpy.array([0.5, 0.5], "float64")
    gmm.means = numpy.array([[3, 70], [4, 72]], "float64")
    gmm.variances = numpy.array([[1, 10], [2, 5]], "float64")
    gmm.variance_thresholds = numpy.array([[0, 0], [0, 0]], "float64")
    stats = GMMStats(2, 2)
    gmm.acc_statistics(arrayset, stats)
    stats_ref = GMMStats(
        bob.io.base.HDF5File(datafile("stats.hdf5", __name__, path="../data/"))
    )
    assert stats.t == stats_ref.t
    assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
    # assert numpy.array_equal(stats.sumPx, stats_ref.sumPx)
    # Note AA: precision error above
    assert numpy.allclose(stats.sum_px, stats_ref.sum_px, atol=1e-10)
    assert numpy.allclose(stats.sum_pxx, stats_ref.sum_pxx, atol=1e-10)


def test_GMMMachine_3():
    # Test a GMMMachine (log-likelihood computation)
    data = bob.io.base.load(datafile("data.hdf5", __name__, path="../data/"))
    gmm = GMMMachine(2, 50)
    gmm.weights = bob.io.base.load(datafile("weights.hdf5", __name__, path="../data/"))
    gmm.means = bob.io.base.load(datafile("means.hdf5", __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile("variances.hdf5", __name__, path="../data/")
    )
    # Compare the log-likelihood with the one obtained using Chris's Matlab
    # implementation
    matlab_ll_ref = -2.361583051672024e02
    assert abs(gmm(data) - matlab_ll_ref) < 1e-10


def test_GMMMachine_4():
    numpy.random.seed(3)  # Fix the seed for reproducibility
    # The data can be random: averaging the per-row (1D) log-likelihoods in
    # Python must give the same result as evaluating the 2D array in C++.
    data = numpy.random.rand(100, 50)
    gmm = GMMMachine(2, 50)
    gmm.weights = bob.io.base.load(datafile("weights.hdf5", __name__, path="../data/"))
    gmm.means = bob.io.base.load(datafile("means.hdf5", __name__, path="../data/"))
    gmm.variances = bob.io.base.load(
        datafile("variances.hdf5", __name__, path="../data/")
    )
    ll = 0
    for i in range(data.shape[0]):
        ll += gmm(data[i, :])
    ll /= data.shape[0]
    assert ll == gmm(data)
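

def _gmm_average_log_likelihood(weights, means, variances, data):
    # A sketch, not part of the original file, of the quantity being compared
    # above (assuming a diagonal-covariance GMM): the log-likelihood of one
    # sample is logsumexp_c(log w_c + log N(x; mu_c, s_c)), and calling the
    # machine on a 2D array averages that over the samples, which is why the
    # explicit Python loop must match `gmm(data)`.
    x = numpy.asarray(data, "float64")[:, None, :]  # (n_samples, 1, dim)
    mu = numpy.asarray(means, "float64")[None, :, :]  # (1, n_gaussians, dim)
    s = numpy.asarray(variances, "float64")[None, :, :]
    log_gauss = -0.5 * (
        mu.shape[-1] * numpy.log(2 * numpy.pi)
        + numpy.sum(numpy.log(s) + (x - mu) ** 2 / s, axis=-1)
    )  # (n_samples, n_gaussians)
    log_w = numpy.log(numpy.asarray(weights, "float64"))[None, :]
    return numpy.logaddexp.reduce(log_w + log_gauss, axis=1).mean()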
@@ -56,11 +56,13 @@ def test_kmeans_fit():
     data2 = da.random.normal(loc=-1, size=(2000, 3))
     data = da.concatenate([data1, data2], axis=0)
     machine = KMeansMachine(2, random_state=0).fit(data)
+    centroids = np.array(machine.centroids_)  # Silences `argsort not implemented`
+    centroids = centroids[np.argsort(centroids[:,0])]
     expected = [
-        [1.00426431, 1.00359693, 1.05996704],
         [-0.99262315, -1.05226141, -1.00525245],
+        [1.00426431, 1.00359693, 1.05996704],
     ]
-    np.testing.assert_almost_equal(machine.centroids_, expected)
+    np.testing.assert_almost_equal(centroids, expected)
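The two `centroids` lines added in each test make the comparison order-independent: k-means returns its clusters in no guaranteed order, and the conversion through `np.array` is presumably needed because dask arrays do not implement `argsort`. A minimal sketch of the idea (plain NumPy, data hypothetical):

import numpy as np

centroids = np.array([[1.0, 1.1, 0.9], [-1.0, -0.9, -1.1]])
# Sort rows by their first coordinate so the result can be compared against a
# fixed `expected` list regardless of the cluster order returned by k-means.
canonical = centroids[np.argsort(centroids[:, 0])]
# canonical == [[-1.0, -0.9, -1.1], [1.0, 1.1, 0.9]]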
@@ -70,11 +72,13 @@ def test_kmeans_fit_init_pp():
     data = da.concatenate([data1, data2], axis=0)
     trainer = KMeansTrainer(init_method="k-means++", random_state=0)
     machine = KMeansMachine(2).fit(data, trainer=trainer)
+    centroids = np.array(machine.centroids_)  # Silences `argsort not implemented`
+    centroids = centroids[np.argsort(centroids[:,0])]
     expected = [
         [-0.99262315, -1.05226141, -1.00525245],
         [1.00426431, 1.00359693, 1.05996704],
     ]
-    np.testing.assert_almost_equal(machine.centroids_, expected)
+    np.testing.assert_almost_equal(centroids, expected, decimal=7)
@@ -84,8 +88,10 @@ def test_kmeans_fit_init_random():
     data = da.concatenate([data1, data2], axis=0)
     trainer = KMeansTrainer(init_method="random", random_state=0)
     machine = KMeansMachine(2).fit(data, trainer=trainer)
+    centroids = np.array(machine.centroids_)  # Silences `argsort not implemented`
+    centroids = centroids[np.argsort(centroids[:,0])]
     expected = [
         [-0.99433738, -1.05561588, -1.01236246],
         [0.99800688, 0.99873325, 1.05879539],
     ]
-    np.testing.assert_almost_equal(machine.centroids_, expected)
+    np.testing.assert_almost_equal(centroids, expected, decimal=7)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Francois Moulin <Francois.Moulin@idiap.ch>
# Wed Jul 13 16:00:04 2011 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests on the LinearScoring function
"""
import numpy
from bob.learn.em import GMMMachine
from bob.learn.em import GMMStats
from bob.learn.em import linear_scoring


def test_LinearScoring():
    ubm = GMMMachine(2, 2)
    ubm.weights = numpy.array([0.5, 0.5], "float64")
    ubm.means = numpy.array([[3, 70], [4, 72]], "float64")
    ubm.variances = numpy.array([[1, 10], [2, 5]], "float64")
    ubm.variance_thresholds = numpy.array([[0, 0], [0, 0]], "float64")
    model1 = GMMMachine(2, 2)
    model1.weights = numpy.array([0.5, 0.5], "float64")
    model1.means = numpy.array([[1, 2], [3, 4]], "float64")
    model1.variances = numpy.array([[9, 10], [11, 12]], "float64")
    model1.variance_thresholds = numpy.array([[0, 0], [0, 0]], "float64")
    model2 = GMMMachine(2, 2)
    model2.weights = numpy.array([0.5, 0.5], "float64")
    model2.means = numpy.array([[5, 6], [7, 8]], "float64")
    model2.variances = numpy.array([[13, 14], [15, 16]], "float64")
    model2.variance_thresholds = numpy.array([[0, 0], [0, 0]], "float64")
    stats1 = GMMStats(2, 2)
    stats1.sum_px = numpy.array([[1, 2], [3, 4]], "float64")
    stats1.n = numpy.array([1, 2], "float64")
    stats1.t = 1 + 2
    stats2 = GMMStats(2, 2)
    stats2.sum_px = numpy.array([[5, 6], [7, 8]], "float64")
    stats2.n = numpy.array([3, 4], "float64")
    stats2.t = 3 + 4
    stats3 = GMMStats(2, 2)
    stats3.sum_px = numpy.array([[5, 6], [7, 3]], "float64")
    stats3.n = numpy.array([3, 4], "float64")
    stats3.t = 3 + 4
    test_channeloffset = [
        numpy.array([9, 8, 7, 6], "float64"),
        numpy.array([5, 4, 3, 2], "float64"),
        numpy.array([1, 0, 1, 2], "float64"),
    ]
    # Reference scores (from Idiap internal Matlab implementation)
    ref_scores_00 = numpy.array(
        [[2372.9, 5207.7, 5275.7], [2215.7, 4868.1, 4932.1]], "float64"
    )
    ref_scores_01 = numpy.array(
        [
            [790.9666666666667, 743.9571428571428, 753.6714285714285],
            [738.5666666666667, 695.4428571428572, 704.5857142857144],
        ],
        "float64",
    )
    ref_scores_10 = numpy.array(
        [[2615.5, 5434.1, 5392.5], [2381.5, 4999.3, 5022.5]], "float64"
    )
    ref_scores_11 = numpy.array(
        [
            [871.8333333333332, 776.3000000000001, 770.3571428571427],
            [793.8333333333333, 714.1857142857143, 717.5000000000000],
        ],
        "float64",
    )
    # 1/ Use GMMMachines
    # 1/a/ Without test_channeloffset, without frame-length normalisation
    scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3])
    assert (abs(scores - ref_scores_00) < 1e-7).all()
    # 1/b/ Without test_channeloffset, with frame-length normalisation
    scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], [], True)
    assert (abs(scores - ref_scores_01) < 1e-7).all()
    # scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], (), True)
    # assert (abs(scores - ref_scores_01) < 1e-7).all()
    # scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], None, True)
    # assert (abs(scores - ref_scores_01) < 1e-7).all()
    # 1/c/ With test_channeloffset, without frame-length normalisation
    scores = linear_scoring(
        [model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset
    )
    assert (abs(scores - ref_scores_10) < 1e-7).all()
    # 1/d/ With test_channeloffset, with frame-length normalisation
    scores = linear_scoring(
        [model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset, True
    )
    assert (abs(scores - ref_scores_11) < 1e-7).all()
    # 2/ Use mean/variance supervectors
    # 2/a/ Without test_channeloffset, without frame-length normalisation
    scores = linear_scoring(
        [model1.mean_supervector, model2.mean_supervector],
        ubm.mean_supervector,
        ubm.variance_supervector,
        [stats1, stats2, stats3],
    )
    assert (abs(scores - ref_scores_00) < 1e-7).all()
    # 2/b/ Without test_channeloffset, with frame-length normalisation
    scores = linear_scoring(
        [model1.mean_supervector, model2.mean_supervector],
        ubm.mean_supervector,
        ubm.variance_supervector,
        [stats1, stats2, stats3],
        [],
        True,
    )
    assert (abs(scores - ref_scores_01) < 1e-7).all()
    # 2/c/ With test_channeloffset, without frame-length normalisation
    scores = linear_scoring(
        [model1.mean_supervector, model2.mean_supervector],
        ubm.mean_supervector,
        ubm.variance_supervector,
        [stats1, stats2, stats3],
        test_channeloffset,
    )
    assert (abs(scores - ref_scores_10) < 1e-7).all()
    # 2/d/ With test_channeloffset, with frame-length normalisation
    scores = linear_scoring(
        [model1.mean_supervector, model2.mean_supervector],
        ubm.mean_supervector,
        ubm.variance_supervector,
        [stats1, stats2, stats3],
        test_channeloffset,
        True,
    )
    assert (abs(scores - ref_scores_11) < 1e-7).all()
    # 3/ Using a single model/sample
    # 3/a/ without frame-length normalisation
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats1,
        test_channeloffset[0],
    )
    assert abs(score - ref_scores_10[0, 0]) < 1e-7
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats2,
        test_channeloffset[1],
    )
    assert abs(score - ref_scores_10[0, 1]) < 1e-7
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats3,
        test_channeloffset[2],
    )
    assert abs(score - ref_scores_10[0, 2]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats1,
        test_channeloffset[0],
    )
    assert abs(score - ref_scores_10[1, 0]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats2,
        test_channeloffset[1],
    )
    assert abs(score - ref_scores_10[1, 1]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats3,
        test_channeloffset[2],
    )
    assert abs(score - ref_scores_10[1, 2]) < 1e-7
    # 3/b/ with frame-length normalisation
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats1,
        test_channeloffset[0],
        True,
    )
    assert abs(score - ref_scores_11[0, 0]) < 1e-7
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats2,
        test_channeloffset[1],
        True,
    )
    assert abs(score - ref_scores_11[0, 1]) < 1e-7
    score = linear_scoring(
        model1.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats3,
        test_channeloffset[2],
        True,
    )
    assert abs(score - ref_scores_11[0, 2]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats1,
        test_channeloffset[0],
        True,
    )
    assert abs(score - ref_scores_11[1, 0]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats2,
        test_channeloffset[1],
        True,
    )
    assert abs(score - ref_scores_11[1, 1]) < 1e-7
    score = linear_scoring(
        model2.mean_supervector,
        ubm.mean_supervector,
        ubm.variance_supervector,
        stats3,
        test_channeloffset[2],
        True,
    )
    assert abs(score - ref_scores_11[1, 2]) < 1e-7
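

def _linear_score(model_means, ubm_means, ubm_variances, stats, offset=0.0, normalise=False):
    # A sketch, not part of the original file: the closed form that reproduces
    # the reference scores above (the usual GMM linear-scoring approximation).
    # The mean/variance arguments are (n_gaussians, dim) arrays and `offset`
    # is a channel offset of the same shape (or 0.0 for none); the flat
    # test_channeloffset vectors above would first be reshaped to (2, 2).
    # For example, model1/ubm/stats1 as defined above give
    # 2372.9 == ref_scores_00[0, 0].
    a = (model_means - ubm_means) / ubm_variances
    b = stats.sum_px - stats.n[:, None] * (ubm_means + offset)
    score = numpy.sum(a * b)
    # Frame-length normalisation divides by the number of accumulated frames.
    return score / stats.t if normalise else score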
@@ -6,26 +6,7 @@ import pickle
 import numpy

-from bob.learn.em import GMMMachine
-from bob.learn.em import GMMStats
-from bob.learn.em import KMeansMachine
+from bob.learn.em.cluster import KMeansMachine

-
-def test_gmm_machine():
-    gmm_machine = GMMMachine(3, 3)
-    gmm_machine.means = numpy.arange(9).reshape(3, 3).astype("float")
-    gmm_machine_after_pickle = pickle.loads(pickle.dumps(gmm_machine))
-    assert numpy.allclose(
-        gmm_machine_after_pickle.means, gmm_machine.means, 10e-3
-    )
-    assert numpy.allclose(
-        gmm_machine_after_pickle.variances, gmm_machine.variances, 10e-3
-    )
-    assert numpy.allclose(
-        gmm_machine_after_pickle.weights, gmm_machine.weights, 10e-3
-    )
-
 def test_kmeans_machine():
     # Test a KMeansMachine
@@ -41,21 +22,3 @@ def test_kmeans_machine():
     assert numpy.allclose(
         kmeans_machine_after_pickle.means, kmeans_machine.means, 10e-3
     )
-
-
-def test_gmmstats():
-    gs = GMMStats(2, 3)
-    log_likelihood = -3.0
-    T = 1
-    n = numpy.array([0.4, 0.6], numpy.float64)
-    sumpx = numpy.array([[1.0, 2.0, 3.0], [2.0, 4.0, 3.0]], numpy.float64)
-    sumpxx = numpy.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], numpy.float64)
-    gs.log_likelihood = log_likelihood
-    gs.t = T
-    gs.n = n
-    gs.sum_px = sumpx
-    gs.sum_pxx = sumpxx
-    gs_after_pickle = pickle.loads(pickle.dumps(gs))
-    assert gs == gs_after_pickle