Commit 4cfcee92 authored by Tiago de Freitas Pereira

Merge branch 'dask-pipelines' into 'master'

Making key objects picklable

See merge request !38
parents 2d44cf8d 5921c042
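For context, a minimal sketch of what this merge enables (not part of the diff; assumes bob.learn.em with this change is installed): the C-backed machines can now make a round trip through pickle, which dask-based pipelines generally require when shipping objects to workers.

# minimal sketch, mirroring the tests added at the bottom of this diff
import pickle
import numpy
from bob.learn.em import GMMMachine

machine = GMMMachine(2, 3)  # 2 Gaussians, 3-dimensional features
machine.means = numpy.arange(6, dtype="float64").reshape(2, 3)
restored = pickle.loads(pickle.dumps(machine))
assert numpy.allclose(restored.means, machine.means)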
@@ -6,9 +6,14 @@ import bob.sp
# import our own Library
import bob.extension

bob.extension.load_bob_library("bob.learn.em", __file__)

from ._library import *
from ._library import GMMMachine as _GMMMachine_C
from ._library import ISVBase as _ISVBase_C
from ._library import ISVMachine as _ISVMachine_C

from . import version
from .version import module as __version__
from .version import api as __api_version__
@@ -16,25 +21,131 @@ from .train import *

def ztnorm_same_value(vect_a, vect_b):
    """Computes the matrix of boolean D for the ZT-norm, which indicates where
    the client ids of the T-Norm models and Z-Norm samples match.

    vect_a An (ordered) list of client_id corresponding to the T-Norm models
    vect_b An (ordered) list of client_id corresponding to the Z-Norm impostor samples
    """
    import numpy

    sameMatrix = numpy.ndarray((len(vect_a), len(vect_b)), "bool")
    for j in range(len(vect_a)):
        for i in range(len(vect_b)):
            sameMatrix[j, i] = vect_a[j] == vect_b[i]
    return sameMatrix
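To illustrate the helper above, a small sketch with arbitrary, made-up client ids:

t_models = [1, 2, 3]      # hypothetical T-Norm model client ids
z_samples = [2, 2, 4]     # hypothetical Z-Norm sample client ids
mask = ztnorm_same_value(t_models, z_samples)
# mask[j, i] is True where the j-th T-Norm model and the i-th Z-Norm sample
# share a client id; here only row 1 (client 2) contains True entries.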
def get_config():
    """Returns a string containing the configuration information.
    """
    return bob.extension.get_config(__name__, version.externals, version.api)


# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith("_")]


class GMMMachine(_GMMMachine_C):
    __doc__ = _GMMMachine_C.__doc__

    def update_dict(self, d):
        self.means = d["means"]
        self.variances = d["variances"]
        self.weights = d["weights"]

    @staticmethod
    def gmm_shape_from_dict(d):
        return d["means"].shape

    @classmethod
    def create_from_dict(cls, d):
        shape = GMMMachine.gmm_shape_from_dict(d)
        gmm_machine = cls(shape[0], shape[1])
        gmm_machine.update_dict(d)
        return gmm_machine

    @staticmethod
    def to_dict(gmm_machine):
        gmm_data = dict()
        gmm_data["means"] = gmm_machine.means
        gmm_data["variances"] = gmm_machine.variances
        gmm_data["weights"] = gmm_machine.weights
        return gmm_data

    def __getstate__(self):
        d = dict(self.__dict__)
        d.update(self.__class__.to_dict(self))
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        shape = self.gmm_shape_from_dict(d)
        self.__init__(shape[0], shape[1])
        self.update_dict(d)
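A short usage sketch of the plain-dict round trip that backs __getstate__/__setstate__ (assumes the package is installed; not part of the diff):

import numpy
from bob.learn.em import GMMMachine

machine = GMMMachine(2, 3)
state = GMMMachine.to_dict(machine)        # plain numpy arrays: means, variances, weights
clone = GMMMachine.create_from_dict(state)
assert numpy.allclose(clone.means, machine.means)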


class ISVBase(_ISVBase_C):
    __doc__ = _ISVBase_C.__doc__

    @staticmethod
    def to_dict(isv_base):
        isv_data = dict()
        isv_data["gmm"] = GMMMachine.to_dict(isv_base.ubm)
        isv_data["u"] = isv_base.u
        isv_data["d"] = isv_base.d
        return isv_data

    def update_dict(self, d):
        ubm = GMMMachine.create_from_dict(d["gmm"])
        u = d["u"]
        self.__init__(ubm, u.shape[1])
        self.u = u
        self.d = d["d"]

    @classmethod
    def create_from_dict(cls, d):
        ubm = GMMMachine.create_from_dict(d["gmm"])
        ru = d["u"].shape[1]
        isv_base = ISVBase(ubm, ru)
        isv_base.u = d["u"]
        isv_base.d = d["d"]
        return isv_base

    def __getstate__(self):
        d = dict(self.__dict__)
        d.update(self.__class__.to_dict(self))
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.update_dict(d)


class ISVMachine(_ISVMachine_C):
    __doc__ = _ISVMachine_C.__doc__

    @staticmethod
    def to_dict(isv_machine):
        isv_data = dict()
        isv_data["x"] = isv_machine.x
        isv_data["z"] = isv_machine.z
        isv_data["isv_base"] = ISVBase.to_dict(isv_machine.isv_base)
        return isv_data

    def update_dict(self, d):
        isv_base = ISVBase.create_from_dict(d["isv_base"])
        self.__init__(isv_base)
        self.x = d["x"]
        self.z = d["z"]

    def __getstate__(self):
        d = dict(self.__dict__)
        d.update(self.__class__.to_dict(self))
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.update_dict(d)
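The same pattern applies to the ISV objects; a sketch mirroring the tests added further down in this diff:

import pickle
import numpy
from bob.learn.em import GMMMachine, ISVBase, ISVMachine

ubm = GMMMachine(2, 3)          # UBM with 2 Gaussians of dimension 3
base = ISVBase(ubm, 2)          # session subspace U of rank 2
isv = ISVMachine(base)
isv.z = numpy.zeros(6)          # supervector-sized latent offset (2 * 3)
restored = pickle.loads(pickle.dumps(isv))
assert numpy.allclose(restored.z, isv.z)
assert numpy.allclose(restored.isv_base.u, base.u)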
@@ -108,6 +108,19 @@ void bob::learn::em::ISVMachine::setZ(const blitz::Array<double,1>& z)
updateCache();
}
void bob::learn::em::ISVMachine::setX(const blitz::Array<double,1>& x)
{
if(x.extent(0) != m_cache_x.extent(0)) { //checks dimension
boost::format m("size of input vector `x' (%d) does not match the expected size (%d)");
m % x.extent(0) % m_cache_x.extent(0);
throw std::runtime_error(m.str());
}
m_cache_x.reference(bob::core::array::ccopy(x));
// update cache
updateCache();
}
void bob::learn::em::ISVMachine::setISVBase(const boost::shared_ptr<bob::learn::em::ISVBase> isv_base)
{
if (!isv_base->getUbm())
......
/**
* @date Tue Jul 19 15:33:20 2011 +0200
* @author Francois Moulin <Francois.Moulin@idiap.ch>
* @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#include <bob.learn.em/ZTNorm.h>
#include <bob.core/assert.h>
#include <limits>
static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>* rawscores_zprobes_vs_models,
const blitz::Array<double,2>* rawscores_probes_vs_tmodels,
const blitz::Array<double,2>* rawscores_zprobes_vs_tmodels,
const blitz::Array<bool,2>* mask_zprobes_vs_tmodels_istruetrial,
blitz::Array<double,2>& scores)
{
// Rename variables
const blitz::Array<double,2>& A = rawscores_probes_vs_models;
const blitz::Array<double,2>* B = rawscores_zprobes_vs_models;
const blitz::Array<double,2>* C = rawscores_probes_vs_tmodels;
const blitz::Array<double,2>* D = rawscores_zprobes_vs_tmodels;
// Compute the sizes
int size_eval = A.extent(0);
int size_enroll = A.extent(1);
int size_tnorm = (C ? C->extent(0) : 0);
int size_znorm = (B ? B->extent(1) : 0);
// Check the inputs
bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
bob::core::array::assertSameDimensionLength(A.extent(1), size_enroll);
if (B) {
bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
if (size_znorm > 0)
bob::core::array::assertSameDimensionLength(B->extent(0), size_eval);
}
if (C) {
bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
if (size_tnorm > 0)
bob::core::array::assertSameDimensionLength(C->extent(1), size_enroll);
}
if (D && size_znorm > 0 && size_tnorm > 0) {
bob::core::array::assertSameDimensionLength(D->extent(0), size_tnorm);
bob::core::array::assertSameDimensionLength(D->extent(1), size_znorm);
}
if (mask_zprobes_vs_tmodels_istruetrial) {
bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(0), size_tnorm);
bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(1), size_znorm);
}
bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
bob::core::array::assertSameDimensionLength(scores.extent(1), size_enroll);
// Declare needed IndexPlaceholder
blitz::firstIndex ii;
blitz::secondIndex jj;
// Constant to check if the std is close to 0.
const double eps = std::numeric_limits<double>::min();
// zA
blitz::Array<double,2> zA(A.shape());
if (B && size_znorm > 0) {
// Znorm --> zA = (A - mean(B) ) / std(B) [znorm on original scores]
// mean(B)
blitz::Array<double,1> mean_B(blitz::mean(*B, jj));
// std(B)
blitz::Array<double,2> B2n(B->shape());
B2n = blitz::pow2((*B)(ii, jj) - mean_B(ii));
blitz::Array<double,1> std_B(B->extent(0));
if(size_znorm>1)
std_B = blitz::sqrt(blitz::sum(B2n, jj) / (size_znorm - 1));
else // 1 single value -> std = 0
std_B = 0;
std_B = blitz::where( std_B <= eps, 1., std_B);
zA = (A(ii, jj) - mean_B(ii)) / std_B(ii);
}
else
zA = A;
blitz::Array<double,2> zC(size_tnorm, size_enroll);
if (D && size_tnorm > 0 && size_znorm > 0) {
blitz::Array<double,1> mean_Dimp(size_tnorm);
blitz::Array<double,1> std_Dimp(size_tnorm);
// Compute mean_Dimp and std_Dimp = D only with impostors
for (int i = 0; i < size_tnorm; ++i) {
double sum = 0;
double sumsq = 0;
double count = 0;
for (int j = 0; j < size_znorm; ++j) {
bool keep;
// The second part is never executed if mask_zprobes_vs_tmodels_istruetrial==NULL
keep = (mask_zprobes_vs_tmodels_istruetrial == NULL) || !(*mask_zprobes_vs_tmodels_istruetrial)(i, j); //tnorm_models_spk_ids(i) != znorm_tests_spk_ids(j);
double value = keep * (*D)(i, j);
sum += value;
sumsq += value*value;
count += keep;
}
double mean = sum / count;
mean_Dimp(i) = mean;
if (count > 1)
std_Dimp(i) = sqrt((sumsq - count * mean * mean) / (count -1));
else // 1 single value -> std = 0
std_Dimp(i) = 0;
}
// zC = (C - mean(D)) / std(D) [znorm the tnorm scores]
std_Dimp = blitz::where( std_Dimp <= eps, 1., std_Dimp);
zC = ((*C)(ii, jj) - mean_Dimp(ii)) / std_Dimp(ii);
}
else if (C && size_tnorm > 0)
zC = *C;
if (C && size_tnorm > 0)
{
blitz::Array<double,1> mean_zC(size_enroll);
blitz::Array<double,1> std_zC(size_enroll);
// ztA = (zA - mean(zC)) / std(zC) [ztnorm on eval scores]
mean_zC = blitz::mean(zC(jj, ii), jj);
if (size_tnorm > 1)
std_zC = sqrt(blitz::sum(pow(zC(jj, ii) - mean_zC(ii), 2) , jj) / (size_tnorm - 1));
else // 1 single value -> std = 0
std_zC = 0;
std_zC = blitz::where( std_zC <= eps, 1., std_zC);
// Normalised scores
scores = (zA(ii, jj) - mean_zC(jj)) / std_zC(jj);
}
else
scores = zA;
}
void bob::learn::em::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_zprobes_vs_models,
const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
const blitz::Array<bool,2>& mask_zprobes_vs_tmodels_istruetrial,
blitz::Array<double,2>& scores)
{
_ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
&rawscores_zprobes_vs_tmodels, &mask_zprobes_vs_tmodels_istruetrial, scores);
}
void bob::learn::em::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_zprobes_vs_models,
const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
blitz::Array<double,2>& scores)
{
_ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
&rawscores_zprobes_vs_tmodels, NULL, scores);
}
void bob::learn::em::tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
blitz::Array<double,2>& scores)
{
_ztNorm(rawscores_probes_vs_models, NULL, &rawscores_probes_vs_tmodels,
NULL, NULL, scores);
}
void bob::learn::em::zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_zprobes_vs_models,
blitz::Array<double,2>& scores)
{
_ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
NULL, NULL, scores);
}
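For reference only (these C++ routines are apparently being dropped from the Python bindings in the hunks below), the core z-norm step can be written in NumPy; a sketch mirroring the blitz expressions above:

import numpy

def znorm_step(A, B, eps=numpy.finfo(float).tiny):
    # zA = (A - mean(B)) / std(B), with statistics taken along B's second axis,
    # mirroring the blitz code above (std clipped to 1 where it is ~0)
    mean_B = B.mean(axis=1)
    std_B = B.std(axis=1, ddof=1) if B.shape[1] > 1 else numpy.zeros(B.shape[0])
    std_B = numpy.where(std_B <= eps, 1.0, std_B)
    return (A - mean_B[:, None]) / std_B[:, None]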
@@ -966,7 +966,7 @@ bool init_BobLearnEMGMMMachine(PyObject* module)
// initialize the type struct
PyBobLearnEMGMMMachine_Type.tp_name = GMMMachine_doc.name();
PyBobLearnEMGMMMachine_Type.tp_basicsize = sizeof(PyBobLearnEMGMMMachineObject);
PyBobLearnEMGMMMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
PyBobLearnEMGMMMachine_Type.tp_doc = GMMMachine_doc.doc();
// set the functions
......
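The only change in this hunk (and in the matching ISVBase/ISVMachine hunks below) is the addition of Py_TPFLAGS_BASETYPE. Without it CPython refuses to subclass a C-implemented type, so the pure-Python pickling wrappers above could not be defined; a sketch of the failure mode:

# with Py_TPFLAGS_DEFAULT only, this subclass definition fails at import time with
# something like: TypeError: type '...' is not an acceptable base type
class GMMMachine(_GMMMachine_C):
    pass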
@@ -147,6 +147,13 @@ class ISVMachine
*/
void setZ(const blitz::Array<double,1>& z);
/**
* @brief Sets the session variable
*/
void setX(const blitz::Array<double,1>& x);
/**
* @brief Returns the ISVBase
*/
......
/**
* @date Tue Jul 19 15:33:20 2011 +0200
* @author Francois Moulin <Francois.Moulin@idiap.ch>
* @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#ifndef BOB_LEARN_EM_ZTNORM_H
#define BOB_LEARN_EM_ZTNORM_H
#include <blitz/array.h>
namespace bob { namespace learn { namespace em {
/**
* Normalise raw scores with ZT-Norm
*
* @exception std::runtime_error matrix sizes are not consistent
*
* @param rawscores_probes_vs_models
* @param rawscores_zprobes_vs_models
* @param rawscores_probes_vs_tmodels
* @param rawscores_zprobes_vs_tmodels
* @param mask_zprobes_vs_tmodels_istruetrial
* @param[out] normalizedscores normalized scores
* @warning The destination score array should have the correct size
* (Same size as rawscores_probes_vs_models)
*/
void ztNorm(const blitz::Array<double, 2>& rawscores_probes_vs_models,
const blitz::Array<double, 2>& rawscores_zprobes_vs_models,
const blitz::Array<double, 2>& rawscores_probes_vs_tmodels,
const blitz::Array<double, 2>& rawscores_zprobes_vs_tmodels,
const blitz::Array<bool, 2>& mask_zprobes_vs_tmodels_istruetrial,
blitz::Array<double, 2>& normalizedscores);
/**
* Normalise raw scores with ZT-Norm.
* Assume that znorm and tnorm have no common subject id.
*
* @exception std::runtime_error matrix sizes are not consistent
*
* @param rawscores_probes_vs_models
* @param rawscores_zprobes_vs_models
* @param rawscores_probes_vs_tmodels
* @param rawscores_zprobes_vs_tmodels
* @param[out] normalizedscores normalized scores
* @warning The destination score array should have the correct size
* (Same size as rawscores_probes_vs_models)
*/
void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_zprobes_vs_models,
const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
blitz::Array<double,2>& normalizedscores);
/**
* Normalise raw scores with T-Norm.
*
* @exception std::runtime_error matrix sizes are not consistent
*
* @param rawscores_probes_vs_models
* @param rawscores_probes_vs_tmodels
* @param[out] normalizedscores normalized scores
* @warning The destination score array should have the correct size
* (Same size as rawscores_probes_vs_models)
*/
void tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
blitz::Array<double,2>& normalizedscores);
/**
* Normalise raw scores with Z-Norm.
*
* @exception std::runtime_error matrix sizes are not consistent
*
* @param rawscores_probes_vs_models
* @param rawscores_zprobes_vs_models
* @param[out] normalizedscores normalized scores
* @warning The destination score array should have the correct size
* (Same size as rawscores_probes_vs_models)
*/
void zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
const blitz::Array<double,2>& rawscores_zprobes_vs_models,
blitz::Array<double,2>& normalizedscores);
} } } // namespaces
#endif /* BOB_LEARN_EM_ZTNORM_H */
@@ -539,7 +539,7 @@ bool init_BobLearnEMISVBase(PyObject* module)
// initialize the type struct
PyBobLearnEMISVBase_Type.tp_name = ISVBase_doc.name();
PyBobLearnEMISVBase_Type.tp_basicsize = sizeof(PyBobLearnEMISVBaseObject);
PyBobLearnEMISVBase_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
PyBobLearnEMISVBase_Type.tp_doc = ISVBase_doc.doc();
// set the functions
......
@@ -240,6 +240,37 @@ PyObject* PyBobLearnEMISVMachine_getX(PyBobLearnEMISVMachineObject* self, void*)
return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getX());
BOB_CATCH_MEMBER("`x` could not be read", 0)
}
int PyBobLearnEMISVMachine_setX(PyBobLearnEMISVMachineObject* self, PyObject* value, void*){
BOB_TRY
PyBlitzArrayObject* input;
if (!PyBlitzArray_Converter(value, &input)){
PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, X.name());
return -1;
}
auto o_ = make_safe(input);
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, X.name());
return -1;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, X.name());
return -1;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getX().extent(0)) {
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getX().extent(0), (Py_ssize_t)input->shape[0], X.name());
return -1;
}
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "x");
if (!b) return -1;
self->cxx->setX(*b);
return 0;
BOB_CATCH_MEMBER("`x` vector could not be set", -1)
}
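With this setter registered below, `x` becomes writable from Python; a hedged sketch of the behaviour implied by the checks above:

import numpy
from bob.learn.em import GMMMachine, ISVBase, ISVMachine

isv = ISVMachine(ISVBase(GMMMachine(2, 3), 2))   # session subspace of rank 2
isv.x = numpy.array([1.0, 2.0])                  # accepted: 1D float64 of the expected length
# isv.x = numpy.zeros(5)                 # rejected by the length check above (TypeError)
# isv.x = numpy.zeros(2, dtype="int64")  # rejected by the dtype check above (TypeError)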
/***** isv_base *****/
@@ -318,7 +349,7 @@ static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = {
{
X.name(),
(getter)PyBobLearnEMISVMachine_getX,
(setter)PyBobLearnEMISVMachine_setX,
X.doc(),
0
},
@@ -648,7 +679,7 @@ bool init_BobLearnEMISVMachine(PyObject* module)
// initialize the type struct
PyBobLearnEMISVMachine_Type.tp_name = ISVMachine_doc.name();
PyBobLearnEMISVMachine_Type.tp_basicsize = sizeof(PyBobLearnEMISVMachineObject);
PyBobLearnEMISVMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
PyBobLearnEMISVMachine_Type.tp_doc = ISVMachine_doc.doc();
// set the functions
......
@@ -11,24 +11,6 @@
#include "main.h"
static PyMethodDef module_methods[] = {
{
zt_norm.name(),
(PyCFunction)PyBobLearnEM_ztNorm,
METH_VARARGS|METH_KEYWORDS,
zt_norm.doc()
},
{
t_norm.name(),
(PyCFunction)PyBobLearnEM_tNorm,
METH_VARARGS|METH_KEYWORDS,
t_norm.doc()
},
{
z_norm.name(),
(PyCFunction)PyBobLearnEM_zNorm,
METH_VARARGS|METH_KEYWORDS,
z_norm.doc()
},
{
linear_scoring1.name(),
(PyCFunction)PyBobLearnEM_linear_scoring,
......
@@ -49,7 +49,6 @@
#include <bob.learn.em/PLDAMachine.h>
#include <bob.learn.em/PLDATrainer.h>
#include <bob.learn.em/ZTNorm.h>
/// inserts the given key, value pair into the given dictionaries
static inline int insert_item_string(PyObject* dict, PyObject* entries, const char* key, Py_ssize_t value){
@@ -265,16 +264,6 @@ bool init_BobLearnEMEMPCATrainer(PyObject* module);
int PyBobLearnEMEMPCATrainer_Check(PyObject* o);
//ZT Normalization
PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs);
extern bob::extension::FunctionDoc zt_norm;
PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs);
extern bob::extension::FunctionDoc t_norm;
PyObject* PyBobLearnEM_zNorm(PyObject*, PyObject* args, PyObject* kwargs);
extern bob::extension::FunctionDoc z_norm;
//Linear scoring
PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs);
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
from bob.learn.em import GMMMachine, ISVBase, ISVMachine
import numpy
import pickle
def test_gmm_machine():
    gmm_machine = GMMMachine(3, 3)
    gmm_machine.means = numpy.arange(9).reshape(3, 3).astype("float")

    gmm_machine_after_pickle = pickle.loads(pickle.dumps(gmm_machine))
    assert numpy.allclose(gmm_machine.means, gmm_machine_after_pickle.means, 10e-3)
    assert numpy.allclose(gmm_machine.variances, gmm_machine_after_pickle.variances, 10e-3)
    assert numpy.allclose(gmm_machine.weights, gmm_machine_after_pickle.weights, 10e-3)


def test_isv_base():
    ubm = GMMMachine(3, 3)
    ubm.means = numpy.arange(9).reshape(3, 3).astype("float")
    isv_base = ISVBase(ubm, 2)
    isv_base.u = numpy.arange(18).reshape(9, 2).astype("float")
    isv_base.d = numpy.arange(9).astype("float")

    isv_base_after_pickle = pickle.loads(pickle.dumps(isv_base))
    assert numpy.allclose(isv_base.u, isv_base_after_pickle.u, 10e-3)
    assert numpy.allclose(isv_base.d, isv_base_after_pickle.d, 10e-3)


def test_isv_machine():
    # Creates a UBM
    weights = numpy.array([0.4, 0.6], 'float64')
    means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
    variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
    ubm = GMMMachine(2, 3)
    ubm.weights = weights
    ubm.means = means
    ubm.variances = variances

    # Creates a ISVBaseMachine
    U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
    #V = numpy.array([[0], [0], [0], [0], [0], [0]], 'float64')
    d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
    base = ISVBase(ubm, 2)
    base.u = U
    base.d = d

    # Creates a ISVMachine
    z = numpy.array([3, 4, 1, 2, 0, 1], 'float64')
    x = numpy.array([1, 2], 'float64')
    isv_machine = ISVMachine(base)
    isv_machine.z = z
    isv_machine.x = x

    isv_machine_after_pickle = pickle.loads(pickle.dumps(isv_machine))
    assert numpy.allclose(isv_machine_after_pickle.isv_base.u, isv_machine.isv_base.u, 10e-3)
    assert numpy.allclose(isv_machine_after_pickle.isv_base.d, isv_machine.isv_base.d, 10e-3)
    assert numpy.allclose(isv_machine_after_pickle.x, isv_machine.x, 10e-3)
    assert numpy.allclose(isv_machine_after_pickle.z, isv_machine.z, 10e-3)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Francois Moulin <Francois.Moulin@idiap.ch>
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Tue Jul 19 15:33:20 2011 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests on the ZTNorm function
"""
import numpy
from bob.io.base.test_utils import datafile
import bob.io.base
#from . import znorm, tnorm, ztnorm
import bob.learn.em
def sameValue(vect_A, vect_B):
    sameMatrix = numpy.zeros((vect_A.shape[0], vect_B.shape[0]), 'bool')
    for j in range(vect_A.shape[0]):
        for i in range(vect_B.shape[0]):
            sameMatrix[j, i] = (vect_A[j] == vect_B[i])
    return sameMatrix


def tnorm(A, C):
    Cmean = numpy.mean(C, axis=0)
    if C.shape[1] > 1:
        Cstd = numpy.sqrt(numpy.sum((C - numpy.tile(Cmean.reshape(1, C.shape[1]), (C.shape[0], 1))) ** 2, axis=0) / (C.shape[0] - 1))
    else: