Commit fc924e7f authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Solved all memory issues

parent 6b29c6f9
......@@ -18,9 +18,10 @@ bob::learn::em::IVectorMachine::IVectorMachine()
bob::learn::em::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
const size_t rt, const double variance_threshold):
m_ubm(ubm), m_rt(rt),
m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
m_variance_threshold(variance_threshold)
{
m_sigma = 0.0;
resizePrecompute();
}
......@@ -153,6 +154,7 @@ void bob::learn::em::IVectorMachine::precompute()
blitz::Range rall = blitz::Range::all();
const int C = (int)m_ubm->getNGaussians();
const int D = (int)m_ubm->getNInputs();
// T_{c}^{T}.sigma_{c}^{-1}
for (int c=0; c<C; ++c)
{
......
......@@ -139,51 +139,7 @@ static PyObject* PyBobLearnEMEMPCATrainer_RichCompare(PyBobLearnEMEMPCATrainerOb
/************ Variables Section ***********************************/
/******************************************************************/
/***** rng *****/
/***** rng *****/
// Documentation descriptor for the ``rng`` property of the EMPCA trainer.
static auto rng = bob::extension::VariableDoc(
  "rng",
  // The property holds (and accepts) a boost mt19937 generator object,
  // not a string — the previous type descriptor "str" was incorrect.
  ":py:class:`bob.core.random.mt19937`",
  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
  ""
);
/**
 * Getter for the ``rng`` property.
 *
 * Returns a new Python mt19937 wrapper holding a *copy* of the trainer's
 * generator. The previous code stored the raw pointer obtained from
 * ``getRng().get()`` in the Python object: the pointee is owned by the
 * trainer's shared_ptr, so the Python wrapper ended up aliasing (and, on
 * destruction, potentially freeing) memory it does not own — a dangling
 * pointer / double-free bug.
 */
PyObject* PyBobLearnEMEMPCATrainer_getRng(PyBobLearnEMEMPCATrainerObject* self, void*) {
  BOB_TRY
  // Allocate the corresponding Python object
  PyBoostMt19937Object* retval =
    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
  if (!retval) return 0;  // propagate the MemoryError set by tp_alloc
  // Hand out an independent copy so Python and C++ never share ownership
  // of the same raw generator.
  retval->rng = new boost::mt19937(*self->cxx->getRng());
  return Py_BuildValue("N", retval);
  BOB_CATCH_MEMBER("Rng method could not be read", 0)
}
/**
 * Setter for the ``rng`` property.
 *
 * Copies the generator state out of the Python object instead of wrapping
 * the Python-owned raw pointer in a shared_ptr. The previous cast
 * ``(boost::shared_ptr<boost::mt19937>)boostObject->rng`` created a second,
 * independent owner of the same heap object, so both the Python wrapper
 * and the shared_ptr would eventually delete it — a double free.
 */
int PyBobLearnEMEMPCATrainer_setRng(PyBobLearnEMEMPCATrainerObject* self, PyObject* value, void*) {
  BOB_TRY
  if (!PyBoostMt19937_Check(value)){
    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
    return -1;
  }
  PyBoostMt19937Object* boostObject = 0;
  PyBoostMt19937_Converter(value, &boostObject);
  // Deep-copy the generator so the trainer owns its own instance.
  self->cxx->setRng(boost::shared_ptr<boost::mt19937>(new boost::mt19937(*boostObject->rng)));
  return 0;
  BOB_CATCH_MEMBER("Rng could not be set", 0)
}
// Attribute table for the EMPCATrainer Python type: wires the ``rng``
// property name to its C getter/setter and documentation string. The
// zero-filled entry terminates the table, as required by CPython.
static PyGetSetDef PyBobLearnEMEMPCATrainer_getseters[] = {
{
rng.name(),
(getter)PyBobLearnEMEMPCATrainer_getRng,
(setter)PyBobLearnEMEMPCATrainer_setRng,
rng.doc(),
0
},
{0} // Sentinel
};
......@@ -201,7 +157,8 @@ static auto initialize = bob::extension::FunctionDoc(
)
.add_prototype("linear_machine,data")
.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
.add_parameter("data", "array_like <float, 2D>", "Input data");
.add_parameter("data", "array_like <float, 2D>", "Input data")
.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
......@@ -209,11 +166,19 @@ static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObj
char** kwlist = initialize.kwlist(0);
PyBobLearnLinearMachineObject* linear_machine = 0;
PyBlitzArrayObject* data = 0;
PyBlitzArrayObject* data = 0;
PyBoostMt19937Object* rng = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
&PyBlitzArray_Converter, &data)) return 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&|O!", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
&PyBlitzArray_Converter, &data,
&PyBoostMt19937_Type, &rng)) return 0;
auto data_ = make_safe(data);
if(rng){
boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
self->cxx->setRng(rng_cpy);
}
self->cxx->initialize(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
......
......@@ -75,18 +75,9 @@ static int PyBobLearnEMGaussian_init_hdf5(PyBobLearnEMGaussianObject* self, PyOb
Gaussian_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);
try {
self->cxx.reset(new bob::learn::em::Gaussian(*(config->f)));
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
return -1;
}
self->cxx.reset(new bob::learn::em::Gaussian(*(config->f)));
return 0;
}
......@@ -170,7 +161,7 @@ int PyBobLearnEMGaussian_Check(PyObject* o) {
/***** MEAN *****/
static auto mean = bob::extension::VariableDoc(
"mean",
"array_like <double, 1D>",
"array_like <float, 1D>",
"Mean of the Gaussian",
""
);
......@@ -181,13 +172,30 @@ PyObject* PyBobLearnEMGaussian_getMean(PyBobLearnEMGaussianObject* self, void*){
}
int PyBobLearnEMGaussian_setMean(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
BOB_TRY
PyBlitzArrayObject* o;
if (!PyBlitzArray_Converter(value, &o)){
PyBlitzArrayObject* input;
if (!PyBlitzArray_Converter(value, &input)){
PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean.name());
return -1;
}
auto o_ = make_safe(o);
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean");
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, mean.name());
return -1;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, mean.name());
return -1;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], mean.name());
return -1;
}
auto o_ = make_safe(input);
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "mean");
if (!b) return -1;
self->cxx->setMean(*b);
return 0;
......@@ -197,7 +205,7 @@ int PyBobLearnEMGaussian_setMean(PyBobLearnEMGaussianObject* self, PyObject* val
/***** Variance *****/
static auto variance = bob::extension::VariableDoc(
"variance",
"array_like <double, 1D>",
"array_like <float, 1D>",
"Variance of the Gaussian",
""
);
......@@ -208,13 +216,30 @@ PyObject* PyBobLearnEMGaussian_getVariance(PyBobLearnEMGaussianObject* self, voi
}
int PyBobLearnEMGaussian_setVariance(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
BOB_TRY
PyBlitzArrayObject* o;
if (!PyBlitzArray_Converter(value, &o)){
PyBlitzArrayObject* input;
if (!PyBlitzArray_Converter(value, &input)){
PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance.name());
return -1;
}
auto o_ = make_safe(o);
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance");
auto input_ = make_safe(input);
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance.name());
return -1;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance.name());
return -1;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], variance.name());
return -1;
}
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "variance");
if (!b) return -1;
self->cxx->setVariance(*b);
return 0;
......@@ -225,7 +250,7 @@ int PyBobLearnEMGaussian_setVariance(PyBobLearnEMGaussianObject* self, PyObject*
/***** variance_thresholds *****/
static auto variance_thresholds = bob::extension::VariableDoc(
"variance_thresholds",
"array_like <double, 1D>",
"array_like <float, 1D>",
"The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. ",
"The variance will be set to this value if an attempt is made to set it to a smaller value."
);
......@@ -236,13 +261,31 @@ PyObject* PyBobLearnEMGaussian_getVarianceThresholds(PyBobLearnEMGaussianObject*
}
int PyBobLearnEMGaussian_setVarianceThresholds(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
BOB_TRY
PyBlitzArrayObject* o;
if (!PyBlitzArray_Converter(value, &o)){
PyBlitzArrayObject* input;
if (!PyBlitzArray_Converter(value, &input)){
PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
return -1;
}
auto input_ = make_safe(input);
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
return -1;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
return -1;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], variance_thresholds.name());
return -1;
}
auto o_ = make_safe(o);
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_thresholds");
auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "variance_thresholds");
if (!b) return -1;
self->cxx->setVarianceThresholds(*b);
return 0;
......@@ -336,7 +379,7 @@ static auto log_likelihood = bob::extension::FunctionDoc(
true
)
.add_prototype("input","output")
.add_parameter("input", "array_like <double, 1D>", "Input vector")
.add_parameter("input", "array_like <float, 1D>", "Input vector")
.add_return("output","float","The log likelihood");
static PyObject* PyBobLearnEMGaussian_loglikelihood(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
......@@ -349,6 +392,25 @@ static PyObject* PyBobLearnEMGaussian_loglikelihood(PyBobLearnEMGaussianObject*
//protects acquired resources through this scope
auto input_ = make_safe(input);
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
log_likelihood.print_usage();
return 0;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
log_likelihood.print_usage();
return 0;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
log_likelihood.print_usage();
return 0;
}
double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
return Py_BuildValue("d", value);
......@@ -362,8 +424,8 @@ static auto log_likelihood_ = bob::extension::FunctionDoc(
"Output the log likelihood given a sample. The input size is NOT checked."
)
.add_prototype("input","output")
.add_parameter("input", "array_like <double, 1D>", "Input vector")
.add_return("output","double","The log likelihood");
.add_parameter("input", "array_like <float, 1D>", "Input vector")
.add_return("output","float","The log likelihood");
static PyObject* PyBobLearnEMGaussian_loglikelihood_(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
char** kwlist = log_likelihood_.kwlist(0);
......@@ -373,6 +435,25 @@ static PyObject* PyBobLearnEMGaussian_loglikelihood_(PyBobLearnEMGaussianObject*
//protects acquired resources through this scope
auto input_ = make_safe(input);
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
log_likelihood.print_usage();
return 0;
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
log_likelihood.print_usage();
return 0;
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
log_likelihood.print_usage();
return 0;
}
double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
return Py_BuildValue("d", value);
......@@ -386,8 +467,7 @@ static auto save = bob::extension::FunctionDoc(
"Save the configuration of the Gassian Machine to a given HDF5 file"
)
.add_prototype("hdf5")
.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing")
;
.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
static PyObject* PyBobLearnEMGaussian_Save(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
......
......@@ -84,23 +84,13 @@ static int PyBobLearnEMGMMStats_init_hdf5(PyBobLearnEMGMMStatsObject* self, PyOb
char** kwlist = GMMStats_doc.kwlist(2);
/*
PyBobIoHDF5FileObject* config = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
GMMStats_doc.print_usage();
return -1;
}
*/
PyObject* config = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobIoHDF5File_Type, &config)){
GMMStats_doc.print_usage();
return -1;
}
auto h5f = reinterpret_cast<PyBobIoHDF5FileObject*>(config);
self->cxx.reset(new bob::learn::em::GMMStats(*(h5f->f)));
auto config_ = make_safe(config);
self->cxx.reset(new bob::learn::em::GMMStats(*(config->f)));
return 0;
}
......
......@@ -222,43 +222,43 @@ class PLDATrainer
/**
* @brief Sets the Random Number Generator
*/
void setRng(const boost::shared_ptr<boost::mt19937> rng)
void setRng(boost::shared_ptr<boost::mt19937> rng)
{ m_rng = rng; }
/**
* @brief Gets the Random Number Generator
*/
const boost::shared_ptr<boost::mt19937> getRng() const
boost::shared_ptr<boost::mt19937> getRng() const
{ return m_rng; }
private:
boost::shared_ptr<boost::mt19937> m_rng;
//representation
size_t m_dim_d; ///< Dimensionality of the input features
size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
boost::shared_ptr<boost::mt19937> m_rng;
//representation
size_t m_dim_d; ///< Dimensionality of the input features
size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
// Statistics and covariance computed during the training process
blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
// Precomputed
/**
* @brief Number of training samples for each individual in the training set
*/
std::vector<size_t> m_cache_n_samples_per_id;
/**
* @brief Tells if there is an identity with a 'key'/particular number of
// Statistics and covariance computed during the training process
blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
// Precomputed
/**
* @brief Number of training samples for each individual in the training set
*/
std::vector<size_t> m_cache_n_samples_per_id;
/**
* @brief Tells if there is an identity with a 'key'/particular number of
* training samples, and if corresponding matrices are up to date.
*/
std::map<size_t,bool> m_cache_n_samples_in_training;
......
......@@ -62,6 +62,7 @@ static int PyBobLearnEMISVBase_init_hdf5(PyBobLearnEMISVBaseObject* self, PyObje
ISVBase_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);
self->cxx.reset(new bob::learn::em::ISVBase(*(config->f)));
......
......@@ -59,7 +59,7 @@ static int PyBobLearnEMISVMachine_init_hdf5(PyBobLearnEMISVMachineObject* self,
ISVMachine_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);
self->cxx.reset(new bob::learn::em::ISVMachine(*(config->f)));
return 0;
......
......@@ -325,41 +325,6 @@ int PyBobLearnEMISVTrainer_set_Z(PyBobLearnEMISVTrainerObject* self, PyObject* v
}
/***** rng *****/
// Documentation descriptor for the ``rng`` property of the ISV trainer.
static auto rng = bob::extension::VariableDoc(
  "rng",
  // The property holds (and accepts) a boost mt19937 generator object,
  // not a string — the previous type descriptor "str" was incorrect.
  ":py:class:`bob.core.random.mt19937`",
  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
  ""
);
/**
 * Getter for the ``rng`` property.
 *
 * Returns a new Python mt19937 wrapper holding a *copy* of the trainer's
 * generator. The previous code stored the raw pointer from
 * ``getRng().get()`` in the Python object, aliasing memory owned by the
 * trainer's shared_ptr — a dangling pointer / double-free bug once either
 * side released the object.
 */
PyObject* PyBobLearnEMISVTrainer_getRng(PyBobLearnEMISVTrainerObject* self, void*) {
  BOB_TRY
  // Allocate the corresponding Python object
  PyBoostMt19937Object* retval =
    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
  if (!retval) return 0;  // propagate the MemoryError set by tp_alloc
  // Hand out an independent copy so Python and C++ never share ownership
  // of the same raw generator.
  retval->rng = new boost::mt19937(*self->cxx->getRng());
  return Py_BuildValue("N", retval);
  BOB_CATCH_MEMBER("Rng method could not be read", 0)
}
/**
 * Setter for the ``rng`` property.
 *
 * Copies the generator state out of the Python object instead of wrapping
 * the Python-owned raw pointer in a shared_ptr. The previous cast
 * ``(boost::shared_ptr<boost::mt19937>)boostObject->rng`` created a second,
 * independent owner of the same heap object — a guaranteed double free.
 */
int PyBobLearnEMISVTrainer_setRng(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*) {
  BOB_TRY
  if (!PyBoostMt19937_Check(value)){
    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
    return -1;
  }
  PyBoostMt19937Object* boostObject = 0;
  PyBoostMt19937_Converter(value, &boostObject);
  // Deep-copy the generator so the trainer owns its own instance.
  self->cxx->setRng(boost::shared_ptr<boost::mt19937>(new boost::mt19937(*boostObject->rng)));
  return 0;
  BOB_CATCH_MEMBER("Rng could not be set", 0)
}
static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
{
acc_u_a1.name(),
......@@ -390,14 +355,6 @@ static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
0
},
{
rng.name(),
(getter)PyBobLearnEMISVTrainer_getRng,
(setter)PyBobLearnEMISVTrainer_setRng,
rng.doc(),
0
},
{0} // Sentinel
};
......@@ -414,9 +371,10 @@ static auto initialize = bob::extension::FunctionDoc(
"",
true
)
.add_prototype("isv_base,stats")
.add_prototype("isv_base,stats,rng")
.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object")
.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
......@@ -425,9 +383,16 @@ static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject*
PyBobLearnEMISVBaseObject* isv_base = 0;
PyObject* stats = 0;
PyBoostMt19937Object* rng = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
&PyList_Type, &stats)) return 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
&PyList_Type, &stats,
&PyBoostMt19937_Type, &rng)) return 0;
if(rng){
boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
self->cxx->setRng(rng_cpy);
}
std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
if(extract_GMMStats_2d(stats ,training_data)==0)
......
......@@ -25,7 +25,7 @@ static auto IVectorMachine_doc = bob::extension::ClassDoc(
"",
true
)
.add_prototype("ubm, rt, variance_threshold","")
.add_prototype("ubm,rt,variance_threshold","")
.add_prototype("other","")
.add_prototype("hdf5","")
......@@ -62,7 +62,7 @@ static int PyBobLearnEMIVectorMachine_init_hdf5(PyBobLearnEMIVectorMachineObject
IVectorMachine_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);
self->cxx.reset(new bob::learn::em::IVectorMachine(*(config->f)));
return 0;
......@@ -78,7 +78,8 @@ static int PyBobLearnEMIVectorMachine_init_ubm(PyBobLearnEMIVectorMachineObject*
double variance_threshold = 1e-10;
//Here we have to select which keyword argument to read
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &rt, &variance_threshold)){
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
&rt, &variance_threshold)){
IVectorMachine_doc.print_usage();
return -1;
}
......@@ -625,15 +626,6 @@ static PyMethodDef PyBobLearnEMIVectorMachine_methods[] = {
__compute_TtSigmaInvFnorm__.doc()
},
/*
{
forward.name(),
(PyCFunction)PyBobLearnEMIVectorMachine_Forward,
METH_VARARGS|METH_KEYWORDS,
forward.doc()
},*/
{0} /* Sentinel */
};
......
......@@ -62,7 +62,7 @@ static int PyBobLearnEMJFABase_init_hdf5(PyBobLearnEMJFABaseObject* self, PyObje
JFABase_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);
self->cxx.reset(new bob::learn::em::JFABase(*(config->f)));
return 0;
......
......@@ -59,7 +59,7 @@ static int PyBobLearnEMJFAMachine_init_hdf5(PyBobLearnEMJFAMachineObject* self,
JFAMachine_doc.print_usage();
return -1;
}
auto config_ = make_safe(config);