From 2601b117b72c5169658fed4133a458e551886400 Mon Sep 17 00:00:00 2001
From: Tiago Freitas Pereira <tiagofrepereira@gmail.com>
Date: Wed, 4 Mar 2015 12:12:42 +0100
Subject: [PATCH] Added more tests with blitz arrays

---
 bob/learn/em/MAP_gmm_trainer.cpp    |  4 ++--
 bob/learn/em/cpp/MAP_GMMTrainer.cpp |  4 ++++
 bob/learn/em/cpp/ML_GMMTrainer.cpp  | 13 ++++---------
 bob/learn/em/gaussian.cpp           |  2 +-
 bob/learn/em/gmm_stats.cpp          |  2 +-
 bob/learn/em/ivector_machine.cpp    |  2 +-
 bob/learn/em/kmeans_trainer.cpp     |  9 ++++++---
 bob/learn/em/plda_base.cpp          |  2 +-
 bob/learn/em/plda_machine.cpp       |  4 ++--
 version.txt                         |  2 +-
 10 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/bob/learn/em/MAP_gmm_trainer.cpp b/bob/learn/em/MAP_gmm_trainer.cpp
index 4594673..7c16576 100644
--- a/bob/learn/em/MAP_gmm_trainer.cpp
+++ b/bob/learn/em/MAP_gmm_trainer.cpp
@@ -185,7 +185,7 @@ PyObject* PyBobLearnEMMAPGMMTrainer_getRelevanceFactor(PyBobLearnEMMAPGMMTrainer
 int PyBobLearnEMMAPGMMTrainer_setRelevanceFactor(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if(!PyNumber_Check(value)){
+  if(!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, relevance_factor.name());
     return -1;
   }
@@ -211,7 +211,7 @@ PyObject* PyBobLearnEMMAPGMMTrainer_getAlpha(PyBobLearnEMMAPGMMTrainerObject* se
 int PyBobLearnEMMAPGMMTrainer_setAlpha(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if(!PyNumber_Check(value)){
+  if(!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, alpha.name());
     return -1;
   }
diff --git a/bob/learn/em/cpp/MAP_GMMTrainer.cpp b/bob/learn/em/cpp/MAP_GMMTrainer.cpp
index 64779c5..7dcf504 100644
--- a/bob/learn/em/cpp/MAP_GMMTrainer.cpp
+++ b/bob/learn/em/cpp/MAP_GMMTrainer.cpp
@@ -75,6 +75,10 @@ void bob::learn::em::MAP_GMMTrainer::mStep(bob::learn::em::GMMMachine& gmm)
 {
   // Read options and variables
   double n_gaussians = gmm.getNGaussians();
+
+  //Checking if it is necessary to resize the cache
+  if((size_t)m_cache_alpha.extent(0) != n_gaussians)
+    initialize(gmm); //If it is different for some reason, there is no way, you have to initialize
 
   // Check that the prior GMM has been specified
   if (!m_prior_gmm)
diff --git a/bob/learn/em/cpp/ML_GMMTrainer.cpp b/bob/learn/em/cpp/ML_GMMTrainer.cpp
index 1c5bc84..79f7802 100644
--- a/bob/learn/em/cpp/ML_GMMTrainer.cpp
+++ b/bob/learn/em/cpp/ML_GMMTrainer.cpp
@@ -41,6 +41,10 @@ void bob::learn::em::ML_GMMTrainer::mStep(bob::learn::em::GMMMachine& gmm)
   // Read options and variables
   const size_t n_gaussians = gmm.getNGaussians();
 
+  //Checking if it is necessary to resize the cache
+  if((size_t)m_cache_ss_n_thresholded.extent(0) != n_gaussians)
+    initialize(gmm); //If it is different for some reason, there is no way, you have to initialize
+
   // - Update weights if requested
   //   Equation 9.26 of Bishop, "Pattern recognition and machine learning", 2006
   if (m_gmm_base_trainer.getUpdateWeights()) {
@@ -101,12 +105,3 @@ bool bob::learn::em::ML_GMMTrainer::operator!=
 {
   return !(this->operator==(other));
 }
-
-/*
-bool bob::learn::em::ML_GMMTrainer::is_similar_to
-  (const bob::learn::em::ML_GMMTrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_gmm_base_trainer.is_similar_to(other, r_epsilon, a_epsilon);
-}
-*/
diff --git a/bob/learn/em/gaussian.cpp b/bob/learn/em/gaussian.cpp
index 01327ea..8021a71 100644
--- a/bob/learn/em/gaussian.cpp
+++ b/bob/learn/em/gaussian.cpp
@@ -105,7 +105,7 @@ static int PyBobLearnEMGaussian_init(PyBobLearnEMGaussianObject* self, PyObject*
   }
 
   /**If the constructor input is a number**/
-  if (PyNumber_Check(arg))
+  if (PyBob_NumberCheck(arg))
     return PyBobLearnEMGaussian_init_number(self, args, kwargs);
   /**If the constructor input is Gaussian object**/
   else if (PyBobLearnEMGaussian_Check(arg))
diff --git a/bob/learn/em/gmm_stats.cpp b/bob/learn/em/gmm_stats.cpp
index c38a674..5befeb8 100644
--- a/bob/learn/em/gmm_stats.cpp
+++ b/bob/learn/em/gmm_stats.cpp
@@ -350,7 +350,7 @@ PyObject* PyBobLearnEMGMMStats_getLog_likelihood(PyBobLearnEMGMMStatsObject* sel
 int PyBobLearnEMGMMStats_setLog_likelihood(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, t.name());
     return -1;
   }
diff --git a/bob/learn/em/ivector_machine.cpp b/bob/learn/em/ivector_machine.cpp
index 257aafe..82df5e0 100644
--- a/bob/learn/em/ivector_machine.cpp
+++ b/bob/learn/em/ivector_machine.cpp
@@ -270,7 +270,7 @@ PyObject* PyBobLearnEMIVectorMachine_getVarianceThreshold(PyBobLearnEMIVectorMac
 int PyBobLearnEMIVectorMachine_setVarianceThreshold(PyBobLearnEMIVectorMachineObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, variance_threshold.name());
     return -1;
   }
diff --git a/bob/learn/em/kmeans_trainer.cpp b/bob/learn/em/kmeans_trainer.cpp
index a55ee12..dd8f252 100644
--- a/bob/learn/em/kmeans_trainer.cpp
+++ b/bob/learn/em/kmeans_trainer.cpp
@@ -49,7 +49,7 @@ static auto KMeansTrainer_doc = bob::extension::ClassDoc(
   .add_prototype("other","")
   .add_prototype("","")
 
-  .add_parameter("initialization_method", "str", "The initialization method of the means")
+  .add_parameter("initialization_method", "str", "The initialization method of the means.\nPossible values are: 'RANDOM', 'RANDOM_NO_DUPLICATE', 'KMEANS_PLUS_PLUS' ")
   .add_parameter("other", ":py:class:`bob.learn.em.KMeansTrainer`", "A KMeansTrainer object to be copied.")
 );
@@ -162,7 +162,10 @@ static auto initialization_method = bob::extension::VariableDoc(
   "initialization_method",
   "str",
   "Initialization method.",
-  ""
+  "Possible values:\n"
+  "  `RANDOM`: Random initialization \n\n"
+  "  `RANDOM_NO_DUPLICATE`: Random initialization without repetition \n\n"
+  "  `KMEANS_PLUS_PLUS`: Apply the kmeans++ initialization http://en.wikipedia.org/wiki/K-means%2B%2B \n\n"
 );
 PyObject* PyBobLearnEMKMeansTrainer_getInitializationMethod(PyBobLearnEMKMeansTrainerObject* self, void*) {
   BOB_TRY
@@ -254,7 +257,7 @@ PyObject* PyBobLearnEMKMeansTrainer_getAverageMinDistance(PyBobLearnEMKMeansTrai
 int PyBobLearnEMKMeansTrainer_setAverageMinDistance(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*) {
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, average_min_distance.name());
     return -1;
   }
diff --git a/bob/learn/em/plda_base.cpp b/bob/learn/em/plda_base.cpp
index 5e1b9d6..5cfb508 100644
--- a/bob/learn/em/plda_base.cpp
+++ b/bob/learn/em/plda_base.cpp
@@ -397,7 +397,7 @@ static PyObject* PyBobLearnEMPLDABase_getVarianceThreshold(PyBobLearnEMPLDABaseO
 int PyBobLearnEMPLDABase_setVarianceThreshold(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an float", Py_TYPE(self)->tp_name, variance_threshold.name());
     return -1;
   }
diff --git a/bob/learn/em/plda_machine.cpp b/bob/learn/em/plda_machine.cpp
index 734cb58..4174084 100644
--- a/bob/learn/em/plda_machine.cpp
+++ b/bob/learn/em/plda_machine.cpp
@@ -220,7 +220,7 @@ static PyObject* PyBobLearnEMPLDAMachine_getWSumXitBetaXi(PyBobLearnEMPLDAMachin
 int PyBobLearnEMPLDAMachine_setWSumXitBetaXi(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an float", Py_TYPE(self)->tp_name, w_sum_xit_beta_xi.name());
     return -1;
   }
@@ -312,7 +312,7 @@ static PyObject* PyBobLearnEMPLDAMachine_getLogLikelihood(PyBobLearnEMPLDAMachin
 int PyBobLearnEMPLDAMachine_setLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
   BOB_TRY
 
-  if (!PyNumber_Check(value)){
+  if (!PyBob_NumberCheck(value)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, log_likelihood.name());
     return -1;
   }
diff --git a/version.txt b/version.txt
index 75a0313..4cc35b7 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-2.0.0b3
\ No newline at end of file
+2.0.0b6
-- 
GitLab
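
For reference, a minimal Python usage sketch of the initialization methods documented above in kmeans_trainer.cpp. It assumes a build of bob.learn.em that includes this change (2.0.0b6) is importable; only names that appear in the docstrings touched by this patch are used, and the exact call pattern is illustrative rather than taken from the patch:

    import bob.learn.em

    # The constructor documented above takes the initialization method as a string;
    # the values listed in this patch are 'RANDOM', 'RANDOM_NO_DUPLICATE' and
    # 'KMEANS_PLUS_PLUS' (k-means++ seeding).
    trainer = bob.learn.em.KMeansTrainer('KMEANS_PLUS_PLUS')
    print(trainer.initialization_method)

    # Numeric attributes guarded by PyBob_NumberCheck accept any Python number;
    # assigning a non-number raises the RuntimeError formatted in the setters above.
    trainer.average_min_distance = 0.5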