diff --git a/bob/learn/em/empca_trainer.cpp b/bob/learn/em/empca_trainer.cpp index 8c74b4338459d9188c47daced19d8859af906c86..2db9bb6f30e2357f095d907f454014e92b8395a5 100644 --- a/bob/learn/em/empca_trainer.cpp +++ b/bob/learn/em/empca_trainer.cpp @@ -139,7 +139,7 @@ static PyObject* PyBobLearnEMEMPCATrainer_RichCompare(PyBobLearnEMEMPCATrainerOb /************ Variables Section ***********************************/ /******************************************************************/ -static PyGetSetDef PyBobLearnEMEMPCATrainer_getseters[] = { +static PyGetSetDef PyBobLearnEMEMPCATrainer_getseters[] = { {0} // Sentinel }; @@ -173,12 +173,12 @@ static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObj &PyBlitzArray_Converter, &data, &PyBoostMt19937_Type, &rng)) return 0; auto data_ = make_safe(data); - + if(rng){ boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng); self->cxx->setRng(rng_cpy); } - + self->cxx->initialize(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); @@ -188,9 +188,9 @@ static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObj } -/*** eStep ***/ -static auto eStep = bob::extension::FunctionDoc( - "eStep", +/*** e_step ***/ +static auto e_step = bob::extension::FunctionDoc( + "e_step", "", "", true @@ -198,11 +198,11 @@ static auto eStep = bob::extension::FunctionDoc( .add_prototype("linear_machine,data") .add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object") .add_parameter("data", "array_like <float, 2D>", "Input data"); -static PyObject* PyBobLearnEMEMPCATrainer_eStep(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMEMPCATrainer_e_step(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = eStep.kwlist(0); + char** kwlist = e_step.kwlist(0); PyBobLearnLinearMachineObject* linear_machine; PyBlitzArrayObject* data = 0; @@ -213,15 +213,15 @@ static PyObject* PyBobLearnEMEMPCATrainer_eStep(PyBobLearnEMEMPCATrainerObject* self->cxx->eStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); - BOB_CATCH_MEMBER("cannot perform the eStep method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step method", 0) Py_RETURN_NONE; } -/*** mStep ***/ -static auto mStep = bob::extension::FunctionDoc( - "mStep", +/*** m_step ***/ +static auto m_step = bob::extension::FunctionDoc( + "m_step", "", 0, true @@ -229,11 +229,11 @@ static auto mStep = bob::extension::FunctionDoc( .add_prototype("linear_machine,data") .add_parameter("linear_machine", ":py:class:`bob.learn.em.LinearMachine`", "LinearMachine Object") .add_parameter("data", "array_like <float, 2D>", "Input data"); -static PyObject* PyBobLearnEMEMPCATrainer_mStep(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMEMPCATrainer_m_step(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = mStep.kwlist(0); + char** kwlist = m_step.kwlist(0); PyBobLearnLinearMachineObject* linear_machine; PyBlitzArrayObject* data = 0; @@ -244,7 +244,7 @@ static PyObject* PyBobLearnEMEMPCATrainer_mStep(PyBobLearnEMEMPCATrainerObject* self->cxx->mStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); - BOB_CATCH_MEMBER("cannot perform the mStep method", 0) + BOB_CATCH_MEMBER("cannot perform 
the m_step method", 0) Py_RETURN_NONE; } @@ -284,16 +284,16 @@ static PyMethodDef PyBobLearnEMEMPCATrainer_methods[] = { initialize.doc() }, { - eStep.name(), - (PyCFunction)PyBobLearnEMEMPCATrainer_eStep, + e_step.name(), + (PyCFunction)PyBobLearnEMEMPCATrainer_e_step, METH_VARARGS|METH_KEYWORDS, - eStep.doc() + e_step.doc() }, { - mStep.name(), - (PyCFunction)PyBobLearnEMEMPCATrainer_mStep, + m_step.name(), + (PyCFunction)PyBobLearnEMEMPCATrainer_m_step, METH_VARARGS|METH_KEYWORDS, - mStep.doc() + m_step.doc() }, { compute_likelihood.name(), @@ -340,4 +340,3 @@ bool init_BobLearnEMEMPCATrainer(PyObject* module) Py_INCREF(&PyBobLearnEMEMPCATrainer_Type); return PyModule_AddObject(module, "EMPCATrainer", (PyObject*)&PyBobLearnEMEMPCATrainer_Type) >= 0; } - diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp index 468d49f3b498ac7257d6d8ef5251eff3bbb521b7..ae50c97fc45e61588feaf72d89abb2b13e1a61f7 100644 --- a/bob/learn/em/isv_trainer.cpp +++ b/bob/learn/em/isv_trainer.cpp @@ -441,7 +441,7 @@ static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject* /*** e_step ***/ static auto e_step = bob::extension::FunctionDoc( - "eStep", + "e_step", "Call the e-step procedure (for the U subspace).", "", true @@ -473,7 +473,7 @@ static PyObject* PyBobLearnEMISVTrainer_e_step(PyBobLearnEMISVTrainerObject* sel /*** m_step ***/ static auto m_step = bob::extension::FunctionDoc( - "mStep", + "m_step", "Call the m-step procedure (for the U subspace).", "", true diff --git a/bob/learn/em/ivector_trainer.cpp b/bob/learn/em/ivector_trainer.cpp index 2d639ac07e6074f554f84a5755a24c2fe59b3721..bb8aa28bfcb60a2530d3d5be592c16a61d05ed70 100644 --- a/bob/learn/em/ivector_trainer.cpp +++ b/bob/learn/em/ivector_trainer.cpp @@ -20,7 +20,7 @@ static int extract_GMMStats_1d(PyObject *list, std::vector<bob::learn::em::GMMStats>& training_data) { for (int i=0; i<PyList_GET_SIZE(list); i++){ - + PyBobLearnEMGMMStatsObject* stats; if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){ PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects"); @@ -76,7 +76,7 @@ static int PyBobLearnEMIVectorTrainer_init_bool(PyBobLearnEMIVectorTrainerObject //Parsing the input argments if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBool_Type, &update_sigma)) return -1; - + self->cxx.reset(new bob::learn::em::IVectorTrainer(f(update_sigma))); return 0; } @@ -105,11 +105,11 @@ static int PyBobLearnEMIVectorTrainer_init(PyBobLearnEMIVectorTrainerObject* sel } // If the constructor input is IVectorTrainer object - if(PyBobLearnEMIVectorTrainer_Check(arg)) + if(PyBobLearnEMIVectorTrainer_Check(arg)) return PyBobLearnEMIVectorTrainer_init_copy(self, args, kwargs); else - return PyBobLearnEMIVectorTrainer_init_bool(self, args, kwargs); - + return PyBobLearnEMIVectorTrainer_init_bool(self, args, kwargs); + } default:{ PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs); @@ -268,14 +268,14 @@ int PyBobLearnEMIVectorTrainer_set_acc_snormij(PyBobLearnEMIVectorTrainerObject* -static PyGetSetDef PyBobLearnEMIVectorTrainer_getseters[] = { +static PyGetSetDef PyBobLearnEMIVectorTrainer_getseters[] = { { acc_nij_wij2.name(), (getter)PyBobLearnEMIVectorTrainer_get_acc_nij_wij2, (setter)PyBobLearnEMIVectorTrainer_set_acc_nij_wij2, acc_nij_wij2.doc(), 0 - }, + }, { acc_fnormij_wij.name(), (getter)PyBobLearnEMIVectorTrainer_get_acc_fnormij_wij, @@ -338,7 +338,7 
@@ static PyObject* PyBobLearnEMIVectorTrainer_initialize(PyBobLearnEMIVectorTraine /*** e_step ***/ static auto e_step = bob::extension::FunctionDoc( - "eStep", + "e_step", "Call the e-step procedure (for the U subspace).", "", true @@ -369,7 +369,7 @@ static PyObject* PyBobLearnEMIVectorTrainer_e_step(PyBobLearnEMIVectorTrainerObj /*** m_step ***/ static auto m_step = bob::extension::FunctionDoc( - "mStep", + "m_step", "Call the m-step procedure (for the U subspace).", "", true @@ -380,7 +380,7 @@ static auto m_step = bob::extension::FunctionDoc( static PyObject* PyBobLearnEMIVectorTrainer_m_step(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY - // Parses input arguments in a single shot + // Parses input arguments in a single shot char** kwlist = m_step.kwlist(0); PyBobLearnEMIVectorMachineObject* ivector_machine = 0; @@ -456,4 +456,3 @@ bool init_BobLearnEMIVectorTrainer(PyObject* module) Py_INCREF(&PyBobLearnEMIVectorTrainer_Type); return PyModule_AddObject(module, "IVectorTrainer", (PyObject*)&PyBobLearnEMIVectorTrainer_Type) >= 0; } - diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp index 19b5e153790a15093df587ce05f3eb9fed0b7fc4..02ea874d39c00016c4fa5b1f343485b318193ec3 100644 --- a/bob/learn/em/jfa_trainer.cpp +++ b/bob/learn/em/jfa_trainer.cpp @@ -650,9 +650,9 @@ static PyObject* PyBobLearnEMJFATrainer_initialize(PyBobLearnEMJFATrainerObject* } -/*** e_step1 ***/ -static auto e_step1 = bob::extension::FunctionDoc( - "e_step1", +/*** e_stepv ***/ +static auto e_step_v = bob::extension::FunctionDoc( + "e_step_v", "Call the 1st e-step procedure (for the V subspace).", "", true @@ -660,11 +660,11 @@ static auto e_step1 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_e_step1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_e_step_v(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY //Parses input arguments in a single shot - char** kwlist = e_step1.kwlist(0); + char** kwlist = e_step_v.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -677,15 +677,15 @@ static PyObject* PyBobLearnEMJFATrainer_e_step1(PyBobLearnEMJFATrainerObject* se self->cxx->eStep1(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the e_step1 method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step_v method", 0) Py_RETURN_NONE; } -/*** m_step1 ***/ -static auto m_step1 = bob::extension::FunctionDoc( - "m_step1", +/*** m_step_v ***/ +static auto m_step_v = bob::extension::FunctionDoc( + "m_step_v", "Call the 1st m-step procedure (for the V subspace).", "", true @@ -693,11 +693,11 @@ static auto m_step1 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_m_step1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_m_step_v(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = m_step1.kwlist(0); + char** kwlist = m_step_v.kwlist(0); 
PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -709,15 +709,15 @@ static PyObject* PyBobLearnEMJFATrainer_m_step1(PyBobLearnEMJFATrainerObject* se if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->mStep1(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the m_step1 method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step_v method", 0) Py_RETURN_NONE; } -/*** finalize1 ***/ -static auto finalize1 = bob::extension::FunctionDoc( - "finalize1", +/*** finalize_v ***/ +static auto finalize_v = bob::extension::FunctionDoc( + "finalize_v", "Call the 1st finalize procedure (for the V subspace).", "", true @@ -725,11 +725,11 @@ static auto finalize1 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_finalize1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_finalize_v(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY //Parses input arguments in a single shot - char** kwlist = finalize1.kwlist(0); + char** kwlist = finalize_v.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -741,15 +741,15 @@ static PyObject* PyBobLearnEMJFATrainer_finalize1(PyBobLearnEMJFATrainerObject* if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->finalize1(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the finalize1 method", 0) + BOB_CATCH_MEMBER("cannot perform the finalize_v method", 0) Py_RETURN_NONE; } -/*** e_step2 ***/ -static auto e_step2 = bob::extension::FunctionDoc( - "e_step2", +/*** e_step_u ***/ +static auto e_step_u = bob::extension::FunctionDoc( + "e_step_u", "Call the 2nd e-step procedure (for the U subspace).", "", true @@ -757,11 +757,11 @@ static auto e_step2 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_e_step2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_e_step_u(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = e_step2.kwlist(0); + char** kwlist = e_step_u.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -773,15 +773,15 @@ static PyObject* PyBobLearnEMJFATrainer_e_step2(PyBobLearnEMJFATrainerObject* se if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->eStep2(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the e_step2 method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step_u method", 0) Py_RETURN_NONE; } -/*** m_step2 ***/ -static auto m_step2 = bob::extension::FunctionDoc( - "m_step2", +/*** m_step_u ***/ +static auto m_step_u = bob::extension::FunctionDoc( + "m_step_u", "Call the 2nd m-step procedure (for the U subspace).", "", true @@ -789,11 +789,11 @@ static auto m_step2 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* 
PyBobLearnEMJFATrainer_m_step2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_m_step_u(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = m_step2.kwlist(0); + char** kwlist = m_step_u.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -805,15 +805,15 @@ static PyObject* PyBobLearnEMJFATrainer_m_step2(PyBobLearnEMJFATrainerObject* se if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->mStep2(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the m_step2 method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step_u method", 0) Py_RETURN_NONE; } -/*** finalize2 ***/ -static auto finalize2 = bob::extension::FunctionDoc( - "finalize2", +/*** finalize_u ***/ +static auto finalize_u = bob::extension::FunctionDoc( + "finalize_u", "Call the 2nd finalize procedure (for the U subspace).", "", true @@ -821,11 +821,11 @@ static auto finalize2 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_finalize2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_finalize_u(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = finalize2.kwlist(0); + char** kwlist = finalize_u.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -837,15 +837,15 @@ static PyObject* PyBobLearnEMJFATrainer_finalize2(PyBobLearnEMJFATrainerObject* if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->finalize2(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the finalize2 method", 0) + BOB_CATCH_MEMBER("cannot perform the finalize_u method", 0) Py_RETURN_NONE; } -/*** e_step3 ***/ -static auto e_step3 = bob::extension::FunctionDoc( - "e_step3", +/*** e_step_d ***/ +static auto e_step_d = bob::extension::FunctionDoc( + "e_step_d", "Call the 3rd e-step procedure (for the d subspace).", "", true @@ -853,11 +853,11 @@ static auto e_step3 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_e_step3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_e_step_d(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = e_step3.kwlist(0); + char** kwlist = e_step_d.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -869,15 +869,15 @@ static PyObject* PyBobLearnEMJFATrainer_e_step3(PyBobLearnEMJFATrainerObject* se if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->eStep3(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the e_step3 method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step_d method", 0) Py_RETURN_NONE; } -/*** m_step3 ***/ -static auto m_step3 = bob::extension::FunctionDoc( - "m_step3", +/*** m_step_d ***/ +static auto m_step_d = bob::extension::FunctionDoc( + "m_step_d", "Call the 3rd m-step procedure (for the d subspace).", 
"", true @@ -885,11 +885,11 @@ static auto m_step3 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_m_step3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_m_step_d(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = m_step3.kwlist(0); + char** kwlist = m_step_d.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -901,15 +901,15 @@ static PyObject* PyBobLearnEMJFATrainer_m_step3(PyBobLearnEMJFATrainerObject* se if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->mStep3(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the m_step3 method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step_d method", 0) Py_RETURN_NONE; } -/*** finalize3 ***/ -static auto finalize3 = bob::extension::FunctionDoc( - "finalize3", +/*** finalize_d ***/ +static auto finalize_d = bob::extension::FunctionDoc( + "finalize_d", "Call the 3rd finalize procedure (for the d subspace).", "", true @@ -917,11 +917,11 @@ static auto finalize3 = bob::extension::FunctionDoc( .add_prototype("jfa_base,stats") .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object") .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object"); -static PyObject* PyBobLearnEMJFATrainer_finalize3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMJFATrainer_finalize_d(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY // Parses input arguments in a single shot - char** kwlist = finalize3.kwlist(0); + char** kwlist = finalize_d.kwlist(0); PyBobLearnEMJFABaseObject* jfa_base = 0; PyObject* stats = 0; @@ -933,7 +933,7 @@ static PyObject* PyBobLearnEMJFATrainer_finalize3(PyBobLearnEMJFATrainerObject* if(extract_GMMStats_2d(stats ,training_data)==0) self->cxx->finalize3(*jfa_base->cxx, training_data); - BOB_CATCH_MEMBER("cannot perform the finalize3 method", 0) + BOB_CATCH_MEMBER("cannot perform the finalize_d method", 0) Py_RETURN_NONE; } @@ -983,58 +983,58 @@ static PyMethodDef PyBobLearnEMJFATrainer_methods[] = { initialize.doc() }, { - e_step1.name(), - (PyCFunction)PyBobLearnEMJFATrainer_e_step1, + e_step_v.name(), + (PyCFunction)PyBobLearnEMJFATrainer_e_step_v, METH_VARARGS|METH_KEYWORDS, - e_step1.doc() + e_step_v.doc() }, { - e_step2.name(), - (PyCFunction)PyBobLearnEMJFATrainer_e_step2, + e_step_u.name(), + (PyCFunction)PyBobLearnEMJFATrainer_e_step_u, METH_VARARGS|METH_KEYWORDS, - e_step2.doc() + e_step_u.doc() }, { - e_step3.name(), - (PyCFunction)PyBobLearnEMJFATrainer_e_step3, + e_step_d.name(), + (PyCFunction)PyBobLearnEMJFATrainer_e_step_d, METH_VARARGS|METH_KEYWORDS, - e_step3.doc() + e_step_d.doc() }, { - m_step1.name(), - (PyCFunction)PyBobLearnEMJFATrainer_m_step1, + m_step_v.name(), + (PyCFunction)PyBobLearnEMJFATrainer_m_step_v, METH_VARARGS|METH_KEYWORDS, - m_step1.doc() + m_step_v.doc() }, { - m_step2.name(), - (PyCFunction)PyBobLearnEMJFATrainer_m_step2, + m_step_u.name(), + (PyCFunction)PyBobLearnEMJFATrainer_m_step_u, METH_VARARGS|METH_KEYWORDS, - m_step2.doc() + m_step_u.doc() }, { - m_step3.name(), - (PyCFunction)PyBobLearnEMJFATrainer_m_step3, + m_step_d.name(), + 
(PyCFunction)PyBobLearnEMJFATrainer_m_step_d, METH_VARARGS|METH_KEYWORDS, - m_step3.doc() + m_step_d.doc() }, { - finalize1.name(), - (PyCFunction)PyBobLearnEMJFATrainer_finalize1, + finalize_v.name(), + (PyCFunction)PyBobLearnEMJFATrainer_finalize_v, METH_VARARGS|METH_KEYWORDS, - finalize1.doc() + finalize_v.doc() }, { - finalize2.name(), - (PyCFunction)PyBobLearnEMJFATrainer_finalize2, + finalize_u.name(), + (PyCFunction)PyBobLearnEMJFATrainer_finalize_u, METH_VARARGS|METH_KEYWORDS, - finalize2.doc() + finalize_u.doc() }, { - finalize3.name(), - (PyCFunction)PyBobLearnEMJFATrainer_finalize3, + finalize_d.name(), + (PyCFunction)PyBobLearnEMJFATrainer_finalize_d, METH_VARARGS|METH_KEYWORDS, - finalize3.doc() + finalize_d.doc() }, { enroll.name(), diff --git a/bob/learn/em/kmeans_trainer.cpp b/bob/learn/em/kmeans_trainer.cpp index dd8f252c3067c4ee973880e7cfe91655f60ec0e1..fb1fc5fac583e9727964e8e43445b6c9a5e695d8 100644 --- a/bob/learn/em/kmeans_trainer.cpp +++ b/bob/learn/em/kmeans_trainer.cpp @@ -270,7 +270,7 @@ int PyBobLearnEMKMeansTrainer_setAverageMinDistance(PyBobLearnEMKMeansTrainerObj -static PyGetSetDef PyBobLearnEMKMeansTrainer_getseters[] = { +static PyGetSetDef PyBobLearnEMKMeansTrainer_getseters[] = { { initialization_method.name(), (getter)PyBobLearnEMKMeansTrainer_getInitializationMethod, @@ -326,23 +326,23 @@ static PyObject* PyBobLearnEMKMeansTrainer_initialize(PyBobLearnEMKMeansTrainerO PyBobLearnEMKMeansMachineObject* kmeans_machine = 0; PyBlitzArrayObject* data = 0; - PyBoostMt19937Object* rng = 0; + PyBoostMt19937Object* rng = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&|O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine, &PyBlitzArray_Converter, &data, &PyBoostMt19937_Type, &rng)) return 0; auto data_ = make_safe(data); - - // perform check on the input + + // perform check on the input if (data->type_num != NPY_FLOAT64){ PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, initialize.name()); return 0; - } + } if (data->ndim != 2){ PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, initialize.name()); return 0; - } + } if (data->shape[1] != (Py_ssize_t)kmeans_machine->cxx->getNInputs() ) { PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, kmeans_machine->cxx->getNInputs(), data->shape[1], initialize.name()); @@ -362,10 +362,10 @@ static PyObject* PyBobLearnEMKMeansTrainer_initialize(PyBobLearnEMKMeansTrainerO } -/*** eStep ***/ -static auto eStep = bob::extension::FunctionDoc( - "eStep", - "Compute the eStep, which is basically the distances ", +/*** e_step ***/ +static auto e_step = bob::extension::FunctionDoc( + "e_step", + "Compute the E-step, which is basically the distances ", "Accumulate across the dataset:" " -zeroeth and first order statistics" " -average (Square Euclidean) distance from the closest mean", @@ -374,11 +374,11 @@ static auto eStep = bob::extension::FunctionDoc( .add_prototype("kmeans_machine,data") .add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object") .add_parameter("data", "array_like <float, 2D>", "Input data"); -static PyObject* PyBobLearnEMKMeansTrainer_eStep(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMKMeansTrainer_e_step(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, 
PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = eStep.kwlist(0); + char** kwlist = e_step.kwlist(0); PyBobLearnEMKMeansMachineObject* kmeans_machine; PyBlitzArrayObject* data = 0; @@ -387,32 +387,32 @@ static PyObject* PyBobLearnEMKMeansTrainer_eStep(PyBobLearnEMKMeansTrainerObject auto data_ = make_safe(data); if (data->type_num != NPY_FLOAT64){ - PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; - } + } if (data->ndim != 2){ - PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; - } + } if (data->shape[1] != (Py_ssize_t)kmeans_machine->cxx->getNInputs() ) { - PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, kmeans_machine->cxx->getNInputs(), data->shape[1], eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, kmeans_machine->cxx->getNInputs(), data->shape[1], e_step.name()); return 0; } self->cxx->eStep(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); - BOB_CATCH_MEMBER("cannot perform the eStep method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step method", 0) Py_RETURN_NONE; } -/*** mStep ***/ -static auto mStep = bob::extension::FunctionDoc( - "mStep", +/*** m_step ***/ +static auto m_step = bob::extension::FunctionDoc( + "m_step", "Updates the mean based on the statistics from the E-step", 0, true @@ -420,11 +420,11 @@ static auto mStep = bob::extension::FunctionDoc( .add_prototype("kmeans_machine,data") .add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object") .add_parameter("data", "array_like <float, 2D>", "Ignored."); -static PyObject* PyBobLearnEMKMeansTrainer_mStep(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMKMeansTrainer_m_step(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = mStep.kwlist(0); + char** kwlist = m_step.kwlist(0); PyBobLearnEMKMeansMachineObject* kmeans_machine; PyBlitzArrayObject* data = 0; @@ -435,7 +435,7 @@ static PyObject* PyBobLearnEMKMeansTrainer_mStep(PyBobLearnEMKMeansTrainerObject self->cxx->mStep(*kmeans_machine->cxx); - BOB_CATCH_MEMBER("cannot perform the mStep method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step method", 0) Py_RETURN_NONE; } @@ -499,16 +499,16 @@ static PyMethodDef PyBobLearnEMKMeansTrainer_methods[] = { initialize.doc() }, { - eStep.name(), - (PyCFunction)PyBobLearnEMKMeansTrainer_eStep, + e_step.name(), + (PyCFunction)PyBobLearnEMKMeansTrainer_e_step, METH_VARARGS|METH_KEYWORDS, - eStep.doc() + e_step.doc() }, { - mStep.name(), - (PyCFunction)PyBobLearnEMKMeansTrainer_mStep, + m_step.name(), + (PyCFunction)PyBobLearnEMKMeansTrainer_m_step, METH_VARARGS|METH_KEYWORDS, - mStep.doc() + m_step.doc() }, { compute_likelihood.name(), @@ -561,4 +561,3 @@ bool init_BobLearnEMKMeansTrainer(PyObject* 
module) Py_INCREF(&PyBobLearnEMKMeansTrainer_Type); return PyModule_AddObject(module, "KMeansTrainer", (PyObject*)&PyBobLearnEMKMeansTrainer_Type) >= 0; } - diff --git a/bob/learn/em/MAP_gmm_trainer.cpp b/bob/learn/em/map_gmm_trainer.cpp similarity index 94% rename from bob/learn/em/MAP_gmm_trainer.cpp rename to bob/learn/em/map_gmm_trainer.cpp index d62fb6f9749c669cdbf58f5d8d72323eadf7fc33..a4fdfd4cab7a3c384df4b11cbf657b0ac303661d 100644 --- a/bob/learn/em/MAP_gmm_trainer.cpp +++ b/bob/learn/em/map_gmm_trainer.cpp @@ -300,26 +300,26 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_initialize(PyBobLearnEMMAPGMMTrainerO } -/*** eStep ***/ -static auto eStep = bob::extension::FunctionDoc( - "eStep", +/*** e_step ***/ +static auto e_step = bob::extension::FunctionDoc( + "e_step", "Calculates and saves statistics across the dataset," "and saves these as m_ss. ", "Calculates the average log likelihood of the observations given the GMM," "and returns this in average_log_likelihood." - "The statistics, m_ss, will be used in the mStep() that follows.", + "The statistics, m_ss, will be used in the m_step() that follows.", true ) .add_prototype("gmm_machine,data") .add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object") .add_parameter("data", "array_like <float, 2D>", "Input data"); -static PyObject* PyBobLearnEMMAPGMMTrainer_eStep(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMMAPGMMTrainer_e_step(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = eStep.kwlist(0); + char** kwlist = e_step.kwlist(0); PyBobLearnEMGMMMachineObject* gmm_machine; PyBlitzArrayObject* data = 0; @@ -330,32 +330,32 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_eStep(PyBobLearnEMMAPGMMTrainerObject // perform check on the input if (data->type_num != NPY_FLOAT64){ - PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; } if (data->ndim != 2){ - PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; } if (data->shape[1] != (Py_ssize_t)gmm_machine->cxx->getNInputs() ) { - PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, gmm_machine->cxx->getNInputs(), data->shape[1], eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, gmm_machine->cxx->getNInputs(), data->shape[1], e_step.name()); return 0; } self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); - BOB_CATCH_MEMBER("cannot perform the eStep method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step method", 0) Py_RETURN_NONE; } -/*** mStep ***/ -static auto mStep = bob::extension::FunctionDoc( - "mStep", +/*** m_step ***/ +static auto m_step = bob::extension::FunctionDoc( + "m_step", "Performs a maximum a posteriori (MAP) update of the GMM:" "* parameters using the accumulated statistics in 
:py:class:`bob.learn.em.GMMBaseTrainer.m_ss` and the" @@ -366,11 +366,11 @@ static auto mStep = bob::extension::FunctionDoc( .add_prototype("gmm_machine,data") .add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object") .add_parameter("data", "array_like <float, 2D>", "Ignored."); -static PyObject* PyBobLearnEMMAPGMMTrainer_mStep(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMMAPGMMTrainer_m_step(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = mStep.kwlist(0); + char** kwlist = m_step.kwlist(0); PyBobLearnEMGMMMachineObject* gmm_machine; PyBlitzArrayObject* data = 0; @@ -382,7 +382,7 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_mStep(PyBobLearnEMMAPGMMTrainerObject self->cxx->mStep(*gmm_machine->cxx); - BOB_CATCH_MEMBER("cannot perform the mStep method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step method", 0) Py_RETURN_NONE; } @@ -422,16 +422,16 @@ static PyMethodDef PyBobLearnEMMAPGMMTrainer_methods[] = { initialize.doc() }, { - eStep.name(), - (PyCFunction)PyBobLearnEMMAPGMMTrainer_eStep, + e_step.name(), + (PyCFunction)PyBobLearnEMMAPGMMTrainer_e_step, METH_VARARGS|METH_KEYWORDS, - eStep.doc() + e_step.doc() }, { - mStep.name(), - (PyCFunction)PyBobLearnEMMAPGMMTrainer_mStep, + m_step.name(), + (PyCFunction)PyBobLearnEMMAPGMMTrainer_m_step, METH_VARARGS|METH_KEYWORDS, - mStep.doc() + m_step.doc() }, { compute_likelihood.name(), diff --git a/bob/learn/em/ML_gmm_trainer.cpp b/bob/learn/em/ml_gmm_trainer.cpp similarity index 89% rename from bob/learn/em/ML_gmm_trainer.cpp rename to bob/learn/em/ml_gmm_trainer.cpp index f71b5d81bedec797ec91527fb2115f7ae9f1fe40..aa2e95d0609cf8a44fd2c7f97291221c25f719e5 100644 --- a/bob/learn/em/ML_gmm_trainer.cpp +++ b/bob/learn/em/ml_gmm_trainer.cpp @@ -55,22 +55,22 @@ static int PyBobLearnEMMLGMMTrainer_init_copy(PyBobLearnEMMLGMMTrainerObject* se static int PyBobLearnEMMLGMMTrainer_init_base_trainer(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { char** kwlist = ML_GMMTrainer_doc.kwlist(0); - + PyObject* update_means = Py_True; PyObject* update_variances = Py_False; PyObject* update_weights = Py_False; double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon(); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!O!O!d", kwlist, - &PyBool_Type, &update_means, - &PyBool_Type, &update_variances, - &PyBool_Type, &update_weights, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!O!O!d", kwlist, + &PyBool_Type, &update_means, + &PyBool_Type, &update_variances, + &PyBool_Type, &update_weights, &mean_var_update_responsibilities_threshold)){ ML_GMMTrainer_doc.print_usage(); return -1; } - self->cxx.reset(new bob::learn::em::ML_GMMTrainer(f(update_means), f(update_variances), f(update_weights), + self->cxx.reset(new bob::learn::em::ML_GMMTrainer(f(update_means), f(update_variances), f(update_weights), mean_var_update_responsibilities_threshold)); return 0; } @@ -84,8 +84,8 @@ static int PyBobLearnEMMLGMMTrainer_init(PyBobLearnEMMLGMMTrainerObject* self, P int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0); if (nargs==0) - return PyBobLearnEMMLGMMTrainer_init_base_trainer(self, args, kwargs); - else{ + return PyBobLearnEMMLGMMTrainer_init_base_trainer(self, args, kwargs); + else{ //Reading the input argument PyObject* arg = 0; @@ -96,7 +96,7 @@ static int 
PyBobLearnEMMLGMMTrainer_init(PyBobLearnEMMLGMMTrainerObject* self, P auto tmp_ = make_safe(tmp); arg = PyList_GET_ITEM(tmp, 0); } - + // If the constructor input is GMMBaseTrainer object if (PyBobLearnEMMLGMMTrainer_Check(arg)) return PyBobLearnEMMLGMMTrainer_init_copy(self, args, kwargs); @@ -144,7 +144,7 @@ static PyObject* PyBobLearnEMMLGMMTrainer_RichCompare(PyBobLearnEMMLGMMTrainerOb /************ Variables Section ***********************************/ /******************************************************************/ -static PyGetSetDef PyBobLearnEMMLGMMTrainer_getseters[] = { +static PyGetSetDef PyBobLearnEMMLGMMTrainer_getseters[] = { {0} // Sentinel }; @@ -169,7 +169,7 @@ static PyObject* PyBobLearnEMMLGMMTrainer_initialize(PyBobLearnEMMLGMMTrainerObj /* Parses input arguments in a single shot */ char** kwlist = initialize.kwlist(0); PyBobLearnEMGMMMachineObject* gmm_machine = 0; - PyBlitzArrayObject* data = 0; + PyBlitzArrayObject* data = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &PyBlitzArray_Converter, &data)) return 0; @@ -182,62 +182,62 @@ static PyObject* PyBobLearnEMMLGMMTrainer_initialize(PyBobLearnEMMLGMMTrainerObj } -/*** eStep ***/ -static auto eStep = bob::extension::FunctionDoc( - "eStep", +/*** e_step ***/ +static auto e_step = bob::extension::FunctionDoc( + "e_step", "Calculates and saves statistics across the dataset," "and saves these as m_ss. ", "Calculates the average log likelihood of the observations given the GMM," "and returns this in average_log_likelihood." - "The statistics, m_ss, will be used in the mStep() that follows.", + "The statistics, m_ss, will be used in the :py:func:`m_step` that follows.", true ) .add_prototype("gmm_machine,data") .add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object") .add_parameter("data", "array_like <float, 2D>", "Input data"); -static PyObject* PyBobLearnEMMLGMMTrainer_eStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMMLGMMTrainer_e_step(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = eStep.kwlist(0); + char** kwlist = e_step.kwlist(0); PyBobLearnEMGMMMachineObject* gmm_machine; PyBlitzArrayObject* data = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &PyBlitzArray_Converter, &data)) return 0; auto data_ = make_safe(data); - - // perform check on the input + + // perform check on the input if (data->type_num != NPY_FLOAT64){ - PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; - } + } if (data->ndim != 2){ - PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, eStep.name()); + PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, e_step.name()); return 0; - } + } if (data->shape[1] != (Py_ssize_t)gmm_machine->cxx->getNInputs() ) { - PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, gmm_machine->cxx->getNInputs(), data->shape[1], eStep.name()); + 
PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, gmm_machine->cxx->getNInputs(), data->shape[1], e_step.name()); return 0; } - + self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data)); - BOB_CATCH_MEMBER("cannot perform the eStep method", 0) + BOB_CATCH_MEMBER("cannot perform the e_step method", 0) Py_RETURN_NONE; } -/*** mStep ***/ -static auto mStep = bob::extension::FunctionDoc( - "mStep", +/*** m_step ***/ +static auto m_step = bob::extension::FunctionDoc( + "m_step", "Performs a maximum likelihood (ML) update of the GMM parameters " - "using the accumulated statistics in :py:class:`bob.learn.em.GMMBaseTrainer.m_ss`", + "using the accumulated statistics in :py:attr:`bob.learn.em.GMMBaseTrainer.m_ss`", "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006", @@ -246,14 +246,14 @@ static auto mStep = bob::extension::FunctionDoc( .add_prototype("gmm_machine,data") .add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object") .add_parameter("data", "array_like <float, 2D>", "Ignored."); -static PyObject* PyBobLearnEMMLGMMTrainer_mStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { +static PyObject* PyBobLearnEMMLGMMTrainer_m_step(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) { BOB_TRY /* Parses input arguments in a single shot */ - char** kwlist = mStep.kwlist(0); + char** kwlist = m_step.kwlist(0); PyBobLearnEMGMMMachineObject* gmm_machine = 0; - PyBlitzArrayObject* data = 0; + PyBlitzArrayObject* data = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &PyBlitzArray_Converter, &data)) return 0; @@ -262,7 +262,7 @@ static PyObject* PyBobLearnEMMLGMMTrainer_mStep(PyBobLearnEMMLGMMTrainerObject* self->cxx->mStep(*gmm_machine->cxx); - BOB_CATCH_MEMBER("cannot perform the mStep method", 0) + BOB_CATCH_MEMBER("cannot perform the m_step method", 0) Py_RETURN_NONE; } @@ -302,16 +302,16 @@ static PyMethodDef PyBobLearnEMMLGMMTrainer_methods[] = { initialize.doc() }, { - eStep.name(), - (PyCFunction)PyBobLearnEMMLGMMTrainer_eStep, + e_step.name(), + (PyCFunction)PyBobLearnEMMLGMMTrainer_e_step, METH_VARARGS|METH_KEYWORDS, - eStep.doc() + e_step.doc() }, { - mStep.name(), - (PyCFunction)PyBobLearnEMMLGMMTrainer_mStep, + m_step.name(), + (PyCFunction)PyBobLearnEMMLGMMTrainer_m_step, METH_VARARGS|METH_KEYWORDS, - mStep.doc() + m_step.doc() }, { compute_likelihood.name(), @@ -358,4 +358,3 @@ bool init_BobLearnEMMLGMMTrainer(PyObject* module) Py_INCREF(&PyBobLearnEMMLGMMTrainer_Type); return PyModule_AddObject(module, "ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0; } - diff --git a/bob/learn/em/plda_trainer.cpp b/bob/learn/em/plda_trainer.cpp index c0b179cd23940c4ea463c0f857563c02e3cc81a0..f3964541eb818b95493ea075349b32a33b1863da 100644 --- a/bob/learn/em/plda_trainer.cpp +++ b/bob/learn/em/plda_trainer.cpp @@ -460,7 +460,7 @@ static PyObject* PyBobLearnEMPLDATrainer_initialize(PyBobLearnEMPLDATrainerObjec /*** e_step ***/ static auto e_step = bob::extension::FunctionDoc( - "eStep", + "e_step", "Expectation step before the EM steps", "", true @@ -492,7 +492,7 @@ static PyObject* PyBobLearnEMPLDATrainer_e_step(PyBobLearnEMPLDATrainerObject* s /*** m_step ***/ static auto m_step = bob::extension::FunctionDoc( - "mStep", + "m_step", "Maximization step ", "", true diff --git 
a/bob/learn/em/test/test_ivector_trainer.py b/bob/learn/em/test/test_ivector_trainer.py index 34d8cad36500f911987bdf42e218f2e9ccfbf1b3..4456119bac5c4cb194cbdc584cb2b29fa0125c31 100644 --- a/bob/learn/em/test/test_ivector_trainer.py +++ b/bob/learn/em/test/test_ivector_trainer.py @@ -234,14 +234,14 @@ def test_trainer_nosigma(): m.sigma = sigma for it in range(2): # E-Step - trainer.eStep(m, data) + trainer.e_step(m, data) for k in acc_Nij_Sigma_wij2_ref[it]: assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5) for k in acc_Fnorm_Sigma_wij_ref[it]: assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.acc_fnormij_wij[k], 1e-5) # M-Step - trainer.mStep(m) + trainer.m_step(m) assert numpy.allclose(t_ref[it], m.t, 1e-5) def test_trainer_update_sigma(): @@ -348,7 +348,7 @@ def test_trainer_update_sigma(): m.sigma = sigma for it in range(2): # E-Step - trainer.eStep(m, data) + trainer.e_step(m, data) for k in acc_Nij_Sigma_wij2_ref[it]: assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5) for k in acc_Fnorm_Sigma_wij_ref[it]: @@ -357,7 +357,6 @@ def test_trainer_update_sigma(): assert numpy.allclose(N_ref[it], trainer.acc_nij, 1e-5) # M-Step - trainer.mStep(m) + trainer.m_step(m) assert numpy.allclose(t_ref[it], m.t, 1e-5) assert numpy.allclose(sigma_ref[it], m.sigma, 1e-5) - diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py index a479831807b839998170722aa55838acc55aea68..33e65310eb42b8de4aa8d26f3ccfc9d4d1c16b4f 100644 --- a/bob/learn/em/test/test_jfa_trainer.py +++ b/bob/learn/em/test/test_jfa_trainer.py @@ -89,8 +89,8 @@ def test_JFATrainer_updateYandV(): t.__X__ = M_x t.__Y__ = y t.__Z__ = M_z - t.e_step1(m, TRAINING_STATS) - t.m_step1(m, TRAINING_STATS) + t.e_step_v(m, TRAINING_STATS) + t.m_step_v(m, TRAINING_STATS) # Expected results(JFA cookbook, matlab) assert equals(t.__Y__[0], y3, 2e-4) @@ -123,8 +123,8 @@ def test_JFATrainer_updateXandU(): t.__X__ = x t.__Y__ = M_y t.__Z__ = M_z - t.e_step2(m, TRAINING_STATS) - t.m_step2(m, TRAINING_STATS) + t.e_step_u(m, TRAINING_STATS) + t.m_step_u(m, TRAINING_STATS) # Expected results(JFA cookbook, matlab) assert equals(t.__X__[0], x3, 2e-4) @@ -156,8 +156,8 @@ def test_JFATrainer_updateZandD(): t.__X__ = M_x t.__Y__ = M_y t.__Z__ = z - t.e_step3(m, TRAINING_STATS) - t.m_step3(m, TRAINING_STATS) + t.e_step_d(m, TRAINING_STATS) + t.m_step_d(m, TRAINING_STATS) # Expected results(JFA cookbook, matlab) assert equals(t.__Z__[0], z3_ref, 2e-4) @@ -230,8 +230,8 @@ def test_ISVTrainAndEnrol(): t.initialize(mb, TRAINING_STATS) mb.u = M_u for i in range(10): - t.eStep(mb, TRAINING_STATS) - t.mStep(mb) + t.e_step(mb, TRAINING_STATS) + t.m_step(mb) assert numpy.allclose(mb.d, d_ref, eps) assert numpy.allclose(mb.u, u_ref, eps) diff --git a/bob/learn/em/test/test_plda_trainer.py b/bob/learn/em/test/test_plda_trainer.py index edc26b1214452a3d83bbcc18a10c1e3033a4bbe2..4e8abfd7b52115880a0cb14af47411199ed455aa 100644 --- a/bob/learn/em/test/test_plda_trainer.py +++ b/bob/learn/em/test/test_plda_trainer.py @@ -536,7 +536,7 @@ def test_plda_EM_vs_Prince(): m_py.f = F_init # E-step 1 - t.eStep(m,l) + t.e_step(m,l) t_py.e_step(m_py,l) # Compares statistics to Prince matlab reference assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10) @@ -548,7 +548,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10) # M-step 1 - t.mStep(m,l) + t.m_step(m,l) t_py.m_step(m_py,l) # Compares F, G and sigma 
to Prince matlab reference assert numpy.allclose(m.f, F_1, 1e-10) @@ -560,7 +560,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(m.sigma, m_py.sigma, 1e-10) # E-step 2 - t.eStep(m,l) + t.e_step(m,l) t_py.e_step(m_py,l) # Compares statistics to Prince matlab reference assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10) @@ -572,7 +572,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10) # M-step 2 - t.mStep(m,l) + t.m_step(m,l) t_py.m_step(m_py,l) # Compares F, G and sigma to Prince matlab reference assert numpy.allclose(m.f, F_2, 1e-10) @@ -598,7 +598,7 @@ def test_plda_EM_vs_Prince(): m_py.f = F_init # E-step 1 - t.eStep(m,l) + t.e_step(m,l) t_py.e_step(m_py,l) # Compares statistics to Prince matlab reference assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10) @@ -611,7 +611,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10) # M-step 1 - t.mStep(m,l) + t.m_step(m,l) t_py.m_step(m_py,l) # Compares F, G and sigma to the ones of the python implementation assert numpy.allclose(m.f, m_py.f, 1e-10) @@ -619,7 +619,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(m.sigma, m_py.sigma, 1e-10) # E-step 2 - t.eStep(m,l) + t.e_step(m,l) t_py.e_step(m_py,l) # Compares statistics to Prince matlab reference assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10) @@ -632,7 +632,7 @@ def test_plda_EM_vs_Prince(): assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10) # M-step 2 - t.mStep(m,l) + t.m_step(m,l) t_py.m_step(m_py,l) # Compares F, G and sigma to the ones of the python implementation assert numpy.allclose(m.f, m_py.f, 1e-10) @@ -721,23 +721,23 @@ def test_plda_comparisons(): m = PLDABase(4,1,1,1e-8) rng = bob.core.random.mt19937(37) t1.initialize(m, training_set,rng) - t1.eStep(m, training_set) - t1.mStep(m, training_set) + t1.e_step(m, training_set) + t1.m_step(m, training_set) assert (t1 == t2 ) is False assert t1 != t2 assert (t1.is_similar_to(t2) ) is False rng = bob.core.random.mt19937(37) t2.initialize(m, training_set, rng) - t2.eStep(m, training_set) - t2.mStep(m, training_set) + t2.e_step(m, training_set) + t2.m_step(m, training_set) assert t1 == t2 assert (t1 != t2 ) is False assert t1.is_similar_to(t2) rng = bob.core.random.mt19937(77) t2.initialize(m, training_set, rng) - t2.eStep(m, training_set) - t2.mStep(m, training_set) + t2.e_step(m, training_set) + t2.m_step(m, training_set) assert (t1 == t2 ) is False assert t1 != t2 assert (t1.is_similar_to(t2) ) is False diff --git a/bob/learn/em/train.py b/bob/learn/em/train.py index 1fe9a4268522d220f6d055a8580589ebb430d5f2..1e0fe5f9ea41fc5e90f4b0e5a67bbe9e1850a393 100644 --- a/bob/learn/em/train.py +++ b/bob/learn/em/train.py @@ -34,19 +34,19 @@ def train(trainer, machine, data, max_iterations = 50, convergence_threshold=Non else: trainer.initialize(machine, data) - trainer.eStep(machine, data) + trainer.e_step(machine, data) average_output = 0 average_output_previous = 0 if convergence_threshold!=None and hasattr(trainer,"compute_likelihood"): average_output = trainer.compute_likelihood(machine) - + for i in range(max_iterations): average_output_previous = average_output - trainer.mStep(machine, data) - trainer.eStep(machine, data) + trainer.m_step(machine, data) + trainer.e_step(machine, data) - if convergence_threshold!=None and hasattr(trainer,"compute_likelihood"): + if convergence_threshold!=None and 
hasattr(trainer,"compute_likelihood"): average_output = trainer.compute_likelihood(machine) #Terminates if converged (and likelihood computation is set) @@ -76,22 +76,21 @@ def train_jfa(trainer, jfa_base, data, max_iterations=10, initialize=True): if initialize: trainer.initialize(jfa_base, data) - + #V Subspace for i in range(max_iterations): - trainer.e_step1(jfa_base, data) - trainer.m_step1(jfa_base, data) - trainer.finalize1(jfa_base, data) + trainer.e_step_v(jfa_base, data) + trainer.m_step_v(jfa_base, data) + trainer.finalize_v(jfa_base, data) #U subspace for i in range(max_iterations): - trainer.e_step2(jfa_base, data) - trainer.m_step2(jfa_base, data) - trainer.finalize2(jfa_base, data) + trainer.e_step_u(jfa_base, data) + trainer.m_step_u(jfa_base, data) + trainer.finalize_u(jfa_base, data) # d subspace for i in range(max_iterations): - trainer.e_step3(jfa_base, data) - trainer.m_step3(jfa_base, data) - trainer.finalize3(jfa_base, data) - + trainer.e_step_d(jfa_base, data) + trainer.m_step_d(jfa_base, data) + trainer.finalize_d(jfa_base, data) diff --git a/doc/guide.rst b/doc/guide.rst index ac286249d5212b530e1222c8d0cac3078db1e4cf..8cf1d57e9154576e0f49eb68419af7642a2a1ce3 100644 --- a/doc/guide.rst +++ b/doc/guide.rst @@ -374,8 +374,8 @@ For example, to train a K-Means with 10 iterations you can use the following ste >>> max_iterations = 10 >>> kmeans_trainer.initialize(kmeans_machine, data) #Initilizing the means with random values >>> for i in range(max_iterations): - ... kmeans_trainer.eStep(kmeans_machine, data) - ... kmeans_trainer.mStep(kmeans_machine, data) + ... kmeans_trainer.e_step(kmeans_machine, data) + ... kmeans_trainer.m_step(kmeans_machine, data) >>> print(kmeans_machine.means) [[ -6. 6. -100.5] [ 3.5 -3.5 99. ]] diff --git a/setup.py b/setup.py index 3f89ff52018f14f373ba3a4206ca5e33946942e9..8f1e3c7ebbbd0506bc111909b8d57c0eb66de615 100644 --- a/setup.py +++ b/setup.py @@ -114,8 +114,8 @@ setup( "bob/learn/em/kmeans_machine.cpp", "bob/learn/em/kmeans_trainer.cpp", - "bob/learn/em/ML_gmm_trainer.cpp", - "bob/learn/em/MAP_gmm_trainer.cpp", + "bob/learn/em/ml_gmm_trainer.cpp", + "bob/learn/em/map_gmm_trainer.cpp", "bob/learn/em/jfa_base.cpp", "bob/learn/em/jfa_machine.cpp", @@ -124,10 +124,10 @@ setup( "bob/learn/em/isv_base.cpp", "bob/learn/em/isv_machine.cpp", "bob/learn/em/isv_trainer.cpp", - + "bob/learn/em/ivector_machine.cpp", "bob/learn/em/ivector_trainer.cpp", - + "bob/learn/em/plda_base.cpp", "bob/learn/em/plda_machine.cpp",
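
Note: for quick reference, below is a minimal sketch of the renamed snake_case EM loop, modelled on the updated doc/guide.rst and bob/learn/em/train.py hunks above. The toy data, the number of means and the KMeansMachine(2, 3) / KMeansTrainer() constructor calls are illustrative assumptions and are not part of this patch; only the initialize / e_step / m_step method names (formerly initialize / eStep / mStep) are taken from the diff.

    import numpy
    import bob.learn.em

    # Illustrative toy data (assumption): four 3-D points forming two clusters.
    # The bindings only accept 2D float64 arrays (see the checks in e_step above).
    data = numpy.array([[ 3.0, -3.0,   99.0],
                        [ 4.0, -4.0,   98.0],
                        [-7.0,  7.0, -100.0],
                        [-6.0,  6.0, -101.0]], dtype='float64')

    kmeans_machine = bob.learn.em.KMeansMachine(2, 3)  # assumed signature: 2 means, 3 input dimensions
    kmeans_trainer = bob.learn.em.KMeansTrainer()      # assumed default constructor

    # Explicit EM loop using the renamed methods (formerly eStep/mStep).
    kmeans_trainer.initialize(kmeans_machine, data)
    for i in range(10):
        kmeans_trainer.e_step(kmeans_machine, data)
        kmeans_trainer.m_step(kmeans_machine, data)

    print(kmeans_machine.means)

The same rename applies to the other trainers touched by this patch; in particular, the JFA trainer now exposes e_step_v/m_step_v/finalize_v, e_step_u/m_step_u/finalize_u and e_step_d/m_step_d/finalize_d in place of the numbered _1/_2/_3 variants, as exercised by train_jfa in bob/learn/em/train.py above.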