Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • bob/bob.learn.em
1 result
Show changes
Commits on Source (17)
Showing
with 129 additions and 126 deletions
......@@ -15,5 +15,6 @@ dist
.nfs*
.gdb_history
build
.DS_Store
*.egg
src/
......@@ -142,31 +142,6 @@ deploy_linux_27:
- conda-linux
# Linux + Python 3.4: Builds and tests
build_linux_34:
<<: *build_job
variables: &linux_34_build_variables
PYTHON_VERSION: "3.4"
tags:
- conda-linux
test_linux_34:
<<: *test_job
variables: *linux_34_build_variables
dependencies:
- build_linux_34
tags:
- conda-linux
wheels_linux_34:
<<: *wheels_job
variables: *linux_34_build_variables
dependencies:
- build_linux_34
tags:
- conda-linux
# Linux + Python 3.5: Builds, tests and uploads wheel
build_linux_35:
<<: *build_job
......@@ -200,6 +175,31 @@ docs_linux_35:
- conda-linux
# Linux + Python 3.6: Builds and tests
build_linux_36:
<<: *build_job
variables: &linux_36_build_variables
PYTHON_VERSION: "3.6"
tags:
- conda-linux
test_linux_36:
<<: *test_job
variables: *linux_36_build_variables
dependencies:
- build_linux_36
tags:
- conda-linux
wheels_linux_36:
<<: *wheels_job
variables: *linux_36_build_variables
dependencies:
- build_linux_36
tags:
- conda-linux
# Mac OSX + Python 2.7: Builds and tests
build_macosx_27:
<<: *build_job
......@@ -225,51 +225,51 @@ wheels_macosx_27:
- conda-macosx
# Mac OSX + Python 3.4: Builds and tests
build_macosx_34:
# Mac OSX + Python 3.5: Builds and tests
build_macosx_35:
<<: *build_job
variables: &macosx_34_build_variables
PYTHON_VERSION: "3.4"
variables: &macosx_35_build_variables
PYTHON_VERSION: "3.5"
tags:
- conda-macosx
test_macosx_34:
test_macosx_35:
<<: *test_job
variables: *macosx_34_build_variables
variables: *macosx_35_build_variables
dependencies:
- build_macosx_34
- build_macosx_35
tags:
- conda-macosx
wheels_macosx_34:
wheels_macosx_35:
<<: *wheels_job
variables: *macosx_34_build_variables
variables: *macosx_35_build_variables
dependencies:
- build_macosx_34
- build_macosx_35
tags:
- conda-macosx
# Mac OSX + Python 3.5: Builds and tests
build_macosx_35:
# Mac OSX + Python 3.6: Builds and tests
build_macosx_36:
<<: *build_job
variables: &macosx_35_build_variables
PYTHON_VERSION: "3.5"
variables: &macosx_36_build_variables
PYTHON_VERSION: "3.6"
tags:
- conda-macosx
test_macosx_35:
test_macosx_36:
<<: *test_job
variables: *macosx_35_build_variables
variables: *macosx_36_build_variables
dependencies:
- build_macosx_35
- build_macosx_36
tags:
- conda-macosx
wheels_macosx_35:
wheels_macosx_36:
<<: *wheels_job
variables: *macosx_35_build_variables
variables: *macosx_36_build_variables
dependencies:
- build_macosx_35
- build_macosx_36
tags:
- conda-macosx
include LICENSE README.rst bootstrap-buildout.py buildout.cfg develop.cfg requirements.txt version.txt
include LICENSE README.rst buildout.cfg develop.cfg requirements.txt version.txt
recursive-include doc conf.py *.rst
recursive-include bob/learn/em *.cpp *.h
recursive-include bob/learn/em/data *.*
......@@ -7,6 +7,8 @@
:target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.learn.em/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.learn.em/badges/master/build.svg
:target: https://gitlab.idiap.ch/bob/bob.learn.em/commits/master
.. image:: https://gitlab.idiap.ch/bob/bob.learn.em/badges/master/coverage.svg
:target: https://gitlab.idiap.ch/bob/bob.learn.em/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/bob/bob.learn.em
.. image:: http://img.shields.io/pypi/v/bob.learn.em.svg
......@@ -47,11 +49,10 @@ The package includes the machine definition per se and a selection of different
Installation
------------
Follow our `installation`_ instructions. Then, using the Python interpreter
provided by the distribution, bootstrap and buildout this package::
Complete Bob's `installation`_ instructions. Then, to install this package,
run::
$ python bootstrap-buildout.py
$ ./bin/buildout
$ conda install bob.learn.em
Contact
......
......@@ -206,7 +206,7 @@ void bob::learn::em::IVectorMachine::resizeTmp()
void bob::learn::em::IVectorMachine::forward(const bob::learn::em::GMMStats& gs,
blitz::Array<double,1>& ivector) const
{
bob::core::array::assertSameDimensionLength(ivector.extent(0), (int)m_rt);
bob::core::array::assertSameDimensionLength(ivector.extent(0), (int)m_rt);
forward_(gs, ivector);
}
......@@ -246,6 +246,5 @@ void bob::learn::em::IVectorMachine::forward_(const bob::learn::em::GMMStats& gs
computeTtSigmaInvFnorm(gs, m_tmp_t1);
// Solves m_tmp_tt.ivector = m_tmp_t1
bob::math::linsolve(m_tmp_tt, ivector, m_tmp_t1);
bob::math::linsolve(m_tmp_tt, m_tmp_t1, ivector);
}
......@@ -170,7 +170,7 @@ void bob::learn::em::IVectorTrainer::mStep(
if (blitz::all(acc_Nij_wij2_c == 0)) // TODO
Tt_c = 0;
else
bob::math::linsolve(tacc_Nij_wij2_c, Tt_c, tacc_Fnormij_wij_c);
bob::math::linsolve(tacc_Nij_wij2_c, tacc_Fnormij_wij_c, Tt_c);
if (m_update_sigma)
{
blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*D,(c+1)*D-1));
......
......@@ -15,7 +15,8 @@
static auto GMMMachine_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".GMMMachine",
"This class implements a multivariate diagonal Gaussian distribution.",
"This class implements the statistical model for multivariate diagonal mixture Gaussian distribution (GMM). "
"A GMM is defined as :math:`\\sum_{c=0}^{C} \\omega_c \\mathcal{N}(x | \\mu_c, \\sigma_c)`, where :math:`C` is the number of Gaussian components :math:`\\mu_c`, :math:`\\sigma_c` and :math:`\\omega_c` are respectively the the mean, variance and the weight of each gaussian component :math:`c`.",
"See Section 2.3.9 of Bishop, \"Pattern recognition and machine learning\", 2006"
).add_constructor(
bob::extension::FunctionDoc(
......@@ -744,7 +745,7 @@ static PyObject* PyBobLearnEMGMMMachine_loglikelihood_(PyBobLearnEMGMMMachineObj
/*** acc_statistics ***/
static auto acc_statistics = bob::extension::FunctionDoc(
"acc_statistics",
"Accumulate the GMM statistics for this sample(s). Inputs are checked.",
"Accumulate the GMM statistics (:py:class:`bob.learn.em.GMMStats`) for this sample(s). Inputs are checked.",
"",
true
)
......@@ -780,7 +781,7 @@ static PyObject* PyBobLearnEMGMMMachine_accStatistics(PyBobLearnEMGMMMachineObje
/*** acc_statistics_ ***/
static auto acc_statistics_ = bob::extension::FunctionDoc(
"acc_statistics_",
"Accumulate the GMM statistics for this sample(s). Inputs are NOT checked.",
"Accumulate the GMM statistics (:py:class:`bob.learn.em.GMMStats`) for this sample(s). Inputs are NOT checked.",
"",
true
)
......@@ -853,7 +854,7 @@ static PyObject* PyBobLearnEMGMMMachine_setVarianceThresholds_method(PyBobLearnE
/*** get_gaussian ***/
static auto get_gaussian = bob::extension::FunctionDoc(
"get_gaussian",
"Get the specified Gaussian component.",
"Get the specified Gaussian (:py:class:`bob.learn.em.Gaussian`) component.",
".. note:: An exception is thrown if i is out of range.",
true
)
......
......@@ -40,7 +40,7 @@ class KMeansTrainer
/**
* @brief Constructor
*/
KMeansTrainer(InitializationMethod=RANDOM);
KMeansTrainer(InitializationMethod=RANDOM_NO_DUPLICATE);
/**
* @brief Virtualize destructor
......
......@@ -184,7 +184,7 @@ static auto supervector_length = bob::extension::VariableDoc(
"int",
"Returns the supervector length.",
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality"
"WARNING An exception is thrown if no Universal Background Model has been set yet."
"An exception is thrown if no Universal Background Model has been set yet."
""
);
PyObject* PyBobLearnEMISVBase_getSupervectorLength(PyBobLearnEMISVBaseObject* self, void*) {
......
......@@ -69,15 +69,15 @@ static int PyBobLearnEMISVMachine_init_hdf5(PyBobLearnEMISVMachineObject* self,
static int PyBobLearnEMISVMachine_init_isvbase(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
char** kwlist = ISVMachine_doc.kwlist(0);
PyBobLearnEMISVBaseObject* isv_base;
//Here we have to select which keyword argument to read
//Here we have to select which keyword argument to read
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base)){
ISVMachine_doc.print_usage();
return -1;
}
self->cxx.reset(new bob::learn::em::ISVMachine(isv_base->cxx));
return 0;
}
......@@ -115,7 +115,7 @@ static int PyBobLearnEMISVMachine_init(PyBobLearnEMISVMachineObject* self, PyObj
ISVMachine_doc.print_usage();
return -1;
}
BOB_CATCH_MEMBER("cannot create ISVMachine", -1)
return 0;
}
......@@ -173,8 +173,8 @@ static auto supervector_length = bob::extension::VariableDoc(
"int",
"Returns the supervector length.",
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality"
"@warning An exception is thrown if no Universal Background Model has been set yet."
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality. "
"An exception is thrown if no Universal Background Model has been set yet."
""
);
PyObject* PyBobLearnEMISVMachine_getSupervectorLength(PyBobLearnEMISVMachineObject* self, void*) {
......@@ -203,17 +203,17 @@ int PyBobLearnEMISVMachine_setZ(PyBobLearnEMISVMachineObject* self, PyObject* va
return -1;
}
auto o_ = make_safe(input);
// perform check on the input
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, Z.name());
return -1;
}
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, Z.name());
return -1;
}
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getZ().extent(0)) {
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getZ().extent(0), (Py_ssize_t)input->shape[0], Z.name());
......@@ -276,13 +276,13 @@ int PyBobLearnEMISVMachine_setISVBase(PyBobLearnEMISVMachineObject* self, PyObje
self->cxx->setISVBase(isv_base_o->cxx);
return 0;
BOB_CATCH_MEMBER("isv_base could not be set", -1)
BOB_CATCH_MEMBER("isv_base could not be set", -1)
}
static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = {
static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = {
{
shape.name(),
(getter)PyBobLearnEMISVMachine_getShape,
......@@ -290,7 +290,7 @@ static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = {
shape.doc(),
0
},
{
supervector_length.name(),
(getter)PyBobLearnEMISVMachine_getSupervectorLength,
......@@ -298,7 +298,7 @@ static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = {
supervector_length.doc(),
0
},
{
isv_base.name(),
(getter)PyBobLearnEMISVMachine_getISVBase,
......@@ -343,9 +343,9 @@ static auto save = bob::extension::FunctionDoc(
static PyObject* PyBobLearnEMISVMachine_Save(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
// get list of arguments
char** kwlist = save.kwlist(0);
char** kwlist = save.kwlist(0);
PyBobIoHDF5FileObject* hdf5;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
......@@ -365,12 +365,12 @@ static auto load = bob::extension::FunctionDoc(
.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
static PyObject* PyBobLearnEMISVMachine_Load(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
BOB_TRY
char** kwlist = load.kwlist(0);
char** kwlist = load.kwlist(0);
PyBobIoHDF5FileObject* hdf5;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
auto hdf5_ = make_safe(hdf5);
auto hdf5_ = make_safe(hdf5);
self->cxx->load(*hdf5->f);
BOB_CATCH_MEMBER("cannot load the data", 0)
......@@ -381,7 +381,7 @@ static PyObject* PyBobLearnEMISVMachine_Load(PyBobLearnEMISVMachineObject* self,
/*** is_similar_to ***/
static auto is_similar_to = bob::extension::FunctionDoc(
"is_similar_to",
"Compares this ISVMachine with the ``other`` one to be approximately the same.",
"The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
"relative and absolute precision for the ``weights``, ``biases`` "
......@@ -406,8 +406,8 @@ static PyObject* PyBobLearnEMISVMachine_IsSimilarTo(PyBobLearnEMISVMachineObject
&PyBobLearnEMISVMachine_Type, &other,
&r_epsilon, &a_epsilon)){
is_similar_to.print_usage();
return 0;
is_similar_to.print_usage();
return 0;
}
if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
......@@ -421,7 +421,7 @@ static PyObject* PyBobLearnEMISVMachine_IsSimilarTo(PyBobLearnEMISVMachineObject
static auto estimate_x = bob::extension::FunctionDoc(
"estimate_x",
"Estimates the session offset x (LPT assumption) given GMM statistics.",
"Estimates :math:`x` from the GMM statistics considering the LPT assumption, that is the latent session variable :math:`x` is approximated using the UBM",
"Estimates :math:`x` from the GMM statistics considering the LPT assumption, that is the latent session variable :math:`x` is approximated using the UBM",
true
)
.add_prototype("stats,input")
......@@ -435,29 +435,29 @@ static PyObject* PyBobLearnEMISVMachine_estimateX(PyBobLearnEMISVMachineObject*
PyBobLearnEMGMMStatsObject* stats = 0;
PyBlitzArrayObject* input = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
&PyBlitzArray_Converter,&input))
return 0;
//protects acquired resources through this scope
auto input_ = make_safe(input);
// perform check on the input
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, estimate_x.name());
return 0;
}
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, estimate_x.name());
return 0;
}
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()) {
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), (Py_ssize_t)input->shape[0], estimate_x.name());
return 0;
}
self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
BOB_CATCH_MEMBER("cannot estimate X", 0)
......@@ -469,7 +469,7 @@ static PyObject* PyBobLearnEMISVMachine_estimateX(PyBobLearnEMISVMachineObject*
static auto estimate_ux = bob::extension::FunctionDoc(
"estimate_ux",
"Estimates Ux (LPT assumption) given GMM statistics.",
"Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.",
"Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.",
true
)
.add_prototype("stats,input")
......@@ -483,29 +483,29 @@ static PyObject* PyBobLearnEMISVMachine_estimateUx(PyBobLearnEMISVMachineObject*
PyBobLearnEMGMMStatsObject* stats = 0;
PyBlitzArrayObject* input = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
&PyBlitzArray_Converter,&input))
return 0;
//protects acquired resources through this scope
auto input_ = make_safe(input);
// perform check on the input
// perform check on the input
if (input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, estimate_ux.name());
return 0;
}
}
if (input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, estimate_ux.name());
return 0;
}
}
if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()) {
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs()*(Py_ssize_t)self->cxx->getNGaussians(), (Py_ssize_t)input->shape[0], estimate_ux.name());
return 0;
}
self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
BOB_CATCH_MEMBER("cannot estimate Ux", 0)
......@@ -517,7 +517,7 @@ static PyObject* PyBobLearnEMISVMachine_estimateUx(PyBobLearnEMISVMachineObject*
static auto forward_ux = bob::extension::FunctionDoc(
"forward_ux",
"Computes a score for the given UBM statistics and given the Ux vector",
"",
"",
true
)
.add_prototype("stats,ux")
......@@ -531,30 +531,30 @@ static PyObject* PyBobLearnEMISVMachine_ForwardUx(PyBobLearnEMISVMachineObject*
PyBobLearnEMGMMStatsObject* stats = 0;
PyBlitzArrayObject* ux_input = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
&PyBlitzArray_Converter,&ux_input))
return 0;
//protects acquired resources through this scope
auto ux_input_ = make_safe(ux_input);
// perform check on the input
// perform check on the input
if (ux_input->type_num != NPY_FLOAT64){
PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, forward_ux.name());
return 0;
}
}
if (ux_input->ndim != 1){
PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, forward_ux.name());
return 0;
}
}
if (ux_input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()) {
PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs(), (Py_ssize_t)ux_input->shape[0], forward_ux.name());
return 0;
}
double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
return Py_BuildValue("d", score);
BOB_CATCH_MEMBER("cannot forward_ux", 0)
}
......@@ -564,7 +564,7 @@ static PyObject* PyBobLearnEMISVMachine_ForwardUx(PyBobLearnEMISVMachineObject*
static auto forward = bob::extension::FunctionDoc(
"forward",
"Execute the machine",
"",
"",
true
)
.add_prototype("stats")
......@@ -575,7 +575,7 @@ static PyObject* PyBobLearnEMISVMachine_Forward(PyBobLearnEMISVMachineObject* se
char** kwlist = forward.kwlist(0);
PyBobLearnEMGMMStatsObject* stats = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
return 0;
......@@ -607,14 +607,14 @@ static PyMethodDef PyBobLearnEMISVMachine_methods[] = {
METH_VARARGS|METH_KEYWORDS,
is_similar_to.doc()
},
{
estimate_x.name(),
(PyCFunction)PyBobLearnEMISVMachine_estimateX,
METH_VARARGS|METH_KEYWORDS,
estimate_x.doc()
},
{
estimate_ux.name(),
(PyCFunction)PyBobLearnEMISVMachine_estimateUx,
......
......@@ -88,7 +88,7 @@ int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
static auto ISVTrainer_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".ISVTrainer",
"ISVTrainer"
"References: [Vogt2008,McCool2013]",
"Train Intersession varibility modeling :ref:`ISV <isv>`.",
""
).add_constructor(
bob::extension::FunctionDoc(
......
......@@ -15,8 +15,8 @@
static auto IVectorMachine_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".IVectorMachine",
"An IVectorMachine consists of a Total Variability subspace :math:`T` and allows the extraction of IVector"
"References: [Dehak2010]_",
"Statistical model for the Total Variability training for more information and explanation see the user guide in documentation (:ref:`iVectors <ivector>`)" // this documentation text is intentionally written to be long!
"",
""
).add_constructor(
bob::extension::FunctionDoc(
......@@ -189,7 +189,7 @@ static auto supervector_length = bob::extension::VariableDoc(
"Returns the supervector length.",
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality"
"@warning An exception is thrown if no Universal Background Model has been set yet."
"An exception is thrown if no Universal Background Model has been set yet."
""
);
PyObject* PyBobLearnEMIVectorMachine_getSupervectorLength(PyBobLearnEMIVectorMachineObject* self, void*) {
......
......@@ -36,9 +36,9 @@ static int extract_GMMStats_1d(PyObject *list,
static auto IVectorTrainer_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".IVectorTrainer",
"IVectorTrainer"
"An IVectorTrainer to learn a Total Variability subspace :math:`$T$`"
" (and eventually a covariance matrix :math:`$\\Sigma$`).",
" References: [Dehak2010]"
"Trains the Total Variability subspace :math:`$T$` to generate :ref:`iVectors <ivector>`."
"",
""
).add_constructor(
bob::extension::FunctionDoc(
"__init__",
......
......@@ -15,8 +15,8 @@
static auto JFABase_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".JFABase",
"A JFABase instance can be seen as a container for :math:`U`, :math:`V` and :math:`D` when performing Joint Factor Analysis (JFA).\n\n"
"References: [Vogt2008]_ [McCool2013]_",
"Container for :math:`U`, :math:`V` and :math:`D` when performing Joint Factor Analysis (:ref:`JFA <jfa>`).\n\n"
"",
""
).add_constructor(
bob::extension::FunctionDoc(
......@@ -192,7 +192,7 @@ static auto supervector_length = bob::extension::VariableDoc(
"Returns the supervector length.",
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality"
"@warning An exception is thrown if no Universal Background Model has been set yet."
"An exception is thrown if no Universal Background Model has been set yet."
""
);
PyObject* PyBobLearnEMJFABase_getSupervectorLength(PyBobLearnEMJFABaseObject* self, void*) {
......
......@@ -173,8 +173,8 @@ static auto supervector_length = bob::extension::VariableDoc(
"int",
"Returns the supervector length.",
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality"
"@warning An exception is thrown if no Universal Background Model has been set yet."
"NGaussians x NInputs: Number of Gaussian components by the feature dimensionality. "
"An exception is thrown if no Universal Background Model has been set yet."
""
);
PyObject* PyBobLearnEMJFAMachine_getSupervectorLength(PyBobLearnEMJFAMachineObject* self, void*) {
......
......@@ -87,8 +87,8 @@ int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
static auto JFATrainer_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".JFATrainer",
"JFATrainer"
"References: [Vogt2008,McCool2013]",
"Trains a Joint Factor Analysis (:ref:`JFA <jfa>`) on top of GMMs"
"",
""
).add_constructor(
bob::extension::FunctionDoc(
......
......@@ -15,7 +15,7 @@
static auto KMeansMachine_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".KMeansMachine",
"This class implements a k-means classifier.\n"
"Statistical model for the :ref:`k-means <kmeans>` .\n"
"See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
).add_constructor(
bob::extension::FunctionDoc(
......
......@@ -42,8 +42,8 @@ static inline const std::string& IM2string(bob::learn::em::KMeansTrainer::Initia
static auto KMeansTrainer_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".KMeansTrainer",
"Trains a KMeans machine."
"This class implements the expectation-maximization algorithm for a k-means machine."
"Trains a KMeans clustering :ref:`k-means <kmeans>`."
"This class implements the expectation-maximization algorithm for a k-means."
"See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
"It uses a random initialization of the means followed by the expectation-maximization algorithm"
).add_constructor(
......
......@@ -71,6 +71,7 @@ static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}
/*** linear_scoring ***/
bob::extension::FunctionDoc linear_scoring1 = bob::extension::FunctionDoc(
"linear_scoring",
"The :ref:`Linear scoring <linearscoring>` is an approximation to the log-likelihood ratio that was shown to be as accurate and up to two orders of magnitude more efficient to compute [Glembek2009]_."
"",
0,
true
......
......@@ -17,7 +17,7 @@ static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;} /*
static auto MAP_GMMTrainer_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".MAP_GMMTrainer",
"This class implements the maximum a posteriori M-step of the expectation-maximization algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation."
"This class implements the maximum a posteriori (:ref:`MAP <map>`) M-step of the expectation-maximization algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation."
).add_constructor(
bob::extension::FunctionDoc(
"__init__",
......