bob / bob.learn.em · Commits

Commit 580ba1db
Authored Feb 17, 2015 by Tiago de Freitas Pereira

    Documenting and changing the documentation stuff

Parent: 0e70e3e7
Changes: 28 files
bob/learn/em/ML_gmm_trainer.cpp
@@ -156,20 +156,21 @@ static auto initialize = bob::extension::FunctionDoc(
""
,
true
)
.
add_prototype
(
"gmm_machine"
)
.
add_parameter
(
"gmm_machine"
,
":py:class:`bob.learn.em.GMMMachine`"
,
"GMMMachine Object"
);
.
add_prototype
(
"gmm_machine,data"
)
.
add_parameter
(
"gmm_machine"
,
":py:class:`bob.learn.em.GMMMachine`"
,
"GMMMachine Object"
)
.
add_parameter
(
"data"
,
"array_like <float, 2D>"
,
"Ignored."
);
static
PyObject
*
PyBobLearnEMMLGMMTrainer_initialize
(
PyBobLearnEMMLGMMTrainerObject
*
self
,
PyObject
*
args
,
PyObject
*
kwargs
)
{
BOB_TRY
/* Parses input arguments in a single shot */
char
**
kwlist
=
initialize
.
kwlist
(
0
);
PyBobLearnEMGMMMachineObject
*
gmm_machine
=
0
;
PyBlitzArrayObject
*
data
=
0
;
if
(
!
PyArg_ParseTupleAndKeywords
(
args
,
kwargs
,
"O!|O&"
,
kwlist
,
&
PyBobLearnEMGMMMachine_Type
,
&
gmm_machine
,
&
PyBlitzArray_Converter
,
&
data
))
return
0
;
auto
data_
=
make_safe
(
data
);
if
(
!
PyArg_ParseTupleAndKeywords
(
args
,
kwargs
,
"O!"
,
kwlist
,
&
PyBobLearnEMGMMMachine_Type
,
&
gmm_machine
)){
PyErr_Format
(
PyExc_RuntimeError
,
"%s.%s. Was not possible to read :py:class:`bob.learn.em.GMMMachine`"
,
Py_TYPE
(
self
)
->
tp_name
,
initialize
.
name
());
return
0
;
}
self
->
cxx
->
initialize
(
*
gmm_machine
->
cxx
);
BOB_CATCH_MEMBER
(
"cannot perform the initialize method"
,
0
)
...
...
@@ -222,17 +223,22 @@ static auto mStep = bob::extension::FunctionDoc(
  true
)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Ignored.");

static PyObject* PyBobLearnEMMLGMMTrainer_mStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
  BOB_TRY

  /* Parses input arguments in a single shot */
  char** kwlist = mStep.kwlist(0);

- PyBobLearnEMGMMMachineObject* gmm_machine;
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+ PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+ PyBlitzArrayObject* data = 0;
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &PyBlitzArray_Converter, &data)) return 0;
+ if (data != NULL) auto data_ = make_safe(data);

  self->cxx->mStep(*gmm_machine->cxx);
@@ -330,6 +336,6 @@ bool init_BobLearnEMMLGMMTrainer(PyObject* module)
  // add the type to the module
  Py_INCREF(&PyBobLearnEMMLGMMTrainer_Type);
- return PyModule_AddObject(module, "_ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0;
+ return PyModule_AddObject(module, "ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0;
}
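The net effect of this file's changes is that the documented prototypes of initialize and mStep gain an optional data argument which the bindings accept but ignore, and that the type is registered as ML_GMMTrainer instead of _ML_GMMTrainer. As a rough illustration only (a sketch, assuming bob.learn.em built at this revision; the machine size and data array are made up), calling the bound trainer from Python would look like:

  import numpy
  import bob.learn.em

  data = numpy.random.randn(100, 3)        # hypothetical 2D float64 training data
  machine = bob.learn.em.GMMMachine(2, 3)  # 2 Gaussians over 3-dimensional features
  trainer = bob.learn.em.ML_GMMTrainer(update_means=True, update_variances=False, update_weights=False)

  trainer.initialize(machine, data)  # 'data' matches the new prototype but is documented as "Ignored."
  trainer.eStep(machine, data)
  trainer.mStep(machine, data)       # likewise accepted and ignored by mStep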
bob/learn/em/__MAP_gmm_trainer__.py
@@ -11,7 +11,7 @@ import numpy
# define the class
class MAP_GMMTrainer(_MAP_GMMTrainer):

-  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True, **kwargs):
+  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, **kwargs):
    """
    :py:class:`bob.learn.em.MAP_GMMTrainer` constructor
@@ -43,57 +43,6 @@ class MAP_GMMTrainer(_MAP_GMMTrainer):
    relevance_factor = kwargs.get('relevance_factor')
    _MAP_GMMTrainer.__init__(self, prior_gmm, relevance_factor=relevance_factor, update_means=update_means, update_variances=update_variances, update_weights=update_weights)

-    self.convergence_threshold  = convergence_threshold
-    self.max_iterations         = max_iterations
-    self.converge_by_likelihood = converge_by_likelihood
-
-  def train(self, gmm_machine, data):
-    """
-    Train the :py:class:bob.learn.em.GMMMachine using data
-
-    Keyword Parameters:
-      gmm_machine
-        The :py:class:bob.learn.em.GMMMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(gmm_machine);
-
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output          = -numpy.inf;
-
-    #eStep
-    self.eStep(gmm_machine, data);
-
-    if(self.converge_by_likelihood):
-      average_output = self.compute_likelihood(gmm_machine);
-
-    for i in range(self.max_iterations):
-
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(gmm_machine);
-
-      #eStep
-      self.eStep(gmm_machine, data);
-
-      #Computes log likelihood if required
-      if(self.converge_by_likelihood):
-        average_output = self.compute_likelihood(gmm_machine);
-
-      #Terminates if converged (and likelihood computation is set)
-      if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
-        break

  # copy the documentation from the base class
  __doc__ = _MAP_GMMTrainer.__doc__
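Only the constructor survives in this wrapper. A minimal sketch (an assumption, not code from the commit) of instantiating it after the change, with relevance_factor still passed through **kwargs:

  import bob.learn.em

  prior = bob.learn.em.GMMMachine(2, 3)  # hypothetical prior/UBM: 2 Gaussians, 3 dimensions
  trainer = bob.learn.em.MAP_GMMTrainer(prior, update_means=True, update_variances=False,
                                        update_weights=False, relevance_factor=4.)
  # convergence_threshold, max_iterations and converge_by_likelihood are no longer
  # attributes of this wrapper; the removed train() loop presumably moves elsewhere
  # (see the 'from train import *' change in __init__.py below).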
bob/learn/em/__ML_gmm_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Mon Jan 22 18:29:10 2015
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _ML_GMMTrainer
import numpy

# define the class
class ML_GMMTrainer(_ML_GMMTrainer):

  def __init__(self, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True):
    """
    :py:class:bob.learn.em.ML_GMMTrainer constructor

    Keyword Parameters:
      update_means
      update_variances
      update_weights
      convergence_threshold
        Convergence threshold
      max_iterations
        Number of maximum iterations
      converge_by_likelihood
        Tells whether we compute log_likelihood as a convergence criteria, or not
    """

    _ML_GMMTrainer.__init__(self, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
    self.convergence_threshold  = convergence_threshold
    self.max_iterations         = max_iterations
    self.converge_by_likelihood = converge_by_likelihood

  def train(self, gmm_machine, data):
    """
    Train the :py:class:bob.learn.em.GMMMachine using data

    Keyword Parameters:
      gmm_machine
        The :py:class:bob.learn.em.GMMMachine class
      data
        The data to be trained
    """

    #Initialization
    self.initialize(gmm_machine);

    #Do the Expectation-Maximization algorithm
    average_output_previous = 0
    average_output          = -numpy.inf;

    #eStep
    self.eStep(gmm_machine, data);

    if(self.converge_by_likelihood):
      average_output = self.compute_likelihood(gmm_machine);

    for i in range(self.max_iterations):

      #saves average output from last iteration
      average_output_previous = average_output;

      #mStep
      self.mStep(gmm_machine);

      #eStep
      self.eStep(gmm_machine, data);

      #Computes log likelihood if required
      if(self.converge_by_likelihood):
        average_output = self.compute_likelihood(gmm_machine);

      #Terminates if converged (and likelihood computation is set)
      if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
        break

  # copy the documentation from the base class
  __doc__ = _ML_GMMTrainer.__doc__
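With this wrapper gone, the EM loop it provided has to be supplied by the caller (or by whatever the new train module exports). A minimal sketch that mirrors the convergence logic of the deleted train() method, assuming the bound trainer keeps the initialize/eStep/mStep/compute_likelihood methods used above:

  def ml_train(trainer, machine, data, max_iterations=10, convergence_threshold=0.001):
      """Sketch of the deleted ML_GMMTrainer.train() convergence loop."""
      trainer.initialize(machine, data)
      trainer.eStep(machine, data)
      average_output = trainer.compute_likelihood(machine)
      for _ in range(max_iterations):
          average_output_previous = average_output
          trainer.mStep(machine)
          trainer.eStep(machine, data)
          average_output = trainer.compute_likelihood(machine)
          # stop once the relative change of the average likelihood is small enough
          if abs((average_output_previous - average_output) / average_output_previous) <= convergence_threshold:
              break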
bob/learn/em/__empca_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Wed Fev 04 13:35:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _EMPCATrainer
import numpy

# define the class
class EMPCATrainer(_EMPCATrainer):

  def __init__(self, convergence_threshold=0.001, max_iterations=10, compute_likelihood=True):
    """
    :py:class:`bob.learn.em.EMPCATrainer` constructor

    Keyword Parameters:
      convergence_threshold
        Convergence threshold
      max_iterations
        Number of maximum iterations
      compute_likelihood
    """

    _EMPCATrainer.__init__(self, convergence_threshold)
    self._max_iterations     = max_iterations
    self._compute_likelihood = compute_likelihood

  def train(self, linear_machine, data):
    """
    Train the :py:class:bob.learn.em.LinearMachine using data

    Keyword Parameters:
      linear_machine
        The :py:class:bob.learn.em.LinearMachine class
      data
        The data to be trained
    """

    #Initialization
    self.initialize(linear_machine, data);

    #Do the Expectation-Maximization algorithm
    average_output_previous = 0
    average_output          = -numpy.inf;

    #eStep
    self.eStep(linear_machine, data);

    if(self._compute_likelihood):
      average_output = self.compute_likelihood(linear_machine);

    for i in range(self._max_iterations):

      #saves average output from last iteration
      average_output_previous = average_output;

      #mStep
      self.mStep(linear_machine);

      #eStep
      self.eStep(linear_machine, data);

      #Computes log likelihood if required
      if(self._compute_likelihood):
        average_output = self.compute_likelihood(linear_machine);

      #Terminates if converged (and likelihood computation is set)
      if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
        break

  # copy the documentation from the base class
  __doc__ = _EMPCATrainer.__doc__
bob/learn/em/__init__.py
@@ -11,14 +11,8 @@ bob.extension.load_bob_library('bob.learn.em', __file__)
from ._library import *
from . import version
from .version import module as __version__

-from .__kmeans_trainer__ import *
-from .__ML_gmm_trainer__ import *
-from .__MAP_gmm_trainer__ import *
-from .__jfa_trainer__ import *
-from .__isv_trainer__ import *
-from .__ivector_trainer__ import *
-from .__plda_trainer__ import *
+from train import *

def ztnorm_same_value(vect_a, vect_b):
  """Computes the matrix of boolean D for the ZT-norm, which indicates where
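The seven per-trainer wrapper imports are dropped in favour of a single 'from train import *'; the wrapper files themselves are deleted elsewhere in this commit. Presumably user code keeps importing trainers from the package top level, for example (a sketch, not part of the commit; the class name follows the "_ML_GMMTrainer" → "ML_GMMTrainer" registration change shown above):

  import bob.learn.em

  trainer = bob.learn.em.ML_GMMTrainer(update_means=True, update_variances=False, update_weights=False)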
bob/learn/em/__isv_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Mon Fev 02 21:40:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _ISVTrainer
import numpy

# define the class
class ISVTrainer(_ISVTrainer):

  def __init__(self, max_iterations=10, relevance_factor=4.):
    """
    :py:class:`bob.learn.em.ISVTrainer` constructor

    Keyword Parameters:
      max_iterations
        Number of maximum iterations
    """
    _ISVTrainer.__init__(self, relevance_factor)
    self._max_iterations = max_iterations

  def train(self, isv_base, data):
    """
    Train the :py:class:`bob.learn.em.ISVBase` using data

    Keyword Parameters:
      jfa_base
        The `:py:class:bob.learn.em.ISVBase` class
      data
        The data to be trained
    """

    #Initialization
    self.initialize(isv_base, data);

    for i in range(self._max_iterations):
      #eStep
      self.eStep(isv_base, data);

      #mStep
      self.mStep(isv_base);

  # copy the documentation from the base class
  __doc__ = _ISVTrainer.__doc__
bob/learn/em/__ivector_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Tue Fev 03 13:20:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _IVectorTrainer
import numpy

# define the class
class IVectorTrainer(_IVectorTrainer):

  def __init__(self, max_iterations=10, update_sigma=False):
    """
    :py:class:`bob.learn.em.IVectorTrainer` constructor

    Keyword Parameters:
      max_iterations
        Number of maximum iterations
      update_sigma
    """
    _IVectorTrainer.__init__(self, update_sigma)
    self._max_iterations = max_iterations

  def train(self, ivector_machine, data):
    """
    Train the :py:class:`bob.learn.em.IVectorMachine` using data

    Keyword Parameters:
      ivector_machine
        The `:py:class:bob.learn.em.IVectorMachine` class
      data
        The data to be trained
    """

    #Initialization
    self.initialize(ivector_machine);

    for i in range(self._max_iterations):
      #eStep
      self.eStep(ivector_machine, data);

      #mStep
      self.mStep(ivector_machine);

  # copy the documentation from the base class
  __doc__ = _IVectorTrainer.__doc__
bob/learn/em/__jfa_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Sun Fev 01 21:10:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _JFATrainer
import numpy

# define the class
class JFATrainer(_JFATrainer):

  def __init__(self, max_iterations=10):
    """
    :py:class:`bob.learn.em.JFATrainer` constructor

    Keyword Parameters:
      max_iterations
        Number of maximum iterations
    """
    _JFATrainer.__init__(self)
    self._max_iterations = max_iterations

  def train_loop(self, jfa_base, data):
    """
    Train the :py:class:`bob.learn.em.JFABase` using data

    Keyword Parameters:
      jfa_base
        The `:py:class:bob.learn.em.JFABase` class
      data
        The data to be trained
    """
    #V Subspace
    for i in range(self._max_iterations):
      self.e_step1(jfa_base, data)
      self.m_step1(jfa_base, data)
    self.finalize1(jfa_base, data)

    #U subspace
    for i in range(self._max_iterations):
      self.e_step2(jfa_base, data)
      self.m_step2(jfa_base, data)
    self.finalize2(jfa_base, data)

    # d subspace
    for i in range(self._max_iterations):
      self.e_step3(jfa_base, data)
      self.m_step3(jfa_base, data)
    self.finalize3(jfa_base, data)

  def train(self, jfa_base, data):
    """
    Train the :py:class:`bob.learn.em.JFABase` using data

    Keyword Parameters:
      jfa_base
        The `:py:class:bob.learn.em.JFABase` class
      data
        The data to be trained
    """
    self.initialize(jfa_base, data)
    self.train_loop(jfa_base, data)

  # copy the documentation from the base class
  __doc__ = _JFATrainer.__doc__
bob/learn/em/__kmeans_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Mon Jan 19 11:35:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _KMeansTrainer
import numpy

# define the class
class KMeansTrainer(_KMeansTrainer):

  def __init__(self, initialization_method="RANDOM", convergence_threshold=0.001, max_iterations=10, converge_by_average_min_distance=True):
    """
    :py:class:`bob.learn.em.KMeansTrainer` constructor

    Keyword Parameters:
      initialization_method
        The initialization method to generate the initial means
      convergence_threshold
        Convergence threshold
      max_iterations
        Number of maximum iterations
      converge_by_average_min_distance
        Tells whether we compute the average min (square Euclidean) distance, as a convergence criteria, or not
    """

    _KMeansTrainer.__init__(self, initialization_method="RANDOM", )
    self._convergence_threshold            = convergence_threshold
    self._max_iterations                   = max_iterations
    self._converge_by_average_min_distance = converge_by_average_min_distance

  def train(self, kmeans_machine, data):
    """
    Train the :py:class:bob.learn.em.KMeansMachine using data

    Keyword Parameters:
      kmeans_machine
        The :py:class:bob.learn.em.KMeansMachine class
      data
        The data to be trained
    """

    #Initialization
    self.initialize(kmeans_machine, data);

    #Do the Expectation-Maximization algorithm
    average_output_previous = 0
    average_output          = -numpy.inf;

    #eStep
    self.eStep(kmeans_machine, data);

    if(self._converge_by_average_min_distance):
      average_output = self.compute_likelihood(kmeans_machine);

    for i in range(self._max_iterations):

      #saves average output from last iteration
      average_output_previous = average_output;

      #mStep
      self.mStep(kmeans_machine);

      #eStep
      self.eStep(kmeans_machine, data);

      #Computes log likelihood if required
      if(self._converge_by_average_min_distance):
        average_output = self.compute_likelihood(kmeans_machine);

      #Terminates if converged (and likelihood computation is set)
      if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
        break

  # copy the documentation from the base class
  __doc__ = _KMeansTrainer.__doc__
bob/learn/em/__plda_trainer__.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Mon Fev 02 21:40:10 2015 +0200
#
# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland

from ._library import _PLDATrainer
import numpy

# define the class
class PLDATrainer(_PLDATrainer):

  def __init__(self, max_iterations=10, use_sum_second_order=False):
    """
    :py:class:`bob.learn.em.PLDATrainer` constructor

    Keyword Parameters:
      max_iterations
        Number of maximum iterations
    """
    _PLDATrainer.__init__(self, use_sum_second_order)
    self._max_iterations = max_iterations

  def train(self, plda_base, data):
    """
    Train the :py:class:`bob.learn.em.PLDABase` using data

    Keyword Parameters:
      jfa_base
        The `:py:class:bob.learn.em.PLDABase` class
      data
        The data to be trained
    """
    #Initialization
    self.initialize(plda_base, data);

    for i in range(self._max_iterations):
      #eStep
      self.e_step(plda_base, data);

      #mStep
      self.m_step(plda_base, data);

    self.finalize(plda_base, data);