Commit acac1643 authored by Manuel Günther's avatar Manuel Günther

Reworked xbob.boosting:

- Rewrote bindings using Python C-API
- Improved documentation
- Implemented more tests
- Put machines in xbob.boosting.machine sub-module.
parent ce6b05b1
......@@ -72,7 +72,7 @@ To control the experimentation, you can choose:
For information and debugging purposes, it might be interesting to use:
* ``--verbose`` (can be used several times): Increases the verbosity level from 0 (error) over 1 (warning) and 2 (info) to 3 (debug). Verbosity level 2 (``-vv``) is recommended.
* ``number-of-elements``: Reduce the number of elements per class (digit) to the given value.
* ``--number-of-elements``: Reduce the number of elements per class (digit) to the given value.
Four different kinds of experiments can be performed:
......
......@@ -5,14 +5,29 @@
[buildout]
parts = scripts
eggs = xbob.boosting
xbob.extension
xbob.blitz
xbob.io
newest = false
verbose = true
;debug = true
; debug = true
extensions = xbob.buildout
develop = .
mr.developer
auto-checkout = *
develop = src/xbob.extension
src/xbob.blitz
src/xbob.io
.
; prefixes = /idiap/user/mguenther/Bob/release
[sources]
xbob.extension = git https://github.com/bioidiap/xbob.extension branch=prototype
xbob.blitz = git https://github.com/bioidiap/xbob.blitz
xbob.io = git https://github.com/bioidiap/xbob.io
[scripts]
recipe = xbob.buildout:scripts
......
......@@ -4,20 +4,21 @@
# Mon 13 Aug 2012 12:38:15 CEST
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
......@@ -56,7 +57,7 @@ dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
......@@ -68,10 +69,12 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'Bob Project Example'
project = u'Boosting extension for Bob'
import time
copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
distribution = pkg_resources.require('xbob.boosting')[0]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
......@@ -150,7 +153,7 @@ html_favicon = ''
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
......@@ -247,7 +250,7 @@ man_pages = [
]
# We want to remove all private (i.e. _. or __.__) members
# We want to remove all private (i.e. _. or __.__) members
# that are not in the list of accepted functions
accepted_private_functions = ['__call__']
......@@ -262,10 +265,10 @@ def member_function_test(app, what, name, obj, skip, options):
# test if the method is documented
if not hasattr(obj, '__doc__') or not obj.__doc__:
return True
# Skips selected members in auto-generated documentation. Unfortunately, old
# versions of Boost.Python will not generate a __self__ member for static
# methods and that screws-up Sphinx processing.
# methods and that screws-up Sphinx processing.
if sphinx.__version__ < "1.0":
# We have to remove objects that do not have a __self__ attribute set
import types
......@@ -275,11 +278,11 @@ def member_function_test(app, what, name, obj, skip, options):
return True
return False
# Default processing flags for sphinx
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'undoc-members', 'special-members', 'inherited-members', 'show-inheritance']
autodoc_default_flags = ['members', 'undoc-members', 'private-members', 'special-members', 'inherited-members', 'show-inheritance']
def setup(app):
app.connect('autodoc-skip-member', member_function_test)
.. vim: set fileencoding=utf-8 :
.. Manuel Guenther <Manuel.Guenther@idiap.ch>
.. Thu May 1 19:08:03 CEST 2014
..
.. Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
.. testsetup:: *
import os
import numpy
import xbob.boosting
import xbob.db.mnist
numpy.set_printoptions(precision=3, suppress=True)
===========================================
Example: Handwritten Digit Classification
===========================================
As an example for the classification task, we perform a classification of hand-written digits using the `MNIST <http://yann.lecun.com/exdb/mnist>`_ database.
There, images of single hand-written digits are stored, and a training and test set is provided, which we can access with our `xbob.db.mnist <http://pypi.python.org/pypi/xbob.db.mnist>`_ database interface.
In our experiments, we simply use the pixel gray values as features.
Since the gray values are discrete in the range :math:`[0, 255]`, we can employ both the stump decision classifiers and the look-up-table classifiers.
Nevertheless, other discrete features, like Local Binary Patterns (LBP), could be used as well.
Running the example script
--------------------------
The script ``./bin/boosting_example.py`` is provided to execute digit classification tasks.
This script has several command line parameters, which vary the behavior of the training and/or testing procedure.
All parameters have a long form (starting with ``--``) and a shortcut (starting with a single ``-``).
These parameters are (see also ``./bin/boosting_example.py --help``):
To control the type of training, you can select:
* ``--trainer-type``: Select the type of weak classifier. Possible values are ``stump`` and ``lut``
* ``--loss-type``: Select the loss function. Possible values are ``tan``, ``log`` and ``exp``. By default, a loss function suitable to the trainer type is selected.
* ``--number-of-boosting-rounds``: The number of weak classifiers to select.
* ``--multi-variate`` (only valid for LUT trainer): Perform multi-variate classification, or binary (one-to-one) classification.
* ``--feature-selection-style`` (only valid for multi-variate training): Choose whether the features for each output are selected ``independent``ly, or ``shared`` between the outputs.
To control the experimentation, you can choose:
* ``--digits``: The digits to classify. For multi-variate training, one classifier is trained for all given digits, while for uni-variate training all possible one-to-one classifiers are trained.
* ``--all``: Select all 10 digits.
* ``--classifier-file``: Save the trained classifier(s) into the given file and/or read the classifier(s) from this file.
* ``--force``: Overwrite the given classifier file if it already exists.
For information and debugging purposes, it might be interesting to use:
* ``--verbose`` (can be used several times): Increases the verbosity level from 0 (error) over 1 (warning) and 2 (info) to 3 (debug). Verbosity level 2 (``-vv``) is recommended.
* ``--number-of-elements``: Reduce the number of elements per class (digit) to the given value.
Four different kinds of experiments can be performed:
1. Uni-variate classification using the stump classifier, classifying digits 5 and 6::
$ ./bin/boosting_example.py -vv --trainer-type stump --digits 5 6
2. Uni-variate classification using the LUT classifier, classifying digits 5 and 6::
$ ./bin/boosting_example.py -vv --trainer-type lut --digits 5 6
3. Multi-variate classification using LUT classifier and shared features, classifying all 10 digits::
$ ./bin/boosting_example.py -vv --trainer-type lut --all-digits --multi-variate --feature-selection-style shared
4. Multi-variate classification using LUT classifier and independent features, classifying all 10 digits::
$ ./bin/boosting_example.py -vv --trainer-type lut --all-digits --multi-variate --feature-selection-style independent
.. note::
During the execution of the experiments, the warning message "L-BFGS returned warning '2': ABNORMAL_TERMINATION_IN_LNSRCH" might appear.
This warning message is normal and does not influence the results much.
.. note::
For experiment 1, the training terminates after 75 of 100 rounds since the computed weight for the weak classifier of that round is vanishing.
Hence, performing more boosting rounds will not change the strong classifier any more.
Each experiment should finish within a few minutes of execution time.
The results of the above experiments should be as follows (reported as the classification rate on the training set and on the test set):
+------------+----------+----------+
| Experiment | Training | Test |
+------------+----------+----------+
| 1 | 91.04 % | 92.05 % |
+------------+----------+----------+
| 2 | 100.0 % | 95.35 % |
+------------+----------+----------+
| 3 | 97.59 % | 83.47 % |
+------------+----------+----------+
| 4 | 99.04 % | 86.25 % |
+------------+----------+----------+
Of course, you can try out different combinations of digits for experiments 1 and 2.
One exemplary test case in detail
----------------------------------
Taking a closer look at the example script, several steps are performed.
The first step is generating the training examples from the ``xbob.db.mnist`` database interface.
Here, we describe the more complex way, i.e., the multi-variate case.
.. doctest::
>>> # open the database interface (will download the digits from the webpage)
>>> db = xbob.db.mnist.Database()
Downloading the mnist database from http://yann.lecun.com/exdb/mnist/ ...
>>> # get the training data for digits 0, 1
>>> training_samples, training_labels = db.data("train", labels = [0, 1])
>>> # limit the training samples (for test purposes only)
>>> training_samples = training_samples[:100]
>>> training_labels = training_labels[:100]
>>> # create the correct entries for the training targets from the classes; pre-fill with negative class
>>> training_targets = -numpy.ones((training_labels.shape[0], 2))
>>> # set positive class
>>> for i in [0,1]:
... training_targets[training_labels == i, i] = 1
>>> training_labels[:10]
array([0, 1, 1, 1, 1, 0, 1, 1, 0, 0], dtype=uint8)
>>> training_targets[:10]
array([[ 1., -1.],
[-1., 1.],
[-1., 1.],
[-1., 1.],
[-1., 1.],
[ 1., -1.],
[-1., 1.],
[-1., 1.],
[ 1., -1.],
[ 1., -1.]])
Now, we can train the classifier. Here, we use the multi-variate LUT trainer with logit loss:
.. doctest::
>>> weak_trainer = xbob.boosting.trainer.LUTTrainer(
... maximum_feature_value = 256,
... number_of_outputs = 2,
... selection_style = 'independent'
... )
>>> loss_function = xbob.boosting.loss.LogitLoss()
>>> strong_trainer = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
>>> # perform training for 10 rounds (i.e., select 10 weak machines)
>>> strong_classifier = strong_trainer.train(training_samples.astype(numpy.uint16), training_targets, 10)
Having the strong classifier, we can classify the test samples:
.. doctest::
>>> # get the test data for digits 0, 1
>>> test_samples, test_labels = db.data("test", labels = [0, 1])
>>> # create the correct entries for the test targets from the classes; pre-fill with negative class
>>> test_targets = -numpy.ones((test_labels.shape[0], 2))
>>> # set positive class
>>> for i in [0,1]:
... test_targets[test_labels == i, i] = 1
>>> # classify the test samples
>>> scores = numpy.zeros(test_targets.shape)
>>> classification = numpy.zeros(test_targets.shape)
>>> strong_classifier(test_samples.astype(numpy.uint16), scores, classification)
>>> # evaluate the results
>>> row_sum = numpy.sum(test_targets == classification, 1)
>>> # the example is correctly classified, when all test labels correspond to all target labels
>>> correctly_classified = numpy.sum(row_sum == 2)
>>> correctly_classified
2004
>>> classification.shape[0]
2115
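From these two numbers, the overall test set classification rate can be computed directly (a quick sanity check outside of the doctest, using the variables defined above):

.. code-block:: py

   # fraction of test samples for which all outputs match the target vector
   classification_rate = 100.0 * correctly_classified / classification.shape[0]
   # 100.0 * 2004 / 2115 is approximately 94.75 %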
.. vim: set fileencoding=utf-8 :
.. Manuel Guenther <Manuel.Guenther@idiap.ch>
.. Thu May 1 14:44:48 CEST 2014
..
.. Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
=============================
Boosting Strong Classifiers
=============================
Several tasks can be achieved by a boosted classifier:
1. A univariate classification task assigns each sample :math:`\vec x` one of two possible classes: :math:`\{+1, -1\}`.
In this implementation, class :math:`+1` is assigned when the (real-valued) outcome of the classifier is positive, or :math:`-1` otherwise.
2. A multivariate classification task assigns each sample :math:`\vec x` one of :math:`N` possible classes: :math:`\{C_1, C_2, \dots, C_N\}`.
In this implementation, an :math:`N`-dimensional output vector :math:`\vec y = [y_1, y_2, \dots, y_N]` is computed for each sample, and the class with the highest outcome is assigned: :math:`C_n` with :math:`n = \arg \max_n y_n`.
To train the multi-variate classifier, target values for each training sample are assigned a :math:`+1` for the correct class, and a :math:`-1` for all other classes.
3. A (multivariate) regression task tries to learn a function :math:`f(\vec x) = \vec y` based on several training examples.
To achieve this goal, a strong classifier :math:`S` is built from a weighted list of :math:`I` weak classifiers :math:`W_i`:
.. math::
S(\vec x) = \sum_{i=1}^I w_i \cdot W_i(\vec x)
.. note::
For the univariate case, both :math:`w_i` and the weak classifier result :math:`W_i` are floating point values.
In the multivariate case, :math:`w_i` is a vector of weights -- one for each output dimension -- and the weak classifier :math:`W_i` returns a vector of floating point values as well.
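The following minimal NumPy sketch illustrates this weighted combination; the numbers are purely illustrative placeholders and not the :py:class:`xbob.boosting.machine.BoostedMachine` API:

.. code-block:: py

   import numpy

   # univariate case: scalar weights w_i and scalar weak responses W_i(x)
   weights = numpy.array([0.8, 0.3, 0.5])
   responses = numpy.array([+1., -1., +1.])
   strong_score = numpy.sum(weights * responses)   # S(x); its sign is the predicted class

   # multivariate case: one weight per weak classifier and per output dimension
   weights = numpy.array([[0.8, 0.2], [0.3, 0.7], [0.5, 0.4]])
   responses = numpy.array([[+1., -1.], [-1., +1.], [+1., +1.]])
   strong_scores = numpy.sum(weights * responses, axis=0)   # one score per output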
Weak Classifiers
----------------
Currently, two types of weak classifiers are implemented in this boosting framework.
Stump classifier
................
The first classifier, which can only handle univariate classification tasks, is the :py:class:`xbob.boosting.machine.StumpMachine`.
For a given input vector :math:`\vec x`, the classifier bases its decision on **a single element** :math:`x_m` of the input vector:
.. math::
W(\vec x) = \left\{ \begin{array}{r@{\text{ if }}l} +1 & (x_m - \theta) \cdot \phi \geq 0 \\ -1 & (x_m - \theta) \cdot \phi < 0 \end{array}\right.
Threshold :math:`\theta`, polarity :math:`\phi` and index :math:`m` are parameters of the classifier, which are trained using the :py:class:`xbob.boosting.trainer.StumpTrainer`.
For a given training set :math:`\{\vec x_p \mid p=1,\dots,P\}` and according target values :math:`\{t_p \mid p=1,\dots,P\}`, the threshold :math:`\theta_m` is computed for each input index :math:`m`, such that the lowest classification error is obtained, and the :math:`m` with the lowest training classification error is taken.
The polarity :math:`\phi` is set to :math:`-1`, if values lower than the threshold should be considered as positive examples, or to :math:`+1` otherwise.
To compute the classification error for a given :math:`\theta_m`, the gradient of a loss function is taken into consideration.
For the stump trainer, usually the :py:class:`xbob.boosting.loss.ExponentialLoss` is considered as the loss function.
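The decision rule itself can be sketched in a few lines of Python (``m``, ``theta`` and ``phi`` are assumed, already trained parameters; this is not the :py:class:`xbob.boosting.machine.StumpMachine` implementation itself):

.. code-block:: py

   def stump_decision(x, m, theta, phi):
       # W(x) = +1 if (x[m] - theta) * phi >= 0, and -1 otherwise
       return 1. if (x[m] - theta) * phi >= 0 else -1.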
Look-Up-Table classifier
........................
The second classifier, which can handle univariate and multivariate classification and regression tasks, is the :py:class:`xbob.boosting.machine.LUTMachine`.
This classifier is designed to handle input vectors with **discrete** values only.
Again, the decision of the weak classifier is based on a single element of the input vector :math:`\vec x`.
In the univariate case, for each of the possible discrete values of :math:`x_m`, a decision :math:`\{+1, -1\}` is selected:
.. math::
W(\vec x) = LUT[x_m]
This look-up-table :math:`LUT` and the feature index :math:`m` are trained by the :py:class:`xbob.boosting.trainer.LUTTrainer`.
In the multivariate case, each output :math:`W^o` is handled independently, i.e., a separate look-up-table :math:`LUT^o` and a separate feature index :math:`m^o` are assigned for each output dimension :math:`o`:
.. math::
W^o(\vec x) = LUT^o[x_{m^o}]
.. note::
As a variant, the feature index :math:`m^o` can be selected to be ``shared`` for all outputs, see :py:class:`xbob.boosting.trainer.LUTTrainer` for details.
A weak look-up-table classifier is learned using the :py:class:`xbob.boosting.trainer.LUTTrainer`.
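A corresponding sketch of the look-up in plain NumPy (``lut`` and ``m`` are hypothetical, already trained values; the actual machine is :py:class:`xbob.boosting.machine.LUTMachine`):

.. code-block:: py

   import numpy

   # hypothetical look-up-table for a feature with 256 possible discrete values
   lut = numpy.where(numpy.arange(256) < 128, -1., 1.)
   m = 42                                # hypothetical trained feature index

   def lut_decision(x):
       # W(x) = LUT[x_m]; x must contain discrete (integer) feature values
       return lut[x[m]]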
Strong classifier
-----------------
The strong classifier, which is of type :py:class:`xbob.boosting.machine.BoostedMachine`, is a weighted combination of weak classifiers, which are usually of the same type.
It can be trained with the :py:class:`xbob.boosting.trainer.Boosting` trainer, which takes a list of training samples, and a list of univariate or multivariate target vectors.
In each round, the trainer computes the following (only the univariate case is described here; the multivariate case is similar -- simply replace scores by score vectors):
1. The classification results (the so-called *scores*) for the current strong classifier:
.. math::
s_p = S(\vec x_p)
2. The derivative :math:`L'` of the loss function, based on the current scores and the target values:
.. math::
\nabla_p = L'(t_p, s_p)
3. This loss gradient is used to select a new weak machine :math:`W_i` using a weak trainer (see above).
.. code-block:: py
W_i = trainer.train([\vec x_p], [\nabla_p])
4. The scores of the *weak machine* are computed:
.. math::
r_p = W_i(\vec x_p)
5. The weight for the new machine is optimized using ``scipy.optimize.fmin_l_bfgs_b``.
This call will use both the loss :math:`L` and its derivative :math:`L'` to compute the optimal weight for the new classifier:
.. code-block:: py
w_i = scipy.optimize.fmin_l_bfgs_b(...)
6. The new weak machine is added to the strong classifier.
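The following self-contained toy loop re-implements these six steps for the univariate case, using plain NumPy stumps and the exponential loss on synthetic data; it only illustrates the structure of the algorithm and is not the implementation used by :py:class:`xbob.boosting.trainer.Boosting`:

.. code-block:: py

   import numpy
   import scipy.optimize

   # synthetic univariate task: positive samples tend to have larger feature values
   numpy.random.seed(0)
   samples = numpy.vstack([numpy.random.randn(50, 1) + 1., numpy.random.randn(50, 1) - 1.])
   targets = numpy.hstack([numpy.ones(50), -numpy.ones(50)])

   def loss(t, s): return numpy.exp(-t * s)                  # exponential loss L(t, s)
   def loss_gradient(t, s): return -t * numpy.exp(-t * s)    # its derivative L'(t, s)

   def train_stump(x, gradient):
       # select threshold and polarity whose decisions correlate best with the negative gradient
       best = None
       for theta in numpy.unique(x[:, 0]):
           for phi in (+1., -1.):
               decision = numpy.where((x[:, 0] - theta) * phi >= 0, 1., -1.)
               value = numpy.dot(gradient, decision)
               if best is None or value < best[0]:
                   best = (value, theta, phi)
       _, theta, phi = best
       return lambda x: numpy.where((x[:, 0] - theta) * phi >= 0, 1., -1.)

   strong = []                                    # list of (weight, weak machine) pairs
   scores = numpy.zeros_like(targets)             # 1. scores of the (initially empty) strong classifier
   for _ in range(10):
       gradient = loss_gradient(targets, scores)  # 2. loss gradient for the current scores
       weak = train_stump(samples, gradient)      # 3. train a new weak machine on the gradient
       results = weak(samples)                    # 4. scores of the new weak machine
       # 5. optimize the weight of the new machine with L-BFGS
       weight = scipy.optimize.fmin_l_bfgs_b(
           lambda w: loss(targets, scores + w * results).sum(),
           x0=numpy.zeros(1), approx_grad=True)[0][0]
       strong.append((weight, weak))              # 6. add the weighted weak machine
       scores = scores + weight * results

   print(numpy.mean(numpy.sign(scores) == targets))   # training classification rate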
Loss functions
--------------
As shown above, the loss functions define how well the currently predicted scores :math:`s_p` fit the target values :math:`t_p`.
Depending on the desired task, and on the type of classifier, different loss functions might be used:
1. The :py:class:`xbob.boosting.loss.ExponentialLoss` can be used for the binary classification task, i.e., when target values are in :math:`\{+1, -1\}`.
2. The :py:class:`xbob.boosting.loss.LogitLoss` can be used for the multi-variate classification task, i.e., when target vectors have entries from :math:`\{+1, -1\}`.
3. The :py:class:`xbob.boosting.loss.JesorskyLoss` can be used for the particular multi-variate regression task of learning the locations of facial features.
Other loss functions, e.g., using the Euclidean distance for regression, should be easily implementable.
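For instance, the logit loss and its derivative with respect to the score -- the two quantities the weight optimization above relies on -- can be written in plain NumPy as follows (this shows only the mathematical form, not the implementation of :py:class:`xbob.boosting.loss.LogitLoss`):

.. code-block:: py

   import numpy

   def logit_loss(targets, scores):
       # L(t, s) = log(1 + exp(-t * s))
       return numpy.log(1. + numpy.exp(-targets * scores))

   def logit_loss_gradient(targets, scores):
       # dL/ds = -t / (1 + exp(t * s))
       return -targets / (1. + numpy.exp(targets * scores))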
.. vim: set fileencoding=utf-8 :
.. Andre Anjos <andre.anjos@idiap.ch>
.. Mon 13 Aug 2012 12:36:40 CEST
.. Mon 25 Nov 09:43:43 2013 CET
..
.. Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
=====================
Bob Project Example
=====================
=============================
Bob's extension to boosting
=============================
.. todolist::
The package implements a generalized boosting framework, which incorporates different boosting approaches.
The implementation is a mix of pure Python code and C++ implementations of identified bottlenecks, including their Python bindings.
The boosting algorithms implemented in this package are:
1) Gradient Boost [Fri00]_ (a generalized version of AdaBoost [FS99]_) for univariate cases, using stump decision classifiers, as in [VJ04]_.
2) TaylorBoost [SMV11]_ for univariate and multivariate cases, using look-up-table based classifiers [Ata12]_.
.. [Fri00] *Jerome H. Friedman*. **Greedy function approximation: a gradient boosting machine**. Annals of Statistics, 29:1189--1232, 2000.
.. [FS99] *Yoav Freund and Robert E. Schapire*. **A short introduction to boosting**. Journal of Japanese Society for Artificial Intelligence, 14(5):771-780, September, 1999.
.. [VJ04] *Paul Viola and Michael J. Jones*. **Robust real-time face detection**. International Journal of Computer Vision (IJCV), 57(2): 137--154, 2004.
.. [SMV11] *Mohammad J. Saberian, Hamed Masnadi-Shirazi, Nuno Vasconcelos*. **TaylorBoost: First and second-order boosting algorithms with explicit margin control**. IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2929--2934, 2011.
.. [Ata12] *Cosmin Atanasoaei*. **Multivariate boosting with look-up tables for face processing**. PhD Thesis, École Polytechnique Fédérale de Lausanne (EPFL), Switzerland, 2012.
Documentation
-------------
.. toctree::
:maxdepth: 2
guide
example
py_api
Indices and tables
------------------
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
Script Documentation
--------------------
.. automodule:: example.script.version
============
Python API
============
This section includes information for using the Python API of ``xbob.boosting``.
Machines
........
The :py:mod:`xbob.boosting.machine` sub-module contains classifiers that can predict classes for given input values.
The strong classifier is the :py:class:`xbob.boosting.machine.BoostedMachine`, which is a weighted combination of :py:class:`xbob.boosting.machine.WeakMachine` instances.
Weak machines might be a :py:class:`xbob.boosting.machine.LUTMachine` or a :py:class:`xbob.boosting.machine.StumpMachine`.
Theoretically, the strong classifier can consist of different types of weak classifiers, but usually all weak classifiers have the same type.
.. automodule:: xbob.boosting.machine
Trainers
........
The :py:mod:`xbob.boosting.trainer` sub-module contains trainers that train:
* :py:class:`xbob.boosting.trainer.Boosting` : a strong machine of type :py:class:`xbob.boosting.machine.BoostedMachine`
* :py:class:`xbob.boosting.trainer.LUTTrainer` : a weak machine of type :py:class:`xbob.boosting.machine.LUTMachine`
* :py:class:`xbob.boosting.trainer.StumpTrainer` : a weak machine of type :py:class:`xbob.boosting.machine.StumpMachine`
.. automodule:: xbob.boosting.trainer
Loss functions
..............
Loss functions are used to compute the weights for the weak machines using the ``scipy.optimize.fmin_l_bfgs_b`` function.
The loss function base class :py:class:`xbob.boosting.loss.LossFunction` is called by that function, while derived classes implement the actual loss for a single sample.
.. note::
Loss functions are designed to be used in combination with a specific weak trainer in specific cases.
Not all combinations of loss functions and weak trainers make sense.
Here is a list of useful combinations:
1. :py:class:`xbob.boosting.loss.ExponentialLoss` with :py:class:`xbob.boosting.trainer.StumpTrainer` (uni-variate classification only)
2. :py:class:`xbob.boosting.loss.LogitLoss` with :py:class:`xbob.boosting.trainer.StumpTrainer` or :py:class:`xbob.boosting.trainer.LUTTrainer` (uni-variate or multi-variate classification)
3. :py:class:`xbob.boosting.loss.TangentialLoss` with :py:class:`xbob.boosting.trainer.StumpTrainer` or :py:class:`xbob.boosting.trainer.LUTTrainer` (uni-variate or multi-variate classification)
4. :py:class:`xbob.boosting.loss.JesorskyLoss` with :py:class:`xbob.boosting.trainer.LUTTrainer` (multi-variate regression only)
.. automodule:: xbob.boosting.loss
......@@ -20,8 +20,15 @@
# administrative interventions.
from setuptools import setup, find_packages, dist
dist.Distribution(dict(setup_requires='xbob.extension'))
from xbob.extension import Extension, build_ext
dist.Distribution(dict(setup_requires=['xbob.blitz', 'xbob.io', 'xbob.extension']))
from xbob.blitz.extension import Extension
import xbob.io
import os
include_dirs = [
xbob.blitz.get_include(),
xbob.io.get_include(),
]
# The only thing we do in this file is to call the setup() function with all
# parameters that define our package.
......@@ -30,7 +37,7 @@ setup(
# This is the basic information about your project. Modify all this
# information before releasing code publicly.
name='xbob.boosting',
version='1.0.1a0',
version='1.1.0a0',
description='Boosting framework for Bob',
url='https://gitlab.idiap.ch/biometric/xbob-boosting',
......@@ -46,10 +53,6 @@ setup(
packages=find_packages(),
include_package_data=True,
setup_requires=[
'xbob.extension',
],
# This line defines which packages should be installed when you "install"
# this package. All packages that are mentioned here, but are not installed
# on the current system will be installed locally and only visible to the
......@@ -57,31 +60,31 @@ setup(
# privileges when using buildout.
install_requires=[
'setuptools',
'bob', # base signal proc./machine learning library
'xbob.extension',
'xbob.blitz',
'xbob.io',
'xbob.db.mnist' # for testing and the example
],
# Set up the C++ compiler to compile the C++ source code of this package
cmdclass={
'build_ext': build_ext,
},
ext_modules = [
Extension(
'xbob.boosting._boosting',
'xbob.boosting._library',
[
"xbob/boosting/cpp/LossFunction.cpp",
"xbob/boosting/cpp/JesorskyLoss.cpp",
"xbob/boosting/cpp/WeakMachine.cpp",
"xbob/boosting/cpp/StumpMachine.cpp",
"xbob/boosting/cpp/LUTMachine.cpp",
"xbob/boosting/cpp/BoostedMachine.cpp",
"xbob/boosting/cpp/LUTTrainer.cpp",
"xbob/boosting/cpp/LossFunction.cpp",
"xbob/boosting/cpp/JesorskyLoss.cpp",
"xbob/boosting/cpp/bindings.cpp",
],
pkgconfig = [
'bob-io',
"xbob/boosting/cpp/Bindings.cpp",
],
)
include_dirs = include_dirs,
packages = ['bob-io'],
),
],
# Declare that the package is in the namespace xbob
......
# import the C++ stuff
from ._boosting import StumpMachine, LUTMachine, BoostedMachine
#from ._boosting_old import StumpMachine, LUTMachine, BoostedMachine
from . import trainer
from . import loss
from . import machine
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
#ifdef NO_IMPORT_ARRAY
#undef NO_IMPORT_ARRAY
#endif // NO_IMPORT_ARRAY
#include "Bindings.h"