Commit 36851b4d authored by Manuel Günther's avatar Manuel Günther
Browse files

Updated tests; added version and .travis.yml.

parent 1d014bcf
# Travis CI build configuration for bob.learn.boosting.
# NOTE(review): indentation appears flattened by the diff viewer; the
# original file nests include/env under "matrix" — confirm when re-indenting.
language: python
matrix:
include:
- python: 2.6
- python: 2.7
env:
# "secure" entries are encrypted environment variables produced with
# `travis encrypt` — presumably credentials used by the after_success
# upload steps; verify against the repository settings.
- secure: lzPxGD45F6DRm108SBxkcsnM+zVH7p59/s34WVc6ZVlRI792xajoTJBC7pE087W01HPiofkVigqjCbsZvgDI9JggPgtOhE9Ugifzpm1vXRTZOlBXDx3fTsH/FxcHfWYRx8M3rnONgdNoyeBvw8mz+TKm6zCtNdZ+0IZEXSIDvhU=
- secure: ZgUPtwmsOIGnb4aevKHxm2YqTRsKKt+2MAcsgqhG8ClD4OOEUV7nyo2tVZt3RcoURjZGoCaLfWYI4MkzfwD/m1GjA1BcEi5DeLUEYvEIv3N69+eTldZBHCONL3heLbrmNHBLP0tyxHV9eSd2B1qsknn4ndyGXJm6Llu9J8Frv8E=
- python: 3.2
env:
# NUMPYSPEC holds a pip version *specifier*: the shell assignment is
# NUMPYSPEC="==1.8.0" (first '=' belongs to the assignment), later
# expanded as "numpy$NUMPYSPEC" in before_install.
- NUMPYSPEC===1.8.0
- python: 3.3
env:
- NUMPYSPEC===1.8.0
# System dependencies come from the biometrics/bob PPA; when a numpy
# version is pinned (Python 3 builds), also install BLAS/LAPACK and
# pull numpy/sphinx/nose as wheels.
before_install:
- sudo add-apt-repository -y ppa:biometrics/bob
- sudo apt-get update -qq
- sudo apt-get install -qq --force-yes libboost-all-dev libblitz1-dev libhdf5-serial-dev
- if [ -n "${NUMPYSPEC}" ]; then sudo apt-get install -qq libatlas-dev libatlas-base-dev liblapack-dev gfortran; fi
- if [ -n "${NUMPYSPEC}" ]; then pip install --upgrade pip setuptools; fi
- if [ -n "${NUMPYSPEC}" ]; then pip install --find-links http://wheels.astropy.org/ --find-links http://wheels2.astropy.org/ --use-wheel numpy$NUMPYSPEC sphinx nose; fi
- pip install cpp-coveralls
# Build with zc.buildout; CFLAGS=-coverage instruments the compiled
# extension so cpp-coveralls can report native coverage afterwards.
install:
- python bootstrap.py
- CFLAGS=-coverage ./bin/buildout
# Run a smoke test (get_config), the nose test suite under coverage,
# then build the documentation, executing its doctests.
script:
- ./bin/python -c 'from bob.learn.boosting import get_config; print(get_config())'
- ./bin/coverage run --source=bob.learn.boosting ./bin/nosetests -sv
- ./bin/sphinx-build -b doctest doc sphinx
- ./bin/sphinx-build -b html doc sphinx
# Upload C/C++ coverage and the built documentation on success only.
after_success:
- coveralls --build-root=`pwd` --exclude=src
- ./src/bob.extension/scripts/upload-sphinx.sh
from ._library import BoostedMachine
import numpy
import scipy.optimize
import logging
......
from ._library import StumpMachine
import numpy
class StumpTrainer():
......
......@@ -9,6 +9,28 @@ from .loss import *
from .trainer import *
from .machine import *
from ._library import weighted_histogram
def get_config():
  """Returns a string containing the configuration information.

  The string lists this package (name, version, C/C++ API version and
  install location) followed by its C/C++ externals and its Python
  dependencies, as reported by ``pkg_resources``.
  """
  import pkg_resources
  # NOTE(review): the original referenced ``version.api`` below without
  # ever importing ``version``, which raises NameError when called.
  # Import the generated version module explicitly (assumes it defines
  # ``api`` and ``externals`` — confirm against the generated version.py).
  from . import version
  from .version import externals

  packages = pkg_resources.require(__name__)
  this = packages[0]     # this very package
  deps = packages[1:]    # its (transitive) Python requirements

  retval = "%s: %s [api=0x%04x] (%s)\n" % (this.key, this.version,
      version.api, this.location)
  retval += " - c/c++ dependencies:\n"
  for k in sorted(externals): retval += " - %s: %s\n" % (k, externals[k])
  retval += " - python dependencies:\n"
  for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location)

  return retval.strip()
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -14,22 +14,22 @@ import numpy
import argparse
import os
import bob
import xbob.db.mnist
import xbob.boosting
import bob.io.base
import bob.learn.boosting
import bob.learn.boosting.utils
import logging
logger = logging.getLogger('bob')
TRAINER = {
'stump' : xbob.boosting.trainer.StumpTrainer,
'lut' : xbob.boosting.trainer.LUTTrainer,
'stump' : bob.learn.boosting.StumpTrainer,
'lut' : bob.learn.boosting.LUTTrainer,
}
LOSS = {
'exp' : xbob.boosting.loss.ExponentialLoss,
'log' : xbob.boosting.loss.LogitLoss,
'tan' : xbob.boosting.loss.TangentialLoss,
'exp' : bob.learn.boosting.ExponentialLoss,
'log' : bob.learn.boosting.LogitLoss,
'tan' : bob.learn.boosting.TangentialLoss,
}
def command_line_arguments(command_line_options):
......@@ -131,7 +131,7 @@ def main(command_line_options = None):
# open connection to the MNIST database
if not os.path.isdir(args.database_directory):
os.makedirs(args.database_directory)
db = xbob.db.mnist.Database(args.database_directory)
db = bob.learn.boosting.utils.MNIST()
# perform training, if desired
if args.force and os.path.exists(args.classifier_file):
......@@ -143,9 +143,9 @@ def main(command_line_options = None):
# get weak trainer according to command line options
if args.trainer_type == 'stump':
weak_trainer = xbob.boosting.trainer.StumpTrainer()
weak_trainer = bob.learn.boosting.StumpTrainer()
elif args.trainer_type == 'lut':
weak_trainer = xbob.boosting.trainer.LUTTrainer(
weak_trainer = bob.learn.boosting.LUTTrainer(
256,
list(training_data.values())[0][1].shape[1] if args.multi_variate else 1,
args.feature_selection_style
......@@ -154,7 +154,7 @@ def main(command_line_options = None):
loss_function = LOSS[args.loss_type]()
# create strong trainer
trainer = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
trainer = bob.learn.boosting.Boosting(weak_trainer, loss_function)
strong_classifiers = {}
for key in sorted(training_data.keys()):
......@@ -170,7 +170,7 @@ def main(command_line_options = None):
# write strong classifier to file
if args.classifier_file is not None:
hdf5 = bob.io.HDF5File(args.classifier_file, 'a')
hdf5 = bob.io.base.HDF5File(args.classifier_file, 'a')
hdf5.create_group(key)
hdf5.cd(key)
strong_classifier.save(hdf5)
......@@ -188,10 +188,10 @@ def main(command_line_options = None):
else:
# read strong classifier from file
strong_classifiers = {}
hdf5 = bob.io.HDF5File(args.classifier_file, 'r')
hdf5 = bob.io.base.HDF5File(args.classifier_file, 'r')
for key in hdf5.sub_groups(relative=True, recursive=False):
hdf5.cd(key)
strong_classifiers[key] = xbob.boosting.BoostedMachine(hdf5)
strong_classifiers[key] = bob.learn.boosting.BoostedMachine(hdf5)
hdf5.cd("..")
logger.info("Reading test data")
......
/**
 * @author Manuel Guenther <manuel.guenther@idiap.ch>
 * @date Wed Aug 27 19:24:51 CEST 2014
 *
 * @brief General directives for all modules in bob.learn.boosting
 */

#ifndef BOB_LEARN_BOOSTING_CONFIG_H
#define BOB_LEARN_BOOSTING_CONFIG_H

/* Macros that define versions and important names */
/* C/C++ API version of this package (major in the high byte, minor in
 * the low byte); exposed to Python and printed by get_config(). */
#define BOB_LEARN_BOOSTING_API_VERSION 0x0200

#endif /* BOB_LEARN_BOOSTING_CONFIG_H */
......@@ -2,6 +2,7 @@ from . import LossFunction # Just to get the documentation for it
from .ExponentialLoss import ExponentialLoss
from .LogitLoss import LogitLoss
from .TangentialLoss import TangentialLoss
from ._library import JesorskyLoss
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -3,6 +3,64 @@
#endif // NO_IMPORT_ARRAY
#include "main.h"
#include <bob.learn.boosting/Functions.h>
// Python-visible documentation for the weighted_histogram() binding
// defined below; name() and doc() feed the module's method table.
auto weighted_histogram_doc = bob::extension::FunctionDoc(
  "weighted_histogram",
  "Computes a weighted histogram from the given features."
)
.add_prototype("features, weights, histogram")
.add_parameter("features", "array_like <1D, uint16>", "The vector of features to compute a histogram for")
.add_parameter("weights", "array_like <1D, float>", "The vector of weights; must be of the same size as the features")
.add_parameter("histogram", "array_like <1D, float>", "The histogram that will be filled")
;
// Python binding: accumulates the given per-sample weights into the
// (pre-allocated) histogram, indexed by the uint16 feature values.
// Returns None; the result is written into ``histogram`` in place.
//
// FIX(review): a METH_VARARGS|METH_KEYWORDS handler is invoked as
// (self, args, kwargs); the original signature only had (args, kwargs),
// so ``args`` received the module object. Add the (unused) self slot.
PyObject* weighted_histogram(PyObject*, PyObject* args, PyObject* kwargs){
  char* kwlist[] = {c("features"), c("weights"), c("histogram"), NULL};

  PyBlitzArrayObject* features,* weights,* histogram;
  if (!PyArg_ParseTupleAndKeywords(
        args, kwargs,
        "O&O&O&", kwlist, &PyBlitzArray_Converter, &features, &PyBlitzArray_Converter, &weights, &PyBlitzArray_OutputConverter, &histogram
  )){
    return NULL;
  }
  // release the converter-acquired references on every exit path
  auto _1 = make_safe(features), _2 = make_safe(weights), _3 = make_safe(histogram);

  // input checks
  if (features->type_num != NPY_UINT16 || features->ndim != 1){
    PyErr_Format(PyExc_RuntimeError, "weighted_histogram: features parameter must be 1D of numpy.uint16");
    return NULL;
  }
  // FIX(review): the original tested NPY_FLOAT16, contradicting both the
  // error message (numpy.float64) and the blitz::Array<double,1> casts
  // below; test for NPY_FLOAT64.
  if (weights->type_num != NPY_FLOAT64 || weights->ndim != 1){
    PyErr_Format(PyExc_RuntimeError, "weighted_histogram: weights parameter must be 1D of numpy.float64");
    return NULL;
  }
  if (histogram->type_num != NPY_FLOAT64 || histogram->ndim != 1){
    PyErr_Format(PyExc_RuntimeError, "weighted_histogram: histogram parameter must be 1D of numpy.float64");
    return NULL;
  }

  bob::learn::boosting::weighted_histogram(
    *PyBlitzArrayCxx_AsBlitz<uint16_t,1>(features),
    *PyBlitzArrayCxx_AsBlitz<double,1>(weights),
    *PyBlitzArrayCxx_AsBlitz<double,1>(histogram)
  );

  Py_RETURN_NONE;
}

// Method table of the _library module.
static PyMethodDef BoostingMethods[] = {
  {
    weighted_histogram_doc.name(),
    // cast the 3-argument keyword handler to the generic slot type, as
    // required for METH_VARARGS|METH_KEYWORDS entries
    (PyCFunction)weighted_histogram,
    METH_VARARGS | METH_KEYWORDS,
    weighted_histogram_doc.doc()
  },
  {NULL}  // sentinel
};
static const char* const module_docstr = "C++ implementations for several classes and functions in the bob.boosting module";
......@@ -12,6 +70,7 @@ static PyModuleDef module_definition = {
BOB_EXT_MODULE_NAME,
module_docstr,
-1,
// BoostingMethods,
0,
};
#endif
......@@ -24,7 +83,7 @@ create_module(void)
# if PY_VERSION_HEX >= 0x03000000
PyObject* module = PyModule_Create(&module_definition);
# else
PyObject* module = Py_InitModule3(BOB_EXT_MODULE_NAME, NULL, module_docstr);
PyObject* module = Py_InitModule3(BOB_EXT_MODULE_NAME, BoostingMethods, module_docstr);
# endif
if (!module) return NULL;
......
import unittest
import xbob.boosting
import bob.learn.boosting
import numpy
import bob
import xbob.db.mnist
import bob.learn.boosting.utils
class TestBoosting(unittest.TestCase):
"""Class to test the LUT trainer """
......@@ -11,7 +11,7 @@ class TestBoosting(unittest.TestCase):
@classmethod
def setUpClass(self):
# create a single copy of the MNIST database to avoid downloading the packages several times
self.database = xbob.db.mnist.Database()
self.database = bob.learn.boosting.utils.MNIST()
@classmethod
def tearDownClass(self):
......@@ -45,9 +45,9 @@ class TestBoosting(unittest.TestCase):
aligned = self._align_uni(targets)
# for stump trainers, the exponential loss function is preferred
loss_function = xbob.boosting.loss.ExponentialLoss()
weak_trainer = xbob.boosting.trainer.StumpTrainer()
booster = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
loss_function = bob.learn.boosting.ExponentialLoss()
weak_trainer = bob.learn.boosting.StumpTrainer()
booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)
# perform boosting
machine = booster.train(inputs.astype(numpy.float64), aligned, number_of_rounds=1)
......@@ -58,7 +58,7 @@ class TestBoosting(unittest.TestCase):
self.assertEqual(len(machine.weak_machines), 1)
self.assertEqual(machine.indices, [483])
weak = machine.weak_machines[0]
self.assertTrue(isinstance(weak, xbob.boosting.machine.StumpMachine))
self.assertTrue(isinstance(weak, bob.learn.boosting.StumpMachine))
self.assertEqual(weak.threshold, 15.5)
self.assertEqual(weak.polarity, 1.)
......@@ -81,9 +81,9 @@ class TestBoosting(unittest.TestCase):
aligned = self._align_uni(targets)
# for stump trainers, the logit loss function is preferred
loss_function = xbob.boosting.loss.LogitLoss()
weak_trainer = xbob.boosting.trainer.LUTTrainer(256)
booster = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
loss_function = bob.learn.boosting.LogitLoss()
weak_trainer = bob.learn.boosting.LUTTrainer(256)
booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)
# perform boosting
weight = 15.46452387
......@@ -93,7 +93,7 @@ class TestBoosting(unittest.TestCase):
self.assertEqual(len(machine.weak_machines), 1)
self.assertEqual(machine.indices, [379])
weak = machine.weak_machines[0]
self.assertTrue(isinstance(weak, xbob.boosting.machine.LUTMachine))
self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
self.assertEqual(weak.lut.shape, (256,1))
# check first training image
......@@ -116,9 +116,9 @@ class TestBoosting(unittest.TestCase):
aligned = self._align_multi(targets, digits)
# for stump trainers, the logit loss function is preferred
loss_function = xbob.boosting.loss.LogitLoss()
weak_trainer = xbob.boosting.trainer.LUTTrainer(256, len(digits), "shared")
booster = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
loss_function = bob.learn.boosting.LogitLoss()
weak_trainer = bob.learn.boosting.LUTTrainer(256, len(digits), "shared")
booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)
# perform boosting
weights = numpy.array([2.5123104, 2.19725677, 2.34455412, 1.94584326])
......@@ -128,7 +128,7 @@ class TestBoosting(unittest.TestCase):
self.assertEqual(len(machine.weak_machines), 1)
self.assertEqual(machine.indices, [437])
weak = machine.weak_machines[0]
self.assertTrue(isinstance(weak, xbob.boosting.machine.LUTMachine))
self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
self.assertEqual(weak.lut.shape, (256,4))
# check first training image
......@@ -152,9 +152,9 @@ class TestBoosting(unittest.TestCase):
aligned = self._align_multi(targets, digits)
# for stump trainers, the logit loss function is preferred
loss_function = xbob.boosting.loss.LogitLoss()
weak_trainer = xbob.boosting.trainer.LUTTrainer(256, len(digits), "independent")
booster = xbob.boosting.trainer.Boosting(weak_trainer, loss_function)
loss_function = bob.learn.boosting.LogitLoss()
weak_trainer = bob.learn.boosting.LUTTrainer(256, len(digits), "independent")
booster = bob.learn.boosting.Boosting(weak_trainer, loss_function)
# perform boosting
weights = numpy.array([2.94443872, 2.70805517, 2.34454354, 2.94443872])
......@@ -164,7 +164,7 @@ class TestBoosting(unittest.TestCase):
self.assertEqual(len(machine.weak_machines), 1)
self.assertTrue(all(machine.indices == [215, 236, 264, 349]))
weak = machine.weak_machines[0]
self.assertTrue(isinstance(weak, xbob.boosting.machine.LUTMachine))
self.assertTrue(isinstance(weak, bob.learn.boosting.LUTMachine))
self.assertEqual(weak.lut.shape, (256,4))
# check first training image
......
import xbob.boosting
import bob.learn.boosting
import numpy
import bob
import nose
import os
import tempfile
import xbob.io
def test_example_mnist():
......
import unittest
import random
import xbob.boosting
import bob.learn.boosting
import numpy
class TestExponentialLoss(unittest.TestCase):
......@@ -9,7 +9,7 @@ class TestExponentialLoss(unittest.TestCase):
def test01_positive_target(self):
# Loss values computation test for positive targets.
loss_function = xbob.boosting.loss.ExponentialLoss()
loss_function = bob.learn.boosting.ExponentialLoss()
target = 1
score = 0.34
alpha = 0.5
......@@ -51,7 +51,7 @@ class TestExponentialLoss(unittest.TestCase):
def test02_negative_target(self):
# Exponential Loss values computation test for negative targets.
loss_function = xbob.boosting.loss.ExponentialLoss()
loss_function = bob.learn.boosting.ExponentialLoss()
target = -1
score = 0.34
alpha = 0.5
......@@ -95,7 +95,7 @@ class TestExponentialLoss(unittest.TestCase):
# Check the loss function values for multivariate targets
loss_function = xbob.boosting.loss.ExponentialLoss()
loss_function = bob.learn.boosting.ExponentialLoss()
num_samples = 3
num_outputs = 2
targets = numpy.array([[1, -1], [-1, 1], [0, 0]])
......@@ -127,7 +127,7 @@ class TestExponentialLoss(unittest.TestCase):
def test04_multivariate_negative_target(self):
loss_function = xbob.boosting.loss.ExponentialLoss()
loss_function = bob.learn.boosting.ExponentialLoss()
num_samples = 2
num_dimension = 2
targets = numpy.array([[1, -1], [-1, 1]])
......
import unittest
from unittest import SkipTest
import random
import xbob.boosting
import bob.learn.boosting
import numpy
class TestJesorskyLoss(unittest.TestCase):
......@@ -12,7 +12,7 @@ class TestJesorskyLoss(unittest.TestCase):
# Check the loss function values for multivariate targets
loss_function = xbob.boosting.loss.JesorskyLoss()
loss_function = bob.learn.boosting.JesorskyLoss()
num_samples = 2
num_outputs = 4
targets = numpy.array([[10, 10, 10, 30], [12, 11, 13, 29]], 'float64')
......@@ -43,7 +43,7 @@ class TestJesorskyLoss(unittest.TestCase):
def test02_negative_target(self):
loss_function = xbob.boosting.loss.JesorskyLoss()
loss_function = bob.learn.boosting.JesorskyLoss()
num_samples = 2
num_outputs = 4
targets = numpy.array([[10, 10, 10, 30], [12, 11, 13, 29]])
......
import unittest
import random
import xbob.boosting
import bob.learn.boosting
import numpy
class TestLogitLoss (unittest.TestCase):
......@@ -9,7 +9,7 @@ class TestLogitLoss (unittest.TestCase):
def test01_positive_target(self):
# Check the loss function value for positive targets
loss_function = xbob.boosting.loss.LogitLoss()
loss_function = bob.learn.boosting.LogitLoss()
target = 1
score = 0.34
alpha = 0.5
......@@ -47,7 +47,7 @@ class TestLogitLoss (unittest.TestCase):
def test02_negative_target(self):
# Check the loss function value for negative targets
loss_function = xbob.boosting.loss.LogitLoss()
loss_function = bob.learn.boosting.LogitLoss()
target = -1
score = 0.34
alpha = 0.5
......@@ -85,7 +85,7 @@ class TestLogitLoss (unittest.TestCase):
def test03_multivariate_dimensions(self):
# Check the loss function values for multivariate targets
loss_function = xbob.boosting.loss.LogitLoss()
loss_function = bob.learn.boosting.LogitLoss()
num_samples = 2
num_dimension = 2
targets = numpy.array([[1, -1], [-1, 1]])
......@@ -117,7 +117,7 @@ class TestLogitLoss (unittest.TestCase):
def test04_multivariate(self):
# Check the loss function values for multivariate targets
loss_function = xbob.boosting.loss.LogitLoss()
loss_function = bob.learn.boosting.LogitLoss()
targets = numpy.array([[1, -1], [-1, 1]])
score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
alpha = 0.5
......
import xbob.boosting
import bob.learn.boosting
import numpy
import bob
import nose
import os
import tempfile
import xbob.io
import bob.io.base
def test_machine():
# test the stump machine
stump = xbob.boosting.machine.StumpMachine(0., 1., 0)
stump = bob.learn.boosting.StumpMachine(0., 1., 0)
scores = numpy.ndarray((1,), numpy.float64)
stump(numpy.ones((1,1), dtype=numpy.uint16), scores)
......@@ -22,10 +22,10 @@ def test_machine():
nose.tools.eq_(stump.polarity, 1.)
temp = tempfile.mkstemp(prefix = "xbbst_", suffix=".hdf5")[1]
f = xbob.io.HDF5File(temp, 'w')
f = bob.io.base.HDF5File(temp, 'w')
stump.save(f)
stump2 = xbob.boosting.machine.StumpMachine(f)
stump2 = bob.learn.boosting.StumpMachine(f)
nose.tools.eq_(stump2.threshold, 0.)
nose.tools.eq_(stump2.polarity, 1.)
del f
......@@ -34,7 +34,7 @@ def test_machine():
# test the LUT machine
LUT = numpy.ones((1,1), numpy.float)
indices = numpy.zeros((1,), numpy.int32)
machine = xbob.boosting.machine.LUTMachine(LUT, indices)
machine = bob.learn.boosting.LUTMachine(LUT, indices)
score = machine(numpy.zeros((1,), dtype=numpy.uint16))
nose.tools.eq_(score, 1)
......@@ -42,15 +42,15 @@ def test_machine():
assert(numpy.allclose(machine.lut, LUT))
temp = tempfile.mkstemp(prefix = "xbbst_", suffix=".hdf5")[1]
f = xbob.io.HDF5File(temp, 'w')
f = bob.io.base.HDF5File(temp, 'w')
machine.save(f)
machine2 = xbob.boosting.machine.LUTMachine(f)
machine2 = bob.learn.boosting.LUTMachine(f)
assert(numpy.allclose(machine2.lut, LUT))
del f
os.remove(temp)
boosted_machine = xbob.boosting.machine.BoostedMachine()
boosted_machine = bob.learn.boosting.BoostedMachine()
boosted_machine.add_weak_machine(stump, 1.)
boosted_machine.add_weak_machine(machine, 1.)
......@@ -64,9 +64,9 @@ def test_machine():
nose.tools.eq_(labels[0], 1)
# check IO functionality
file = tempfile.mkstemp(prefix='xbob_test_')[1]
boosted_machine.save(xbob.io.HDF5File(file, 'w'))
new_machine = xbob.boosting.machine.BoostedMachine(xbob.io.HDF5File(file))
file = tempfile.mkstemp(prefix='bob.learn_test_')[1]
boosted_machine.save(bob.io.base.HDF5File(file, 'w'))
new_machine = bob.learn.boosting.BoostedMachine(bob.io.base.HDF5File(file))
os.remove(file)
assert (new_machine.alpha == 1).all()
......
import unittest
import random
import xbob.boosting
import bob.learn.boosting
import numpy
import bob
import bob.io.base
import bob.io.base.test_utils
class TestLutTrainer(unittest.TestCase):
......@@ -12,7 +13,7 @@ class TestLutTrainer(unittest.TestCase):
num_feature = 100
range_feature = 10
trainer = xbob.boosting.trainer.LUTTrainer(range_feature, num_feature)
trainer = bob.learn.boosting.LUTTrainer(range_feature, num_feature)
features = numpy.array([2, 8, 4, 7, 1, 0, 6, 3, 6, 1, 7, 0, 6, 8, 3, 6, 8, 2, 6, 9, 4, 6,
2, 0, 4, 9, 7, 4, 1, 3, 9, 9, 3, 3, 5, 2, 4, 0, 1, 3, 8, 8, 6, 7,
......@@ -23,7 +24,7 @@ class TestLutTrainer(unittest.TestCase):
loss_grad = numpy.ones(100)
hist_value, bins = numpy.histogram(features,range(range_feature +1))
sum_grad = xbob.boosting.weighted_histogram(features,loss_grad,10)
sum_grad = bob.learn.boosting.weighted_histogram(features,loss_grad,10)
self.assertEqual(sum_grad.shape[0],range_feature)
self.assertTrue((sum_grad == hist_value).all())
......@@ -36,9 +37,9 @@ class TestLutTrainer(unittest.TestCase):
selected_index = 5