Commit 8a800933 authored by André Anjos

Adds a bunch of properties

parent fb366c1a
@@ -115,7 +115,7 @@ static PyObject* PyBobMachineActivation_call1(PyBobMachineActivationObject* o,
if (PyBlitzArray_Check(z) || PyArray_Check(z)) {
PyBlitzArrayObject* z_converted = 0;
-PyBlitzArray_Converter(z, &z_converted);
+if (!PyBlitzArray_Converter(z, &z_converted)) return 0;
auto z_converted_ = make_safe(z_converted);
if (z_converted->type_num != NPY_FLOAT64) {
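Note on the hunk above: checking the converter's return value means a failed conversion now propagates the converter's pending TypeError instead of continuing with an unset pointer. A minimal Python-side sketch of the intended effect, assuming an activation object bound by this module is callable on arrays as the function name suggests (names illustrative):

import numpy
act = IdentityActivation()                # any activation bound by this module
act(numpy.array([0.5, 1.0], 'float64'))   # converts and evaluates normally
# act(object())  # would now raise the converter's TypeError instead of crashing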
@@ -512,7 +512,7 @@ static PyObject* PyBobMachineActivation_RichCompare (PyBobMachineActivationObjec
return 0;
}
-PyBobMachineActivationObject* other_ = reinterpret_cast<PyBobMachineActivationObject*>(other);
+auto other_ = reinterpret_cast<PyBobMachineActivationObject*>(other);
switch (op) {
case Py_EQ:
@@ -89,10 +89,11 @@ static int PyBobMachineLinear_init_weights(PyBobMachineLinearObject* self,
PyBlitzArrayObject* weights = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&", kwlist,
-&weights, &PyBlitzArray_Converter)) return -1;
+&PyBlitzArray_Converter, &weights)) return -1;
auto weights_ = make_safe(weights);
if (weights->type_num != NPY_FLOAT64 || weights->ndim != 2) {
-PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit floats 2D arrays for input vector `weights'");
+PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit float 2D arrays for property array `weights'");
return -1;
}
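For the "O&" format unit, PyArg_ParseTupleAndKeywords takes the converter function first and the address of the target variable second, which is the ordering restored in the hunk above. A hedged sketch of the constructor path this fixes, with LinearMachine imported as in the tests below and illustrative values:

import numpy
w = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
m = LinearMachine(w)   # the weights argument goes through init_weights' converter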
@@ -239,6 +240,293 @@ static void PyBobMachineLinear_delete (PyBobMachineLinearObject* self) {
}
int PyBobMachineLinear_Check(PyObject* o) {
return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobMachineLinear_Type));
}
static PyObject* PyBobMachineLinear_RichCompare (PyBobMachineLinearObject* self, PyObject* other, int op) {
if (!PyBobMachineLinear_Check(other)) {
PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'",
s_linear_str, other->ob_type->tp_name);
return 0;
}
auto other_ = reinterpret_cast<PyBobMachineLinearObject*>(other);
switch (op) {
case Py_EQ:
if (self->machine->operator==(*other_->machine)) Py_RETURN_TRUE;
Py_RETURN_FALSE;
break;
case Py_NE:
if (self->machine->operator!=(*other_->machine)) Py_RETURN_TRUE;
Py_RETURN_FALSE;
break;
default:
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
}
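Only equality and inequality are implemented by the slot above; other comparison operators return NotImplemented, and comparing against an unrelated type raises the TypeError set by the guard. A hedged Python-side sketch, constructing machines as the tests below do:

import numpy
w = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
assert LinearMachine(w) == LinearMachine(w)       # identical settings compare equal
assert not (LinearMachine(w) != LinearMachine(w))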
static PyMethodDef PyBobMachineLinear_methods[] = {
{0} /* Sentinel */
};
/**
.add_property("activation", &bob::machine::LinearMachine::getActivation, &bob::machine::LinearMachine::setActivation, "The activation function - by default, the identity function. The output provided by the activation function is passed, unchanged, to the user.")
**/
PyDoc_STRVAR(s_weights_str, "weights");
PyDoc_STRVAR(s_weights_doc,
"Weight matrix to which the input is projected to. The output\n\
of the project is fed subject to bias and activation before\n\
being output.\n\
");
static PyObject* PyBobMachineLinear_getWeights
(PyBobMachineLinearObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->machine->getWeights()));
}
static int PyBobMachineLinear_setWeights (PyBobMachineLinearObject* self,
PyObject* o, void* /*closure*/) {
PyBlitzArrayObject* weights = 0;
if (!PyBlitzArray_Converter(o, &weights)) return -1;
auto weights_ = make_safe(weights);
if (weights->type_num != NPY_FLOAT64 || weights->ndim != 2) {
PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit float 2D arrays for property array `weights'");
return -1;
}
try {
self->machine->setWeights(*PyBlitzArrayCxx_AsBlitz<double,2>(weights));
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot reset `weights' of %s: unknown exception caught", s_linear_str);
return -1;
}
return 0;
}
PyDoc_STRVAR(s_biases_str, "biases");
PyDoc_STRVAR(s_biases_doc,
"Bias to the output units of this linear machine, to be added\n\
to the output before activation.\n\
");
static PyObject* PyBobMachineLinear_getBiases
(PyBobMachineLinearObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->machine->getBiases()));
}
static int PyBobMachineLinear_setBiases (PyBobMachineLinearObject* self,
PyObject* o, void* /*closure*/) {
PyBlitzArrayObject* biases = 0;
if (!PyBlitzArray_Converter(o, &biases)) return -1;
auto biases_ = make_safe(biases);
if (biases->type_num != NPY_FLOAT64 || biases->ndim != 1) {
PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit float 1D arrays for property array `biases'");
return -1;
}
try {
self->machine->setBiases(*PyBlitzArrayCxx_AsBlitz<double,1>(biases));
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot reset `biases' of %s: unknown exception caught", s_linear_str);
return -1;
}
return 0;
}
PyDoc_STRVAR(s_input_subtract_str, "input_subtract");
PyDoc_STRVAR(s_input_subtract_doc,
"Input subtraction factor, before feeding data through the\n\
weight matrix W. The subtraction is the first applied\n\
operation in the processing chain - by default, it is set to\n\
0.0.\n\
");
static PyObject* PyBobMachineLinear_getInputSubtraction
(PyBobMachineLinearObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->machine->getInputSubtraction()));
}
static int PyBobMachineLinear_setInputSubtraction
(PyBobMachineLinearObject* self, PyObject* o, void* /*closure*/) {
PyBlitzArrayObject* input_subtract = 0;
if (!PyBlitzArray_Converter(o, &input_subtract)) return -1;
auto input_subtract_ = make_safe(input_subtract);
if (input_subtract->type_num != NPY_FLOAT64 || input_subtract->ndim != 1) {
PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit float 1D arrays for property array `input_subtract'");
return -1;
}
try {
self->machine->setInputSubtraction(*PyBlitzArrayCxx_AsBlitz<double,1>(input_subtract));
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot reset `input_subtract' of %s: unknown exception caught", s_linear_str);
return -1;
}
return 0;
}
PyDoc_STRVAR(s_input_divide_str, "input_divide");
PyDoc_STRVAR(s_input_divide_doc,
"Input division factor, before feeding data through the\n\
weight matrix W. The division is applied just after\n\
subtraction - by default, it is set to 1.0.\n\
");
static PyObject* PyBobMachineLinear_getInputDivision
(PyBobMachineLinearObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->machine->getInputDivision()));
}
static int PyBobMachineLinear_setInputDivision (PyBobMachineLinearObject* self,
PyObject* o, void* /*closure*/) {
PyBlitzArrayObject* input_divide = 0;
if (!PyBlitzArray_Converter(o, &input_divide)) return -1;
auto input_divide_ = make_safe(input_divide);
if (input_divide->type_num != NPY_FLOAT64 || input_divide->ndim != 1) {
PyErr_SetString(PyExc_TypeError, "LinearMachine only supports 64-bit float 1D arrays for property array `input_divide'");
return -1;
}
try {
self->machine->setInputDivision(*PyBlitzArrayCxx_AsBlitz<double,1>(input_divide));
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot reset `input_divide' of %s: unknown exception caught", s_linear_str);
return -1;
}
return 0;
}
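Together, the two properties above form the first two steps of the processing chain: subtract, then divide, before the projection. A worked example using the values from the tests below:

import numpy
x    = numpy.array([1.0, 1.0, 1.0])
isub = numpy.array([0.0, 0.5, 0.5])
idiv = numpy.array([0.5, 1.0, 1.0])
normalized = (x - isub) / idiv   # -> [2.0, 0.5, 0.5]; then W, bias, activation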
PyDoc_STRVAR(s_shape_str, "shape");
PyDoc_STRVAR(s_shape_doc,
"A tuple that represents the size of the input vector\n\
followed by the size of the output vector in the format\n\
``(input, output)``.\n\
");
static PyObject* PyBobMachineLinear_getShape
(PyBobMachineLinearObject* self, void* /*closure*/) {
return Py_BuildValue("(nn)", self->machine->inputSize(),
self->machine->outputSize());
}
static int PyBobMachineLinear_setShape (PyBobMachineLinearObject* self,
PyObject* o, void* /*closure*/) {
if (!PySequence_Check(o)) {
PyErr_Format(PyExc_TypeError, "LinearMachine shape can only be set using tuples (or sequences), not `%s'", o->ob_type->tp_name);
return -1;
}
PyObject* shape = PySequence_Tuple(o);
if (!shape) return -1; /* conversion failed; error already set */
auto shape_ = make_safe(shape);
if (PyTuple_GET_SIZE(shape) != 2) {
PyErr_Format(PyExc_RuntimeError, "LinearMachine shape can only be set using 2-position tuples (or sequences), not an %" PY_FORMAT_SIZE_T "d-position sequence", PyTuple_GET_SIZE(shape));
return -1;
}
Py_ssize_t in = PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape, 0), PyExc_OverflowError);
if (PyErr_Occurred()) return -1;
Py_ssize_t out = PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape, 1), PyExc_OverflowError);
if (PyErr_Occurred()) return -1;
try {
self->machine->resize(in, out);
}
catch (std::exception& ex) {
PyErr_SetString(PyExc_RuntimeError, ex.what());
return -1;
}
catch (...) {
PyErr_Format(PyExc_RuntimeError, "cannot reset `shape' of %s: unknown exception caught", s_linear_str);
return -1;
}
return 0;
}
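A short sketch of the resize semantics the setter above exposes; per the resize documentation further down, enlarged dimensions should be treated as uninitialized (names as imported in the tests below, values illustrative):

m = LinearMachine(2, 1)
assert m.shape == (2, 1)
m.shape = (5, 3)                  # calls resize(5, 3) internally
assert m.weights.shape == (5, 3)  # newly grown entries are uninitialized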
static PyGetSetDef PyBobMachineLinear_getseters[] = {
{
s_weights_str,
(getter)PyBobMachineLinear_getWeights,
(setter)PyBobMachineLinear_setWeights,
s_weights_doc,
0
},
{
s_biases_str,
(getter)PyBobMachineLinear_getBiases,
(setter)PyBobMachineLinear_setBiases,
s_biases_doc,
0
},
{
s_input_subtract_str,
(getter)PyBobMachineLinear_getInputSubtraction,
(setter)PyBobMachineLinear_setInputSubtraction,
s_input_subtract_doc,
0
},
{
s_input_divide_str,
(getter)PyBobMachineLinear_getInputDivision,
(setter)PyBobMachineLinear_setInputDivision,
s_input_divide_doc,
0
},
{
s_shape_str,
(getter)PyBobMachineLinear_getShape,
(setter)PyBobMachineLinear_setShape,
s_shape_doc,
0
},
{0} /* Sentinel */
};
PyTypeObject PyBobMachineLinear_Type = {
PyObject_HEAD_INIT(0)
0, /* ob_size */
@@ -256,7 +544,7 @@ PyTypeObject PyBobMachineLinear_Type = {
0, /* tp_as_mapping */
0, /* tp_hash */
0, //(ternaryfunc)PyBobMachineLinear_call, /* tp_call */
-0, //(reprfunc)PyBobMachineLinear_Str, /* tp_str */
+0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
@@ -264,13 +552,13 @@ PyTypeObject PyBobMachineLinear_Type = {
s_linear_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
-0, //(richcmpfunc)PyBobMachineLinear_RichCompare, /* tp_richcompare */
+(richcmpfunc)PyBobMachineLinear_RichCompare, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
-0, //PyBobMachineLinear_methods, /* tp_methods */
+PyBobMachineLinear_methods, /* tp_methods */
0, /* tp_members */
-0, /* tp_getset */
+PyBobMachineLinear_getseters, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
@@ -341,106 +629,13 @@ static void forward2(const bob::machine::LinearMachine& m,
PYTHON_ERROR(TypeError, "cannot forward arrays with " SIZE_T_FMT " dimensions (only with 1 or 2 dimensions).", info.nd);
}
}
static tuple get_shape(const bob::machine::LinearMachine& m) {
return make_tuple(m.inputSize(), m.outputSize());
}
static void set_shape(bob::machine::LinearMachine& m,
const blitz::TinyVector<int,2>& s) {
m.resize(s(0), s(1));
}
static void set_input_sub(bob::machine::LinearMachine& m, object o) {
extract<int> int_check(o);
extract<double> float_check(o);
if (int_check.check()) { //is int
m.setInputSubtraction(int_check());
}
else if (float_check.check()) { //is float
m.setInputSubtraction(float_check());
}
else {
//try hard-core extraction - throws TypeError, if not possible
extract<bob::python::const_ndarray> array_check(o);
if (!array_check.check())
PYTHON_ERROR(TypeError, "Cannot extract an array from this Python object");
bob::python::const_ndarray ar = array_check();
m.setInputSubtraction(ar.bz<double,1>());
}
}
static void set_input_div(bob::machine::LinearMachine& m, object o) {
extract<int> int_check(o);
extract<double> float_check(o);
if (int_check.check()) { //is int
m.setInputDivision(int_check());
}
else if (float_check.check()) { //is float
m.setInputDivision(float_check());
}
else {
//try hard-core extraction - throws TypeError, if not possible
extract<bob::python::const_ndarray> array_check(o);
if (!array_check.check())
PYTHON_ERROR(TypeError, "Cannot extract an array from this Python object");
bob::python::const_ndarray ar = array_check();
m.setInputDivision(ar.bz<double,1>());
}
}
static void set_weight(bob::machine::LinearMachine& m, object o) {
extract<int> int_check(o);
extract<double> float_check(o);
if (int_check.check()) { //is int
m.setWeights(int_check());
}
else if (float_check.check()) { //is float
m.setWeights(float_check());
}
else {
//try hard-core extraction - throws TypeError, if not possible
extract<bob::python::const_ndarray> array_check(o);
if (!array_check.check())
PYTHON_ERROR(TypeError, "Cannot extract an array from this Python object");
bob::python::const_ndarray ar = array_check();
m.setWeights(ar.bz<double,2>());
}
}
static void set_bias(bob::machine::LinearMachine& m, object o) {
extract<int> int_check(o);
extract<double> float_check(o);
if (int_check.check()) { //is int
m.setBiases(int_check());
}
else if (float_check.check()) { //is float
m.setBiases(float_check());
}
else {
//try hard-core extraction - throws TypeError, if not possible
extract<bob::python::const_ndarray> array_check(o);
if (!array_check.check())
PYTHON_ERROR(TypeError, "Cannot extract an array from this Python object");
bob::python::const_ndarray ar = array_check();
m.setBiases(ar.bz<double,1>());
}
}
***/
/***
void bind_machine_linear() {
.def(self == self)
.def(self != self)
.def("is_similar_to", &bob::machine::LinearMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this LinearMachine with the 'other' one to be approximately the same.")
.def("load", &bob::machine::LinearMachine::load, (arg("self"), arg("config")), "Loads the weights and biases from a configuration file. Both weights and biases have their dimensionalities checked between each other for consistency.")
.def("save", &bob::machine::LinearMachine::save, (arg("self"), arg("config")), "Saves the weights and biases to a configuration file.")
.add_property("input_subtract", make_function(&bob::machine::LinearMachine::getInputSubtraction, return_value_policy<copy_const_reference>()), &set_input_sub, "Input subtraction factor, before feeding data through the weight matrix W. The subtraction is the first applied operation in the processing chain - by default, it is set to 0.0.")
.add_property("input_divide", make_function(&bob::machine::LinearMachine::getInputDivision, return_value_policy<copy_const_reference>()), &set_input_div, "Input division factor, before feeding data through the weight matrix W. The division is applied just after subtraction - by default, it is set to 1.0")
.add_property("weights", make_function(&bob::machine::LinearMachine::getWeights, return_value_policy<copy_const_reference>()), &set_weight, "Weight matrix W to which the input is projected to. The output of the project is fed subject to bias and activation before being output.")
.add_property("biases", make_function(&bob::machine::LinearMachine::getBiases, return_value_policy<copy_const_reference>()), &set_bias, "Bias to the output units of this linear machine, to be added to the output before activation.")
.add_property("activation", &bob::machine::LinearMachine::getActivation, &bob::machine::LinearMachine::setActivation, "The activation function - by default, the identity function. The output provided by the activation function is passed, unchanged, to the user.")
.add_property("shape", &get_shape, &set_shape, "A tuple that represents the size of the input vector followed by the size of the output vector in the format ``(input, output)``.")
.def("resize", &bob::machine::LinearMachine::resize, (arg("self"), arg("input"), arg("output")), "Resizes the machine. If either the input or output increases in size, the weights and other factors should be considered uninitialized. If the size is preserved or reduced, already initialized values will not be changed.\n\nTip: Use this method to force data compression. All will work out given most relevant factors to be preserved are organized on the top of the weight matrix. In this way, reducing the system size will supress less relevant projections.")
.def("__call__", &forward2, (arg("self"), arg("input"), arg("output")), "Projects the input to the weights and biases and saves results on the output")
.def("forward", &forward2, (arg("self"), arg("input"), arg("output")), "Projects the input to the weights and biases and saves results on the output")
@@ -9,11 +9,12 @@
"""
import os, sys
-import unittest
+import nose.tools
import math
-import bob
+from . import LinearMachine, HyperbolicTangentActivation, IdentityActivation
import numpy
import pkg_resources
+from xbob.io import HDF5File
def F(f):
"""Returns the test file on the "data" subdirectory"""
@@ -21,209 +22,204 @@ def F(f):
MACHINE = F('linear-test.hdf5')
-class MachineTest(unittest.TestCase):
-  """Performs various LinearMachine tests."""
-
-  def test01_Initialization(self):
-
-    # Two inputs and 1 output
-    m = bob.machine.LinearMachine(2,1)
-    self.assertTrue( (m.weights == 0.0).all() )
-    self.assertEqual( m.weights.shape, (2,1) )
-    self.assertTrue( (m.biases == 0.0).all() )
-    self.assertEqual( m.biases.shape, (1,) )
-
-    # Start by providing the data
-    w = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
-    m = bob.machine.LinearMachine(w)
-    b = numpy.array([0.3, -3.0], 'float64')
-    isub = numpy.array([0., 0.5, 0.5], 'float64')
-    idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
-    m.input_subtract = isub
-    m.input_divide = idiv
-    m.biases = b
-    m.hidden_activation = bob.machine.HyperbolicTangentActivation()
-    m.output_activation = bob.machine.HyperbolicTangentActivation()
-    self.assertTrue( (m.input_subtract == isub).all() )
-    self.assertTrue( (m.input_divide == idiv).all() )
-    self.assertTrue( (m.weights == w).all() )
-    self.assertTrue( (m.biases == b). all() )
-    self.assertEqual(m.hidden_activation,
-        bob.machine.HyperbolicTangentActivation())
-
-    # Save to file
-    # c = bob.io.HDF5File("bla.hdf5", 'w')
-    # m.save(c)
-
-    # Start by reading data from a file
-    c = bob.io.HDF5File(MACHINE)
-    m = bob.machine.LinearMachine(c)
-    self.assertTrue( (m.weights == w).all() )
-    self.assertTrue( (m.biases == b). all() )
-
-    # Makes sure we cannot stuff incompatible data
-    w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
-    m = bob.machine.LinearMachine(w)
-    b = numpy.array([0.3, -3.0, 2.7, -18, 52], 'float64') #wrong
-    self.assertRaises(RuntimeError, setattr, m, 'biases', b)
-    self.assertRaises(RuntimeError, setattr, m, 'input_subtract', b)
-    self.assertRaises(RuntimeError, setattr, m, 'input_divide', b)
-
-  def test02_Correctness(self):
-
-    # Tests the correctness of a linear machine
-    c = bob.io.HDF5File(MACHINE)
-    m = bob.machine.LinearMachine(c)
-
-    def presumed(ivalue):
-      """Calculates, by hand, the presumed output given the input"""
-
-      # These are the supposed preloaded values from the file "MACHINE"
-      isub = numpy.array([0., 0.5, 0.5], 'float64')
-      idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
-      w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
-      b = numpy.array([0.3, -3.0], 'float64')
-      act = math.tanh
-
-      return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
-
-    testing = [
-        [1,1,1],
-        [0.5,0.2,200],
-        [-27,35.77,0],
-        [12,0,0],
-        ]
-
-    # 1D case
-    maxerr = numpy.ndarray((2,), 'float64')
-    maxerr.fill(1e-10)
-    for k in testing:
-      input = numpy.array(k, 'float64')
-      self.assertTrue ( (abs(presumed(input) - m(input)) < maxerr).all() )
-
-    # 2D case
-    output = m(testing)
-    for i, k in enumerate(testing):
-      input = numpy.array(k, 'float64')
-      self.assertTrue ( (abs(presumed(input) - output[i,:]) < maxerr).all() )
-
-  def test03_UserAllocation(self):
-
-    # Tests the correctness of a linear machine
-    c = bob.io.HDF5File(MACHINE)
-    m = bob.machine.LinearMachine(c)
-
-    def presumed(ivalue):
-      """Calculates, by hand, the presumed output given the input"""
-
-      # These are the supposed preloaded values from the file "MACHINE"
-      isub = numpy.array([0., 0.5, 0.5], 'float64')
-      idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
-      w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
-      b = numpy.array([0.3, -3.0], 'float64')
-      act = math.tanh
-
-      return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
-
-    testing = [
-        [1,1,1],
-        [0.5,0.2,200],
-        [-27,35.77,0],
-        [12,0,0],
-        ]
-
-    # 1D case
-    maxerr = numpy.ndarray((2,), 'float64')
-    maxerr.fill(1e-10)
-    output = numpy.ndarray((2,), 'float64')
-    for k in testing:
-      input = numpy.array(k, 'float64')
-      m(input, output)
-      self.assertTrue ( (abs(presumed(input) - output) < maxerr).all() )
-
-    # 2D case
-    output = numpy.ndarray((len(testing), 2), 'float64')
-    m(testing, output)
-    for i, k in enumerate(testing):
-      input = numpy.array(k, 'float64')
-      self.assertTrue ( (abs(presumed(input) - output[i,:]) < maxerr).all() )
-
-  def test04_comparisons(self):
-
-    # Start by creating the data
-    w1 = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
-    w2 = numpy.array([[0.4, 1.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
-    b1 = numpy.array([0.3, -3.0], 'float64')
-    b2 = numpy.array([0.3, 3.0], 'float64')
-    isub1 = numpy.array([0., 0.5, 0.5], 'float64')
-    isub2 = numpy.array([0.5, 0.5, 0.5], 'float64')
-    idiv1 = numpy.array([0.5, 1.0, 1.0], 'float64')
-    idiv2 = numpy.array([1.5, 1.0, 1.0], 'float64')
-
-    # Creates LinearMachine's
-    m1 = bob.machine.LinearMachine(w1)
-    m1.input_subtract = isub1
-    m1.input_divide = idiv1
-    m1.biases = b1
-    m1.activation = bob.machine.HyperbolicTangentActivation()
-
-    m1b = bob.machine.LinearMachine(m1)
-
-    m1c = bob.machine.LinearMachine(w1)
-    m1c.input_subtract = isub1
-    m1c.input_divide = idiv1
-    m1c.biases = b1
-    m1c.activation = bob.machine.HyperbolicTangentActivation()
-
-    m2 = bob.machine.LinearMachine(w2)
-    m2.input_subtract = isub1
-    m2.input_divide = idiv1
-    m2.biases = b1
-    m2.activation = bob.machine.HyperbolicTangentActivation()
-
-    m3 = bob.machine.LinearMachine(w1)
-    m3.input_subtract = isub2
-    m3.input_divide = idiv1
-    m3.biases = b1
-    m3.activation = bob.machine.HyperbolicTangentActivation()
-
-    m4 = bob.machine.LinearMachine(w1)
-    m4.input_subtract = isub1
-    m4.input_divide = idiv2
-    m4.biases = b1
-    m4.activation = bob.machine.HyperbolicTangentActivation()
-
-    m5 = bob.machine.LinearMachine(w1)
-    m5.input_subtract = isub1
-    m5.input_divide = idiv1
-    m5.biases = b2
-    m5.activation = bob.machine.HyperbolicTangentActivation()
-
-    m6 = bob.machine.LinearMachine(w1)
-    m6.input_subtract = isub1
-    m6.input_divide = idiv1
-    m6.biases = b1
-    m6.activation = bob.machine.IdentityActivation()
-
-    # Compares them using the overloaded operators == and !=
-    self.assertTrue( m1 == m1b )
-    self.assertFalse( m1 != m1b )
-    self.assertTrue( m1.is_similar_to(m1b) )
-
-    self.assertTrue( m1 == m1c )
-    self.assertFalse( m1 != m1c )
-    self.assertTrue( m1.is_similar_to(m1c) )
-
-    self.assertFalse( m1 == m2 )
-    self.assertTrue( m1 != m2 )
-    self.assertFalse( m1.is_similar_to(m2) )
-
-    self.assertFalse( m1 == m3 )
-    self.assertTrue( m1 != m3 )
-    self.assertFalse( m1.is_similar_to(m3) )
-
-    self.assertFalse( m1 == m4 )
-    self.assertTrue( m1 != m4 )
-    self.assertFalse( m1.is_similar_to(m4) )
-
-    self.assertFalse( m1 == m5 )
-    self.assertTrue( m1 != m5 )
-    self.assertFalse( m1.is_similar_to(m5) )
-
-    self.assertFalse( m1 == m6 )
-    self.assertTrue( m1 != m6 )
-    self.assertFalse( m1.is_similar_to(m6) )
+def test_initialization():
+
+  # Two inputs and 1 output
+  m = LinearMachine(2,1)
+  assert (m.weights == 0.0).all()
+  nose.tools.eq_( m.weights.shape, (2,1) )
+  assert (m.biases == 0.0).all()
+  nose.tools.eq_( m.biases.shape, (1,) )
+
+  # Start by providing the data
+  w = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
+  m = LinearMachine(w)
+  b = numpy.array([0.3, -3.0], 'float64')
+  isub = numpy.array([0., 0.5, 0.5], 'float64')
+  idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
+  m.input_subtract = isub
+  m.input_divide = idiv
+  m.biases = b
+  m.activation = HyperbolicTangentActivation()
+  assert (m.input_subtract == isub).all()
+  assert (m.input_divide == idiv).all()
+  assert (m.weights == w).all()
+  assert (m.biases == b).all()
+  nose.tools.eq_(m.hidden_activation,
+      HyperbolicTangentActivation())
+
+  # Save to file
+  # c = HDF5File("bla.hdf5", 'w')
+  # m.save(c)
+
+  # Start by reading data from a file
+  c = HDF5File(MACHINE)
+  m = LinearMachine(c)
+  assert (m.weights == w).all()
+  assert (m.biases == b).all()
+
+  # Makes sure we cannot stuff incompatible data
+  w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
+  m = LinearMachine(w)
+  b = numpy.array([0.3, -3.0, 2.7, -18, 52], 'float64') #wrong
+  nose.tools.assert_raises(RuntimeError, setattr, m, 'biases', b)
+  nose.tools.assert_raises(RuntimeError, setattr, m, 'input_subtract', b)
+  nose.tools.assert_raises(RuntimeError, setattr, m, 'input_divide', b)
+
+def test_correctness():
+
+  # Tests the correctness of a linear machine
+  c = HDF5File(MACHINE)
+  m = LinearMachine(c)
+
+  def presumed(ivalue):
+    """Calculates, by hand, the presumed output given the input"""
+
+    # These are the supposed preloaded values from the file "MACHINE"
+    isub = numpy.array([0., 0.5, 0.5], 'float64')
+    idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
+    w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
+    b = numpy.array([0.3, -3.0], 'float64')
+    act = math.tanh
+
+    return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
+
+  testing = [
+      [1,1,1],
+      [0.5,0.2,200],
+      [-27,35.77,0],
+      [12,0,0],
+      ]
+
+  # 1D case
+  maxerr = numpy.ndarray((2,), 'float64')
+  maxerr.fill(1e-10)
+  for k in testing:
+    input = numpy.array(k, 'float64')
+    assert (abs(presumed(input) - m(input)) < maxerr).all()
+
+  # 2D case
+  output = m(testing)
+  for i, k in enumerate(testing):
+    input = numpy.array(k, 'float64')
+    assert (abs(presumed(input) - output[i,:]) < maxerr).all()
+
+def test_user_allocation():
+
+  # Tests the correctness of a linear machine
+  c = HDF5File(MACHINE)
+  m = LinearMachine(c)
+
+  def presumed(ivalue):
+    """Calculates, by hand, the presumed output given the input"""
+
+    # These are the supposed preloaded values from the file "MACHINE"
+    isub = numpy.array([0., 0.5, 0.5], 'float64')
+    idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
+    w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
+    b = numpy.array([0.3, -3.0], 'float64')
+    act = math.tanh
+
+    return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
+
+  testing = [
+      [1,1,1],
+      [0.5,0.2,200],
+      [-27,35.77,0],
+      [12,0,0],
+      ]
+
+  # 1D case
+  maxerr = numpy.ndarray((2,), 'float64')
+  maxerr.fill(1e-10)
+  output = numpy.ndarray((2,), 'float64')
+  for k in testing:
+    input = numpy.array(k, 'float64')
+    m(input, output)
+    assert (abs(presumed(input) - output) < maxerr).all()
+
+  # 2D case
+  output = numpy.ndarray((len(testing), 2), 'float64')
+  m(testing, output)
+  for i, k in enumerate(testing):
+    input = numpy.array(k, 'float64')
+    assert (abs(presumed(input) - output[i,:]) < maxerr).all()
+
+def test_comparisons():
+
+  # Start by creating the data
+  w1 = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
+  w2 = numpy.array([[0.4, 1.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
+  b1 = numpy.array([0.3, -3.0], 'float64')
+  b2 = numpy.array([0.3, 3.0], 'float64')
+  isub1 = numpy.array([0., 0.5, 0.5], 'float64')
+  isub2 = numpy.array([0.5, 0.5, 0.5], 'float64')
+  idiv1 = numpy.array([0.5, 1.0, 1.0], 'float64')
+  idiv2 = numpy.array([1.5, 1.0, 1.0], 'float64')
+
+  # Creates LinearMachine's
+  m1 = LinearMachine(w1)
+  m1.input_subtract = isub1
+  m1.input_divide = idiv1
+  m1.biases = b1
+  m1.activation = HyperbolicTangentActivation()
+
+  m1b = LinearMachine(m1)
+
+  m1c = LinearMachine(w1)
+  m1c.input_subtract = isub1
+  m1c.input_divide = idiv1
+  m1c.biases = b1
+  m1c.activation = HyperbolicTangentActivation()
+
+  m2 = LinearMachine(w2)
+  m2.input_subtract = isub1
+  m2.input_divide = idiv1
+  m2.biases = b1
+  m2.activation = HyperbolicTangentActivation()
+
+  m3 = LinearMachine(w1)
+  m3.input_subtract = isub2
+  m3.input_divide = idiv1
+  m3.biases = b1
+  m3.activation = HyperbolicTangentActivation()
+
+  m4 = LinearMachine(w1)
+  m4.input_subtract = isub1
+  m4.input_divide = idiv2
+  m4.biases = b1
+  m4.activation = HyperbolicTangentActivation()
+
+  m5 = LinearMachine(w1)
+  m5.input_subtract = isub1
+  m5.input_divide = idiv1
+  m5.biases = b2
+  m5.activation = HyperbolicTangentActivation()
+
+  m6 = LinearMachine(w1)
+  m6.input_subtract = isub1
+  m6.input_divide = idiv1
+  m6.biases = b1
+  m6.activation = IdentityActivation()
+
+  # Compares them using the overloaded operators == and !=
+  assert m1 == m1b
+  assert not m1 != m1b
+  assert m1.is_similar_to(m1b)
+
+  assert m1 == m1c
+  assert not m1 != m1c
+  assert m1.is_similar_to(m1c)
+
+  assert not m1 == m2
+  assert m1 != m2
+  assert not m1.is_similar_to(m2)
+
+  assert not m1 == m3
+  assert m1 != m3
+  assert not m1.is_similar_to(m3)
+
+  assert not m1 == m4
+  assert m1 != m4
+  assert not m1.is_similar_to(m4)
+
+  assert not m1 == m5
+  assert m1 != m5
+  assert not m1.is_similar_to(m5)
+
+  assert not m1 == m6
+  assert m1 != m6
+  assert not m1.is_similar_to(m6)
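For approximate comparison, is_similar_to accepts the tolerances documented in the binding registration above (r_epsilon, defaulting to 1e-5, and a_epsilon, defaulting to 1e-8). A short hedged sketch, continuing with m1 and m1b from the test above and assuming the keyword form carries over from that binding:

assert m1.is_similar_to(m1b)                   # default tolerances
assert m1.is_similar_to(m1b, r_epsilon=1e-3)   # looser relative tolerance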