Commit 1da8fb2c authored by André Anjos

Finished implementing MLP support (added roll/unroll)

parent 833fdd87
@@ -63,9 +63,14 @@ setup(
),
Extension("xbob.learn.mlp._library",
[
"xbob/learn/mlp/roll.cpp",
"xbob/learn/mlp/rprop.cpp",
"xbob/learn/mlp/backprop.cpp",
"xbob/learn/mlp/trainer.cpp",
"xbob/learn/mlp/shuffler.cpp",
"xbob/learn/mlp/cost.cpp",
"xbob/learn/mlp/machine.cpp",
"xbob/learn/mlp/main.cpp",
"xbob/learn/mlp/cxx/roll.cpp",
"xbob/learn/mlp/cxx/machine.cpp",
"xbob/learn/mlp/cxx/cross_entropy.cpp",
@@ -74,10 +79,6 @@ setup(
"xbob/learn/mlp/cxx/trainer.cpp",
"xbob/learn/mlp/cxx/backprop.cpp",
"xbob/learn/mlp/cxx/rprop.cpp",
"xbob/learn/mlp/shuffler.cpp",
"xbob/learn/mlp/cost.cpp",
"xbob/learn/mlp/machine.cpp",
"xbob/learn/mlp/main.cpp",
],
packages = packages,
include_dirs = include_dirs,
......
@@ -256,8 +256,8 @@ static int PyBobLearnMLPBackProp_setPreviousDerivatives
(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
std::vector<blitz::Array<double,2>> bzvec;
int retval = convert_tuple<2>((PyObject*)self, s_previous_derivatives_str,
o, bzvec);
int retval = convert_tuple<2>(Py_TYPE(self)->tp_name,
s_previous_derivatives_str, o, bzvec);
if (retval < 0) return retval;
try {
@@ -292,7 +292,7 @@ static int PyBobLearnMLPBackProp_setPreviousBiasDerivatives
(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
std::vector<blitz::Array<double,1>> bzvec;
int retval = convert_tuple<1>((PyObject*)self,
int retval = convert_tuple<1>(Py_TYPE(self)->tp_name,
s_previous_bias_derivatives_str, o, bzvec);
if (retval < 0) return retval;
......
@@ -5,7 +5,6 @@
* Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
*/
#include <bob/core/assert.h>
#include <xbob.learn.mlp/roll.h>
int bob::learn::mlp::detail::getNbParameters(const bob::learn::mlp::Machine& machine)
@@ -19,7 +18,6 @@ int bob::learn::mlp::detail::getNbParameters(
const std::vector<blitz::Array<double,2> >& w,
const std::vector<blitz::Array<double,1> >& b)
{
bob::core::array::assertSameDimensionLength(w.size(), b.size());
int N = 0;
for (int i=0; i<(int)w.size(); ++i)
N += b[i].numElements() + w[i].numElements();
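For intuition: the count above adds one parameter per weight-matrix entry and one per bias entry. A minimal Python sketch of the same count, assuming NumPy arrays (the helper name is hypothetical):

# hypothetical mirror of bob::learn::mlp::detail::getNbParameters
def count_parameters(weights, biases):
    # one parameter per weight entry plus one per bias entry
    return sum(w.size + b.size for w, b in zip(weights, biases))

# e.g. a 3-2-1 MLP: (3*2 + 2) + (2*1 + 1) = 11 parameters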
@@ -37,11 +35,6 @@ void bob::learn::mlp::unroll(const bob::learn::mlp::Machine& machine,
void bob::learn::mlp::unroll(const std::vector<blitz::Array<double,2> >& w,
const std::vector<blitz::Array<double,1> >& b, blitz::Array<double,1>& vec)
{
// 1/ Check number of elements
const int N = bob::learn::mlp::detail::getNbParameters(w, b);
bob::core::array::assertSameDimensionLength(vec.extent(0), N);
// 2/ Roll
blitz::Range rall = blitz::Range::all();
int offset=0;
for (int i=0; i<(int)w.size(); ++i)
@@ -74,11 +67,6 @@ void bob::learn::mlp::roll(bob::learn::mlp::Machine& machine,
void bob::learn::mlp::roll(std::vector<blitz::Array<double,2> >& w,
std::vector<blitz::Array<double,1> >& b, const blitz::Array<double,1>& vec)
{
// 1/ Check number of elements
const int N = bob::learn::mlp::detail::getNbParameters(w, b);
bob::core::array::assertSameDimensionLength(vec.extent(0), N);
// 2/ Roll
blitz::Range rall = blitz::Range::all();
int offset=0;
for (int i=0; i<(int)w.size(); ++i)
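The dimension checks removed above are now enforced by the Python bindings (see ``roll_to_machine`` further down), which raise ``TypeError`` on a wrongly shaped vector. A sketch, assuming ``machine`` is an existing :py:class:`xbob.learn.mlp.Machine`:

import numpy
from xbob.learn.mlp import roll

bad = numpy.zeros(5)    # deliberately the wrong length
try:
    roll(machine, bad)  # assumes `machine` holds a different number of parameters
except TypeError as e:
    print(e)            # the message states the expected shape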
......
@@ -36,7 +36,7 @@ a global activation function. References to fully-connected\n\
feed-forward networks:\n\
\n\
Bishop's Pattern Recognition and Machine Learning, Chapter 5.\n\
Figure 5.1 shows what we mean.\n\
Figure 5.1 shows the model implemented here.\n\
\n\
MLPs normally are multi-layered systems, with 1 or more hidden\n\
layers. As a special case, this implementation also supports\n\
@@ -737,7 +737,7 @@ PyObject* PyBobLearnMLPMachine_Repr(PyBobLearnMLPMachineObject* self) {
PyDoc_STRVAR(s_forward_str, "forward");
PyDoc_STRVAR(s_forward_doc,
"o.forward(input [, output]) -> array\n\
"o.forward(input, [output]) -> array\n\
\n\
Projects ``input`` through its internal structure. If\n\
``output`` is provided, the result is placed there instead of allocating\n\
@@ -833,6 +833,7 @@ static PyObject* PyBobLearnMLPMachine_forward
osize[1] = self->cxx->outputSize();
}
output = (PyBlitzArrayObject*)PyBlitzArray_SimpleNew(NPY_FLOAT64, input->ndim, osize);
if (!output) return 0;
output_ = make_safe(output);
}
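A usage sketch of ``forward`` with and without a preallocated output; the shape-tuple constructor and the 4-3-2 layout are assumptions for illustration:

import numpy
from xbob.learn.mlp import Machine

m = Machine((4, 3, 2))                     # 4 inputs, 3 hidden, 2 outputs (assumed layout)
x = numpy.random.rand(4)                   # float64 input vector
y = m.forward(x)                           # output array allocated internally
out = numpy.empty(2, dtype=numpy.float64)
m.forward(x, out)                          # result written into `out` instead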
@@ -937,7 +938,7 @@ static PyObject* PyBobLearnMLPMachine_Save
PyDoc_STRVAR(s_is_similar_to_str, "is_similar_to");
PyDoc_STRVAR(s_is_similar_to_doc,
"o.is_similar_to(other [, r_epsilon=1e-5 [, a_epsilon=1e-8]]) -> bool\n\
"o.is_similar_to(other, [r_epsilon=1e-5, [a_epsilon=1e-8]]) -> bool\n\
\n\
Compares this MLP with the ``other`` one for approximate equality.\n\
\n\
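A brief sketch of the call, assuming two machines ``m1`` and ``m2`` of identical shape:

same = m1.is_similar_to(m2, r_epsilon=1e-5, a_epsilon=1e-8)  # approximate comparison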
......
@@ -2,7 +2,7 @@
* @author Andre Anjos <andre.anjos@idiap.ch>
* @date Fri 13 Dec 2013 12:35:59 CET
*
* @brief Bindings to bob::machine
* @brief Bindings to bob::learn::mlp
*/
#define XBOB_LEARN_MLP_MODULE
@@ -17,8 +17,191 @@
#include <xbob.learn.activation/api.h>
#include <xbob.core/random.h>
PyDoc_STRVAR(s_unroll_str, "unroll");
PyDoc_STRVAR(s_unroll_doc,
"unroll(machine, [parameters]) -> parameters\n\
\n\
unroll(weights, biases, [parameters]) -> parameters\n\
\n\
Unroll the parameters (weights and biases) into a 64-bit float 1D array.\n\
\n\
This function will unroll the MLP machine weights and biases into a\n\
single 1D array of 64-bit floats. This procedure is useful for adapting\n\
generic optimization procedures for the task of training MLPs.\n\
\n\
Keyword parameters:\n\
\n\
machine, :py:class:`xbob.learn.mlp.Machine`\n\
An MLP that will have its weights and biases unrolled into a 1D array\n\
\n\
weights, sequence of 2D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
2D arrays of 64-bit floats representing the weights for the MLP you\n\
wish to unroll.\n\
\n\
.. note::\n\
In this case, both this sequence and ``biases`` must have the\n\
same length. This is the sole requirement.\n\
\n\
Other checks are disabled as this is considered an *expert* API.\n\
If you plan to unroll the weights and biases on a\n\
:py:class:`xbob.learn.mlp.Machine`, notice that in a given\n\
``weights`` sequence, the number of outputs in layer ``k``\n\
must match the number of inputs on layer ``k+1`` and the\n\
number of biases on layer ``k``. In practice, you must assert\n\
that ``weights[k].shape[1] == weights[k+1].shape[0]`` and\n\
that ``weights[k].shape[1] == bias[k].shape[0]``.\n\
\n\
biases, sequence of 1D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
1D arrays of 64-bit floats representing the biases for the MLP you\n\
wish to unroll.\n\
\n\
.. note::\n\
In this case, both this sequence and ``weights`` must have the\n\
same length. This is the sole requirement.\n\
\n\
parameters, 1D 64-bit float array\n\
You may decide to pass the array in which the parameters will be\n\
placed using this variable. In this case, the size of the vector\n\
must match the total number of parameters available on the input\n\
machine or individual weights and biases. If you decide to omit\n\
this parameter, then a vector with the appropriate size will be\n\
allocated internally and returned.\n\
\n\
You can use :py:func:`number_of_parameters` to calculate the total\n\
length of the required ``parameters`` vector, in case you wish\n\
to supply it.\n\
\n\
");
PyObject* unroll(PyObject*, PyObject* args, PyObject* kwds);
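A minimal sketch of the first calling form of ``unroll``, assuming the shape-tuple ``Machine`` constructor:

from xbob.learn.mlp import Machine, unroll, number_of_parameters

machine = Machine((4, 3, 2))               # assumed 4-3-2 layout
theta = unroll(machine)                    # freshly allocated 1D float64 vector
assert theta.shape[0] == number_of_parameters(machine)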
PyDoc_STRVAR(s_roll_str, "roll");
PyDoc_STRVAR(s_roll_doc,
"roll(machine, parameters) -> parameters\n\
\n\
roll(weights, biases, parameters) -> parameters\n\
\n\
Roll the parameters (weights and biases) from a 64-bit float 1D array.\n\
\n\
This function will roll the MLP machine weights and biases from a\n\
single 1D array of 64-bit floats. This procedure is useful for adapting\n\
generic optimization procedures for the task of training MLPs.\n\
\n\
Keyword parameters:\n\
\n\
machine, :py:class:`xbob.learn.mlp.Machine`\n\
An MLP that will have its weights and biases rolled from a 1D array\n\
\n\
weights, sequence of 2D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
2D arrays of 64-bit floats representing the weights for the MLP you\n\
wish to roll the parameters into.\n\
\n\
.. note::\n\
In this case, both this sequence and ``biases`` must have the\n\
same length. This is the sole requirement.\n\
\n\
Other checks are disabled as this is considered an *expert* API.\n\
If you plan to roll the weights and biases on a\n\
:py:class:`xbob.learn.mlp.Machine`, notice that in a given\n\
``weights`` sequence, the number of outputs in layer ``k``\n\
must match the number of inputs on layer ``k+1`` and the\n\
number of biases on layer ``k``. In practice, you must assert\n\
that ``weights[k].shape[1] == weights[k+1].shape[0]`` and\n\
that ``weights[k].shape[1] == bias[k].shape[0]``.\n\
\n\
biases, sequence of 1D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
1D arrays of 64-bit floats representing the biases for the MLP you\n\
wish to roll the parameters into.\n\
\n\
.. note::\n\
In this case, both this sequence and ``weights`` must have the\n\
same length. This is the sole requirement.\n\
\n\
parameters, 1D 64-bit float array\n\
You may decide to pass the array in which the parameters will be\n\
placed using this variable. In this case, the size of the vector\n\
must match the total number of parameters available on the input\n\
machine or individual weights and biases. If you decide to omit\n\
this parameter, then a vector with the appropriate size will be\n\
allocated internally and returned.\n\
\n\
You can use :py:func:`number_of_parameters` to calculate the total\n\
length of the required ``parameters`` vector, in case you wish\n\
to supply it.\n\
\n\
");
PyObject* roll(PyObject*, PyObject* args, PyObject* kwds);
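A round-trip sketch for ``roll``: flatten, perturb (as a generic optimizer would), then write back; ``machine`` is assumed from the previous sketch:

import numpy
from xbob.learn.mlp import roll, unroll

theta = unroll(machine)
theta += 1e-3 * numpy.random.randn(theta.shape[0])  # e.g. one update step
roll(machine, theta)                                # copies back into the MLP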
PyDoc_STRVAR(s_number_of_parameters_str, "number_of_parameters");
PyDoc_STRVAR(s_number_of_parameters_doc,
"number_of_parameters(machine) -> scalar\n\
\n\
number_of_parameters(weights, biases) -> scalar\n\
\n\
Returns the total number of parameters in an MLP.\n\
\n\
Keyword parameters:\n\
\n\
machine, :py:class:`xbob.learn.mlp.Machine`\n\
Using the first calling form, counts the total number of parameters in\n\
an MLP.\n\
\n\
weights, sequence of 2D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
2D arrays of 64-bit floats representing the weights for the MLP you\n\
wish to count the parameters from.\n\
\n\
.. note::\n\
In this case, both this sequence and ``biases`` must have the\n\
same length. This is the sole requirement.\n\
\n\
Other checks are disabled as this is considered an *expert* API.\n\
If you plan to use the weights and biases on a\n\
:py:class:`xbob.learn.mlp.Machine`, notice that in a given\n\
``weights`` sequence the number of outputs in layer ``k``\n\
must match the number of inputs on layer ``k+1`` and the\n\
number of biases on layer ``k``. In practice, you must assert\n\
that ``weights[k].shape[1] == weights[k+1].shape[0]`` and\n\
that ``weights[k].shape[1] == bias[k].shape[0]``.\n\
\n\
biases, sequence of 1D 64-bit float arrays\n\
If you choose the second calling strategy, then pass a sequence of\n\
1D arrays of 64-bit floats representing the biases for the MLP you\n\
wish to count the parameters from.\n\
\n\
.. note::\n\
In this case, both this sequence and ``weights`` must have the\n\
same length. This is the sole requirement.\n\
\n\
");
PyObject* number_of_parameters(PyObject*, PyObject* args, PyObject* kwds);
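``number_of_parameters`` is handy for preallocating the ``parameters`` vector; a sketch with the same assumed machine:

import numpy
from xbob.learn.mlp import Machine, unroll, number_of_parameters

machine = Machine((4, 3, 2))
n = number_of_parameters(machine)          # (4*3 + 3) + (3*2 + 2) == 23
buf = numpy.empty(n, dtype=numpy.float64)
unroll(machine, buf)                       # fills the preallocated vector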
static PyMethodDef module_methods[] = {
{0} /* Sentinel */
{
s_unroll_str,
(PyCFunction)unroll,
METH_VARARGS|METH_KEYWORDS,
s_unroll_doc
},
{
s_roll_str,
(PyCFunction)roll,
METH_VARARGS|METH_KEYWORDS,
s_roll_doc
},
{
s_number_of_parameters_str,
(PyCFunction)number_of_parameters,
METH_VARARGS|METH_KEYWORDS,
s_number_of_parameters_doc
},
{0} /* Sentinel */
};
PyDoc_STRVAR(module_docstr, "bob's multi-layer perceptron machine and trainers");
......
/**
* @author Andre Anjos <andre.anjos@idiap.ch>
* @date Wed 21 May 12:08:40 2014 CEST
*
* @brief Bindings to roll/unroll
*/
#define XBOB_LEARN_MLP_MODULE
#include <xbob.learn.mlp/api.h>
#include <xbob.learn.mlp/roll.h>
#include <xbob.blitz/capi.h>
#include <xbob.blitz/cleanup.h>
#include "utils.h"
static PyObject* unroll_from_machine(PyObject* args, PyObject* kwds) {
/* Parses input arguments in a single shot */
static const char* const_kwlist[] = {"machine", "parameters", 0};
static char** kwlist = const_cast<char**>(const_kwlist);
PyObject* machine = 0;
PyBlitzArrayObject* parameters = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O&", kwlist,
&PyBobLearnMLPMachine_Type, &machine,
&PyBlitzArray_OutputConverter, &parameters
)) return 0;
auto machine_ = reinterpret_cast<PyBobLearnMLPMachineObject*>(machine);
auto parameters_ = make_xsafe(parameters);
Py_ssize_t nb_parameters =
bob::learn::mlp::detail::getNbParameters(*machine_->cxx);
if (parameters) {
if (parameters->type_num != NPY_FLOAT64 ||
parameters->ndim != 1 ||
parameters->shape[0] != nb_parameters) {
PyErr_Format(PyExc_TypeError, "function only supports 1D 64-bit float arrays with shape (%" PY_FORMAT_SIZE_T "d,) for output array `parameters', but you passed a %" PY_FORMAT_SIZE_T"dD %s array with shape (%" PY_FORMAT_SIZE_T "d,)", nb_parameters, parameters->ndim, PyBlitzArray_TypenumAsString(parameters->type_num), parameters->shape[0]);
return 0;
}
}
else {
//allocate space for the parameters
parameters = (PyBlitzArrayObject*)PyBlitzArray_SimpleNew(NPY_FLOAT64, 1, &nb_parameters);
if (!parameters) return 0;
parameters_ = make_safe(parameters);
}
/** all basic checks are done, can execute the function now **/
try {
bob::learn::mlp::unroll(*machine_->cxx,
*PyBlitzArrayCxx_AsBlitz<double,1>(parameters));
}
catch (std::exception& e) {
PyErr_SetString(PyExc_RuntimeError, e.what());
return 0;
}
catch (...) {
PyErr_SetString(PyExc_RuntimeError, "cannot unroll machine parameters: unknown exception caught");
return 0;
}
Py_INCREF(parameters);
return PyBlitzArray_NUMPY_WRAP((PyObject*)parameters);
}
static PyObject* unroll_from_values(PyObject* args, PyObject* kwds) {
/* Parses input arguments in a single shot */
static const char* const_kwlist[] = {"weights", "biases", "parameters", 0};
static char** kwlist = const_cast<char**>(const_kwlist);
PyObject* weights = 0;
PyObject* biases = 0;
PyBlitzArrayObject* parameters = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&", kwlist,
&weights, &biases,
&PyBlitzArray_OutputConverter, &parameters
)) return 0;
auto parameters_ = make_xsafe(parameters);
//converts weights and biases
std::vector<blitz::Array<double,2>> weights_;
int status = convert_tuple<2>("unroll", "weights", weights, weights_);
if (status < 0) return 0;
std::vector<blitz::Array<double,1>> biases_;
status = convert_tuple<1>("unroll", "biases", biases, biases_);
if (status < 0) return 0;
//checks
if (weights_.size() != biases_.size()) {
PyErr_Format(PyExc_RuntimeError, "unroll, when applied to individual weights and biases, requires these iterables to have the same length but len(weights) = %" PY_FORMAT_SIZE_T"d != len(biases) = %" PY_FORMAT_SIZE_T "d", weights_.size(), biases_.size());
return 0;
}
/** we skip these checks to provide a fast path
for (Py_ssize_t k=0; k<weights_.size(); ++k) {
if (weights_[k].extent(1) != biases_[k].extent(0)) {
Py_ssize_t cols = weights_[k].extent(1);
Py_ssize_t elems = biases_[k].extent(0);
PyErr_Format(PyExc_RuntimeError, "unroll, when applied to individual weights and biases, requires these iterables to have the same length and that the number of columns in each weight matrix matches the number of biases for the same layer, but in layer %" PY_FORMAT_SIZE_T "d, the weight matrix has %" PY_FORMAT_SIZE_T "d columns and the bias vector has %" PY_FORMAT_SIZE_T "d elements", k, cols, elems);
return 0;
}
if (k < (weights_.size()-1)) {
if (weights_[k].extent(1) != weights_[k+1].extent(0)) {
Py_ssize_t cols = weights_[k].extent(1);
Py_ssize_t rows = weights_[k+1].extent(0);
PyErr_Format(PyExc_RuntimeError, "unroll, when applied to individual weights and biases, requires that weights of successive layers have matching number of inputs and outputs, but the weight matrix in layer %" PY_FORMAT_SIZE_T "d, has %" PY_FORMAT_SIZE_T "d columns (outputs) and in layer %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d rows (inputs)", k, cols, k+1, rows);
return 0;
}
}
}
**/
Py_ssize_t nb_parameters =
bob::learn::mlp::detail::getNbParameters(weights_, biases_);
if (parameters) {
if (parameters->type_num != NPY_FLOAT64 ||
parameters->ndim != 1 ||
parameters->shape[0] != nb_parameters) {
PyErr_Format(PyExc_TypeError, "function only supports 1D 64-bit float arrays with shape (%" PY_FORMAT_SIZE_T "d,) for output array `parameters', but you passed a %" PY_FORMAT_SIZE_T"dD %s array with shape (%" PY_FORMAT_SIZE_T "d,)", nb_parameters, parameters->ndim, PyBlitzArray_TypenumAsString(parameters->type_num), parameters->shape[0]);
return 0;
}
}
else {
//allocate space for the parameters
parameters = (PyBlitzArrayObject*)PyBlitzArray_SimpleNew(NPY_FLOAT64, 1, &nb_parameters);
if (!parameters) return 0;
parameters_ = make_safe(parameters);
}
/** all basic checks are done, can execute the function now **/
try {
bob::learn::mlp::unroll(weights_, biases_,
*PyBlitzArrayCxx_AsBlitz<double,1>(parameters));
}
catch (std::exception& e) {
PyErr_SetString(PyExc_RuntimeError, e.what());
return 0;
}
catch (...) {
PyErr_SetString(PyExc_RuntimeError, "cannot unroll machine parameters: unknown exception caught");
return 0;
}
Py_INCREF(parameters);
return PyBlitzArray_NUMPY_WRAP((PyObject*)parameters);
}
PyObject* unroll(PyObject*, PyObject* args, PyObject* kwds) {
PyObject* arg = 0; ///< borrowed (don't delete)
if (PyTuple_Size(args)) arg = PyTuple_GET_ITEM(args, 0);
else {
PyObject* tmp = PyDict_Values(kwds);
auto tmp_ = make_safe(tmp);
arg = PyList_GET_ITEM(tmp, 0);
}
if (PyBobLearnMLPMachine_Check(arg)) return unroll_from_machine(args, kwds);
return unroll_from_values(args, kwds);
}
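The dispatcher above selects the overload by inspecting its first argument; a sketch of the expert calling form, with float64 arrays whose shapes are chosen for illustration:

import numpy
from xbob.learn.mlp import unroll

w = [numpy.random.rand(4, 3), numpy.random.rand(3, 2)]  # per-layer weights
b = [numpy.random.rand(3), numpy.random.rand(2)]        # per-layer biases
theta = unroll(w, b)  # only len(w) == len(b) is checked on this path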
static PyObject* roll_to_machine(PyObject* args, PyObject* kwds) {
/* Parses input arguments in a single shot */
static const char* const_kwlist[] = {"machine", "parameters", 0};
static char** kwlist = const_cast<char**>(const_kwlist);
PyObject* machine = 0;
PyBlitzArrayObject* parameters = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&", kwlist,
&PyBobLearnMLPMachine_Type, &machine,
&PyBlitzArray_Converter, &parameters
)) return 0;
auto machine_ = reinterpret_cast<PyBobLearnMLPMachineObject*>(machine);
auto parameters_ = make_safe(parameters);
Py_ssize_t nb_parameters =
bob::learn::mlp::detail::getNbParameters(*machine_->cxx);
if (parameters->type_num != NPY_FLOAT64 ||
parameters->ndim != 1 ||
parameters->shape[0] != nb_parameters) {
PyErr_Format(PyExc_TypeError, "function only supports 1D 64-bit float arrays with shape (%" PY_FORMAT_SIZE_T "d,) for input array `parameters', but you passed a %" PY_FORMAT_SIZE_T"dD %s array with shape (%" PY_FORMAT_SIZE_T "d,)", nb_parameters, parameters->ndim, PyBlitzArray_TypenumAsString(parameters->type_num), parameters->shape[0]);
return 0;
}
/** all basic checks are done, can execute the function now **/
try {
bob::learn::mlp::roll(*machine_->cxx,
*PyBlitzArrayCxx_AsBlitz<double,1>(parameters));
}
catch (std::exception& e) {
PyErr_SetString(PyExc_RuntimeError, e.what());
return 0;
}
catch (...) {
PyErr_SetString(PyExc_RuntimeError, "cannot roll machine parameters: unknown exception caught");
return 0;
}
Py_RETURN_NONE;
}
static PyObject* roll_to_values(PyObject* args, PyObject* kwds) {
/* Parses input arguments in a single shot */
static const char* const_kwlist[] = {"weights", "biases", "parameters", 0};
static char** kwlist = const_cast<char**>(const_kwlist);
PyObject* weights = 0;
PyObject* biases = 0;
PyBlitzArrayObject* parameters = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOO&", kwlist,
&weights, &biases,
&PyBlitzArray_Converter, &parameters
)) return 0;
auto parameters_ = make_safe(parameters);
//converts weights and biases
std::vector<blitz::Array<double,2>> weights_;
int status = convert_tuple<2>("roll", "weights", weights, weights_);
if (status < 0) return 0;
std::vector<blitz::Array<double,1>> biases_;
status = convert_tuple<1>("roll", "biases", biases, biases_);
if (status < 0) return 0;
//checks
if (weights_.size() != biases_.size()) {
PyErr_Format(PyExc_RuntimeError, "roll, when applied to individual weights and biases, requires these iterables to have the same length but len(weights) = %" PY_FORMAT_SIZE_T"d != len(biases) = %" PY_FORMAT_SIZE_T "d", weights_.size(), biases_.size());
return 0;
}
/** we skip these checks to provide a fast path
for (Py_ssize_t k=0; k<weights_.size(); ++k) {
if (weights_[k].extent(1) != biases_[k].extent(0)) {
Py_ssize_t cols = weights_[k].extent(1);
Py_ssize_t elems = biases_[k].extent(0);
PyErr_Format(PyExc_RuntimeError, "roll, when applied to individual weights and biases, requires these iterables to have the same length and that the number of columns in each weight matrix matches the number of biases for the same layer, but in layer %" PY_FORMAT_SIZE_T "d, the weight matrix has %" PY_FORMAT_SIZE_T "d columns and the bias vector has %" PY_FORMAT_SIZE_T "d elements", k, cols, elems);
return 0;
}
if (k < (weights_.size()-1)) {
if (weights_[k].extent(1) != weights_[k+1].extent(0)) {
Py_ssize_t cols = weights_[k].extent(1);
Py_ssize_t rows = weights_[k+1].extent(0);
PyErr_Format(PyExc_RuntimeError, "roll, when applied to individual weights and biases, requires that weights of successive layers have matching number of inputs and outputs, but the weight matrix in layer %" PY_FORMAT_SIZE_T "d, has %" PY_FORMAT_SIZE_T "d columns (outputs) and in layer %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d rows (inputs)", k, cols, k+1, rows);
return 0;
}
}
}
**/
Py_ssize_t nb_parameters =
bob::learn::mlp::detail::getNbParameters(weights_, biases_);
if (parameters->type_num != NPY_FLOAT64 ||
parameters->ndim != 1 ||
parameters->shape[0] != nb_parameters) {
PyErr_Format(PyExc_TypeError, "function only supports 1D 64-bit float arrays with shape (%" PY_FORMAT_SIZE_T "d,) for input array `parameters', but you passed a %" PY_FORMAT_SIZE_T"dD %s array with shape (%" PY_FORMAT_SIZE_T "d,)", nb_parameters, parameters->ndim, PyBlitzArray_TypenumAsString(parameters->type_num), parameters->shape[0]);
return 0;
}
/** all basic checks are done, can execute the function now **/