diff --git a/setup.py b/setup.py
index 69d1d14566b3b5039bc5ba95a3344aa3edf41e8c..99b18687b70e0d1533febbaaa858e4535ddd4f77 100644
--- a/setup.py
+++ b/setup.py
@@ -80,6 +80,8 @@ setup(
           "xbob/learn/misc/ndarray.cpp",
           "xbob/learn/misc/ndarray_numpy.cpp",
           "xbob/learn/misc/tinyvector.cpp",
+          "xbob/learn/misc/hdf5.cpp",
+          "xbob/learn/misc/random.cpp",
 
           "xbob/learn/misc/main.cpp",
         ],
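For context, the two sources added above are compiled into the same _library extension as the existing binding files. A minimal, hypothetical analogue using plain setuptools (the real setup.py may use a Bob-specific Extension subclass plus extra include/library flags):

    from setuptools import setup, Extension

    setup(
        name="xbob.learn.misc",
        ext_modules=[
            Extension(
                "xbob.learn.misc._library",
                sources=[
                    "xbob/learn/misc/hdf5.cpp",    # new in this change
                    "xbob/learn/misc/random.cpp",  # new in this change
                    "xbob/learn/misc/main.cpp",
                ],
            ),
        ],
    )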
diff --git a/xbob/learn/misc/data/data.hdf5 b/xbob/learn/misc/data/data.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..7c406233e19c49f5ab6e2c26d32257fc4e47e54f
Binary files /dev/null and b/xbob/learn/misc/data/data.hdf5 differ
diff --git a/xbob/learn/misc/data/dataNormalized.hdf5 b/xbob/learn/misc/data/dataNormalized.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..ac8d0302ebb312d35a8ee43c1c6195b899643733
Binary files /dev/null and b/xbob/learn/misc/data/dataNormalized.hdf5 differ
diff --git a/xbob/learn/misc/data/dataforMAP.hdf5 b/xbob/learn/misc/data/dataforMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9cd7bfe8533daab0a21ae20d342281ecf1afa977
Binary files /dev/null and b/xbob/learn/misc/data/dataforMAP.hdf5 differ
diff --git a/xbob/learn/misc/data/faithful.torch3.hdf5 b/xbob/learn/misc/data/faithful.torch3.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..a508318e6e8bcc528674ab7f9e3594f73ddb8367
Binary files /dev/null and b/xbob/learn/misc/data/faithful.torch3.hdf5 differ
diff --git a/xbob/learn/misc/data/faithful.torch3_f64.hdf5 b/xbob/learn/misc/data/faithful.torch3_f64.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..fe7f81b2bce427f6ab367cb6c7a2a6c1524e0528
Binary files /dev/null and b/xbob/learn/misc/data/faithful.torch3_f64.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm.init_means.hdf5 b/xbob/learn/misc/data/gmm.init_means.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..0b88738f3883e9b20c8eea20e2b278bf364498b4
Binary files /dev/null and b/xbob/learn/misc/data/gmm.init_means.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm.init_variances.hdf5 b/xbob/learn/misc/data/gmm.init_variances.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..d0687a2ffc6bab5ea8b111c60cd112730af9b758
Binary files /dev/null and b/xbob/learn/misc/data/gmm.init_variances.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm.init_weights.hdf5 b/xbob/learn/misc/data/gmm.init_weights.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..558faa66b67f5deb0550d2543372667ff45f1e70
Binary files /dev/null and b/xbob/learn/misc/data/gmm.init_weights.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm_MAP.hdf5 b/xbob/learn/misc/data/gmm_MAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..91c5e69141e3042ef5d211fc4098a8d59649d62d
Binary files /dev/null and b/xbob/learn/misc/data/gmm_MAP.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm_ML.hdf5 b/xbob/learn/misc/data/gmm_ML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..238cd7e14f5e4ab92e505221f200cdba368cb593
Binary files /dev/null and b/xbob/learn/misc/data/gmm_ML.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm_ML_32bit_debug.hdf5 b/xbob/learn/misc/data/gmm_ML_32bit_debug.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..275381b7e7573e060009a15220f092cfa323a1eb
Binary files /dev/null and b/xbob/learn/misc/data/gmm_ML_32bit_debug.hdf5 differ
diff --git a/xbob/learn/misc/data/gmm_ML_32bit_release.hdf5 b/xbob/learn/misc/data/gmm_ML_32bit_release.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..438e9932cecf179d1b834e2f5c19d39a7c906cf3
Binary files /dev/null and b/xbob/learn/misc/data/gmm_ML_32bit_release.hdf5 differ
diff --git a/xbob/learn/misc/data/means.hdf5 b/xbob/learn/misc/data/means.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..060afde0fb2777065d02c85baf8a34ec1d509fea
Binary files /dev/null and b/xbob/learn/misc/data/means.hdf5 differ
diff --git a/xbob/learn/misc/data/meansAfterKMeans.hdf5 b/xbob/learn/misc/data/meansAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9552dd832998ee19062e4c0b28b335691af25269
Binary files /dev/null and b/xbob/learn/misc/data/meansAfterKMeans.hdf5 differ
diff --git a/xbob/learn/misc/data/meansAfterMAP.hdf5 b/xbob/learn/misc/data/meansAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..ac4cb9383d10c223b26d16d686910c430cf71197
Binary files /dev/null and b/xbob/learn/misc/data/meansAfterMAP.hdf5 differ
diff --git a/xbob/learn/misc/data/meansAfterML.hdf5 b/xbob/learn/misc/data/meansAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..857bbe39c29cbb76f18aed3798ce484ed2bcb67d
Binary files /dev/null and b/xbob/learn/misc/data/meansAfterML.hdf5 differ
diff --git a/xbob/learn/misc/data/new_adapted_mean.hdf5 b/xbob/learn/misc/data/new_adapted_mean.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..fc4a8ee30af0d8531302133b2bd2595df07139b8
Binary files /dev/null and b/xbob/learn/misc/data/new_adapted_mean.hdf5 differ
diff --git a/xbob/learn/misc/data/samplesFrom2G_f64.hdf5 b/xbob/learn/misc/data/samplesFrom2G_f64.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9ef47fd649fde13d36a15a6ebde122c31047b31b
Binary files /dev/null and b/xbob/learn/misc/data/samplesFrom2G_f64.hdf5 differ
diff --git a/xbob/learn/misc/data/stats.hdf5 b/xbob/learn/misc/data/stats.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..c4a13700ec20079fdaacbd3841e8289910e9dd82
Binary files /dev/null and b/xbob/learn/misc/data/stats.hdf5 differ
diff --git a/xbob/learn/misc/data/variances.hdf5 b/xbob/learn/misc/data/variances.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..c9d6d17bcf73be3bb7800d14604a0201b16f4ada
Binary files /dev/null and b/xbob/learn/misc/data/variances.hdf5 differ
diff --git a/xbob/learn/misc/data/variancesAfterKMeans.hdf5 b/xbob/learn/misc/data/variancesAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..2aee23c0ef021e383d34ef2ca47175ecf165a6e9
Binary files /dev/null and b/xbob/learn/misc/data/variancesAfterKMeans.hdf5 differ
diff --git a/xbob/learn/misc/data/variancesAfterMAP.hdf5 b/xbob/learn/misc/data/variancesAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..47bd4d5f823882eb7de61f3b67946c81acc0e82f
Binary files /dev/null and b/xbob/learn/misc/data/variancesAfterMAP.hdf5 differ
diff --git a/xbob/learn/misc/data/variancesAfterML.hdf5 b/xbob/learn/misc/data/variancesAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..472229290b53eb34728dac334a7addb635d314a0
Binary files /dev/null and b/xbob/learn/misc/data/variancesAfterML.hdf5 differ
diff --git a/xbob/learn/misc/data/weights.hdf5 b/xbob/learn/misc/data/weights.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..41b22801f28b4afc8b3a81daf5e594e85100f29f
Binary files /dev/null and b/xbob/learn/misc/data/weights.hdf5 differ
diff --git a/xbob/learn/misc/data/weightsAfterKMeans.hdf5 b/xbob/learn/misc/data/weightsAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..b241207eac61c8f47dcb0fafed293748108ba6d8
Binary files /dev/null and b/xbob/learn/misc/data/weightsAfterKMeans.hdf5 differ
diff --git a/xbob/learn/misc/data/weightsAfterMAP.hdf5 b/xbob/learn/misc/data/weightsAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..b6e1f0497f91dfc26e137fc021f02431023db1a7
Binary files /dev/null and b/xbob/learn/misc/data/weightsAfterMAP.hdf5 differ
diff --git a/xbob/learn/misc/data/weightsAfterML.hdf5 b/xbob/learn/misc/data/weightsAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..0b3fc2551fc9d1eff310c7cd7c0a5e33d926f0e7
Binary files /dev/null and b/xbob/learn/misc/data/weightsAfterML.hdf5 differ
diff --git a/xbob/learn/misc/data/ztnorm_eval_eval.hdf5 b/xbob/learn/misc/data/ztnorm_eval_eval.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..bc5771861bd444b1ba3d89c6c949e91e912136cf
Binary files /dev/null and b/xbob/learn/misc/data/ztnorm_eval_eval.hdf5 differ
diff --git a/xbob/learn/misc/data/ztnorm_eval_tnorm.hdf5 b/xbob/learn/misc/data/ztnorm_eval_tnorm.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..d98b4d656bbe0a8c75a675ccc09834b30bc4bd00
Binary files /dev/null and b/xbob/learn/misc/data/ztnorm_eval_tnorm.hdf5 differ
diff --git a/xbob/learn/misc/data/ztnorm_result.hdf5 b/xbob/learn/misc/data/ztnorm_result.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..877c72c77fedb11fe7d39791823f42e58cce1e1c
Binary files /dev/null and b/xbob/learn/misc/data/ztnorm_result.hdf5 differ
diff --git a/xbob/learn/misc/data/ztnorm_znorm_eval.hdf5 b/xbob/learn/misc/data/ztnorm_znorm_eval.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..5d670ae0b3bd410c40e70b4697f0531fdc7bfb87
Binary files /dev/null and b/xbob/learn/misc/data/ztnorm_znorm_eval.hdf5 differ
diff --git a/xbob/learn/misc/data/ztnorm_znorm_tnorm.hdf5 b/xbob/learn/misc/data/ztnorm_znorm_tnorm.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e2f709ed9a33a014f64e6c69f15c3e549dc7e3ca
Binary files /dev/null and b/xbob/learn/misc/data/ztnorm_znorm_tnorm.hdf5 differ
diff --git a/xbob/learn/misc/hdf5.cpp b/xbob/learn/misc/hdf5.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10d06446e5316f39ad608602e71820d1a3a81f08
--- /dev/null
+++ b/xbob/learn/misc/hdf5.cpp
@@ -0,0 +1,709 @@
+/**
+ * @author Andre Anjos <andre.anjos@idiap.ch>
+ * @date Wed Jun 22 17:50:08 2011 +0200
+ *
+ * @brief Binds our C++ HDF5 interface to python
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "ndarray.h"
+#include <boost/format.hpp>
+#include <bob/io/HDF5File.h>
+
+using namespace boost::python;
+
+/**
+ * Returns a list of all paths inside an HDF5File
+ */
+static list hdf5file_paths(const bob::io::HDF5File& f, const bool relative) {
+  list retval;
+  std::vector<std::string> values;
+  f.paths(values, relative);
+  for (size_t i=0; i<values.size(); ++i) retval.append(str(values[i]));
+  return retval;
+}
+
+/**
+ * Returns a list of all sub-directories inside an HDF5File
+ */
+static list hdf5file_sub_groups(const bob::io::HDF5File& f, const bool relative, bool recursive) {
+  list retval;
+  std::vector<std::string> values;
+  f.sub_groups(values, relative, recursive);
+  for (size_t i=0; i<values.size(); ++i) retval.append(str(values[i]));
+  return retval;
+}
+
+/**
+ * Returns tuples for the description of all possible ways to read a certain
+ * path.
+ */
+static tuple hdf5file_describe(const bob::io::HDF5File& f, const std::string& p) {
+  const std::vector<bob::io::HDF5Descriptor>& dv = f.describe(p);
+  list retval;
+  for (size_t k=0; k<dv.size(); ++k) retval.append(dv[k]);
+  return tuple(retval);
+}
+
+/**
+ * Functionality to read from HDF5File objects
+ */
+static object hdf5file_xread(bob::io::HDF5File& f, const std::string& p,
+    int descriptor, int pos) {
+
+  const std::vector<bob::io::HDF5Descriptor>& D = f.describe(p);
+
+  //last descriptor always contains the full readout.
+  const bob::io::HDF5Type& type = D[descriptor].type;
+  const bob::io::HDF5Shape& shape = type.shape();
+
+  if (shape.n() == 1 && shape[0] == 1) { //read as scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return object(f.read<std::string>(p, pos));
+      case bob::io::b:
+        return object(f.read<bool>(p, pos));
+      case bob::io::i8:
+        return object(f.read<int8_t>(p, pos));
+      case bob::io::i16:
+        return object(f.read<int16_t>(p, pos));
+      case bob::io::i32:
+        return object(f.read<int32_t>(p, pos));
+      case bob::io::i64:
+        return object(f.read<int64_t>(p, pos));
+      case bob::io::u8:
+        return object(f.read<uint8_t>(p, pos));
+      case bob::io::u16:
+        return object(f.read<uint16_t>(p, pos));
+      case bob::io::u32:
+        return object(f.read<uint32_t>(p, pos));
+      case bob::io::u64:
+        return object(f.read<uint64_t>(p, pos));
+      case bob::io::f32:
+        return object(f.read<float>(p, pos));
+      case bob::io::f64:
+        return object(f.read<double>(p, pos));
+      case bob::io::f128:
+        return object(f.read<long double>(p, pos));
+      case bob::io::c64:
+        return object(f.read<std::complex<float> >(p, pos));
+      case bob::io::c128:
+        return object(f.read<std::complex<double> >(p, pos));
+      case bob::io::c256:
+        return object(f.read<std::complex<long double> >(p, pos));
+      default:
+        PYTHON_ERROR(TypeError, "unsupported HDF5 type: %s", type.str().c_str());
+    }
+  }
+
+  //read as a numpy array
+  bob::core::array::typeinfo atype;
+  type.copy_to(atype);
+  bob::python::py_array retval(atype);
+  f.read_buffer(p, pos, atype, retval.ptr());
+  return retval.pyobject();
+}
+
+static object hdf5file_lread(bob::io::HDF5File& f, const std::string& p,
+    int64_t pos=-1) {
+  if (pos >= 0) return hdf5file_xread(f, p, 0, pos);
+
+  //otherwise returns as a list
+  const std::vector<bob::io::HDF5Descriptor>& D = f.describe(p);
+  list retval;
+  for (uint64_t k=0; k<D[0].size; ++k)
+    retval.append(hdf5file_xread(f, p, 0, k));
+  return retval;
+}
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_lread_overloads, hdf5file_lread, 2, 3)
+
+static inline object hdf5file_read(bob::io::HDF5File& f, const std::string& p) {
+  return hdf5file_xread(f, p, 1, 0);
+}
+
+void set_string_type(bob::io::HDF5Type& t, object o) {
+  t = bob::io::HDF5Type(extract<std::string>(o));
+}
+
+template <typename T> void set_type(bob::io::HDF5Type& t) {
+  T v;
+  t = bob::io::HDF5Type(v);
+}
+
+/**
+ * A function to check for python scalars that works with numpy-1.6.x
+ */
+static bool is_python_scalar(PyObject* obj) {
+  return (
+    PyBool_Check(obj) ||
+#if PY_VERSION_HEX < 0x03000000
+    PyString_Check(obj) ||
+#else
+    PyBytes_Check(obj) ||
+#endif
+    PyUnicode_Check(obj) ||
+#if PY_VERSION_HEX < 0x03000000
+    PyInt_Check(obj) ||
+#endif
+    PyLong_Check(obj) ||
+    PyFloat_Check(obj) ||
+    PyComplex_Check(obj)
+    );
+}
+
+/**
+ * Sets, at 't', the type of the object 'o' according to our supported types.
+ * Raises in case of problems. Furthermore, returns 'true' if the object is a
+ * simple scalar.
+ */
+static bool get_object_type(object o, bob::io::HDF5Type& t) {
+  PyObject* op = o.ptr();
+
+  if (PyArray_IsScalar(op, Generic) || is_python_scalar(op)) {
+    if (PyArray_IsScalar(op, String)) set_string_type(t, o);
+    else if (PyBool_Check(op)) set_type<bool>(t);
+#if PY_VERSION_HEX < 0x03000000
+    else if (PyString_Check(op)) set_string_type(t, o);
+#else
+    else if (PyBytes_Check(op)) set_string_type(t, o);
+#endif
+    else if (PyUnicode_Check(op)) set_string_type(t, o);
+#if PY_VERSION_HEX < 0x03000000
+    else if (PyInt_Check(op)) set_type<int32_t>(t);
+#endif
+    else if (PyLong_Check(op)) set_type<int64_t>(t);
+    else if (PyFloat_Check(op)) set_type<double>(t);
+    else if (PyComplex_Check(op)) set_type<std::complex<double> >(t);
+    else if (PyArray_IsScalar(op, Bool)) set_type<bool>(t);
+    else if (PyArray_IsScalar(op, Int8)) set_type<int8_t>(t);
+    else if (PyArray_IsScalar(op, UInt8)) set_type<uint8_t>(t);
+    else if (PyArray_IsScalar(op, Int16)) set_type<int16_t>(t);
+    else if (PyArray_IsScalar(op, UInt16)) set_type<uint16_t>(t);
+    else if (PyArray_IsScalar(op, Int32)) set_type<int32_t>(t);
+    else if (PyArray_IsScalar(op, UInt32)) set_type<uint32_t>(t);
+    else if (PyArray_IsScalar(op, Int64)) set_type<int64_t>(t);
+    else if (PyArray_IsScalar(op, UInt64)) set_type<uint64_t>(t);
+    else if (PyArray_IsScalar(op, Float)) set_type<float>(t);
+    else if (PyArray_IsScalar(op, Double)) set_type<double>(t);
+    else if (PyArray_IsScalar(op, LongDouble)) set_type<long double>(t);
+    else if (PyArray_IsScalar(op, CFloat)) set_type<std::complex<float> >(t);
+    else if (PyArray_IsScalar(op, CDouble)) set_type<std::complex<double> >(t);
+    else if (PyArray_IsScalar(op, CLongDouble)) set_type<std::complex<long double> >(t);
+    else {
+      str so(o);
+      std::string s = extract<std::string>(so);
+      PYTHON_ERROR(TypeError, "No support for HDF5 type conversion for scalar object '%s'", s.c_str());
+    }
+    return true;
+  }
+
+  else if (PyArray_Check(op)) {
+    bob::core::array::typeinfo ti;
+    bob::python::typeinfo_ndarray_(o, ti);
+    t = bob::io::HDF5Type(ti);
+    return false;
+  }
+
+  else {
+    //checks for convertibility to numpy.ndarray (not necessarily writeable,
+    //but has to be "behaved" = C-style contiguous).
+    bob::core::array::typeinfo ti;
+    if (bob::python::convertible(o, ti, false, true) != bob::python::IMPOSSIBLE) {
+      t = bob::io::HDF5Type(ti);
+      return false;
+    }
+  }
+
+  //if you get to this point, then this object is not supported
+  str so(o);
+  std::string printout = extract<std::string>(so);
+  PYTHON_ERROR(TypeError, "No support for HDF5 type conversion for object of unknown type %s", printout.c_str());
+}
+
+template <typename T>
+static void inner_replace_scalar(bob::io::HDF5File& f,
+  const std::string& path, object obj, size_t pos) {
+  T value = extract<T>(obj);
+  f.replace(path, pos, value);
+}
+
+static void inner_replace(bob::io::HDF5File& f, const std::string& path,
+    const bob::io::HDF5Type& type, object obj, size_t pos, bool scalar) {
+
+  //no error detection: this should be done before reaching this method
+
+  if (scalar) { //write as a scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return inner_replace_scalar<std::string>(f, path, obj, pos);
+      case bob::io::b:
+        return inner_replace_scalar<bool>(f, path, obj, pos);
+      case bob::io::i8:
+        return inner_replace_scalar<int8_t>(f, path, obj, pos);
+      case bob::io::i16:
+        return inner_replace_scalar<int16_t>(f, path, obj, pos);
+      case bob::io::i32:
+        return inner_replace_scalar<int32_t>(f, path, obj, pos);
+      case bob::io::i64:
+        return inner_replace_scalar<int64_t>(f, path, obj, pos);
+      case bob::io::u8:
+        return inner_replace_scalar<uint8_t>(f, path, obj, pos);
+      case bob::io::u16:
+        return inner_replace_scalar<uint16_t>(f, path, obj, pos);
+      case bob::io::u32:
+        return inner_replace_scalar<uint32_t>(f, path, obj, pos);
+      case bob::io::u64:
+        return inner_replace_scalar<uint64_t>(f, path, obj, pos);
+      case bob::io::f32:
+        return inner_replace_scalar<float>(f, path, obj, pos);
+      case bob::io::f64:
+        return inner_replace_scalar<double>(f, path, obj, pos);
+      case bob::io::f128:
+        return inner_replace_scalar<long double>(f, path, obj, pos);
+      case bob::io::c64:
+        return inner_replace_scalar<std::complex<float> >(f, path, obj, pos);
+      case bob::io::c128:
+        return inner_replace_scalar<std::complex<double> >(f, path, obj, pos);
+      case bob::io::c256:
+        return inner_replace_scalar<std::complex<long double> >(f, path, obj, pos);
+      default:
+        break;
+    }
+  }
+
+  else { //write as a numpy array
+    bob::python::py_array tmp(obj, object());
+    f.write_buffer(path, pos, tmp.type(), tmp.ptr());
+  }
+}
+
+static void hdf5file_replace(bob::io::HDF5File& f, const std::string& path,
+    size_t pos, object obj) {
+  bob::io::HDF5Type type;
+  bool scalar = get_object_type(obj, type);
+  inner_replace(f, path, type, obj, pos, scalar);
+}
+
+template <typename T>
+static void inner_append_scalar(bob::io::HDF5File& f, const std::string& path,
+    object obj) {
+  T value = extract<T>(obj);
+  f.append(path, value);
+}
+
+static void inner_append(bob::io::HDF5File& f, const std::string& path,
+    const bob::io::HDF5Type& type, object obj, size_t compression, bool scalar) {
+
+  //no error detection: this should be done before reaching this method
+
+  if (scalar) { //write as a scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return inner_append_scalar<std::string>(f, path, obj);
+      case bob::io::b:
+        return inner_append_scalar<bool>(f, path, obj);
+      case bob::io::i8:
+        return inner_append_scalar<int8_t>(f, path, obj);
+      case bob::io::i16:
+        return inner_append_scalar<int16_t>(f, path, obj);
+      case bob::io::i32:
+        return inner_append_scalar<int32_t>(f, path, obj);
+      case bob::io::i64:
+        return inner_append_scalar<int64_t>(f, path, obj);
+      case bob::io::u8:
+        return inner_append_scalar<uint8_t>(f, path, obj);
+      case bob::io::u16:
+        return inner_append_scalar<uint16_t>(f, path, obj);
+      case bob::io::u32:
+        return inner_append_scalar<uint32_t>(f, path, obj);
+      case bob::io::u64:
+        return inner_append_scalar<uint64_t>(f, path, obj);
+      case bob::io::f32:
+        return inner_append_scalar<float>(f, path, obj);
+      case bob::io::f64:
+        return inner_append_scalar<double>(f, path, obj);
+      case bob::io::f128:
+        return inner_append_scalar<long double>(f, path, obj);
+      case bob::io::c64:
+        return inner_append_scalar<std::complex<float> >(f, path, obj);
+      case bob::io::c128:
+        return inner_append_scalar<std::complex<double> >(f, path, obj);
+      case bob::io::c256:
+        return inner_append_scalar<std::complex<long double> >(f, path, obj);
+      default:
+        break;
+    }
+  }
+
+  else { //write as a numpy array
+    bob::python::py_array tmp(obj, object());
+    if (!f.contains(path)) f.create(path, tmp.type(), true, compression);
+    f.extend_buffer(path, tmp.type(), tmp.ptr());
+  }
+}
+
+static void hdf5file_append_iterable(bob::io::HDF5File& f, const std::string& path,
+  object iterable, size_t compression) {
+  for (int k=0; k<len(iterable); ++k) {
+    object obj = iterable[k];
+    bob::io::HDF5Type type;
+    bool scalar = get_object_type(obj, type);
+    inner_append(f, path, type, obj, compression, scalar);
+  }
+}
+
+static void hdf5file_append(bob::io::HDF5File& f, const std::string& path,
+    object obj, size_t compression=0) {
+  PyObject* op = obj.ptr();
+  if (PyList_Check(op) || PyTuple_Check(op)) {
+    hdf5file_append_iterable(f, path, obj, compression);
+  }
+  else {
+    bob::io::HDF5Type type;
+    bool scalar = get_object_type(obj, type);
+    inner_append(f, path, type, obj, compression, scalar);
+  }
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_append_overloads, hdf5file_append, 3, 4)
+
+template <typename T>
+static void inner_set_scalar(bob::io::HDF5File& f, const std::string& path,
+    object obj) {
+  T value = extract<T>(obj);
+  f.set(path, value);
+}
+
+static void inner_set(bob::io::HDF5File& f, const std::string& path,
+    const bob::io::HDF5Type& type, object obj, size_t compression, bool scalar) {
+
+  //no error detection: this should be done before reaching this method
+
+  if (scalar) { //write as a scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return inner_set_scalar<std::string>(f, path, obj);
+      case bob::io::b:
+        return inner_set_scalar<bool>(f, path, obj);
+      case bob::io::i8:
+        return inner_set_scalar<int8_t>(f, path, obj);
+      case bob::io::i16:
+        return inner_set_scalar<int16_t>(f, path, obj);
+      case bob::io::i32:
+        return inner_set_scalar<int32_t>(f, path, obj);
+      case bob::io::i64:
+        return inner_set_scalar<int64_t>(f, path, obj);
+      case bob::io::u8:
+        return inner_set_scalar<uint8_t>(f, path, obj);
+      case bob::io::u16:
+        return inner_set_scalar<uint16_t>(f, path, obj);
+      case bob::io::u32:
+        return inner_set_scalar<uint32_t>(f, path, obj);
+      case bob::io::u64:
+        return inner_set_scalar<uint64_t>(f, path, obj);
+      case bob::io::f32:
+        return inner_set_scalar<float>(f, path, obj);
+      case bob::io::f64:
+        return inner_set_scalar<double>(f, path, obj);
+      case bob::io::f128:
+        return inner_set_scalar<long double>(f, path, obj);
+      case bob::io::c64:
+        return inner_set_scalar<std::complex<float> >(f, path, obj);
+      case bob::io::c128:
+        return inner_set_scalar<std::complex<double> >(f, path, obj);
+      case bob::io::c256:
+        return inner_set_scalar<std::complex<long double> >(f, path, obj);
+      default:
+        break;
+    }
+  }
+
+  else { //write as a numpy array
+    bob::python::py_array tmp(obj, object());
+    if (!f.contains(path)) f.create(path, tmp.type(), false, compression);
+    f.write_buffer(path, 0, tmp.type(), tmp.ptr());
+  }
+}
+
+static void hdf5file_set(bob::io::HDF5File& f, const std::string& path,
+    object obj, size_t compression=0) {
+  bob::io::HDF5Type type;
+  bool scalar = get_object_type(obj, type);
+  inner_set(f, path, type, obj, compression, scalar);
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_set_overloads, hdf5file_set, 3, 4)
+
+template <typename T>
+static object inner_get_scalar_attr(const bob::io::HDF5File& f,
+  const std::string& path, const std::string& name, const bob::io::HDF5Type& type) {
+  T value;
+  f.read_attribute(path, name, type, static_cast<void*>(&value));
+  return object(value);
+}
+
+template <>
+object inner_get_scalar_attr<std::string>(const bob::io::HDF5File& f,
+  const std::string& path, const std::string& name, const bob::io::HDF5Type&) {
+  std::string retval;
+  f.getAttribute(path, name, retval);
+  return object(retval);
+}
+
+static object inner_get_attr(const bob::io::HDF5File& f, const std::string& path,
+    const std::string& name, const bob::io::HDF5Type& type) {
+
+  //no error detection: this should be done before reaching this method
+
+  const bob::io::HDF5Shape& shape = type.shape();
+
+  if (type.type() == bob::io::s || (shape.n() == 1 && shape[0] == 1)) {
+    //read as scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return inner_get_scalar_attr<std::string>(f, path, name, type);
+      case bob::io::b:
+        return inner_get_scalar_attr<bool>(f, path, name, type);
+      case bob::io::i8:
+        return inner_get_scalar_attr<int8_t>(f, path, name, type);
+      case bob::io::i16:
+        return inner_get_scalar_attr<int16_t>(f, path, name, type);
+      case bob::io::i32:
+        return inner_get_scalar_attr<int32_t>(f, path, name, type);
+      case bob::io::i64:
+        return inner_get_scalar_attr<int64_t>(f, path, name, type);
+      case bob::io::u8:
+        return inner_get_scalar_attr<uint8_t>(f, path, name, type);
+      case bob::io::u16:
+        return inner_get_scalar_attr<uint16_t>(f, path, name, type);
+      case bob::io::u32:
+        return inner_get_scalar_attr<uint32_t>(f, path, name, type);
+      case bob::io::u64:
+        return inner_get_scalar_attr<uint64_t>(f, path, name, type);
+      case bob::io::f32:
+        return inner_get_scalar_attr<float>(f, path, name, type);
+      case bob::io::f64:
+        return inner_get_scalar_attr<double>(f, path, name, type);
+      case bob::io::f128:
+        return inner_get_scalar_attr<long double>(f, path, name, type);
+      case bob::io::c64:
+        return inner_get_scalar_attr<std::complex<float> >(f, path, name, type);
+      case bob::io::c128:
+        return inner_get_scalar_attr<std::complex<double> >(f, path, name, type);
+      case bob::io::c256:
+        return inner_get_scalar_attr<std::complex<long double> >(f, path, name, type);
+      default:
+        break;
+    }
+  }
+
+  //read as a numpy array
+  bob::core::array::typeinfo atype;
+  type.copy_to(atype);
+  bob::python::py_array retval(atype);
+  f.read_attribute(path, name, type, retval.ptr());
+  return retval.pyobject();
+}
+
+static dict hdf5file_get_attributes(const bob::io::HDF5File& f, const std::string& path=".") {
+  std::map<std::string, bob::io::HDF5Type> attributes;
+  f.listAttributes(path, attributes);
+  dict retval;
+  for (std::map<std::string, bob::io::HDF5Type>::iterator k=attributes.begin(); k!=attributes.end(); ++k) {
+    if (k->second.type() == bob::io::unsupported) {
+      boost::format m("unsupported HDF5 data type detected for attribute '%s' - setting None");
+      m % k->first;
+      PYTHON_WARNING(UserWarning, m.str().c_str());
+      retval[k->first] = object(); //None
+    }
+    else {
+      retval[k->first] = inner_get_attr(f, path, k->first, k->second);
+    }
+  }
+  return retval;
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_get_attributes_overloads, hdf5file_get_attributes, 1, 2)
+
+static object hdf5file_get_attribute(const bob::io::HDF5File& f, const std::string& name, const std::string& path=".") {
+  bob::io::HDF5Type type;
+  f.getAttributeType(path, name, type);
+  if (type.type() == bob::io::unsupported) {
+    boost::format m("unsupported HDF5 data type detected for attribute '%s' - returning None");
+    m % name;
+    PYTHON_WARNING(UserWarning, m.str().c_str());
+    return object();
+  }
+  else {
+    return inner_get_attr(f, path, name, type);
+  }
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_get_attribute_overloads, hdf5file_get_attribute, 2, 3)
+
+template <typename T>
+static void inner_set_scalar_attr(bob::io::HDF5File& f,
+  const std::string& path, const std::string& name, const bob::io::HDF5Type& type,
+  object obj) {
+  T value = extract<T>(obj);
+  f.write_attribute(path, name, type, static_cast<void*>(&value));
+}
+
+template <>
+void inner_set_scalar_attr<std::string>(bob::io::HDF5File& f,
+  const std::string& path, const std::string& name, const bob::io::HDF5Type& type,
+  object obj) {
+  std::string value = extract<std::string>(obj);
+  f.write_attribute(path, name, type, static_cast<const void*>(value.c_str()));
+}
+
+static void inner_set_attr(bob::io::HDF5File& f, const std::string& path,
+    const std::string& name, const bob::io::HDF5Type& type, object obj,
+    bool scalar) {
+
+  //no error detection: this should be done before reaching this method
+
+  if (scalar) { //write as a scalar
+    switch(type.type()) {
+      case bob::io::s:
+        return inner_set_scalar_attr<std::string>(f, path, name, type, obj);
+      case bob::io::b:
+        return inner_set_scalar_attr<bool>(f, path, name, type, obj);
+      case bob::io::i8:
+        return inner_set_scalar_attr<int8_t>(f, path, name, type, obj);
+      case bob::io::i16:
+        return inner_set_scalar_attr<int16_t>(f, path, name, type, obj);
+      case bob::io::i32:
+        return inner_set_scalar_attr<int32_t>(f, path, name, type, obj);
+      case bob::io::i64:
+        return inner_set_scalar_attr<int64_t>(f, path, name, type, obj);
+      case bob::io::u8:
+        return inner_set_scalar_attr<uint8_t>(f, path, name, type, obj);
+      case bob::io::u16:
+        return inner_set_scalar_attr<uint16_t>(f, path, name, type, obj);
+      case bob::io::u32:
+        return inner_set_scalar_attr<uint32_t>(f, path, name, type, obj);
+      case bob::io::u64:
+        return inner_set_scalar_attr<uint64_t>(f, path, name, type, obj);
+      case bob::io::f32:
+        return inner_set_scalar_attr<float>(f, path, name, type, obj);
+      case bob::io::f64:
+        return inner_set_scalar_attr<double>(f, path, name, type, obj);
+      case bob::io::f128:
+        return inner_set_scalar_attr<long double>(f, path, name, type, obj);
+      case bob::io::c64:
+        return inner_set_scalar_attr<std::complex<float> >(f, path, name, type, obj);
+      case bob::io::c128:
+        return inner_set_scalar_attr<std::complex<double> >(f, path, name, type, obj);
+      case bob::io::c256:
+        return inner_set_scalar_attr<std::complex<long double> >(f, path, name, type, obj);
+      default:
+        break;
+    }
+  }
+
+  else { //write as a numpy array
+    bob::python::py_array retval(obj, object());
+    f.write_attribute(path, name, type, retval.ptr());
+  }
+}
+
+static void hdf5file_set_attributes(bob::io::HDF5File& f, dict attributes, const std::string& path=".") {
+  list keys = attributes.keys(); //iterkeys() is Python 2-only
+  for (int k=0; k<len(keys); ++k) {
+    std::string key = extract<std::string>(keys[k]);
+    bob::io::HDF5Type type;
+    object obj = attributes[keys[k]];
+    bool scalar = get_object_type(obj, type);
+    inner_set_attr(f, path, key, type, obj, scalar);
+  }
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_set_attributes_overloads, hdf5file_set_attributes, 2, 3)
+
+static void hdf5file_set_attribute(bob::io::HDF5File& f, const std::string& key, object obj, const std::string& path=".") {
+  bob::io::HDF5Type type;
+  bool scalar = get_object_type(obj, type);
+  inner_set_attr(f, path, key, type, obj, scalar);
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_set_attribute_overloads, hdf5file_set_attribute, 3, 4)
+
+static bool hdf5file_has_attribute(const bob::io::HDF5File& f, const std::string& name, const std::string& path=".") {
+  return f.hasAttribute(path, name);
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_has_attribute_overloads, hdf5file_has_attribute, 2, 3)
+
+static void hdf5file_del_attribute(bob::io::HDF5File& f, const std::string& name, const std::string& path=".") {
+  f.deleteAttribute(path, name);
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_del_attribute_overloads, hdf5file_del_attribute, 2, 3)
+
+static void hdf5file_del_attributes(bob::io::HDF5File& f, const std::string& path=".") {
+  std::map<std::string, bob::io::HDF5Type> attributes;
+  f.listAttributes(path, attributes);
+  for (std::map<std::string, bob::io::HDF5Type>::iterator k=attributes.begin(); k!=attributes.end(); ++k) {
+    f.deleteAttribute(path, k->first);
+  }
+}
+
+BOOST_PYTHON_FUNCTION_OVERLOADS(hdf5file_del_attributes_overloads, hdf5file_del_attributes, 1, 2)
+
+void bind_io_hdf5() {
+  class_<bob::io::HDF5File, boost::shared_ptr<bob::io::HDF5File> >("HDF5File", "An HDF5File allows users to read data from and write data to files containing standard Bob binary-coded data in HDF5 format. For an introduction to HDF5, please visit http://www.hdfgroup.org/HDF5.", no_init)
+    .def(init<const bob::io::HDF5File&> ((arg("self"), arg("other")), "Generates a shallow copy of the already opened file."))
+    .def(init<const std::string&, const char> ((arg("self"), arg("filename"), arg("openmode_string")='r'), "Opens a new file in one of these supported modes: 'r' (read-only), 'a' (read/write/append), 'w' (read/write/truncate) or 'x' (read/write/exclusive)"))
+    .def("cd", &bob::io::HDF5File::cd, (arg("self"), arg("path")), "Changes the current prefix path. When this object is started, the prefix path is empty, which means all following paths to data objects should be given using the full path. If you set this to a different value, it will be used as a prefix to any subsequent operation until you reset it. If path starts with '/', it is treated as an absolute path. '..' and '.' are supported. This object should be a std::string. If the value is relative, it is added to the current path. If it is absolute, it causes the prefix to be reset. Note all operations taking a relative path, following a cd(), will be considered relative to the value defined by the 'cwd' property of this object.")
+    .def("has_group", &bob::io::HDF5File::hasGroup, (arg("self"), arg("path")), "Checks if a path exists inside a file - does not work for datasets, only for directories. If the given path is relative, it is take w.r.t. to the current working directory")
+    .def("create_group", &bob::io::HDF5File::createGroup, (arg("self"), arg("path")), "Creates a new directory inside the file. A relative path is taken w.r.t. to the current directory. If the directory already exists (check it with hasGroup()), an exception will be raised.")
+    .add_property("cwd", &bob::io::HDF5File::cwd)
+    .def("__contains__", &bob::io::HDF5File::contains, (arg("self"), arg("key")), "Returns True if the file contains an HDF5 dataset with a given path")
+    .def("has_key", &bob::io::HDF5File::contains, (arg("self"), arg("key")), "Returns True if the file contains an HDF5 dataset with a given path")
+    .def("describe", &hdf5file_describe, (arg("self"), arg("key")), "If a given path to an HDF5 dataset exists inside the file, return a type description of objects recorded in such a dataset, otherwise, raises an exception. The returned value type is a tuple of tuples (HDF5Type, number-of-objects, expandible) describing the capabilities if the file is read using theses formats.")
+    .def("unlink", &bob::io::HDF5File::unlink, (arg("self"), arg("key")), "If a given path to an HDF5 dataset exists inside the file, unlinks it. Please note this will note remove the data from the file, just make it inaccessible. If you wish to cleanup, save the reacheable objects from this file to another HDF5File object using copy(), for example.")
+    .def("rename", &bob::io::HDF5File::rename, (arg("self"), arg("from"), arg("to")), "If a given path to an HDF5 dataset exists in the file, rename it")
+    .def("keys", &hdf5file_paths, (arg("self"), arg("relative") = false), "Synonym for 'paths'")
+    .def("paths", &hdf5file_paths, (arg("self"), arg("relative") = false), "Returns all paths to datasets available inside this file, stored under the current working directory. If relative is set to True, the returned paths are relative to the current working directory, otherwise they are asbolute.")
+    .def("sub_groups", &hdf5file_sub_groups, (arg("self"), arg("relative") = false, arg("recursive") = true), "Returns all the subgroups (sub-directories) in the current file.")
+    .def("copy", &bob::io::HDF5File::copy, (arg("self"), arg("file")), "Copies all accessible content to another HDF5 file")
+    .def("read", &hdf5file_read, (arg("self"), arg("key")), "Reads the whole dataset in a single shot. Returns a single object with all contents.")
+    .def("lread", (object(*)(bob::io::HDF5File&, const std::string&, int64_t))0, hdf5file_lread_overloads((arg("self"), arg("key"), arg("pos")=-1), "Reads a given position from the dataset. Returns a single object if 'pos' >= 0, otherwise a list by reading all objects in sequence."))
+    .def("replace", &hdf5file_replace, (arg("self"), arg("path"), arg("pos"), arg("data")), "Modifies the value of a scalar/array inside a dataset in the file.\n\n" \
+  "Keyword Parameters:\n\n" \
+  "path\n" \
+  "  This is the path to the HDF5 dataset to replace data at\n\n" \
+  "pos\n" \
+  "  This is the position we should replace\n\n" \
+  "data\n" \
+  "  This is the data that will be set on the position indicated")
+    .def("append", &hdf5file_append, hdf5file_append_overloads((arg("self"), arg("path"), arg("data"), arg("compression")=0), "Appends a scalar or an array to a dataset. If the dataset does not yet exist, one is created with the type characteristics.\n\n" \
+  "Keyword Parameters:\n\n" \
+  "path\n" \
+  "  This is the path to the HDF5 dataset to replace data at\n\n" \
+  "data\n" \
+  "  This is the data that will be set on the position indicated. It may be a simple python or numpy scalar (such as :py:class:`numpy.uint8`) or a :py:class:`numpy.ndarray` of any of the supported data types. You can also, optionally, set this to a list or tuple of scalars or arrays. This will cause this method to iterate over the elements and add each individually.\n\n" \
+  "compression\n" \
+  "  This parameter is effective when appending arrays. Set this to a number betwen 0 (default) and 9 (maximum) to compress the contents of this dataset. This setting is only effective if the dataset does not yet exist, otherwise, the previous setting is respected."))
+    .def("set", &hdf5file_set, hdf5file_set_overloads((arg("self"), arg("path"), arg("data"), arg("compression")=0), "Sets the scalar or array at position 0 to the given value. This method is equivalent to checking if the scalar or array at position 0 exists and then replacing it. If the path does not exist, we append the new scalar or array.\n\n" \
+  "Keyword Parameters:\n\n" \
+  "path\n" \
+  "  This is the path to the HDF5 dataset to replace data at\n\n" \
+  "data\n" \
+  "  This is the data that will be set on the position indicated. It may be a simple python or numpy scalar (such as :py:class:`numpy.uint8`) or a :py:class:`numpy.ndarray` of any of the supported data types. You can also, optionally, set this to an iterable of scalars or arrays. This will cause this method to collapse the whole iterable into a :py:class:`numpy.ndarray` and set that into the file.\n\n" \
+  "compression\n" \
+  "  This parameter is effective when setting arrays. Set this to a number betwen 0 (default) and 9 (maximum) to compress the contents of this dataset. This setting is only effective if the dataset does not yet exist, otherwise, the previous setting is respected."))
+    // attribute manipulation
+    .def("get_attributes", &hdf5file_get_attributes, hdf5file_get_attributes_overloads((arg("self"), arg("path")="."), "Returns a dictionary containing all attributes related to a particular (existing) path in this file. The path may point to a subdirectory or to a particular dataset. If the path does not exist, a RuntimeError is raised."))
+    .def("get_attribute", &hdf5file_get_attribute, hdf5file_get_attribute_overloads((arg("self"), arg("name"), arg("path")="."), "Returns an object representing an attribute attached to a particular (existing) path in this file. The path may point to a subdirectory or to a particular dataset. If the path does not exist, a RuntimeError is raised."))
+    .def("set_attributes", &hdf5file_set_attributes, hdf5file_set_attributes_overloads((arg("self"), arg("attrs"), arg("path")="."), "Sets attributes in a given (existing) path using a dictionary containing the names (keys) and values of those attributes. The path may point to a subdirectory or to a particular dataset. Only simple scalars (booleans, integers, floats and complex numbers) and arrays of those are supported at the time being. You can use :py:mod:`numpy` scalars to set values with arbitrary precision (e.g. :py:class:`numpy.uint8`). If the path does not exist, a RuntimeError is raised."))
+    .def("set_attribute", &hdf5file_set_attribute, hdf5file_set_attribute_overloads((arg("self"), arg("name"), arg("value"), arg("path")="."), "Sets the attribute in a given (existing) path using the value provided. The path may point to a subdirectory or to a particular dataset. Only simple scalars (booleans, integers, floats and complex numbers) and arrays of those are supported at the time being. You can use :py:mod:`numpy` scalars to set values with arbitrary precision (e.g. :py:class:`numpy.uint8`). If the path does not exist, a RuntimeError is raised."))
+    .def("has_attribute", &hdf5file_has_attribute, hdf5file_has_attribute_overloads((arg("self"), arg("name"), arg("path")="."), "Checks if given attribute exists in a given (existing) path. The path may point to a subdirectory or to a particular dataset. If the path does not exist, a RuntimeError is raised."))
+    .def("delete_attribute", &hdf5file_del_attribute, hdf5file_del_attribute_overloads((arg("self"), arg("name"), arg("path")="."), "Deletes a given attribute associated to a (existing) path in the file. The path may point to a subdirectory or to a particular dataset. If the path does not exist, a RuntimeError is raised."))
+    .def("delete_attributes", &hdf5file_del_attributes, hdf5file_del_attributes_overloads((arg("self"), arg("path")="."), "Deletes **all** attributes associated to a (existing) path in the file. The path may point to a subdirectory or to a particular dataset. If the path does not exist, a RuntimeError is raised."))
+
+    .add_property("filename", &bob::io::HDF5File::filename, "The name of the underlying file.")
+    ;
+}
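To make the bound API concrete, a short usage sketch from the Python side (assuming the class is exported from this package as in the tests below; the file name and values are illustrative):

    import numpy
    from xbob.learn.misc import HDF5File  # bound by bind_io_hdf5() above

    f = HDF5File("example.hdf5", 'w')  # modes: 'r', 'a', 'w' or 'x'
    f.set("array", numpy.arange(10, dtype='float64'))  # create/overwrite at position 0
    f.append("scalars", 1.5)           # appending grows the dataset one object at a time
    f.append("scalars", 2.5)
    f.set_attribute("comment", "test data", "array")

    print(f.read("array"))             # the whole dataset in one shot
    print(f.lread("scalars"))          # all objects as a list: [1.5, 2.5]
    print(f.lread("scalars", 0))       # a single object: 1.5
    print(f.paths())                   # ['/array', '/scalars']
    print(f.get_attributes("array"))   # {'comment': 'test data'}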
diff --git a/xbob/learn/misc/main.cpp b/xbob/learn/misc/main.cpp
index 6058c6581190892c04532aac6b41abbbd17a8116..21f2599bdf137c6a7b66851b97c01895aec51542 100644
--- a/xbob/learn/misc/main.cpp
+++ b/xbob/learn/misc/main.cpp
@@ -13,7 +13,9 @@
 void bind_core_tinyvector();
 void bind_core_ndarray_numpy();
 void bind_core_bz_numpy();
+void bind_core_random();
 void bind_ip_gabor_wavelet_transform();
+void bind_io_hdf5();
 
 /** machine bindings **/
 void bind_machine_base();
@@ -50,6 +52,8 @@ BOOST_PYTHON_MODULE(_library) {
   bind_core_ndarray_numpy();
   bind_core_bz_numpy();
   bind_ip_gabor_wavelet_transform();
+  bind_io_hdf5();
+  bind_core_random();
 
   /** machine bindings **/
   bind_machine_base();
diff --git a/xbob/learn/misc/random.cpp b/xbob/learn/misc/random.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..35ada5103f2c0bdf3671ca9256b32c7b39057743
--- /dev/null
+++ b/xbob/learn/misc/random.cpp
@@ -0,0 +1,39 @@
+/**
+ * @author Andre Anjos <andre.anjos@idiap.ch>
+ * @author Laurent El Shafey <laurent.el-shafey@idiap.ch>
+ * @date Mon Jul 11 18:31:22 2011 +0200
+ *
+ * @brief Bindings for random number generation.
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "ndarray.h"
+#include <boost/make_shared.hpp>
+#include <boost/random.hpp>
+
+using namespace boost::python;
+
+template <typename T>
+static boost::shared_ptr<boost::mt19937> make_with_seed(T s) {
+  return boost::make_shared<boost::mt19937>(s);
+}
+
+template <typename T>
+static void set_seed(boost::mt19937& o, T s) {
+  o.seed(s);
+}
+
+void bind_core_random () {
+  class_<boost::mt19937, boost::shared_ptr<boost::mt19937> >("mt19937",
+      "A Mersenne-Twister Random Number Generator (RNG)\n" \
+      "\n" \
+      "A Random Number Generator (RNG) based on the work 'Mersenne Twister: A 623-dimensionally equidistributed uniform pseudo-random number generator, Makoto Matsumoto and Takuji Nishimura, ACM Transactions on Modeling and Computer Simulation: Special Issue on Uniform Random Number Generation, Vol. 8, No. 1, January 1998, pp. 3-30'", init<>((arg("self")), "Default constructor"))
+    .def("__init__", make_constructor(&make_with_seed<int64_t>, default_call_policies(), (arg("seed"))), "Builds a new generator with a specific seed")
+    .def("__init__", make_constructor(&make_with_seed<double>, default_call_policies(), (arg("seed"))), "Builds a new generator with a specific seed")
+    .def("seed", &set_seed<double>, (arg("self"), arg("seed")), "Sets my internal seed using a floating-point number")
+    .def("seed", &set_seed<int64_t>, (arg("self"), arg("seed")), "Sets my internal seed using an integer")
+    .def(self == self)
+    .def(self != self)
+    ;
+}
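A corresponding usage sketch for the RNG binding (import path assumed to match the tests in this package; 5489 is boost::mt19937's documented default seed):

    from xbob.learn.misc import mt19937  # bound by bind_core_random() above

    rng1 = mt19937(5489)  # explicit integer seed
    rng2 = mt19937()      # default construction seeds with 5489
    assert rng1 == rng2   # equality compares the generators' internal state

    rng2.seed(42)         # re-seed; integer and float overloads are bound above
    assert rng1 != rng2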
diff --git a/xbob/learn/misc/test_bic.py b/xbob/learn/misc/test_bic.py
index cad8fb742c82a04ae365130b0799e089dd361c7a..d0cdff3dc98bacd94d5be07604a4158841dc0f85 100644
--- a/xbob/learn/misc/test_bic.py
+++ b/xbob/learn/misc/test_bic.py
@@ -9,81 +9,88 @@
 """
 
 import numpy
+import nose.tools
 from . import BICMachine, BICTrainer
 
+eps = 1e-5
+
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon).all()
 
-class BICTrainerAndMachineTest(unittest.TestCase):
-  """Performs various BIC trainer and machine tests."""
-
-  def training_data(self):
-    data = numpy.array([
-      (10., 4., 6., 8., 2.),
-      (8., 2., 4., 6., 0.),
-      (12., 6., 8., 10., 4.),
-      (11., 3., 7., 7., 3.),
-      (9., 5., 5., 9., 1.)], dtype='float64')
-
-    return data, -1. * data
-
-  def eval_data(self, which):
-    eval_data = numpy.ndarray((5,), dtype=numpy.float64)
-    if which == 0:
-      eval_data.fill(0.)
-    elif which == 1:
-      eval_data.fill(10.)
-
-    return eval_data
-
-  def test_IEC(self):
-    # Tests the IEC training of the BICTrainer
-    intra_data, extra_data = self.training_data()
-
-    # train BIC machine
-    machine = BICMachine()
-    trainer = BICTrainer()
-
-    # train machine with intrapersonal data only
-    trainer.train(machine, intra_data, intra_data)
-    # => every result should be zero
-    self.assertAlmostEqual(machine(self.eval_data(0)), 0.)
-    self.assertAlmostEqual(machine(self.eval_data(1)), 0.)
-
-    # re-train the machine with intra- and extrapersonal data
-    trainer.train(machine, intra_data, extra_data)
-    # now, only the input vector 0 should give log-likelihood 0
-    self.assertAlmostEqual(machine(self.eval_data(0)), 0.)
-    # while a positive vector should give a positive result
-    self.assertTrue(machine(self.eval_data(1)) > 0.)
-
-  def test_BIC(self):
-    # Tests the BIC training of the BICTrainer
-    intra_data, extra_data = self.training_data()
-
-    # train BIC machine
-    trainer = BICTrainer(2,2)
-
-    # The data are chosen such that the third eigenvalue is zero.
-    # Hence, calculating rho (i.e., using the Distance From Feature Space) is impossible
-    machine = BICMachine(True)
-    def should_raise():
-      trainer.train(machine, intra_data, intra_data)
-    self.assertRaises(RuntimeError, should_raise)
-
-    # So, now without rho...
-    machine = BICMachine(False)
-
-    # First, train the machine with intrapersonal data only
-    trainer.train(machine, intra_data, intra_data)
-
-    # => every result should be zero
-    self.assertAlmostEqual(machine(self.eval_data(0)), 0.)
-    self.assertAlmostEqual(machine(self.eval_data(1)), 0.)
-
-    # re-train the machine with intra- and extrapersonal data
-    trainer.train(machine, intra_data, extra_data)
-    # now, only the input vector 0 should give log-likelihood 0
-    self.assertAlmostEqual(machine(self.eval_data(0)), 0.)
-    # while a positive vector should give a positive result
-    self.assertTrue(machine(self.eval_data(1)) > 0.)
+def training_data():
+  data = numpy.array([
+    (10., 4., 6., 8., 2.),
+    (8., 2., 4., 6., 0.),
+    (12., 6., 8., 10., 4.),
+    (11., 3., 7., 7., 3.),
+    (9., 5., 5., 9., 1.)], dtype='float64')
+
+  return data, -1. * data
+
+def eval_data(which):
+  eval_data = numpy.ndarray((5,), dtype=numpy.float64)
+  if which == 0:
+    eval_data.fill(0.)
+  elif which == 1:
+    eval_data.fill(10.)
+
+  return eval_data
+
+def test_IEC():
+  # Tests the IEC training of the BICTrainer
+  intra_data, extra_data = training_data()
+
+  # train BIC machine
+  machine = BICMachine()
+  trainer = BICTrainer()
+
+  # train machine with intrapersonal data only
+  trainer.train(machine, intra_data, intra_data)
+  # => every result should be zero
+  assert abs(machine(eval_data(0))) < eps
+  assert abs(machine(eval_data(1))) < eps
+
+  # re-train the machine with intra- and extrapersonal data
+  trainer.train(machine, intra_data, extra_data)
+  # now, only the input vector 0 should give log-likelihood 0
+  assert abs(machine(eval_data(0))) < eps
+  # while a positive vector should give a positive result
+  assert machine(eval_data(1)) > 0.
+
+@nose.tools.raises(RuntimeError)
+def test_raises():
+
+  # Tests that BIC training raises when rho cannot be calculated
+  intra_data, extra_data = training_data()
+
+  # train BIC machine
+  trainer = BICTrainer(2,2)
+
+  # The data are chosen such that the third eigenvalue is zero.
+  # Hence, calculating rho (i.e., using the Distance From Feature Space) is impossible
+  machine = BICMachine(True)
+  trainer.train(machine, intra_data, intra_data)
+
+def test_BIC():
+  # Tests the BIC training of the BICTrainer
+  intra_data, extra_data = training_data()
+
+  # train BIC machine
+  trainer = BICTrainer(2,2)
+
+  # Train without rho (see test_raises above)
+  machine = BICMachine(False)
+
+  # First, train the machine with intrapersonal data only
+  trainer.train(machine, intra_data, intra_data)
+
+  # => every result should be zero
+  assert abs(machine(eval_data(0))) < eps
+  assert abs(machine(eval_data(1))) < eps
+
+  # re-train the machine with intra- and extrapersonal data
+  trainer.train(machine, intra_data, extra_data)
+  # now, only the input vector 0 should give log-likelihood 0
+  assert abs(machine(eval_data(0))) < eps
+  # while a positive vector should give a positive result
+  assert machine(eval_data(1)) > 0.
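The rewrite above follows a consistent recipe: TestCase methods become module-level functions, assertAlmostEqual becomes a plain assert against eps, and assertRaises becomes the nose decorator. A minimal, self-contained illustration of those nose idioms (names here are illustrative only):

    import nose.tools

    eps = 1e-5

    def check_almost_zero(x):
        # replaces unittest's self.assertAlmostEqual(x, 0.)
        assert abs(x) < eps

    @nose.tools.raises(RuntimeError)
    def test_expected_failure():
        # replaces unittest's self.assertRaises(RuntimeError, callable)
        raise RuntimeError("expected")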
diff --git a/xbob/learn/misc/test_em.py b/xbob/learn/misc/test_em.py
index eca6be6b416906474aad993611f35e8938a12861..34e15f271c3519a742ad28561069cea3aec2cff8 100644
--- a/xbob/learn/misc/test_em.py
+++ b/xbob/learn/misc/test_em.py
@@ -13,10 +13,13 @@ import numpy
 import xbob.io.base
 from xbob.io.base.test_utils import datafile
 
-from . import KMeansMachine, GMMMachine
+from . import KMeansMachine, GMMMachine, KMeansTrainer, \
+    ML_GMMTrainer, MAP_GMMTrainer
+
+from . import HDF5File as OldHDF5File
 
 def loadGMM():
-  gmm = bob.machine.GMMMachine(2, 2)
+  gmm = GMMMachine(2, 2)
 
   gmm.weights = xbob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
   gmm.means = xbob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
@@ -28,10 +31,11 @@ def loadGMM():
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon).all()
 
-class MyTrainer1(bob.trainer.KMeansTrainer):
+class MyTrainer1(KMeansTrainer):
   """Simple example of python trainer: """
-  def __init__():
-    bob.trainer.KMeansTrainer.__init__()
+
+  def __init__(self):
+    KMeansTrainer.__init__(self)
 
   def train(self, machine, data):
     a = numpy.ndarray((2, 2), 'float64')
@@ -47,15 +51,15 @@ def test_gmm_ML_1():
 
   gmm = loadGMM()
 
-  ml_gmmtrainer = bob.trainer.ML_GMMTrainer(True, True, True)
+  ml_gmmtrainer = ML_GMMTrainer(True, True, True)
   ml_gmmtrainer.train(gmm, ar)
 
-  #config = xbob.io.base.HDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
+  #config = OldHDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
   #gmm.save(config)
 
-  gmm_ref = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__)))
-  gmm_ref_32bit_debug = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile('gmm_ML_32bit_debug.hdf5', __name__)))
-  gmm_ref_32bit_release = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile('gmm_ML_32bit_release.hdf5', __name__)))
+  gmm_ref = GMMMachine(OldHDF5File(datafile('gmm_ML.hdf5', __name__)))
+  gmm_ref_32bit_debug = GMMMachine(OldHDF5File(datafile('gmm_ML_32bit_debug.hdf5', __name__)))
+  gmm_ref_32bit_release = GMMMachine(OldHDF5File(datafile('gmm_ML_32bit_release.hdf5', __name__)))
 
   assert (gmm == gmm_ref) or (gmm == gmm_ref_32bit_release) or (gmm == gmm_ref_32bit_debug)
 
@@ -66,7 +70,7 @@ def test_gmm_ML_2():
   ar = xbob.io.base.load(datafile('dataNormalized.hdf5', __name__))
 
   # Initialize GMMMachine
-  gmm = bob.machine.GMMMachine(5, 45)
+  gmm = GMMMachine(5, 45)
   gmm.means = xbob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__)).astype('float64')
   gmm.variances = xbob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__)).astype('float64')
   gmm.weights = numpy.exp(xbob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__)).astype('float64'))
@@ -78,7 +82,7 @@ def test_gmm_ML_2():
   prior = 0.001
   max_iter_gmm = 25
   accuracy = 0.00001
-  ml_gmmtrainer = bob.trainer.ML_GMMTrainer(True, True, True, prior)
+  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior)
   ml_gmmtrainer.max_iterations = max_iter_gmm
   ml_gmmtrainer.convergence_threshold = accuracy
 
@@ -102,18 +106,18 @@ def test_gmm_MAP_1():
 
   ar = xbob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__))
 
-  gmm = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__)))
-  gmmprior = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__)))
+  gmm = GMMMachine(OldHDF5File(datafile("gmm_ML.hdf5", __name__)))
+  gmmprior = GMMMachine(OldHDF5File(datafile("gmm_ML.hdf5", __name__)))
 
-  map_gmmtrainer = bob.trainer.MAP_GMMTrainer(16)
+  map_gmmtrainer = MAP_GMMTrainer(16)
   map_gmmtrainer.set_prior_gmm(gmmprior)
   map_gmmtrainer.train(gmm, ar)
 
-  #config = xbob.io.base.HDF5File(datafile('gmm_MAP.hdf5", 'w', __name__))
+  #config = OldHDF5File(datafile('gmm_MAP.hdf5", 'w', __name__))
   #gmm.save(config)
 
-  gmm_ref = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile('gmm_MAP.hdf5', __name__)))
-  #gmm_ref_32bit_release = bob.machine.GMMMachine(xbob.io.base.HDF5File(datafile('gmm_MAP_32bit_release.hdf5', __name__)))
+  gmm_ref = GMMMachine(OldHDF5File(datafile('gmm_MAP.hdf5', __name__)))
+  #gmm_ref_32bit_release = GMMMachine(OldHDF5File(datafile('gmm_MAP_32bit_release.hdf5', __name__)))
 
   assert (equals(gmm.means,gmm_ref.means,1e-3) and equals(gmm.variances,gmm_ref.variances,1e-3) and equals(gmm.weights,gmm_ref.weights,1e-3))
 
@@ -121,21 +125,21 @@ def test_gmm_MAP_2():
 
   # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference
 
-  map_adapt = bob.trainer.MAP_GMMTrainer(4., True, False, False, 0.)
-  data = xbob.io.base.load(datafile('data.hdf5', 'machine', __name__))
+  map_adapt = MAP_GMMTrainer(4., True, False, False, 0.)
+  data = xbob.io.base.load(datafile('data.hdf5', __name__))
   data = data.reshape((1, data.shape[0])) # make a 2D array out of it
-  means = xbob.io.base.load(datafile('means.hdf5', 'machine', __name__))
-  variances = xbob.io.base.load(datafile('variances.hdf5', 'machine', __name__))
-  weights = xbob.io.base.load(datafile('weights.hdf5', 'machine', __name__))
+  means = xbob.io.base.load(datafile('means.hdf5', __name__))
+  variances = xbob.io.base.load(datafile('variances.hdf5', __name__))
+  weights = xbob.io.base.load(datafile('weights.hdf5', __name__))
 
-  gmm = bob.machine.GMMMachine(2,50)
+  gmm = GMMMachine(2,50)
   gmm.means = means
   gmm.variances = variances
   gmm.weights = weights
 
   map_adapt.set_prior_gmm(gmm)
 
-  gmm_adapted = bob.machine.GMMMachine(2,50)
+  gmm_adapted = GMMMachine(2,50)
   gmm_adapted.means = means
   gmm_adapted.variances = variances
   gmm_adapted.weights = weights
@@ -159,7 +163,7 @@ def test_gmm_MAP_3():
   # Initialize GMMMachine
   n_gaussians = 5
   n_inputs = 45
-  prior_gmm = bob.machine.GMMMachine(n_gaussians, n_inputs)
+  prior_gmm = GMMMachine(n_gaussians, n_inputs)
   prior_gmm.means = xbob.io.base.load(datafile('meansAfterML.hdf5', __name__))
   prior_gmm.variances = xbob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
   prior_gmm.weights = xbob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
@@ -173,13 +177,13 @@ def test_gmm_MAP_3():
   max_iter_gmm = 1
   accuracy = 0.00001
   map_factor = 0.5
-  map_gmmtrainer = bob.trainer.MAP_GMMTrainer(relevance_factor, True, False, False, prior)
+  map_gmmtrainer = MAP_GMMTrainer(relevance_factor, True, False, False, prior)
   map_gmmtrainer.max_iterations = max_iter_gmm
   map_gmmtrainer.convergence_threshold = accuracy
   map_gmmtrainer.set_prior_gmm(prior_gmm)
   map_gmmtrainer.set_t3_map(map_factor);
 
-  gmm = bob.machine.GMMMachine(n_gaussians, n_inputs)
+  gmm = GMMMachine(n_gaussians, n_inputs)
   gmm.set_variance_thresholds(threshold)
 
   # Train
@@ -209,7 +213,7 @@ def test_gmm_test():
   # Initialize GMMMachine
   n_gaussians = 5
   n_inputs = 45
-  gmm = bob.machine.GMMMachine(n_gaussians, n_inputs)
+  gmm = GMMMachine(n_gaussians, n_inputs)
   gmm.means = xbob.io.base.load(datafile('meansAfterML.hdf5', __name__))
   gmm.variances = xbob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
   gmm.weights = xbob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
@@ -234,7 +238,7 @@ def test_custom_trainer():
 
   mytrainer = MyTrainer1()
 
-  machine = bob.machine.KMeansMachine(2, 2)
+  machine = KMeansMachine(2, 2)
   mytrainer.train(machine, ar)
 
   for i in range(0, 2):
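
Note: the hunks above consistently replace fully-qualified bob.machine.* and
bob.trainer.* references with names imported from the local package. As a
point of reference, a minimal sketch of the import block these hunks assume
at the top of the test module (hypothetical; the actual import hunk falls
outside this excerpt):

    # Hypothetical import block the edited tests rely on, assuming these
    # names are all re-exported by the xbob.learn.misc package:
    from . import GMMMachine, KMeansMachine, ML_GMMTrainer, MAP_GMMTrainer
    from . import HDF5File as OldHDF5File  # legacy HDF5 binding, aliased
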
diff --git a/xbob/learn/misc/test_gaussian.py b/xbob/learn/misc/test_gaussian.py
index 95e18b0e0722cbd00280d050e4466999955d61d1..f9b82e85aff8723843860849b0676aba228648cb 100644
--- a/xbob/learn/misc/test_gaussian.py
+++ b/xbob/learn/misc/test_gaussian.py
@@ -16,6 +16,8 @@ import xbob.io.base
 
 from . import Gaussian
 
+from . import HDF5File as OldHDF5File
+
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon)
 
@@ -47,8 +49,8 @@ def test_GaussianMachine():
 
   # Save and read from file
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  g.save(xbob.io.base.HDF5File(filename, 'w'))
-  g_loaded = Gaussian(xbob.io.base.HDF5File(filename))
+  g.save(OldHDF5File(filename, 'w'))
+  g_loaded = Gaussian(OldHDF5File(filename))
   assert g == g_loaded
   assert (g != g_loaded ) is False
   assert g.is_similar_to(g_loaded)
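
The hunk above exercises the save/load roundtrip through the aliased legacy
binding. A minimal standalone sketch of the same pattern, assuming the
Gaussian constructor takes the dimensionality (values illustrative only):

    import tempfile
    from xbob.learn.misc import Gaussian
    from xbob.learn.misc import HDF5File as OldHDF5File

    g = Gaussian(3)                              # 3-dimensional Gaussian
    filename = str(tempfile.mkstemp(".hdf5")[1])
    g.save(OldHDF5File(filename, 'w'))           # 'w' creates/truncates
    g_loaded = Gaussian(OldHDF5File(filename))   # default mode is read-only
    assert g == g_loaded
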
diff --git a/xbob/learn/misc/test_gmm.py b/xbob/learn/misc/test_gmm.py
index 2c95483379e7bacd541e3f7e29d4b462b03e0330..57df3b22d55583a3bdc4ca9ecc2e5f1c3a721e5c 100644
--- a/xbob/learn/misc/test_gmm.py
+++ b/xbob/learn/misc/test_gmm.py
@@ -17,6 +17,8 @@ from xbob.io.base.test_utils import datafile
 
 from . import GMMStats, GMMMachine
 
+from . import HDF5File as OldHDF5File
+
 def test_GMMStats():
   # Test a GMMStats
 
@@ -40,8 +42,8 @@ def test_GMMStats():
 
   # Saves and reads from file
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(xbob.io.base.HDF5File(filename, 'w'))
-  gs_loaded = GMMStats(xbob.io.base.HDF5File(filename))
+  gs.save(OldHDF5File(filename, 'w'))
+  gs_loaded = GMMStats(OldHDF5File(filename))
   assert gs == gs_loaded
   assert (gs != gs_loaded ) is False
   assert gs.is_similar_to(gs_loaded)
@@ -190,7 +192,7 @@ def test_GMMMachine_2():
   stats = GMMStats(2, 2)
   gmm.acc_statistics(arrayset, stats)
 
-  stats_ref = GMMStats(xbob.io.base.HDF5File(datafile("stats.hdf5", __name__)))
+  stats_ref = GMMStats(OldHDF5File(datafile("stats.hdf5", __name__)))
 
   assert stats.t == stats_ref.t
   assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
diff --git a/xbob/learn/misc/test_jfa.py b/xbob/learn/misc/test_jfa.py
index 06f219e8d9e06caf870eb791e19621b794f59827..9c1897c43141b9390887c2f0738c2fbb4f38a0ef 100644
--- a/xbob/learn/misc/test_jfa.py
+++ b/xbob/learn/misc/test_jfa.py
@@ -15,7 +15,9 @@ import tempfile
 
 import xbob.io.base
 
-from . import GMMMachine, GMMStats, JFABase, ISVBase
+from . import GMMMachine, GMMStats, JFABase, ISVBase, ISVMachine, JFAMachine
+
+from . import HDF5File as OldHDF5File
 
 def estimate_x(dim_c, dim_d, mean, sigma, U, N, F):
   # Compute helper values
@@ -83,8 +85,8 @@ def test_JFABase():
 
   # Saves and loads
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = JFABase(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = JFABase(OldHDF5File(filename))
   m_loaded.ubm = ubm
   assert m == m_loaded
   assert (m != m_loaded) is False
@@ -144,8 +146,8 @@ def test_ISVBase():
 
   # Saves and loads
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = ISVBase(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = ISVBase(OldHDF5File(filename))
   m_loaded.ubm = ubm
   assert m == m_loaded
   assert (m != m_loaded) is False
@@ -208,8 +210,8 @@ def test_JFAMachine():
 
   # Saves and loads
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = JFAMachine(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = JFAMachine(OldHDF5File(filename))
   m_loaded.jfa_base = base
   assert m == m_loaded
   assert (m != m_loaded) is False
@@ -304,8 +306,8 @@ def test_ISVMachine():
 
   # Saves and loads
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = ISVMachine(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = ISVMachine(OldHDF5File(filename))
   m_loaded.isv_base = base
   assert m == m_loaded
   assert (m != m_loaded) is False
diff --git a/xbob/learn/misc/test_jfa_trainer.py b/xbob/learn/misc/test_jfa_trainer.py
index 8754de8419472ab450a133d6327d1ee7029ab75f..5de6a46b26614adcb9e5f7256a88a08fddd50f72 100644
--- a/xbob/learn/misc/test_jfa_trainer.py
+++ b/xbob/learn/misc/test_jfa_trainer.py
@@ -11,7 +11,10 @@
 import numpy
 import numpy.linalg
 
-from . import GMMStats, GMMMachine, JFABase, JFAMachine, ISVBase, ISVMachine
+from . import GMMStats, GMMMachine, JFABase, JFAMachine, ISVBase, ISVMachine, \
+    JFATrainer, ISVTrainer
+
+from . import mt19937 as old_mt19937
 
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon).all()
@@ -78,7 +81,7 @@ def test_JFATrainer_updateYandV():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = bob.trainer.JFATrainer(10)
+  t = JFATrainer(10)
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -112,7 +115,7 @@ def test_JFATrainer_updateXandU():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = bob.trainer.JFATrainer(10)
+  t = JFATrainer(10)
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -145,7 +148,7 @@ def test_JFATrainer_updateZandD():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = bob.trainer.JFATrainer(10)
+  t = JFATrainer(10)
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -170,7 +173,7 @@ def test_JFATrainAndEnrol():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   mb = JFABase(ubm, 2, 2)
-  t = bob.trainer.JFATrainer(10)
+  t = JFATrainer(10)
   t.initialize(mb, TRAINING_STATS)
   mb.u = M_u
   mb.v = M_v
@@ -222,7 +225,7 @@ def test_ISVTrainAndEnrol():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   mb = ISVBase(ubm,2)
-  t = bob.trainer.ISVTrainer(10, 4.)
+  t = ISVTrainer(10, 4.)
   #t.train(mb, TRAINING_STATS)
   t.initialize(mb, TRAINING_STATS)
   mb.u = M_u
@@ -263,8 +266,8 @@ def test_JFATrainInitialize():
   ## JFA
   jb = JFABase(ubm, 2, 2)
   # first round
-  rng = bob.core.random.mt19937(0)
-  jt = bob.trainer.JFATrainer(10)
+  rng = old_mt19937(0)
+  jt = JFATrainer(10)
   jt.rng = rng
   jt.initialize(jb, TRAINING_STATS)
   u1 = jb.u
@@ -272,7 +275,7 @@ def test_JFATrainInitialize():
   d1 = jb.d
 
   # second round
-  rng = bob.core.random.mt19937(0)
+  rng = old_mt19937(0)
   jt.rng = rng
   jt.initialize(jb, TRAINING_STATS)
   u2 = jb.u
@@ -296,15 +299,15 @@ def test_ISVTrainInitialize():
   ## ISV
   ib = ISVBase(ubm, 2)
   # first round
-  rng = bob.core.random.mt19937(0)
-  it = bob.trainer.ISVTrainer(10)
+  rng = old_mt19937(0)
+  it = ISVTrainer(10)
   it.rng = rng
   it.initialize(ib, TRAINING_STATS)
   u1 = ib.u
   d1 = ib.d
 
   # second round
-  rng = bob.core.random.mt19937(0)
+  rng = old_mt19937(0)
   it.rng = rng
   it.initialize(ib, TRAINING_STATS)
   u2 = ib.u
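
Both initialization tests above rely on the legacy generator being
reproducible from a seed: two rounds seeded identically must yield identical
u, v and d matrices. A sketch of that assumption in isolation (equality of
the generator objects themselves is assumed to be defined by the binding):

    from xbob.learn.misc import mt19937 as old_mt19937

    # Same seed, same internal state: draws from either generator should
    # drive identical random initializations.
    rng_a = old_mt19937(0)
    rng_b = old_mt19937(0)
    assert rng_a == rng_b
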
diff --git a/xbob/learn/misc/test_kmeans.py b/xbob/learn/misc/test_kmeans.py
index 16f01a3c4a5b9c27d9ae4ec3e7ccc369628faac0..920b51681ef3c8e3b9bf1853f4514b076fb1c95d 100644
--- a/xbob/learn/misc/test_kmeans.py
+++ b/xbob/learn/misc/test_kmeans.py
@@ -15,6 +15,8 @@ import tempfile
 import xbob.io.base
 from . import KMeansMachine
 
+from . import HDF5File as OldHDF5File
+
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon)
 
@@ -48,8 +50,8 @@ def test_KMeansMachine():
 
   # Loads and saves
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  km.save(xbob.io.base.HDF5File(filename, 'w'))
-  km_loaded = KMeansMachine(xbob.io.base.HDF5File(filename))
+  km.save(OldHDF5File(filename, 'w'))
+  km_loaded = KMeansMachine(OldHDF5File(filename))
   assert km == km_loaded
 
   # Resize
diff --git a/xbob/learn/misc/test_kmeans_trainer.py b/xbob/learn/misc/test_kmeans_trainer.py
index 2964ce205d3bc0e87c1a32115310f56a1e4440f1..bcf3f206be736538c76c3f560979ff85e3403353 100644
--- a/xbob/learn/misc/test_kmeans_trainer.py
+++ b/xbob/learn/misc/test_kmeans_trainer.py
@@ -13,15 +13,19 @@ import xbob.core
 import xbob.io
 from xbob.io.base.test_utils import datafile
 
+from . import KMeansMachine, KMeansTrainer
+
+from . import mt19937 as old_mt19937
+
 def equals(x, y, epsilon):
   return (abs(x - y) < epsilon).all()
 
 def kmeans_plus_plus(machine, data, seed):
   """Python implementation of K-Means++ (initialization)"""
   n_data = data.shape[0]
-  mt = xbob.core.random.mt19937(seed)
-  rng = xbob.core.random.uniform_int32(0, n_data-1)
-  index = rng(mt)
+  rng = xbob.core.random.mt19937(seed)
+  u = xbob.core.random.uniform('int32', 0, n_data-1)
+  index = u(rng)
   machine.set_mean(0, data[index,:])
   weights = numpy.zeros(shape=(n_data,), dtype=numpy.float64)
 
@@ -34,8 +38,8 @@ def kmeans_plus_plus(machine, data, seed):
       weights[s] = w_cur
     weights *= weights
     weights /= numpy.sum(weights)
-    rng_d = xbob.core.random.discrete_int32(weights)
-    index = rng_d(mt)
+    d = xbob.core.random.discrete('int32', weights)
+    index = d(rng)
     machine.set_mean(m, data[index,:])
 
 
@@ -57,7 +61,7 @@ def flipRows(array):
   else:
-    raise Exception('Input type not supportd by flipRows')
+    raise Exception('Input type not supported by flipRows')
 
-if hasattr(bob.trainer.KMeansTrainer, 'KMEANS_PLUS_PLUS'):
+if hasattr(KMeansTrainer, 'KMEANS_PLUS_PLUS'):
   def test_kmeans_plus_plus():
 
     # Tests the K-Means++ initialization
@@ -68,14 +72,14 @@ if hasattr(bob.trainer.KMeansTrainer, 'KMEANS_PLUS_PLUS'):
     seed = 0
 
     # C++ implementation
-    machine = bob.machine.KMeansMachine(dim_c, dim_d)
-    trainer = bob.trainer.KMeansTrainer()
-    trainer.rng = xbob.core.random.mt19937(seed)
-    trainer.initialization_method = bob.trainer.KMeansTrainer.KMEANS_PLUS_PLUS
+    machine = KMeansMachine(dim_c, dim_d)
+    trainer = KMeansTrainer()
+    trainer.rng = old_mt19937(seed)
+    trainer.initialization_method = KMeansTrainer.KMEANS_PLUS_PLUS
     trainer.initialize(machine, data)
 
     # Python implementation
-    py_machine = bob.machine.KMeansMachine(dim_c, dim_d)
+    py_machine = KMeansMachine(dim_c, dim_d)
     kmeans_plus_plus(py_machine, data, seed)
     assert equals(machine.means, py_machine.means, 1e-8)
 
@@ -86,13 +90,13 @@ def test_kmeans_noduplicate():
   seed = 0
   data = numpy.array([[1,2,3],[1,2,3],[1,2,3],[4,5,6.]])
   # Defines machine and trainer
-  machine = bob.machine.KMeansMachine(dim_c, dim_d)
-  trainer = bob.trainer.KMeansTrainer()
-  trainer.rng = xbob.core.random.mt19937(seed)
-  trainer.initialization_method = bob.trainer.KMeansTrainer.RANDOM_NO_DUPLICATE
+  machine = KMeansMachine(dim_c, dim_d)
+  trainer = KMeansTrainer()
+  trainer.rng = old_mt19937(seed)
+  trainer.initialization_method = KMeansTrainer.RANDOM_NO_DUPLICATE
   trainer.initialize(machine, data)
   # Makes sure that the two initial mean vectors selected are different
-  assert (equals(machine.get_mean(0), machine.get_mean(1), 1e-8)) is False
+  assert not equals(machine.get_mean(0), machine.get_mean(1), 1e-8)
 
 def test_kmeans_a():
 
@@ -102,9 +106,9 @@ def test_kmeans_a():
   #   * 100 samples from N(10,1)
   data = xbob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__))
 
-  machine = bob.machine.KMeansMachine(2, 1)
+  machine = KMeansMachine(2, 1)
 
-  trainer = bob.trainer.KMeansTrainer()
+  trainer = KMeansTrainer()
   trainer.train(machine, data)
 
   [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
@@ -131,9 +135,9 @@ def test_kmeans_b():
   # Trains a KMeansMachine
   (arStd,std) = NormalizeStdArray(datafile("faithful.torch3.hdf5", __name__))
 
-  machine = bob.machine.KMeansMachine(2, 2)
+  machine = KMeansMachine(2, 2)
 
-  trainer = bob.trainer.KMeansTrainer()
+  trainer = KMeansTrainer()
   #trainer.seed = 1337
   trainer.train(machine, arStd)
 
@@ -157,8 +161,8 @@ def test_kmeans_b():
   assert equals(variances, gmmVariances, 1e-3)
 
   # Check comparison operators
-  trainer1 = bob.trainer.KMeansTrainer()
-  trainer2 = bob.trainer.KMeansTrainer()
+  trainer1 = KMeansTrainer()
+  trainer2 = KMeansTrainer()
   trainer1.rng = trainer2.rng
   assert trainer1 == trainer2
   assert (trainer1 != trainer2) is False
@@ -167,9 +171,9 @@ def test_kmeans_b():
   assert trainer1 != trainer2
 
   # Check that there is no duplicate means during initialization
-  machine = bob.machine.KMeansMachine(2, 1)
-  trainer = bob.trainer.KMeansTrainer()
-  trainer.initialization_method = bob.trainer.KMeansTrainer.RANDOM_NO_DUPLICATE
+  machine = KMeansMachine(2, 1)
+  trainer = KMeansTrainer()
+  trainer.initialization_method = KMeansTrainer.RANDOM_NO_DUPLICATE
   data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
   trainer.train(machine, data)
-  assert (numpy.isnan(machine.means).any()) is False
+  assert not numpy.isnan(machine.means).any()
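
The kmeans_plus_plus helper above is ported to the dtype-string form of the
xbob.core.random distributions: a distribution object is constructed once
with a dtype string and its parameters, then called with the generator to
draw values. A minimal sketch (bounds and weights illustrative only):

    import numpy
    import xbob.core.random

    rng = xbob.core.random.mt19937(0)
    u = xbob.core.random.uniform('int32', 0, 9)      # integers in [0, 9]
    index = u(rng)                                   # draw one index

    weights = numpy.full((10,), 0.1)                 # uniform categorical
    d = xbob.core.random.discrete('int32', weights)
    index2 = d(rng)
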
diff --git a/xbob/learn/misc/test_plda.py b/xbob/learn/misc/test_plda.py
index 07aec4fa494d78c430991e57184d10ccaaa73b53..210cdaba08c01f138f6912a1dd7e91bd1a864019 100644
--- a/xbob/learn/misc/test_plda.py
+++ b/xbob/learn/misc/test_plda.py
@@ -13,11 +13,14 @@ import tempfile
 import math
 import numpy
 import numpy.linalg
+import nose.tools
 
 import xbob.io.base
 
 from . import PLDABase, PLDAMachine
 
+from . import HDF5File as OldHDF5File
+
 # Defines common variables globally
 # Dimensionalities
 C_dim_d = 7
@@ -291,8 +294,8 @@ def test_plda_basemachine():
 
   # Saves to file, loads and compares to original
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = PLDABase(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = PLDABase(OldHDF5File(filename))
 
   # Compares the values loaded with the former ones
   assert m_loaded == m
@@ -368,8 +371,7 @@ def test_plda_basemachine_loglikelihood_pointestimate():
   m.g = C_G
   m.sigma = sigma
 
-  self.assertTrue(equals(m.compute_log_likelihood_point_estimate(xij, hi, wij),
-                         compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6))
+  assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6)
 
 
 def test_plda_machine():
@@ -417,8 +419,8 @@ def test_plda_machine():
 
   # Saves to file, loads and compares to original
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = PLDAMachine(xbob.io.base.HDF5File(filename), mb)
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = PLDAMachine(OldHDF5File(filename), mb)
 
   # Compares the values loaded with the former ones
   assert m_loaded == m
@@ -443,12 +445,12 @@ def test_plda_machine():
 
   # Check exceptions
   m_loaded2 = PLDAMachine()
-  m_loaded2.load(xbob.io.base.HDF5File(filename))
-  self.assertRaises(RuntimeError, getattr, m_loaded2, 'dim_d')
-  self.assertRaises(RuntimeError, getattr, m_loaded2, 'dim_f')
-  self.assertRaises(RuntimeError, getattr, m_loaded2, 'dim_g')
-  self.assertRaises(RuntimeError, m_loaded2.forward, [1.])
-  self.assertRaises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
+  m_loaded2.load(OldHDF5File(filename))
+  nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_d')
+  nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
+  nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
+  nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
+  nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
 
   # Clean-up
   os.unlink(filename)
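
These tests are plain module-level functions rather than unittest.TestCase
methods, so the self.assertRaises calls above become nose.tools.assert_raises
with the same (exception, callable, *args) signature. A minimal sketch:

    import nose.tools

    def fails():
      raise RuntimeError("boom")

    nose.tools.assert_raises(RuntimeError, fails)  # passes: error is raised
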
diff --git a/xbob/learn/misc/test_wiener.py b/xbob/learn/misc/test_wiener.py
index 15e3028c4720cec6ab9c2977ab9304b949a2ca0d..331ca3b0950f5fa0c78a4a9628c081d096407b39 100644
--- a/xbob/learn/misc/test_wiener.py
+++ b/xbob/learn/misc/test_wiener.py
@@ -17,6 +17,8 @@ import xbob.io.base
 
 from . import WienerMachine
 
+from . import HDF5File as OldHDF5File
+
 def test_initialization():
 
   # Getters/Setters
@@ -65,8 +67,8 @@ def test_load_save():
 
   # Save and read from file
   filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(xbob.io.base.HDF5File(filename, 'w'))
-  m_loaded = WienerMachine(xbob.io.base.HDF5File(filename))
+  m.save(OldHDF5File(filename, 'w'))
+  m_loaded = WienerMachine(OldHDF5File(filename))
   assert m == m_loaded
   assert (m != m_loaded ) is False
   assert m.is_similar_to(m_loaded)
diff --git a/xbob/learn/misc/test_ztnorm.py b/xbob/learn/misc/test_ztnorm.py
index 0af05f2d6bc392255756aa959e72cffeb677bbf1..fe4563c4294b96082c6b63fba840d4f50640ccfd 100644
--- a/xbob/learn/misc/test_ztnorm.py
+++ b/xbob/learn/misc/test_ztnorm.py
@@ -68,13 +68,13 @@ def test_ztnorm_simple():
   assert (abs(scores - ref_scores) < 1e-7).all()
 
 def test_ztnorm_big():
-  my_A = xbob.io.base.load(datafile("ztnorm_eval_eval.mat", __name__))
-  my_B = xbob.io.base.load(datafile("ztnorm_znorm_eval.mat", __name__))
-  my_C = xbob.io.base.load(datafile("ztnorm_eval_tnorm.mat", __name__))
-  my_D = xbob.io.base.load(datafile("ztnorm_znorm_tnorm.mat", __name__))
+  my_A = xbob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__))
+  my_B = xbob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__))
+  my_C = xbob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__))
+  my_D = xbob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__))
 
   # ZT-Norm
-  ref_scores = xbob.io.base.load(datafile("ztnorm_result.mat", __name__))
+  ref_scores = xbob.io.base.load(datafile("ztnorm_result.hdf5", __name__))
   scores = ztnorm(my_A, my_B, my_C, my_D)
   assert (abs(scores - ref_scores) < 1e-7).all()
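
The reference matrices for this test now ship as HDF5 arrays rather than
Matlab .mat files, so they load through xbob.io.base without a Matlab I/O
plugin. For context, a hedged numpy sketch of the kind of score
normalization under test, simplified to plain Z-norm (bob's ztnorm chains
Z- and T-norm, which this sketch does not reproduce):

    import numpy

    def znorm(raw, z_impostor):
      # raw: (n_models, n_probes) raw scores
      # z_impostor: (n_models, n_zprobes) impostor scores per model
      mu = z_impostor.mean(axis=1, keepdims=True)
      sigma = z_impostor.std(axis=1, keepdims=True)
      return (raw - mu) / sigma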