diff --git a/advanced/algorithms/username/gmm/1.json b/advanced/algorithms/username/gmm/1.json
index 4402e43eae217a82596efb51413631a32955ee73..c5456a637c7a23b8b04b678da669b1ea637fac6a 100644
--- a/advanced/algorithms/username/gmm/1.json
+++ b/advanced/algorithms/username/gmm/1.json
@@ -1,5 +1,5 @@
 {
-  "description": "Implements the Universal Background Model (UBM) training ",
+  "description": "Implements the Universal Background Model (UBM) training",
   "language": "python",
   "splittable": false,
   "groups": [
diff --git a/advanced/algorithms/username/gmm_projection/1.json b/advanced/algorithms/username/gmm_projection/1.json
index 8a5e8d7c54dc4991ee97944e1abca34f29d7f15d..f22d8000b5e687f9c43e4d7284263262b4ce905e 100644
--- a/advanced/algorithms/username/gmm_projection/1.json
+++ b/advanced/algorithms/username/gmm_projection/1.json
@@ -1,5 +1,5 @@
 {
-  "description": "For a given set of feature vectors and a Gaussian Mixture Models (GMM), this algorithm implements the Maximum-a-posteriori (MAP) estimation",
+  "description": "For a given set of feature vectors and a GMM, this algorithm implements the MAP estimation",
   "language": "python",
   "splittable": true,
   "groups": [
diff --git a/advanced/algorithms/username/gmm_projection_string/1.json b/advanced/algorithms/username/gmm_projection_string/1.json
index 750609657867d2f97f171a041a01633773f4110f..ebcff7d265e34d679d7fa6e871008597f93fc258 100644
--- a/advanced/algorithms/username/gmm_projection_string/1.json
+++ b/advanced/algorithms/username/gmm_projection_string/1.json
@@ -1,5 +1,5 @@
 {
-  "description": "For a given set of feature vectors and a Gaussian Mixture Models (GMM), this algorithm implements the Maximum-a-posteriori (MAP) estimation",
+  "description": "For a given set of feature vectors and a GMM, this algorithm implements the MAP estimation",
   "language": "python",
   "splittable": true,
   "groups": [
diff --git a/advanced/databases/atnt/6.json b/advanced/databases/atnt/6.json
new file mode 100644
index 0000000000000000000000000000000000000000..dc12e1584e4b6440614768eb6a2eb6bad88e435a
--- /dev/null
+++ b/advanced/databases/atnt/6.json
@@ -0,0 +1,59 @@
+{
+    "description": "The AT&T Database of Faces",
+    "root_folder": "/idiap/group/biometric/databases/orl",
+    "protocols": [
+        {
+            "name": "idiap",
+            "template": "simple_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {}
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {}
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "idiap_test_eyepos",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "TrainEyePositions",
+                    "parameters": {}
+                },
+                "dev_templates": {
+                    "view": "TemplatesEyePositions",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ProbesEyePositions",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "TemplatesEyePositions",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "ProbesEyePositions",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/atnt/6.py b/advanced/databases/atnt/6.py
new file mode 100644
index 0000000000000000000000000000000000000000..90560660f11a854323aea1db2862c3ad32b8bdb4
--- /dev/null
+++ b/advanced/databases/atnt/6.py
@@ -0,0 +1,572 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.ip.color
+import bob.db.atnt
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        # One Entry per training image; field order (coarsest grouping first)
+        # matches the output synchronization shown in the class docstring
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+        objs = sorted(db.objects(groups='world', purposes=None),
+                      key=lambda x: (x.client_id, x.id))
+
+        # 'image' stores only the file path; pixels are loaded lazily in get()
+        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.pgm')) for x in objs ]
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            # Loaded on demand to keep indexing cheap
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                         client_id                                           |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        # One Entry per enrollment image, tagged with the template it enrolls
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        template_ids = db.model_ids(groups='dev')
+
+        entries = []
+
+        # Collect the enrollment files of each template separately, since the
+        # database query is per-model
+        for template_id in template_ids:
+            objs = db.objects(groups='dev', purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.pgm'))
+                             for x in objs ])
+
+        # Deterministic global order so outputs line up with the docstring diagram
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            # Loaded on demand to keep indexing cheap
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        # Every probe is compared against the full set of dev templates, so the
+        # same (sorted, uint64) array is shared by all entries
+        template_ids = np.array(sorted(db.model_ids(groups='dev'),
+                                       key=lambda x: int(x)),
+                                dtype='uint64')
+
+        objs = sorted(db.objects(groups='dev', purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        # probe_id and file_id are both the database object id (x.id, passed twice)
+        return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.pgm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            # Loaded on demand to keep indexing cheap
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class TrainEyePositions(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'eye_centers', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+        objs = sorted(db.objects(groups='world', purposes=None),
+                      key=lambda x: (x.client_id, x.id))
+
+        # Fixed eye coordinates shared by every image: AT&T faces are already
+        # cropped/registered, so no per-image annotation is needed
+        eye_centers = {
+            'left': {
+                'y': np.int32(48),
+                'x': np.int32(63),
+            },
+            'right': {
+                'y': np.int32(48),
+                'x': np.int32(27),
+            }
+        }
+
+        return [ Entry(x.client_id, eye_centers, x.id, x.make_path(root_folder, '.pgm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'eye_centers':
+            # Already a dict shaped like the eye_positions data format
+            return obj.eye_centers
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            # NOTE(review): declared output is array_3d_uint8 (color), but a
+            # .pgm load typically yields a 2D grayscale array — confirm whether
+            # a color conversion (bob.ip.color is imported) was intended here
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class TemplatesEyePositions(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                         client_id                                           |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        # Fixed eye coordinates shared by every image (registered database)
+        eye_centers = {
+            'left': {
+                'y': np.int32(48),
+                'x': np.int32(63),
+            },
+            'right': {
+                'y': np.int32(48),
+                'x': np.int32(27),
+            }
+        }
+
+        template_ids = db.model_ids(groups='dev')
+
+        entries = []
+
+        # Collect the enrollment files of each template separately, since the
+        # database query is per-model
+        for template_id in template_ids:
+            objs = db.objects(groups='dev', purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, eye_centers, x.make_path(root_folder, '.pgm'))
+                             for x in objs ])
+
+        # Deterministic global order so outputs line up with the docstring diagram
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # Already a dict shaped like the eye_positions data format
+            return obj.eye_centers
+
+        elif output == 'image':
+            # NOTE(review): declared output is array_3d_uint8 (color), but a
+            # .pgm load typically yields a 2D grayscale array — confirm
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class ProbesEyePositions(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        # Fixed eye coordinates shared by every image (registered database)
+        eye_centers = {
+            'left': {
+                'y': np.int32(48),
+                'x': np.int32(63),
+            },
+            'right': {
+                'y': np.int32(48),
+                'x': np.int32(27),
+            }
+        }
+
+        # Every probe is compared against the full set of dev templates
+        template_ids = np.array(sorted(db.model_ids(groups='dev'),
+                                       key=lambda x: int(x)),
+                                dtype='uint64')
+
+        objs = sorted(db.objects(groups='dev', purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        # probe_id and file_id are both the database object id (x.id, passed twice)
+        return [ Entry(template_ids, x.client_id, x.id, x.id, eye_centers,
+                       x.make_path(root_folder, '.pgm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        # self.objs is the list returned by index(), installed by the platform
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # Already a dict shaped like the eye_positions data format
+            return obj.eye_centers
+
+        elif output == 'image':
+            # NOTE(review): declared output is array_3d_uint8 (color), but a
+            # .pgm load typically yields a 2D grayscale array — confirm
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        # np.ndarray(...) returns uninitialized memory — contents are arbitrary,
+        # only the shape/dtype matter for these smoke tests.
+        # NOTE(review): shape is (92, 112); the canonical AT&T geometry is
+        # 112 rows x 92 columns — confirm the intended (height, width) order
+        return np.ndarray((92, 112), dtype=np.uint8)
+
+    # Monkey-patch so the views below never touch the filesystem
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    # Replace image loading with a mock so no database files are required
+    setup_tests()
+
+    # For each view: build the index with an empty root folder, then pull the
+    # first entry of every declared output to smoke-test the get() branches
+    view = Train()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Templates()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Probes()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = TrainEyePositions()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+    view = TemplatesEyePositions()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+    view = ProbesEyePositions()
+    view.objs = view.index(root_folder='', parameters=dict())
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/atnt/6.rst b/advanced/databases/atnt/6.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a960ce119f94bf997aacf90453b162b4ac721f8
--- /dev/null
+++ b/advanced/databases/atnt/6.rst
@@ -0,0 +1 @@
+The AT&T Database of Faces
\ No newline at end of file
diff --git a/advanced/databases/atvskeystroke/5.json b/advanced/databases/atvskeystroke/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..3940cb4c1731d4d8dea2e9cce7feb6ca22b0b62c
--- /dev/null
+++ b/advanced/databases/atvskeystroke/5.json
@@ -0,0 +1,25 @@
+{
+    "description": "The ATVS Keystroke database",
+    "root_folder": "/idiap/group/biometric/databases/atvs_keystroke",
+    "protocols": [
+        {
+            "name": "A",
+            "template": "simple_keystroke_recognition/1",
+            "views": {
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "A"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "A"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/atvskeystroke/5.py b/advanced/databases/atvskeystroke/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9a4339c709e4ff62a24aa8b510b5cc49c9b9805
--- /dev/null
+++ b/advanced/databases/atvskeystroke/5.py
@@ -0,0 +1,266 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.db.atvskeystroke
+
+
+keystroke_feature = ['', 'holdtime', 'rplatency', 'pplatency', 'rrlatency', 'prlatency']
+keystroke_type = ['', 'given_name', 'family_name', 'email', 'nationality', 'id_number']
+
+
+#----------------------------------------------------------
+
+
+def keystroke_reader(filename):  # parses one ATVS keystroke .txt file into {feature: {type: int32 array}}
+    counter = 0  # index of the current non-empty line within the file
+    feat = 0     # 1-based index into keystroke_feature (entry 0 is a placeholder)
+
+    data = {}
+    for line in open(filename, 'r').readlines():
+        if not line.strip(): continue  # skip blank lines without advancing the counter
+        if counter % 6 == 0:
+            feat += 1  # every 6th non-empty line opens a new feature section
+            label = line.strip()  # NOTE(review): read but never used — presumably the feature name; confirm it matches keystroke_feature[feat]
+            data[keystroke_feature[feat]] = {}
+        else:
+            values = [np.int32(v) for v in line.strip().split(' ')]
+            data[keystroke_feature[feat]][keystroke_type[counter % 6]] = np.array(values)
+        counter += 1
+
+    return data
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - keystroke: "{{ user.username }}/atvs_keystroke/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "keystroke".
+    Several "keystroke" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):  # builds the flat, sorted list of enrolment entries
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'keystroke'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atvskeystroke.Database()
+
+        template_ids = db.model_ids(groups='eval',
+                                    protocol=parameters['protocol'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(groups='eval',
+                              protocol=parameters['protocol'],
+                              purposes='enrol',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.txt'))
+                             for x in objs ])  # one Entry per enrolment file of this template
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))  # deterministic ordering
+
+
+    def get(self, output, index):  # resolves one output field of the entry at 'index'
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'text': str(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'keystroke':
+            return keystroke_reader(obj.keystroke)  # parsed on demand from the .txt file
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - keystroke: "{{ user.username }}/atvs_keystroke/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_text/1"
+
+    One "file_id" is associated with a given "keystroke".
+    One "probe_id" is associated with a given "keystroke".
+    Several "keystroke" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):  # builds the flat, sorted list of probe entries
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'keystroke'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atvskeystroke.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups='eval'),
+                              key=lambda x: int(x))  # numeric, not lexicographic, order
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups='eval',
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                          key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]  # probe file ids applicable to this template
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='eval',
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]  # all templates this probe is compared against
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  obj.make_path(root_folder, '.txt')) )  # probe_id == file_id for this database
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))  # groups probes sharing the same template list
+
+
+    def get(self, output, index):  # resolves one output field of the entry at 'index'
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'text': [ str(x) for x in obj.template_ids ]
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'keystroke':
+            return keystroke_reader(obj.keystroke)  # parsed on demand from the .txt file
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():  # replaces file I/O with a no-op so the views can run offline
+    # Install a mock load function for the keystrokes
+    def mock_keystroke_reader(filename):
+        return {}
+
+    global keystroke_reader  # rebinds the module-level reader used by Templates.get / Probes.get
+    keystroke_reader = mock_keystroke_reader
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()  # mock out keystroke file reads first
+
+    view = Templates()
+    view.objs = view.index(root_folder='', parameters=dict(protocol = 'A'))  # smoke-test: index, then fetch every output of entry 0
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('keystroke', 0)
+
+    view = Probes()
+    view.objs = view.index(root_folder='', parameters=dict(protocol = 'A'))  # same smoke-test for the probe view
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('keystroke', 0)
diff --git a/advanced/databases/atvskeystroke/5.rst b/advanced/databases/atvskeystroke/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fe80803c56815cce01c171354aef3128d9ba732d
--- /dev/null
+++ b/advanced/databases/atvskeystroke/5.rst
@@ -0,0 +1 @@
+The ATVS Keystroke database
\ No newline at end of file
diff --git a/advanced/databases/avspoof/5.json b/advanced/databases/avspoof/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..7009832e4407d30dfccace1062a2894ded3aa933
--- /dev/null
+++ b/advanced/databases/avspoof/5.json
@@ -0,0 +1,503 @@
+{
+    "description": "The AVspoof Database",
+    "root_folder": "/idiap/resource/database/AVSpoof",
+    "protocols": [
+        {
+            "name": "smalltest_verify_train",
+            "template": "verify_trainset_speech/1",
+            "views": {
+                "train_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "smalltest",
+                        "purpose": "enroll"
+                    }
+                },
+                "train_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "smalltest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smalltest_verify_train_spoof",
+            "template": "verify_trainset_speech_spoof/1",
+            "views": {
+                "train_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "smalltest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smalltest_verification",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "RecognitionTraining",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "smalltest"
+                    }
+                },
+                "dev_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "smalltest",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "smalltest"
+                    }
+                },
+                "test_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "smalltest",
+                        "purpose": "enroll"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "smalltest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smalltest_verification_spoof",
+            "template": "speaker_recognition_spoof/1",
+            "views": {
+                "dev_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "smalltest"
+                    }
+                },
+                "test_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "smalltest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "grandtest_verify_train",
+            "template": "verify_trainset_speech/1",
+            "views": {
+                "train_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "grandtest",
+                        "purpose": "enroll"
+                    }
+                },
+                "train_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "grandtest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "grandtest_verify_train_spoof",
+            "template": "verify_trainset_speech_spoof/1",
+            "views": {
+                "train_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "grandtest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "grandtest_verification",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "RecognitionTraining",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "purpose": "enroll",
+                        "group": "devel"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "devel"
+                    }
+                },
+                "test_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "purpose": "enroll",
+                        "group": "test"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "grandtest_verification_spoof",
+            "template": "speaker_recognition_spoof/1",
+            "views": {
+                "dev_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "devel"
+                    }
+                },
+                "test_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "physicalaccess_verify_train",
+            "template": "verify_trainset_speech/1",
+            "views": {
+                "train_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "physical_access",
+                        "purpose": "enroll"
+                    }
+                },
+                "train_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "physical_access"
+                    }
+                }
+            }
+        },
+        {
+            "name": "physicalaccess_verify_train_spoof",
+            "template": "verify_trainset_speech_spoof/1",
+            "views": {
+                "train_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "physical_access"
+                    }
+                }
+            }
+        },
+        {
+            "name": "physicalaccess_verification",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "RecognitionTraining",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "purpose": "enroll",
+                        "group": "devel"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "devel"
+                    }
+                },
+                "test_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "purpose": "enroll",
+                        "group": "test"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "physicalaccess_verification_spoof",
+            "template": "speaker_recognition_spoof/1",
+            "views": {
+                "dev_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "devel"
+                    }
+                },
+                "test_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "logicalaccess_verify_train",
+            "template": "verify_trainset_speech/1",
+            "views": {
+                "train_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "logical_access",
+                        "purpose": "enroll"
+                    }
+                },
+                "train_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "logical_access"
+                    }
+                }
+            }
+        },
+        {
+            "name": "logicalaccess_verify_train_spoof",
+            "template": "verify_trainset_speech_spoof/1",
+            "views": {
+                "train_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "logical_access"
+                    }
+                }
+            }
+        },
+        {
+            "name": "logicalaccess_verification",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "RecognitionTraining",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "purpose": "enroll",
+                        "group": "devel"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "devel"
+                    }
+                },
+                "test_templates": {
+                    "view": "RecognitionTemplates",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "purpose": "enroll",
+                        "group": "test"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "logicalaccess_verification_spoof",
+            "template": "speaker_recognition_spoof/1",
+            "views": {
+                "dev_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "devel"
+                    }
+                },
+                "test_attacks": {
+                    "view": "Attacks",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smalltest_antispoofing",
+            "template": "simple_speech_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "smalltest"
+                    }
+                },
+                "dev_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "smalltest"
+                    }
+                },
+                "eval_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "smalltest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "grandtest_antispoofing",
+            "template": "simple_speech_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "train"
+                    }
+                },
+                "dev_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "devel"
+                    }
+                },
+                "eval_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "grandtest",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "physicalaccess_antispoofing",
+            "template": "simple_speech_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "train"
+                    }
+                },
+                "dev_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "devel"
+                    }
+                },
+                "eval_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "physical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "logicalaccess_antispoofing",
+            "template": "simple_speech_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "train"
+                    }
+                },
+                "dev_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "devel"
+                    }
+                },
+                "eval_probes": {
+                    "view": "SimpleAntispoofing",
+                    "parameters": {
+                        "protocol": "logical_access",
+                        "group": "test"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/avspoof/5.py b/advanced/databases/avspoof/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..d39daa24ae31bcc21af39204d7083ab9ac38844b
--- /dev/null
+++ b/advanced/databases/avspoof/5.py
@@ -0,0 +1,539 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.db.avspoof
+import scipy.io.wavfile
+
+from bob.db.avspoof.driver import Interface
+
+INFO = Interface()
+SQLITE_FILE = INFO.files()[0]
+
+
+#----------------------------------------------------------
+
+
class RecognitionTraining(View):
    """Training material for speaker recognition.

    Outputs:
        - speech: "{{ system_user.username }}/array_1d_floats/1"
        - file_id: "{{ system_user.username }}/text/1"
        - client_id: "{{ system_user.username }}/text/1"

    One "file_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ----------------------------------------------- -----------------------------------------------
    |                   client_id                 | |                   client_id                 |
    ----------------------------------------------- -----------------------------------------------
    """

    def index(self, root_folder, parameters):
        """Builds the ordered list of entries served by this view."""
        Entry = namedtuple('Entry', ['client_id', 'file_id', 'speech'])

        # Open the database; both enrollment and probe samples are pooled
        # together as training material
        db = bob.db.avspoof.Database()

        objs = db.objects(protocol=parameters['protocol'],
                          groups=parameters['group'],
                          cls=('enroll', 'probe'))

        # Deterministic ordering: by client, then by file identifier
        return [Entry(obj.client_id, obj.id, obj.make_path(root_folder, '.wav'))
                for obj in sorted(objs, key=lambda obj: (obj.client_id, obj.id))]


    def get(self, output, index):
        """Returns the value of the requested output for one indexed entry."""
        entry = self.objs[index]

        if output == 'speech':
            # Only the samples are exposed; the sampling rate is dropped
            _, audio = scipy.io.wavfile.read(entry.speech)
            return {'value': np.cast['float'](audio)}

        if output == 'file_id':
            return {'text': str(entry.file_id)}

        if output == 'client_id':
            return {'text': str(entry.client_id)}
+
+
+#----------------------------------------------------------
+
+
class RecognitionTemplates(View):
    """Enrollment (or probe) material grouped by template.

    Outputs:
        - speech: "{{ system_user.username }}/array_1d_floats/1"
        - file_id: "{{ system_user.username }}/text/1"
        - template_id: "{{ system_user.username }}/text/1"
        - client_id: "{{ system_user.username }}/text/1"

    One "file_id" is associated with a given "speech".
    Several "speech" are associated with a given "template_id".
    Several "template_id" are associated with a given "client_id".

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ----------------------------------------------- -----------------------------------------------
    |                  template_id                | |                  template_id                |
    ----------------------------------------------- -----------------------------------------------
    -----------------------------------------------------------------------------------------------
    |                                         client_id                                           |
    -----------------------------------------------------------------------------------------------

    Note: for this particular database, there is only one "template_id"
    per "client_id".
    """

    def index(self, root_folder, parameters):
        """Builds the ordered list of entries served by this view."""
        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.avspoof.Database()

        protocol = parameters['protocol']
        group = parameters['group']

        # The 'smalltest' protocol operates on a hard-coded subset of clients
        if protocol == "smalltest":
            if group == "train":
                template_ids = sorted([1, 3])
            elif group == "devel":
                template_ids = sorted([15, 20])
            elif group == "test":
                template_ids = sorted([18, 33])
        else:
            template_ids = [client.id for client in db.clients(groups=group)]

        entries = []
        for template_id in template_ids:
            for obj in db.objects(groups=group,
                                  protocol=protocol,
                                  cls=parameters['purpose'],
                                  clients=(template_id,)):
                entries.append(Entry(obj.client_id, template_id, obj.id,
                                     obj.make_path(root_folder, '.wav')))

        entries.sort(key=lambda entry: (entry.client_id, entry.template_id, entry.file_id))
        return entries


    def get(self, output, index):
        """Returns the value of the requested output for one indexed entry."""
        entry = self.objs[index]

        if output == 'speech':
            # Only the samples are exposed; the sampling rate is dropped
            _, audio = scipy.io.wavfile.read(entry.speech)
            return {'value': np.cast['float'](audio)}

        if output == 'file_id':
            return {'text': str(entry.file_id)}

        if output == 'template_id':
            return {'text': str(entry.template_id)}

        if output == 'client_id':
            return {'text': str(entry.client_id)}
+
+
+#----------------------------------------------------------
+
+
class Probes(View):
    """Probe samples for speaker verification.

    Outputs:
        - speech: "{{ system_user.username }}/array_1d_floats/1"
        - file_id: "{{ system_user.username }}/text/1"
        - probe_id: "{{ system_user.username }}/text/1",
        - client_id: "{{ system_user.username }}/text/1"
        - template_ids: "{{ system_user.username }}/array_1d_text/1",

    One "file_id" is associated with a given "speech".
    One "probe_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".

    Each probe must be matched against a number of templates defined by a list of
    client identifiers.

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ----------------------------------------------- -----------------------------------------------
    |                   client_id                 | |                   client_id                 |
    ----------------------------------------------- -----------------------------------------------
    -----------------------------------------------------------------------------------------------
    |                                        template_ids                                         |
    -----------------------------------------------------------------------------------------------
    """

    def index(self, root_folder, parameters):
        """Builds the ordered list of entries served by this view."""
        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.avspoof.Database()

        protocol = parameters['protocol']
        group = parameters['group']

        # Every probe is compared against all the templates of its group; the
        # 'smalltest' protocol restricts this to a hard-coded subset of clients
        if protocol == "smalltest":
            if group == "train":
                template_ids = sorted([1, 3])
            elif group == "devel":
                template_ids = sorted([15, 20])
            elif group == "test":
                template_ids = sorted([18, 33])
        else:
            template_ids = sorted(client.id for client in db.clients(groups=group))

        objs = db.objects(protocol=protocol, groups=group, cls='probe')

        # The database file identifier doubles as the probe identifier
        return [Entry(template_ids, obj.client_id, obj.id, obj.id,
                      obj.make_path(root_folder, '.wav'))
                for obj in sorted(objs, key=lambda obj: (obj.client_id, obj.id))]


    def get(self, output, index):
        """Returns the value of the requested output for one indexed entry."""
        entry = self.objs[index]

        if output == 'speech':
            # Only the samples are exposed; the sampling rate is dropped
            _, audio = scipy.io.wavfile.read(entry.speech)
            return {'value': np.cast['float'](audio)}

        if output == 'template_ids':
            return {'text': [str(template_id) for template_id in entry.template_ids]}

        if output == 'probe_id':
            return {'text': str(entry.probe_id)}

        if output == 'file_id':
            return {'text': str(entry.file_id)}

        if output == 'client_id':
            return {'text': str(entry.client_id)}
+
+
+#----------------------------------------------------------
+
+
class Attacks(View):
    """Spoofing-attack samples for speaker verification.

    Outputs:
        - speech: "{{ system_user.username }}/array_1d_floats/1"
        - file_id: "{{ system_user.username }}/text/1"
        - attack_id: "{{ system_user.username }}/text/1",
        - client_id: "{{ system_user.username }}/text/1"
        - template_ids: "{{ system_user.username }}/array_1d_text/1",

    One "file_id" is associated with a given "speech".
    One "attack_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".

    Each attack must be matched against a number of templates defined by a
    list of client identifiers (here: only the attacked client itself).

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |  attack_id  | |  attack_id  | |  attack_id  | |  attack_id  | |  attack_id  | |  attack_id  |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ----------------------------------------------- -----------------------------------------------
    |                   client_id                 | |                   client_id                 |
    ----------------------------------------------- -----------------------------------------------
    -----------------------------------------------------------------------------------------------
    |                                        template_ids                                         |
    -----------------------------------------------------------------------------------------------
    """

    def index(self, root_folder, parameters):
        """Builds the ordered list of entries served by this view."""
        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'attack_id', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.avspoof.Database()

        objs = db.objects(protocol=parameters['protocol'],
                          groups=parameters['group'],
                          cls='attack')

        # Each attack only targets its own client, hence the single-element
        # template list; the file identifier doubles as the attack identifier
        return [Entry([str(obj.client_id)], obj.client_id, obj.id, obj.id,
                      obj.make_path(root_folder, '.wav'))
                for obj in sorted(objs, key=lambda obj: (obj.client_id, obj.id))]


    def get(self, output, index):
        """Returns the value of the requested output for one indexed entry."""
        entry = self.objs[index]

        if output == 'speech':
            # Only the samples are exposed; the sampling rate is dropped
            _, audio = scipy.io.wavfile.read(entry.speech)
            return {'value': np.cast['float'](audio)}

        if output == 'template_ids':
            # Already stored as a list of strings by index()
            return {'text': entry.template_ids}

        if output == 'attack_id':
            return {'text': str(entry.attack_id)}

        if output == 'file_id':
            return {'text': str(entry.file_id)}

        if output == 'client_id':
            return {'text': str(entry.client_id)}
+
+
+#----------------------------------------------------------
+
+
class SimpleAntispoofing(View):
    """Outputs:
        - speech: "{{ system_user.username }}/array_1d_floats/1"
        - file_id: "{{ system_user.username }}/text/1"
        - client_id: "{{ system_user.username }}/text/1"
        - attack_type: "{{ system_user.username }}/text/1"
        - class: "{{ system_user.username }}/text/1"


    One "file_id" is associated with a given "speech".
    Several "speech" are associated with a given "client_id".
    Several "client_id" are associated with a given "class" ('real' or 'attack').

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ------------------------------- ------------------------------- -------------------------------
    |          attack_type        | |          attack_type        | |          attack_type        |
    ------------------------------- ------------------------------- -------------------------------
    --------------------------------------------------------------- -------------------------------
    |                          client_id                          | |          client_id          |
    --------------------------------------------------------------- -------------------------------
    -----------------------------------------------------------------------------------------------
    |                                          class                                              |
    -----------------------------------------------------------------------------------------------
    """

    def __init__(self):
        # Fix: the original code evaluated 'super(SimpleAntispoofing, self)'
        # without calling __init__ on it, which is a no-op -- View.__init__()
        # never ran. Call the parent constructor explicitly.
        super(SimpleAntispoofing, self).__init__()

        # 'class' is a reserved word and cannot be a namedtuple field name, so
        # the 'class' output is served from the 'cls' member of each entry
        self.output_member_map = {'class': 'cls'}


    def index(self, root_folder, parameters):
        """Builds the entry list: all genuine samples first, then all attacks."""
        Entry = namedtuple('Entry', ['cls', 'client_id', 'attack_type', 'file_id', 'speech'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.avspoof.Database()

        objs_real = sorted(db.objects(protocol=parameters['protocol'],
                                      groups=parameters['group'],
                                      cls='real'),
                           key=lambda x: (x.client_id, x.id))

        objs_attack = sorted(db.objects(protocol=parameters['protocol'],
                                        groups=parameters['group'],
                                        cls='attack'),
                             key=lambda x: (x.client_id, x.get_attack(), x.id))

        # Genuine accesses carry the conventional attack type 'human'
        return [ Entry('real', x.client_id, 'human', x.id, x.make_path(root_folder, '.wav')) for x in objs_real ] + \
               [ Entry('attack', x.client_id, x.get_attack(), x.id, x.make_path(root_folder, '.wav')) for x in objs_attack ]


    def get(self, output, index):
        """Returns the value of the requested output for one indexed entry."""
        obj = self.objs[index]

        if output == 'class':
            return {
                'text': obj.cls
            }

        elif output == 'client_id':
            return {
                'text': str(obj.client_id)
            }

        elif output == 'attack_type':
            return {
                'text': obj.attack_type
            }

        elif output == 'file_id':
            return {
                'text': str(obj.file_id)
            }

        elif output == 'speech':
            rate, audio = scipy.io.wavfile.read(obj.speech)

            # np.cast was removed in NumPy 2.0; astype(np.float64) is the
            # supported equivalent of np.cast['float']
            return {
                'value': audio.astype(np.float64)
            }
+
+
+#----------------------------------------------------------
+
+
def setup_tests():
    """Monkey-patches scipy's wav reader so the views can run without files."""
    def fake_read(filename):
        # Pretend every file holds an (uninitialised) 128-sample buffer
        # recorded at 44.1 kHz
        return 44100, np.ndarray((128,))

    scipy.io.wavfile.read = fake_read
+
+
+#----------------------------------------------------------
+
+
# Test the behavior of the views (on fake data)
if __name__ == '__main__':

    setup_tests()

    # (view class, extra index parameters, outputs to fetch once each)
    scenarios = [
        (RecognitionTraining, {},
         ('client_id', 'file_id', 'speech')),
        (RecognitionTemplates, {'purpose': 'enroll'},
         ('client_id', 'template_id', 'file_id', 'speech')),
        (Probes, {},
         ('template_ids', 'client_id', 'probe_id', 'file_id', 'speech')),
        (Attacks, {},
         ('template_ids', 'client_id', 'attack_id', 'file_id', 'speech')),
        (SimpleAntispoofing, {},
         ('class', 'client_id', 'attack_type', 'file_id', 'speech')),
    ]

    for view_class, extra_parameters, outputs in scenarios:
        parameters = dict(protocol='smalltest', group='train')
        parameters.update(extra_parameters)

        view = view_class()
        view.objs = view.index(root_folder='', parameters=parameters)

        # Fetch the first entry of every declared output to smoke-test the view
        for output in outputs:
            view.get(output, 0)
diff --git a/advanced/databases/avspoof/5.rst b/advanced/databases/avspoof/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb8a5f67432d9df6ef19ce758ad46480bc712557
--- /dev/null
+++ b/advanced/databases/avspoof/5.rst
@@ -0,0 +1 @@
+The AVspoof Database
\ No newline at end of file
diff --git a/advanced/databases/banca/5.json b/advanced/databases/banca/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..f8423df164e4f1a15865459ae51c0d2d29d92fc6
--- /dev/null
+++ b/advanced/databases/banca/5.json
@@ -0,0 +1,287 @@
+{
+    "description": "The BANCA Database of Faces",
+    "root_folder": "/idiap/group/biometric/databases/banca/english/images/images",
+    "protocols": [
+        {
+            "name": "P",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "P"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "P",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "P",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "P",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "P",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "G",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "G"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "G",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "G",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "G",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "G",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Mc",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Mc"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Mc",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Mc",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Mc",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Mc",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Md",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Md"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Md",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Md",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Md",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Md",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Ma",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Ma"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ma",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ma",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ma",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ma",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Ud",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Ud"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ud",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ud",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ud",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ud",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Ua",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Ua"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ua",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ua",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ua",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ua",
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/banca/5.py b/advanced/databases/banca/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..c90de39496b15d3e283d3d164efc59178afbe37d
--- /dev/null
+++ b/advanced/databases/banca/5.py
@@ -0,0 +1,382 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.banca
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        # One Entry per training image; the platform stores this list in self.objs
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.banca.Database()
+
+        # 'world' is the training group; sort for a deterministic output order
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.ppm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        # Return the value of the requested output for the sample at `index`
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # NOTE(review): assumes the bob.db.banca annotations are (y, x) tuples
+            # stored under the 'leye'/'reye' keys -- confirm against bob.db.banca
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            # obj.image is the path built by make_path() in index()
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        # One Entry per enrollment image; the platform stores this list in self.objs
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.banca.Database()
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        # Collect the enrollment ('enroll') samples of each template
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x),
+                                   x.make_path(root_folder, '.ppm'))
+                             for x in objs ])
+
+        # Deterministic order: client, then template, then file id
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        # Return the value of the requested output for the sample at `index`
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # NOTE(review): assumes the bob.db.banca annotations are (y, x) tuples
+            # stored under the 'leye'/'reye' keys -- confirm against bob.db.banca
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            # obj.image is the path built by make_path() in index()
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.banca.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups=parameters['group']))
+
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  db.annotations(obj), obj.make_path(root_folder, '.ppm')) )
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': np.uint64(obj.template_ids)
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images, so the self-test below
+    # can run without any actual image files on disk
+    def mock_load(root_folder):  # NOTE(review): receives a file path, despite the parameter name
+        # np.ndarray() leaves the buffer uninitialized -- values are arbitrary
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()  # replace bob.io.base.load so no real image files are needed
+
+    # Exercise each view by hand: build the index (normally done by the
+    # platform), then fetch every declared output once for the first sample
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='P'
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='P',
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='P',
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/banca/5.rst b/advanced/databases/banca/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d89cec723368f0cdad06b05865c17fb6b578da88
--- /dev/null
+++ b/advanced/databases/banca/5.rst
@@ -0,0 +1 @@
+The BANCA Database of Faces
\ No newline at end of file
diff --git a/advanced/databases/biowave/4.json b/advanced/databases/biowave/4.json
new file mode 100644
index 0000000000000000000000000000000000000000..a928cf223eff9c17d0a9b7e1a1ee90a649ddcdcf
--- /dev/null
+++ b/advanced/databases/biowave/4.json
@@ -0,0 +1,874 @@
+{
+    "root_folder": "/idiap/project/biowave/biowave_v1/data",
+    "description": "BIOWAVE Vein Database",
+    "protocols": [
+        {
+            "name": "Idiap_1_1_R_BEAT_test",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_R_BEAT_test",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_5_R_BEAT_test",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_BEAT_test",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_R",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_5_R",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_3_5_R",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_3_5_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_3_5_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_3_5_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_3_5_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_3_5_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_R",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_L",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_5_L",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_L",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_L",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_L",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_L",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_L",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_L",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_R_less",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_less",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_less",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_less",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_less",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_less",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_5_R_less",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_less",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_less",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_less",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_less",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_1_5_R_less",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_R_less",
+            "template": "advanced_vein_recognition_2/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_less",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_less",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_less",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_less",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_less",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_R_annotations_BEAT_test",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_R_annotations_BEAT_test",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R_BEAT_test",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_R_annotations",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_R_annotations",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_R",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_1_1_L_annotations",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_L",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Idiap_5_5_L_annotations",
+            "template": "advanced_vein_annotations/1",
+            "views": {
+                "train": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "world"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "ViewAnnotations",
+                    "parameters": {
+                        "protocol": "Idiap_5_5_L",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "annotation_benchmark",
+            "template": "advanced_annotation_benchmark/1",
+            "views": {
+                "annotations": {
+                    "view": "AnnotationBenchmark",
+                    "parameters": {
+                        "protocol": "Idiap_1_1_R"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/biowave/4.py b/advanced/databases/biowave/4.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa4837a641853c28b2e4ca8197743c88ad04bd60
--- /dev/null
+++ b/advanced/databases/biowave/4.py
@@ -0,0 +1,497 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View as BaseView
+
+import bob.io.base
+import bob.io.image
+import bob.db.biowave_v1
+
+from PIL import Image, ImageDraw, ImageFilter
+
+
+#----------------------------------------------------------
+
+
+def construct_ROI_image(annotations):
+    """Adapted from bob.db.biowave_v1, because we want to separate it in two steps:
+    indexing and image construction"""
+
+    if len(annotations) > 0:
+        return bob.db.biowave_v1.utils.ManualRoiCut(annotations).roi_mask()
+    else:
+        return np.array([], np.uint8)
+
+
+#----------------------------------------------------------
+
+
+def construct_vein_image(annotations, center=False):
+    """Adapted from bob.db.biowave_v1, because we want to separate it in two steps:
+    indexing and image construction"""
+
+    if len(annotations) > 0:
+        im = Image.new('L', (480, 480), (0))
+        draw = ImageDraw.Draw(im)
+        if center:
+            xes_all = [point[1] for line in annotations for point in line]
+            yes_all = [point[0] for line in annotations for point in line]
+            for line in annotations:
+                xes = [point[1] - np.round(np.mean(xes_all)) + 239 for point in line]
+                yes = [point[0] - np.round(np.mean(yes_all)) + 239 for point in line]
+                for point in range(len(line) - 1):
+                    draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=255, width = 5)
+        else:
+            for line in annotations:
+                xes = [point[1] for point in line]
+                yes = [point[0] for point in line]
+                for point in range(len(line) - 1):
+                    draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=255, width = 5)
+        im = im.filter(ImageFilter.MedianFilter(5))
+        return np.array(np.array(im, dtype = bool), dtype = np.uint8)
+    else:
+        return np.array([], np.uint8)
+
+
+#----------------------------------------------------------
+
+
+def construct_alignment_annotations(annotations):
+    return [ dict(
+                 x = np.int32(annotation[1]),
+                 y = np.int32(annotation[0])
+             ) for annotation in annotations ]
+
+
+#----------------------------------------------------------
+
+
+class View(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.biowave_v1.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=[parameters['group']],
+                                 purposes=parameters.get('purpose', None),
+                                 annotated_images=False,
+                                 imagedir=root_folder),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.make_path(root_folder, '.png')) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class TemplateView(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - model_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    Several "image" are associated with a given "model_id".
+    Several "model_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   model_id                  | |                   model_id                  |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "image"
+    per "model_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'model_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.biowave_v1.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 purposes=parameters['purpose'],
+                                 groups=[parameters['group']],
+                                 annotated_images=False,
+                                 imagedir=root_folder),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.model_id.encode('utf-8'),
+                       x.make_path(root_folder, '.png'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'model_id':
+            return {
+                'text': str(obj.model_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class ViewAnnotations(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - alignment_annotations: "{{ system_user.username }}/array_1d_coordinates/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "vein_annotations" is associated with a given "image".
+    One "ROI_annotations" is associated with a given "image".
+    One "alignment_annotations" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |         image         | |         image         | |         image         | |         image         |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    vein_annotations   | |    vein_annotations   | |    vein_annotations   | |    vein_annotations   |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    --------------------------------------------------- ---------------------------------------------------
+    |                     client_id                   | |                     client_id                   |
+    --------------------------------------------------- ---------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'alignment_annotations', 'ROI_annotations',
+                                     'vein_annotations', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.biowave_v1.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=[parameters['group']],
+                                 purposes=parameters.get('purpose', None),
+                                 annotated_images=True,
+                                 imagedir=root_folder),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id,
+                       x.alignment_annotations(directory=root_folder),
+                       x.roi_annotations(directory=root_folder),
+                       x.vein_annotations(directory=root_folder),
+                       x.make_path(root_folder, '.png'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'alignment_annotations':
+            return {
+                'value': construct_alignment_annotations(obj.alignment_annotations)
+            }
+
+        elif output == 'ROI_annotations':
+            return {
+                'value': construct_ROI_image(obj.ROI_annotations)
+            }
+
+        elif output == 'vein_annotations':
+            return {
+                'value': construct_vein_image(obj.vein_annotations)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class TemplateViewAnnotations(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - alignment_annotations: "{{ system_user.username }}/array_1d_coordinates/1"
+        - model_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "vein_annotations" is associated with a given "image".
+    One "ROI_annotations" is associated with a given "image".
+    One "alignment_annotations" is associated with a given "image".
+    Several "image" are associated with a given "model_id".
+    Several "model_id" are associated with a given "client_id".
+
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |         image         | |         image         | |         image         | |         image         |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    vein_annotations   | |    vein_annotations   | |    vein_annotations   | |    vein_annotations   |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    --------------------------------------------------- ---------------------------------------------------
+    |                     model_id                    | |                     model_id                    |
+    --------------------------------------------------- ---------------------------------------------------
+    -------------------------------------------------------------------------------------------------------
+    |                                              client_id                                              |
+    -------------------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "image"
+    per "model_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'model_id', 'alignment_annotations',
+                                     'ROI_annotations', 'vein_annotations', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.biowave_v1.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=[parameters['group']],
+                                 purposes=parameters['purpose'],
+                                 annotated_images=True,
+                                 imagedir=root_folder),
+                      key=lambda x: (x.client_id, x.model_id, x.id))
+
+        return [ Entry(x.client_id,
+                       x.model_id.encode('utf-8'),
+                       x.alignment_annotations(directory=root_folder),
+                       x.roi_annotations(directory=root_folder),
+                       x.vein_annotations(directory=root_folder),
+                       x.make_path(root_folder, '.png'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'model_id':
+            return {
+                'text': obj.model_id
+            }
+
+        elif output == 'alignment_annotations':
+            return {
+                'value': construct_alignment_annotations(obj.alignment_annotations)
+            }
+
+        elif output == 'ROI_annotations':
+            return {
+                'value': construct_ROI_image(obj.ROI_annotations)
+            }
+
+        elif output == 'vein_annotations':
+            return {
+                'value': construct_vein_image(obj.vein_annotations)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class AnnotationBenchmark(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - vein_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - ROI_annotations: "{{ system_user.username }}/array_2d_uint8/1"
+        - alignment_annotations: "{{ system_user.username }}/array_1d_coordinates/1"
+
+    One "vein_annotations" is associated with a given "image".
+    One "ROI_annotations" is associated with a given "image".
+    One "alignment_annotations" is associated with a given "image".
+
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |         image         | |         image         | |         image         | |         image         |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    vein_annotations   | |    vein_annotations   | |    vein_annotations   | |    vein_annotations   |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    | |    ROI_annotations    |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    ------------------------- ------------------------- ------------------------- -------------------------
+    | alignment_annotations | | alignment_annotations | | alignment_annotations | | alignment_annotations |
+    ------------------------- ------------------------- ------------------------- -------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['alignment_annotations', 'ROI_annotations',
+                                     'vein_annotations', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.biowave_v1.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 annotated_images=True,
+                                 imagedir=root_folder),
+                      key=lambda x: x.id)
+
+        return [ Entry(x.alignment_annotations(directory=root_folder),
+                       x.roi_annotations(directory=root_folder),
+                       x.vein_annotations(directory=root_folder),
+                       x.make_path(root_folder, '.png'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'alignment_annotations':
+            return {
+                'value': construct_alignment_annotations(obj.alignment_annotations)
+            }
+
+        elif output == 'ROI_annotations':
+            return {
+                'value': construct_ROI_image(obj.ROI_annotations)
+            }
+
+        elif output == 'vein_annotations':
+            return {
+                'value': construct_vein_image(obj.vein_annotations)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install mock methods
+    def mock_load(root_folder):
+        return np.ndarray((10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = View()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol="Idiap_1_1_R_BEAT_test",
+            group="world",
+        )
+    )
+    view.get('client_id', 0)
+    view.get('image', 0)
+
+
+    view = TemplateView()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol="Idiap_1_1_R_BEAT_test",
+            group="dev",
+            purpose="enroll",
+        )
+    )
+    view.get('client_id', 0)
+    view.get('model_id', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/biowave/4.rst b/advanced/databases/biowave/4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67429868f7d24aa105e309da653dea9a24c0ffbd
--- /dev/null
+++ b/advanced/databases/biowave/4.rst
@@ -0,0 +1 @@
+BIOWAVE Vein Database
\ No newline at end of file
diff --git a/advanced/databases/casme2/5.json b/advanced/databases/casme2/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..346966e43bf70d3ba5c00a3a3fcde7e880b3ee99
--- /dev/null
+++ b/advanced/databases/casme2/5.json
@@ -0,0 +1,527 @@
+{
+    "description": "CASME 2 Spontaneous Subtle Expression Database",
+    "root_folder": "/idiap/resource/database/CASME2",
+    "protocols": [
+        {
+            "name": "fold_1",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_1"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_1"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_2",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_2"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_2"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_3",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_3"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_3"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_4",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_4"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_4"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_5",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_5"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_5"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_6",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_6"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_6"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_7",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_7"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_7"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_8",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_8"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_8"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_9",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_9"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_9"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_10",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_10"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_10"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_11",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_11"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_11"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_12",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_12"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_12"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_13",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_13"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_13"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_14",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_14"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_14"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_15",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_15"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_15"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_16",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_16"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_16"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_17",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_17"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_17"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_18",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_18"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_18"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_19",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_19"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_19"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_20",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_20"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_20"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_21",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_21"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_21"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_22",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_22"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_22"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_23",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_23"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_23"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_24",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_24"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_24"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_25",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_25"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_25"
+                    }
+                }
+            }
+        },
+        {
+            "name": "fold_26",
+            "template": "simple_expression_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train",
+                        "protocol": "fold_26"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "fold_26"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/casme2/5.py b/advanced/databases/casme2/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f2ef31f1fbc25be07a6e6908cf8739fcb269482
--- /dev/null
+++ b/advanced/databases/casme2/5.py
@@ -0,0 +1,148 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View as BaseView
+
+import bob.io.base
+import bob.io.image
+import bob.db.casme2
+
+
+#----------------------------------------------------------
+
+
+class View(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_4d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - emotion: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "emotion".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          emotion                                            |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['emotion', 'client_id', 'file_id', 'frames'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.casme2.Database()
+
+        objs = sorted(db.objects(protocol=str(parameters['protocol']),
+                                 groups=parameters['group']),
+                      key=lambda x: (x.emotion, x.client_id, x.id))
+
+        entries = []
+
+        for obj in objs:
+            frames = [ str(os.path.join(obj.make_path(), x.filename)).
+                         replace('/idiap/resource/database/CASME2/Cropped', root_folder)
+                       for x in obj.frames ]
+
+            entries.append(Entry(obj.emotion, obj.client_id, obj.id, frames))
+
+        return entries
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'emotion':
+            return {
+                'value': obj.emotion
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            frame = bob.io.base.load(obj.frames[0])
+
+            data = np.zeros(shape=(len(obj.frames), frame.shape[0], frame.shape[1], frame.shape[2]), dtype="uint8")
+            data[0] = frame
+
+            for i in range(1, len(obj.frames)):
+                data[i] = bob.io.base.load(obj.frames[i])
+
+            return {
+                'value': data
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = View()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='fold_1',
+            group='train',
+        )
+    )
+    view.get('emotion', 0)
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/casme2/5.rst b/advanced/databases/casme2/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4f8103c8643158d3a6b269d56665af5f462f83ca
--- /dev/null
+++ b/advanced/databases/casme2/5.rst
@@ -0,0 +1 @@
+CASME 2 Spontaneous Subtle Expression Database
\ No newline at end of file
diff --git a/advanced/databases/cbsr_nir_vis_2/5.json b/advanced/databases/cbsr_nir_vis_2/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac75d88542b046000925fc8aaabb32133f0ff7c3
--- /dev/null
+++ b/advanced/databases/cbsr_nir_vis_2/5.json
@@ -0,0 +1,407 @@
+{
+    "description": "CASIA NIR-VIS 2.0 Face Database",
+    "root_folder": "/idiap/resource/database/cbsr_nir_vis_2",
+    "protocols": [
+        {
+            "name": "view2_1",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_1"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_1",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_1",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_1",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_1",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_2",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_2"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_2",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_2",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_2",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_2",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_3",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_3"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_3",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_3",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_3",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_3",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_4",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_4"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_4",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_4",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_4",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_4",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_5",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_5"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_5",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_5",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_5",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_5",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_6",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_6"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_6",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_6",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_6",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_6",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_7",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_7"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_7",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_7",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_7",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_7",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_8",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_8"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_8",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_8",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_8",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_8",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_9",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_9"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_9",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_9",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_9",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_9",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "view2_10",
+            "template": "advanced_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view2_10"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_10",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_10",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view2_10",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view2_10",
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/cbsr_nir_vis_2/5.py b/advanced/databases/cbsr_nir_vis_2/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef3c42ae67f2c94c8e3d0e5b3cc9b91672a334be
--- /dev/null
+++ b/advanced/databases/cbsr_nir_vis_2/5.py
@@ -0,0 +1,413 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.cbsr_nir_vis_2
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cbsr_nir_vis_2.Database(
+            annotation_directory=os.path.join(root_folder, 'annotations')
+        )
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+
+        for obj in objs:
+            if os.path.exists(obj.make_path(root_folder, '.jpg')):
+                filename = obj.make_path(root_folder, '.jpg')
+            else:
+                filename = obj.make_path(root_folder, '.bmp')
+
+            entries.append(Entry(obj.client_id, obj.id, db.annotations(obj), filename))
+
+        return entries
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                    'left': {
+                        'y': np.int32(obj.eye_centers['leye'][0]),
+                        'x': np.int32(obj.eye_centers['leye'][1]),
+                    },
+                    'right': {
+                        'y': np.int32(obj.eye_centers['reye'][0]),
+                        'x': np.int32(obj.eye_centers['reye'][1]),
+                    }
+                }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cbsr_nir_vis_2.Database(
+            annotation_directory=os.path.join(root_folder, 'annotations')
+        )
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            for obj in objs:
+                if os.path.exists(obj.make_path(root_folder, '.jpg')):
+                    filename = obj.make_path(root_folder, '.jpg')
+                else:
+                    filename = obj.make_path(root_folder, '.bmp')
+
+                entries.append(Entry(obj.client_id, template_id, obj.id,
+                                     db.annotations(obj), filename))
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'template_id':
+            return {
+                'text': obj.template_id
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_text/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cbsr_nir_vis_2.Database(
+            annotation_directory=os.path.join(root_folder, 'annotations')
+        )
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups=parameters['group']))
+
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            if os.path.exists(obj.make_path(root_folder, '.jpg')):
+                filename = obj.make_path(root_folder, '.jpg')
+            else:
+                filename = obj.make_path(root_folder, '.bmp')
+
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  db.annotations(obj), filename) )
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'text': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'probe_id':
+            return {
+                'text': obj.probe_id
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    # Install a mock os.path.exists function
+    def mock_exists(path):
+        return True
+
+    bob.io.base.load = mock_load
+    os.path.exists = mock_exists
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    # Note: This database can't be tested without the actual data, since
+    # some files are needed by this implementation
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='view2_1',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='view2_1',
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='view2_1',
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/cbsr_nir_vis_2/5.rst b/advanced/databases/cbsr_nir_vis_2/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..964b6f9f0dd0b5f54c8669c2e2e17c8850670bac
--- /dev/null
+++ b/advanced/databases/cbsr_nir_vis_2/5.rst
@@ -0,0 +1 @@
+CASIA NIR-VIS 2.0 Face Database
\ No newline at end of file
diff --git a/advanced/databases/cpqd/5.json b/advanced/databases/cpqd/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..8226837fc0efef4f9e645a66e72ff10192fc2563
--- /dev/null
+++ b/advanced/databases/cpqd/5.json
@@ -0,0 +1,327 @@
+{
+    "description": "The CPqD database",
+    "root_folder": "/this/database/is/not/installed",
+    "protocols": [
+        {
+            "name": "laptop_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "laptop_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "laptop_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "laptop_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "laptop_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smartphone_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "smartphone_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "smartphone_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "l2s_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "l2s_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "l2s_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "l2s_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "l2s_female"
+                    }
+                }
+            }
+        },
+        {
+            "name": "s2l_male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "s2l_male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "s2l_female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "s2l_female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "s2l_female"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/cpqd/5.py b/advanced/databases/cpqd/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a09e7c8306ff260db47f297a558803a3e4972a4
--- /dev/null
+++ b/advanced/databases/cpqd/5.py
@@ -0,0 +1,424 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import re
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.cpqd
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        # One entry per training image; field order must match the
+        # attribute accesses in 'get' below.
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
+        # Training data is the 'world' group; sort for a stable, reproducible
+        # ordering (by client id, then by object id).
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+
+        for obj in objs:
+            # Derive a numeric file id from the digit runs found in the last
+            # path component of the object's id.
+            # NOTE(review): concatenating several digit runs could collide for
+            # different file names — confirm uniqueness for this database.
+            obj_id = obj.id.split('/')[-1]
+            digits = re.findall(r'\d+', obj_id)
+
+            # NOTE(review): client_id[1:] assumes a one-character prefix before
+            # the numeric part of the client id — verify against bob.db.cpqd.
+            entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(''.join(digits)),
+                                 db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                 obj.make_path(image_folder, '.jpg')))
+
+        return entries
+
+
+    def get(self, output, index):
+        # Serve one output value for the entry at 'index'. Unknown output
+        # names fall through and return None (no error is raised).
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # Annotations are read as (y, x) pairs under the 'leye' and
+            # 'reye' keys.
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            # The image is loaded lazily from disk at access time.
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        # One entry per enrollment image; field order must match the
+        # attribute accesses in 'get' below.
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
+        # One template per model id declared by the protocol/group.
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            # Gather the enrollment ('enroll') files belonging to this
+            # template only.
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            for obj in objs:
+                # Numeric file id from the digit runs of the id's last path
+                # component (same scheme as in Train.index).
+                obj_id = obj.id.split('/')[-1]
+                digits = re.findall(r'\d+', obj_id)
+
+                # NOTE(review): both client_id[1:] and template_id[1:] assume
+                # a one-character prefix before the numeric part — verify
+                # against bob.db.cpqd.
+                entries.append(Entry(np.uint64(obj.client_id[1:]), np.uint64(template_id[1:]),
+                                     np.uint64(''.join(digits)),
+                                     db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                     obj.make_path(image_folder, '.jpg')))
+
+        # Stable, reproducible ordering: by client, then template, then file.
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        # Serve one output value for the entry at 'index'. Unknown output
+        # names fall through and return None (no error is raised).
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # Annotations are read as (y, x) pairs under 'leye'/'reye'.
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            # The image is loaded lazily from disk at access time.
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        # One entry per probe image; field order must match the attribute
+        # accesses in 'get' below.
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        image_folder       = os.path.join(root_folder, "images")
+        annotation_folder  = os.path.join(root_folder, "eye_positions")
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.cpqd.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups=parameters['group']))
+
+
+        # Map each template id to the ids of the probe files that must be
+        # compared against it.
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        # All probe files of the protocol/group, in a stable order.
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            # Templates this probe must be compared against.
+            # NOTE(review): the 'in' test scans a list per template; a set
+            # would be faster for large protocols (behavior identical).
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            # NOTE(review): x[1:] assumes a one-character prefix before the
+            # numeric part of the template id — verify against bob.db.cpqd.
+            templates = [ np.uint64(x[1:]) for x in templates ]
+
+            obj_id = obj.id.split('/')[-1]
+            digits = re.findall(r'\d+', obj_id)
+
+            # Note: probe_id and file_id are intentionally the same value
+            # here (both derived from the digit runs of the file name).
+            entries.append(Entry(templates,
+                                 np.uint64(obj.client_id[1:]),
+                                 np.uint64(''.join(digits)),
+                                 np.uint64(''.join(digits)),
+                                 db.annotations(obj.make_path(annotation_folder, '.pos')),
+                                 obj.make_path(image_folder, '.jpg')))
+
+        # Group probes that share the same template-id list together, so
+        # consumers see contiguous runs per comparison set.
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        # Serve one output value for the entry at 'index'. Unknown output
+        # names fall through and return None (no error is raised).
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            # Annotations are read as (y, x) pairs under 'leye'/'reye'.
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            # The image is loaded lazily from disk at access time.
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    """Monkey-patches I/O entry points so the views can be exercised
+    without the actual image or annotation files on disk."""
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        # Returns a fixed-shape uint8 array; np.ndarray(...) leaves the
+        # buffer uninitialized, which is fine for a smoke test.
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    # Fixed (y, x) eye positions under the 'leye'/'reye' keys the views read.
+    def mock_annotations(obj, path):
+        return dict(
+            leye=(5, 4),
+            reye=(7, 4),
+        )
+
+    # Patch at module/class level: affects every subsequent call in this
+    # process, not just these views.
+    bob.io.base.load = mock_load
+    bob.db.cpqd.Database.annotations = mock_annotations
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    # Smoke-test the Train view on the 'laptop_male' protocol: index, then
+    # fetch each declared output for the first entry (no values asserted).
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='laptop_male'
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    # Smoke-test the Templates view; it also takes a 'group' parameter and
+    # exposes a 'template_id' output.
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='laptop_male',
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    # Smoke-test the Probes view. Note it deliberately uses a different
+    # protocol ('s2l_female') than the two sections above.
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='s2l_female',
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/cpqd/5.rst b/advanced/databases/cpqd/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1013b986581729c2239a45a216157df26e2371d4
--- /dev/null
+++ b/advanced/databases/cpqd/5.rst
@@ -0,0 +1 @@
+The CPqD database
\ No newline at end of file
diff --git a/advanced/databases/frgc/5.json b/advanced/databases/frgc/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..005a74da4c7b7aef71395f67ffe010fc47afb05f
--- /dev/null
+++ b/advanced/databases/frgc/5.json
@@ -0,0 +1,169 @@
+{
+    "description": "The Face Recognition Grand Challenge",
+    "root_folder": "/idiap/resource/database/frgc/FRGC-2.0-dist",
+    "protocols": [
+        {
+            "name": "2.0.1_maskI",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.1"
+                    }
+                }
+            }
+        },
+        {
+            "name": "2.0.4_maskI",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskI",
+                        "protocol": "2.0.4"
+                    }
+                }
+            }
+        },
+        {
+            "name": "2.0.1_maskII",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.1"
+                    }
+                }
+            }
+        },
+        {
+            "name": "2.0.4_maskII",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskII",
+                        "protocol": "2.0.4"
+                    }
+                }
+            }
+        },
+        {
+            "name": "2.0.1_maskIII",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.1"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.1"
+                    }
+                }
+            }
+        },
+        {
+            "name": "2.0.4_maskIII",
+            "template": "simple_face_recognition_frgc/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.4"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "mask": "maskIII",
+                        "protocol": "2.0.4"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/frgc/5.py b/advanced/databases/frgc/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..70e53012eb68e44d73d4ae75477ba4a1bca0c062
--- /dev/null
+++ b/advanced/databases/frgc/5.py
@@ -0,0 +1,412 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.frgc
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.frgc.Database(original_directory=root_folder)
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world',
+                                 mask_type=parameters['mask']),
+                      key=lambda x: (x.client_id, x.id))  # deterministic order: by client, then by file id
+
+        entries = []
+
+        for obj in objs:
+            filename = obj.make_path(root_folder, '.jpg')
+            if not os.path.exists(filename):
+                filename = obj.make_path(root_folder, '.JPG')  # fall back: some images ship with an uppercase extension
+
+            entries.append(Entry(obj.client_id, obj.id, db.annotations(obj), filename))  # only the path is indexed; pixels load lazily in get()
+
+        return entries
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),  # indices 0/1 of each annotation pair are mapped to (y, x) here
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)  # 'image' holds a filename; decoded on demand
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.frgc.Database(original_directory=root_folder)
+
+        model_files = bob.db.frgc.models.get_list(root_folder,
+                                                  'dev',
+                                                  parameters['protocol'],
+                                                  'enroll')
+
+        mask = bob.db.frgc.models.get_mask(root_folder,
+                                           parameters['protocol'],
+                                           parameters['mask'])
+
+        entries = []
+        for model_index, model in enumerate(model_files):
+            if (mask[:, model_index] > 0).any():  # keep only models actually selected by this mask column
+                for presentation in model.m_files:
+                    obj = bob.db.frgc.models.File(model.m_signature,
+                                                  presentation,
+                                                  model.m_files[presentation])
+
+                    filename = obj.make_path(root_folder, '.jpg')
+                    if not os.path.exists(filename):
+                        filename = obj.make_path(root_folder, '.JPG')  # fall back: some images ship with an uppercase extension
+
+                    entries.append(Entry(obj.client_id, model.m_model, obj.id,
+                                         db.annotations(obj), filename))  # model.m_model serves as the template id
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))  # deterministic ordering for the platform
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),  # indices 0/1 of each annotation pair are mapped to (y, x) here
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)  # 'image' holds a filename; decoded on demand
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.frgc.Database(original_directory=root_folder)
+
+        probe_files = bob.db.frgc.models.get_list(root_folder,
+                                                  'dev',
+                                                  parameters['protocol'],
+                                                  'probe')
+
+        model_files = bob.db.frgc.models.get_list(root_folder,
+                                                  'dev',
+                                                  parameters['protocol'],
+                                                  'enroll')
+
+        mask = bob.db.frgc.models.get_mask(root_folder,
+                                           parameters['protocol'],
+                                           parameters['mask'])
+
+        template_ids = np.array([ x.m_model for x in model_files ])  # entry i matches mask column i (same model order)
+
+        entries = []
+        for probe_index, probe in enumerate(probe_files):
+            template_indices = mask[probe_index, :].nonzero()[0]  # models this probe is compared against
+            templates = sorted(template_ids.take(template_indices))
+
+            for presentation in probe.m_files:
+                obj = bob.db.frgc.models.File(probe.m_signature,
+                                              presentation,
+                                              probe.m_files[presentation])
+
+                filename = obj.make_path(root_folder, '.jpg')
+                if not os.path.exists(filename):
+                    filename = obj.make_path(root_folder, '.JPG')  # fall back: some images ship with an uppercase extension
+
+                entries.append(Entry(templates, obj.client_id, obj.id, obj.id,
+                                     db.annotations(obj), filename))  # obj.id intentionally fills both probe_id and file_id
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))  # groups probes sharing the same template list
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': np.uint64(obj.template_ids)
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': obj.client_id
+            }
+
+        elif output == 'probe_id':
+            return {
+                'text': obj.file_id  # probe_id and file_id carry the same value (both set to obj.id in index())
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': obj.file_id
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),  # indices 0/1 of each annotation pair are mapped to (y, x) here
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)  # 'image' holds a filename; decoded on demand
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)  # uninitialised 3x10x20 buffer; contents are irrelevant to the test
+
+    # Install a mock os.path.exists function
+    def mock_exists(path):
+        return True
+
+    bob.io.base.load = mock_load
+    os.path.exists = mock_exists  # NOTE: process-wide monkey-patches; test-only, never used in production
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()  # patch bob.io.base.load / os.path.exists so no real image files are needed
+
+    # Note: This database can't be tested without the actual data, since
+    # some files are needed by this implementation
+
+    view = Train()
+    view.objs = view.index(  # index() builds the ordered entry list that get() reads by position
+        root_folder='',
+        parameters=dict(
+            protocol='2.0.1',
+            mask='maskI',
+        )
+    )
+    view.get('client_id', 0)  # exercise every declared output on the first entry
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='2.0.1',
+            mask='maskI',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='2.0.1',
+            mask='maskI',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/frgc/5.rst b/advanced/databases/frgc/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b767be8bef0e6679c1e8dd294e9e39b5d44be3ea
--- /dev/null
+++ b/advanced/databases/frgc/5.rst
@@ -0,0 +1 @@
+The Face Recognition Grand Challenge
\ No newline at end of file
diff --git a/advanced/databases/gbu/5.json b/advanced/databases/gbu/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..0216473ced3beb6d26112a9c449ac7737e377e25
--- /dev/null
+++ b/advanced/databases/gbu/5.json
@@ -0,0 +1,79 @@
+{
+    "description": "The Good, the Bad and the Ugly Face Challenge",
+    "root_folder": "/idiap/resource/database/MBGC-V1",
+    "protocols": [
+        {
+            "name": "good",
+            "template": "simple_face_recognition_gbu/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Good"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Good"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Good"
+                    }
+                }
+            }
+        },
+        {
+            "name": "bad",
+            "template": "simple_face_recognition_gbu/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Bad"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Bad"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Bad"
+                    }
+                }
+            }
+        },
+        {
+            "name": "ugly",
+            "template": "simple_face_recognition_gbu/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "Ugly"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "Ugly"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "Ugly"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/gbu/5.py b/advanced/databases/gbu/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca6dbd55b3ff889950e748ba540c964d1a6d4f1e
--- /dev/null
+++ b/advanced/databases/gbu/5.py
@@ -0,0 +1,381 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.gbu
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.gbu.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world',
+                                 subworld='x8'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.jpg'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.gbu.Database()
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups='dev')
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups='dev',
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x),
+                                   x.make_path(root_folder, '.jpg'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.gbu.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups='dev'))
+
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups='dev',
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='dev',
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  db.annotations(obj), obj.make_path(root_folder, '.jpg')) )
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': np.array(obj.template_ids, dtype=np.uint64)
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='Good'
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='Good'
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='Good'
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/gbu/5.rst b/advanced/databases/gbu/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f319375c5cce6dad165914205c4cb740f7a7f61f
--- /dev/null
+++ b/advanced/databases/gbu/5.rst
@@ -0,0 +1 @@
+The Good, the Bad and the Ugly Face Challenge
\ No newline at end of file
diff --git a/advanced/databases/kboc16/5.json b/advanced/databases/kboc16/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..8fd359a3c49bf7d4af54f8a5872c930657a84d3c
--- /dev/null
+++ b/advanced/databases/kboc16/5.json
@@ -0,0 +1,43 @@
+{
+    "description": "The KBOC16 database",
+    "root_folder": "/idiap/group/biometric/databases/kboc16",
+    "protocols": [
+        {
+            "name": "A",
+            "template": "simple_keystroke_recognition_kboc16/1",
+            "views": {
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "A"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "A"
+                    }
+                }
+            }
+        },
+        {
+            "name": "D",
+            "template": "simple_keystroke_recognition_kboc16/1",
+            "views": {
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "D"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "D"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/kboc16/5.py b/advanced/databases/kboc16/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2c544b09ebfb22126b10ed9c67aadf56c739177
--- /dev/null
+++ b/advanced/databases/kboc16/5.py
@@ -0,0 +1,259 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import string
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.db.kboc16
+
+
+#----------------------------------------------------------
+
+
+def keystroke_reader(filename):
+    times = []
+    keys = []
+
+    for line in open(filename, 'r').readlines():
+        parts = line.split()
+        times.append(np.int32(parts[1]))
+        keys.append(parts[0])
+
+    return dict(
+        timestamps = times,
+        key_events = keys,
+    )
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - keystroke: "{{ system_user.username }}/kboc16_keystroke/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "keystroke".
+    Several "keystroke" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'keystroke'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.kboc16.Database()
+
+        template_ids = db.model_ids(groups='eval',
+                                    protocol=parameters['protocol'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(groups='eval',
+                              protocol=parameters['protocol'],
+                              purposes='enrol',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.txt'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'text': str(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'keystroke':
+            return keystroke_reader(obj.keystroke)
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - keystroke: "{{ system_user.username }}/kboc16_keystroke/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/uint64/1",
+        - template_ids: "{{ system_user.username }}/array_1d_text/1",
+
+    One "file_id" is associated with a given "keystroke".
+    One "probe_id" is associated with a given "keystroke".
+    Several "keystroke" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  | |  keystroke  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'keystroke'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.kboc16.Database()
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups='eval'),
+                              key=lambda x: int(x))
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups='eval',
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                          key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='eval',
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  obj.make_path(root_folder, '.txt')) )
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'text': [ str(x) for x in obj.template_ids ]
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'keystroke':
+            return keystroke_reader(obj.keystroke)
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the keystrokes
+    def mock_keystroke_reader(filename):
+        return {}
+
+    global keystroke_reader
+    keystroke_reader = mock_keystroke_reader
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Templates()
+    view.objs = view.index(root_folder='', parameters=dict(protocol = 'A'))
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('keystroke', 0)
+
+    view = Probes()
+    view.objs = view.index(root_folder='', parameters=dict(protocol = 'A'))
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('keystroke', 0)
diff --git a/advanced/databases/kboc16/5.rst b/advanced/databases/kboc16/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1c6f70313f138a8c5c0526cb7ebd988693347d3b
--- /dev/null
+++ b/advanced/databases/kboc16/5.rst
@@ -0,0 +1 @@
+The KBOC16 database
\ No newline at end of file
diff --git a/advanced/databases/lfw/5.json b/advanced/databases/lfw/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..cb0e9f5066e107dc0fbab0895577dbadd1d8692e
--- /dev/null
+++ b/advanced/databases/lfw/5.json
@@ -0,0 +1,31 @@
+{
+    "description": "The Labeled Faces in the Wild Database",
+    "root_folder": "/idiap/resource/database/lfw/all_images",
+    "protocols": [
+        {
+            "name": "view1",
+            "template": "simple_face_recognition_textid/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "view1"
+                    }
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "view1"
+                    }
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "view1"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/lfw/5.py b/advanced/databases/lfw/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7c01dd18c76d269fdea10a60e582b0f49c62038
--- /dev/null
+++ b/advanced/databases/lfw/5.py
@@ -0,0 +1,285 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.lfw
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.lfw.Database()
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world',
+                                 world_type='unrestricted'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.jpg')) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.lfw.Database()
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups='dev')
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups='dev',
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.jpg'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.lfw.Database()
+
+        template_ids = np.array(sorted(db.model_ids(protocol=parameters['protocol'],
+                                                    groups='dev'),
+                                       key=lambda x: int(x)),
+                                dtype='uint64')
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='dev',
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.jpg'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Train()
+    view.objs = view.index(root_folder='', parameters=dict(protocol='view1'))
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Templates()
+    view.objs = view.index(root_folder='', parameters=dict(protocol='view1'))
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Probes()
+    view.objs = view.index(root_folder='', parameters=dict(protocol='view1'))
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/lfw/5.rst b/advanced/databases/lfw/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..71340a0cd179306f785948966a3e35dd7fc74eae
--- /dev/null
+++ b/advanced/databases/lfw/5.rst
@@ -0,0 +1 @@
+The Labeled Faces in the Wild Database
\ No newline at end of file
diff --git a/advanced/databases/livdet-iris-2020/2.json b/advanced/databases/livdet-iris-2020/2.json
new file mode 100644
index 0000000000000000000000000000000000000000..76bb6dd1c9d65db73bd422df36a913538881c1b1
--- /dev/null
+++ b/advanced/databases/livdet-iris-2020/2.json
@@ -0,0 +1,21 @@
+{
+    "description": "LivDet Iris 2020 test database",
+    "environment": {
+        "name": "Example databases",
+        "version": "1.4.1"
+    },
+    "protocols": [
+        {
+            "name": "Main",
+            "template": "iris_pad/1",
+            "views": {
+                "test": {
+                    "view": "Test",
+                    "parameters": {}
+                }
+            }
+        }
+    ],
+    "root_folder": "/somewhere",
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/livdet-iris-2020/2.py b/advanced/databases/livdet-iris-2020/2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8460135a13ed794bdfcbd6f1fb9685b75395424
--- /dev/null
+++ b/advanced/databases/livdet-iris-2020/2.py
@@ -0,0 +1,40 @@
+import os
+
+import numpy as np
+import pandas as pd
+from beat.backend.python.database import View
+from PIL import Image
+
+
+class Test(View):
+    def index(self, root_folder, parameters):
+        """Creates the data for the database indexation"""
+
+        csv_path = os.path.join(root_folder, "test.csv")
+        df = pd.read_csv(csv_path)
+
+        df["filename"] = df["filename"].apply(lambda x: os.path.join(root_folder, x))
+        df = df.rename(columns={"filename": "image"})
+
+        # ------------- v1 random labels -----
+        # remove this part for v2 release
+        num_files = len(df)
+        np.random.seed(0)
+        df["label"] = np.random.randint(0, 2, size=num_files, dtype=bool)
+        df["category"] = np.random.randint(1, 7, size=num_files, dtype=int)
+        # ------------------------------------
+
+        return list(df.itertuples(index=False))
+
+    def get(self, output, index):
+        """Returns the data for the output based on the index content"""
+
+        obj = self.objs[index]
+
+        if output == "image":
+            img = np.asarray(Image.open(obj.image))
+            return {"value": img}
+        elif output == "label":
+            return {"value": bool(obj.label)}
+        elif output == "category":
+            return {"value": np.cast["int32"](obj.category)}
diff --git a/advanced/databases/livdet-iris-2020/2.rst b/advanced/databases/livdet-iris-2020/2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..298357be0097371aa47a386a185199cfb7abcfe1
--- /dev/null
+++ b/advanced/databases/livdet-iris-2020/2.rst
@@ -0,0 +1 @@
+LivDet Iris 2020 test database
\ No newline at end of file
diff --git a/advanced/databases/livdet2013/5.json b/advanced/databases/livdet2013/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..92d712a2fd79686c315e865a79d4daffeea50f99
--- /dev/null
+++ b/advanced/databases/livdet2013/5.json
@@ -0,0 +1,105 @@
+{
+    "description": "The LivDet 2013 Fingerprint Liveness Database",
+    "root_folder": "/idiap/resource/database/LivDet/LivDet2013",
+    "protocols": [
+        {
+            "name": "Biometrika",
+            "template": "simple_fingerprint_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Biometrika",
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Biometrika",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Italdata",
+            "template": "simple_fingerprint_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Italdata",
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Italdata",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "CrossMatch",
+            "template": "simple_fingerprint_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "CrossMatch",
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "CrossMatch",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Swipe",
+            "template": "simple_fingerprint_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Swipe",
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "All",
+                    "parameters": {
+                        "protocol": "Swipe",
+                        "group": "test"
+                    }
+                }
+            }
+        },
+        {
+            "name": "Full",
+            "template": "simple_fingerprint_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/livdet2013/5.py b/advanced/databases/livdet2013/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0bc58c960856e54c59ddb57f43817256cc92958
--- /dev/null
+++ b/advanced/databases/livdet2013/5.py
@@ -0,0 +1,109 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+from bob.db.livdet2013 import Database
+
+
+#----------------------------------------------------------
+
+
+class All(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - spoof: "{{ system_user.username }}/boolean/1"
+
+    Several "image" are associated with a given "spoof".
+
+    --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- ---------------
+    ------------------------------- -------------------------------
+    |            spoof            | |            spoof            |
+    ------------------------------- -------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['spoof', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = Database()
+        objs = sorted(db.objects(protocols=parameters.get('protocol'),
+                                 groups=parameters['group'],
+                                 classes=parameters.get('class')),
+                      key=lambda x: x.is_live())
+
+        return [ Entry(x.is_live(), x.make_path(root_folder)) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'spoof':
+            return {
+                'value': obj.spoof
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = All()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='Biometrika',
+            group='train'
+        )
+    )
+    view.get('spoof', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/livdet2013/5.rst b/advanced/databases/livdet2013/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0cf3ad568f8f06b3d091630b28abca710a38d29c
--- /dev/null
+++ b/advanced/databases/livdet2013/5.rst
@@ -0,0 +1 @@
+The LivDet 2013 Fingerprint Liveness Database
\ No newline at end of file
diff --git a/advanced/databases/mobio/5.json b/advanced/databases/mobio/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0bea7711b5289c969d8543b11a34c1a3b0805f1
--- /dev/null
+++ b/advanced/databases/mobio/5.json
@@ -0,0 +1,107 @@
+{
+    "description": "The MOBIO Database of Faces",
+    "root_folder": "/idiap/resource/database/mobio",
+    "protocols": [
+        {
+            "name": "male",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "male",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "male",
+                        "group": "dev",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "male",
+                        "group": "dev",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "male",
+                        "group": "eval",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "male",
+                        "group": "eval",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                }
+            }
+        },
+        {
+            "name": "female",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "female",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "female",
+                        "group": "dev",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "female",
+                        "group": "dev",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "female",
+                        "group": "eval",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "female",
+                        "group": "eval",
+                        "images": "IMAGES_PNG",
+                        "annotations": "IMAGE_ANNOTATIONS"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/mobio/5.py b/advanced/databases/mobio/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cbb8faadfdddc2c1ede005feb439bf8cbc5232d
--- /dev/null
+++ b/advanced/databases/mobio/5.py
@@ -0,0 +1,414 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.mobio
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        images = parameters['images']
+        if not os.path.isabs(images):
+            images = os.path.join(root_folder, images)
+
+        db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world',
+                                 purposes='train'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(images, '.png'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        images = parameters['images']
+        if not os.path.isabs(images):
+            images = os.path.join(root_folder, images)
+
+        db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x),
+                                   x.make_path(images, '.png'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
+                                     'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        images = parameters['images']
+        if not os.path.isabs(images):
+            images = os.path.join(root_folder, images)
+
+        db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
+                                           groups=parameters['group']))
+
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(protocol=parameters['protocol'],
+                                     groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+
+            entries.append( Entry(templates, obj.client_id, obj.id, obj.id,
+                                  db.annotations(obj), obj.make_path(images, '.png')) )
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.probe_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': np.uint64(obj.template_ids)
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    # Note: This database can't be tested without the actual data, since
+    # the actual files are needed by this implementation
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='male',
+            annotations='../IMAGE_ANNOTATIONS',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='male',
+            group='dev',
+            annotations='../IMAGE_ANNOTATIONS',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='male',
+            group='dev',
+            annotations='../IMAGE_ANNOTATIONS',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('eye_centers', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/mobio/5.rst b/advanced/databases/mobio/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6e381f9b44ef6b27728faa31bd0f620e50702e94
--- /dev/null
+++ b/advanced/databases/mobio/5.rst
@@ -0,0 +1 @@
+The MOBIO Database of Faces
\ No newline at end of file
diff --git a/advanced/databases/nist_sre12/4.json b/advanced/databases/nist_sre12/4.json
new file mode 100644
index 0000000000000000000000000000000000000000..18042021f635ef2ec72e6f154dfc6183ab0f69e6
--- /dev/null
+++ b/advanced/databases/nist_sre12/4.json
@@ -0,0 +1,97 @@
+{
+    "description": "The NIST Speaker Recognition Evaluation 2012 (SRE'12)",
+    "root_folder": "/idiap/temp/ekhoury/NIST_DATA/DENOISED_FOR_BEAT",
+    "protocols": [
+        {
+            "name": "female",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "female",
+                        "group": "eval"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "female",
+                        "group": "eval"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "core-c2",
+                        "gender": "female",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "core-c2",
+                        "gender": "female",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "male",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "male",
+                        "group": "eval"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "core-c1",
+                        "gender": "male",
+                        "group": "eval"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "core-c2",
+                        "gender": "male",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "core-c2",
+                        "gender": "male",
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/nist_sre12/4.py b/advanced/databases/nist_sre12/4.py
new file mode 100644
index 0000000000000000000000000000000000000000..f42efccbddbe0eaef99cb980159bb2184f62a607
--- /dev/null
+++ b/advanced/databases/nist_sre12/4.py
@@ -0,0 +1,545 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+import bob.db.nist_sre12
+import bob.io.base
+import bob.io.audio
+
+
+#----------------------------------------------------------
+
+
+def get_client_end_index(objs, client_id, client_start_index,
+                         start_index, end_index, indice_in_tuple=1):
+    client_end_index = client_start_index
+
+    while client_end_index + 1 <= end_index:
+        obj = objs[client_end_index + 1 - start_index]
+
+        if isinstance(obj, tuple):
+            obj = obj[indice_in_tuple]
+
+        if obj.client_id != client_id:
+            return client_end_index
+
+        client_end_index += 1
+
+    return end_index
+
+
+#----------------------------------------------------------
+
+
+def get_value_end_index(objs, value, index_in_tuple, value_start_index,
+                           start_index, end_index):
+    value_end_index = value_start_index
+
+    while value_end_index + 1 <= end_index:
+        id = objs[value_end_index + 1 - start_index][index_in_tuple]
+
+        if id != value:
+            return value_end_index
+
+        value_end_index += 1
+
+    return end_index
+
+
+#----------------------------------------------------------
+
+
+class Train:
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "speech".
+    Several "speech" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs = outputs
+
+        # Open the database and load the objects to provide via the outputs
+        self.db = bob.db.nist_sre12.Database()
+
+        self.objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                           gender=parameters['gender']),
+                           key=lambda x: (x.client_id, x.id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        obj = self.objs[self.next_index - self.start_index]
+
+        # Output: client_id (only provide data when the client_id change)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'text': str(obj.client_id)
+                },
+                client_end_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'text': str(obj.id)
+                },
+                self.next_index
+            )
+
+        # Output: speech (provide data at each iteration)
+        if self.outputs['speech'].isConnected():
+            filename = obj.make_path(self.root_folder, '.sph')
+
+            audio = bob.io.base.load(filename)
+
+            self.outputs['speech'].write(
+                {
+                    'value': np.cast['float'](audio[0] * pow(2, 15))
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs
+                                                              if x.isConnected() ]
+        )
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+class Templates:
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "speech".
+    Several "speech" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs = outputs
+        self.parameters = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        self.db = bob.db.nist_sre12.Database()
+
+        template_ids = self.db.model_ids(protocol=parameters['protocol'],
+                                         groups=self.parameters['group'])
+
+        self.objs = []
+
+        for template_id in template_ids:
+            objs = self.db.objects(protocol=parameters['protocol'],
+                                   gender=parameters['gender'],
+                                   groups=self.parameters['group'],
+                                   purposes='enroll',
+                                   model_ids=[template_id])
+
+            self.objs.extend([ (template_id, obj) for obj in objs ])
+
+        self.objs = sorted(self.objs, key=lambda x: (x[1].client_id, x[0], x[1].id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        (template_id, obj) = self.objs[self.next_index - self.start_index]
+
+        # Output: template_id (only provide data when the template_id changes)
+        if self.outputs['template_id'].isConnected() and \
+           self.outputs['template_id'].last_written_data_index < self.next_index:
+
+            template_end_index = get_value_end_index(self.objs, template_id, 0,
+                                                     self.next_index,
+                                                     self.start_index,
+                                                     self.end_index)
+
+            self.outputs['template_id'].write(
+                {
+                    'text': template_id
+                },
+                template_end_index
+            )
+
+        # Output: client_id (only provide data when the client_id changes)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'text': obj.client_id
+                },
+                client_end_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'text': obj.id
+                },
+                self.next_index
+            )
+
+        # Output: speech (provide data at each iteration)
+        if self.outputs['speech'].isConnected():
+            filename = obj.make_path(self.root_folder, '.sph')
+
+            audio = bob.io.base.load(filename)
+
+            self.outputs['speech'].write(
+                {
+                    'value': np.cast['float'](audio[0] * pow(2, 15))
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs
+                                                              if x.isConnected() ]
+        )
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+class Probes:
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_text/1"
+
+    One "file_id" is associated with a given "speech".
+    One "probe_id" is associated with a given "speech".
+    Several "speech" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    Each probe must be matched against a number of templates defined by a list of
+    client identifiers.
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs = outputs
+        self.parameters = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        self.db = bob.db.nist_sre12.Database()
+
+        template_ids = self.db.model_ids(protocol=parameters['protocol'],
+                                         groups=self.parameters['group'])
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                          groups=self.parameters['group'],
+                                          gender=parameters['gender'],
+                                          purposes='probe',
+                                          model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+        objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                      gender=parameters['gender'],
+                                      groups=self.parameters['group'],
+                                      purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        self.objs = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+            self.objs.append( (templates, obj) )
+
+        self.objs = sorted(self.objs, key=lambda x: (len(x[0]), x[0], x[1].client_id, x[1].id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        (template_ids, obj) = self.objs[self.next_index - self.start_index]
+
+        # Output: template_ids (only provide data when the template_ids change)
+        if self.outputs['template_ids'].isConnected() and \
+           self.outputs['template_ids'].last_written_data_index < self.next_index:
+
+            template_ids_end_index = get_value_end_index(self.objs, template_ids, 0,
+                                                         self.next_index,
+                                                         self.start_index,
+                                                         self.end_index)
+
+            self.outputs['template_ids'].write(
+                {
+                    'value': template_ids
+                },
+                template_ids_end_index
+            )
+
+        # Output: client_id (only provide data when the client_id changes)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'text': obj.client_id
+                },
+                client_end_index
+            )
+
+        # Output: probe_id (provide data at each iteration)
+        if self.outputs['probe_id'].isConnected():
+            self.outputs['probe_id'].write(
+                {
+                    'text': obj.id
+                },
+                self.next_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'text': obj.id
+                },
+                self.next_index
+            )
+
+        # Output: speech (provide data at each iteration)
+        if self.outputs['speech'].isConnected():
+            filename = obj.make_path(self.root_folder, '.sph')
+
+            audio = bob.io.base.load(filename)
+
+            self.outputs['speech'].write(
+                {
+                    'value': np.cast['float'](audio[0] * pow(2, 15))
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([ x.last_written_data_index for x in self.outputs
+                                                              if x.isConnected() ]
+        )
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function replacing bob.io.base.load (fake data for tests)
+    def mock_load(root_folder):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    from beat.backend.python.database import DatabaseTester
+
+    DatabaseTester('Train', Train,
+        [
+            'client_id',
+            'file_id',
+            'speech',
+        ],
+        parameters=dict(
+            protocol='core-c1',
+            gender='female',
+        ),
+        irregular_outputs=[
+            'client_id',
+        ]
+    )
+
+    DatabaseTester('Templates', Templates,
+        [
+            'client_id',
+            'template_id',
+            'file_id',
+            'speech',
+        ],
+        parameters=dict(
+            protocol='core-c1',
+            gender='female',
+            group='eval',
+        ),
+        irregular_outputs=[
+            'client_id',
+            'template_id',
+        ]
+    )
+
+    DatabaseTester('Probes', Probes,
+        [
+            'template_ids',
+            'client_id',
+            'probe_id',
+            'file_id',
+            'speech',
+        ],
+        parameters=dict(
+            protocol='core-c1',
+            gender='female',
+            group='eval',
+        ),
+        irregular_outputs=[
+            'template_ids',
+            'client_id',
+        ]
+    )
diff --git a/advanced/databases/nist_sre12/4.rst b/advanced/databases/nist_sre12/4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..724c02ddc505606c18a9f1bf461e9407b979ee7d
--- /dev/null
+++ b/advanced/databases/nist_sre12/4.rst
@@ -0,0 +1 @@
+The NIST Speaker Recognition Evaluation 2012 (SRE'12)
\ No newline at end of file
diff --git a/advanced/databases/putvein/5.json b/advanced/databases/putvein/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..efcc0c79df8f074e6978a8ff90202c5b14923734
--- /dev/null
+++ b/advanced/databases/putvein/5.json
@@ -0,0 +1,1007 @@
+{
+    "description": "The PUT Vein Database",
+    "root_folder": "/idiap/resource/database/PUT_Vein_Dataset",
+    "protocols": [
+        {
+            "name": "palm-L_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-L_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-R_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-R_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-RL_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-RL_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-LR_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-LR_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-R_BEAT_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "palm-R_BEAT_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "palm",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "palm",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "palm",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-L_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-L_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "L_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-R_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-R_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-RL_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-RL_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "RL_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-LR_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-LR_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "LR_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-R_BEAT_1",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_1",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        },
+        {
+            "name": "wrist-R_BEAT_4",
+            "template": "advanced_vein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "wrist",
+                        "group": "train"
+                    }
+                },
+                "dev_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "enroll"
+                    }
+                },
+                "dev_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "wrist",
+                        "group": "dev",
+                        "purpose": "probe"
+                    }
+                },
+                "eval_templates": {
+                    "view": "TemplateView",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "enroll"
+                    }
+                },
+                "eval_probes": {
+                    "view": "View",
+                    "parameters": {
+                        "protocol": "R_BEAT_4",
+                        "kind": "wrist",
+                        "group": "eval",
+                        "purpose": "probe"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/putvein/5.py b/advanced/databases/putvein/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e81522b3a13794211a8814b9cc7b504123d0ed
--- /dev/null
+++ b/advanced/databases/putvein/5.py
@@ -0,0 +1,223 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View as BaseView
+
+import bob.db.putvein
+import bob.io.base
+import bob.ip.color
+
+
+#----------------------------------------------------------
+
+
+class View(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    Several "image" outputs are associated with a given "client_id"
+
+    --------- --------- --------- --------- --------- ---------
+    | image | | image | | image | | image | | image | | image |
+    --------- --------- --------- --------- --------- ---------
+    ----------------------------- -----------------------------
+    |          client_id        | |          client_id        |
+    ----------------------------- -----------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.putvein.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 purposes=parameters.get('purpose', None),
+                                 groups=[parameters['group']],
+                                 kinds=[parameters['kind']]),
+                      key=lambda x: x.client_id)
+
+        return [ Entry(x.client_id, x.make_path(root_folder))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'image':
+            """
+            The image returned by the ``bob.db.putvein`` is RGB (with shape
+            (3, 768, 1024)). This method converts image to a grayscale
+            (shape (768, 1024)) and then rotates image by 270 deg so that
+            images can be used with ``bob.bio.vein`` algorithms designed for
+            the ``bob.db.biowave_v1`` database.
+            Output images dimensions: (1024, 768).
+            """
+            color_image = bob.io.base.load(obj.image)
+            grayscale_image = bob.ip.color.rgb_to_gray(color_image)
+            grayscale_image = np.rot90(grayscale_image, k=3)
+
+            return {
+                'value': grayscale_image
+            }
+
+
+#----------------------------------------------------------
+
+
+class TemplateView(BaseView):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - model_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    Several "image" outputs are associated with a given "model_id".
+    Several "model_id" values are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   model_id                  | |                   model_id                  |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "image"
+    per "model_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'model_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.putvein.Database()
+
+        model_ids = db.model_ids(protocol=parameters['protocol'],
+                                 groups=[parameters['group']],
+                                 kinds=[parameters['kind']])
+
+        entries = []
+
+        for model_id in model_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              purposes=parameters.get('purpose', None),
+                              groups=[parameters['group']],
+                              kinds=[parameters['kind']],
+                              model_ids=[model_id])
+
+            entries.extend([ Entry(x.client_id, model_id, x.make_path(root_folder))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.model_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'model_id':
+            return {
+                'text': str(obj.model_id)
+            }
+
+        elif output == 'image':
+            """
+            The image returned by the ``bob.db.putvein`` is RGB (with shape
+            (3, 768, 1024)). This method converts image to a grayscale
+            (shape (768, 1024)) and then rotates image by 270 deg so that
+            images can be used with ``bob.bio.vein`` algorithms designed for
+            the ``bob.db.biowave_v1`` database.
+            Output images dimensions: (1024, 768).
+            """
+            color_image = bob.io.base.load(obj.image)
+            grayscale_image = bob.ip.color.rgb_to_gray(color_image)
+            grayscale_image = np.rot90(grayscale_image, k=3)
+
+            return {
+                'value': grayscale_image
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock bob.io.base.load that returns a fake RGB image array
+    def mock_load(filename):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = View()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol = 'LR_4',
+            kind = 'wrist',
+            group = 'dev',
+            purpose = 'probe',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('image', 0)
+
+
+    view = TemplateView()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol = 'LR_4',
+            kind = 'wrist',
+            group = 'dev',
+            purpose = 'enroll',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('model_id', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/putvein/5.rst b/advanced/databases/putvein/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..76867d0f2412a972fdcfa3a673e231bfd09db4f8
--- /dev/null
+++ b/advanced/databases/putvein/5.rst
@@ -0,0 +1 @@
+The PUT Vein Database
\ No newline at end of file
diff --git a/advanced/databases/replay/5.json b/advanced/databases/replay/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..f9a77e25e622ded247d227832475ca10b8be886a
--- /dev/null
+++ b/advanced/databases/replay/5.json
@@ -0,0 +1,523 @@
+{
+    "description": "The Replay Database",
+    "root_folder": "/idiap/group/replay/database/protocols/replayattack-database",
+    "protocols": [
+        {
+            "name": "grandtest",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "grandtest"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "grandtest"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "grandtest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "print",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "print"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "print"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "print"
+                    }
+                }
+            }
+        },
+        {
+            "name": "photo",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "photo"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "photo"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "photo"
+                    }
+                }
+            }
+        },
+        {
+            "name": "video",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "video"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "video"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "video"
+                    }
+                }
+            }
+        },
+        {
+            "name": "mobile",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "mobile"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "mobile"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "mobile"
+                    }
+                }
+            }
+        },
+        {
+            "name": "highdef",
+            "template": "simple_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": false,
+                        "protocol": "highdef"
+                    }
+                },
+                "dev_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "devel",
+                        "enroll": false,
+                        "protocol": "highdef"
+                    }
+                },
+                "test_probes": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "test",
+                        "enroll": false,
+                        "protocol": "highdef"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_grandtest",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "grandtest"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "grandtest"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "grandtest"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "grandtest"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "grandtest"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "grandtest"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "grandtest"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_print",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "print"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "print"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "print"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "print"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "print"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "print"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "print"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_photo",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "photo"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "photo"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "photo"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "photo"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "photo"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "photo"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "photo"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_video",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "video"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "video"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "video"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "video"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "video"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "video"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "video"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_mobile",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "mobile"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "mobile"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "mobile"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "mobile"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "mobile"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "mobile"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "mobile"
+                    }
+                }
+            }
+        },
+        {
+            "name": "verification_highdef",
+            "template": "advanced_face_antispoofing/1",
+            "views": {
+                "train": {
+                    "view": "All",
+                    "parameters": {
+                        "group": "train",
+                        "enroll": true,
+                        "protocol": "highdef"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "highdef"
+                    }
+                },
+                "dev_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "highdef"
+                    }
+                },
+                "dev_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "devel",
+                        "protocol": "highdef"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "highdef"
+                    }
+                },
+                "test_probes_real": {
+                    "view": "ProbesReal",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "highdef"
+                    }
+                },
+                "test_probes_attack": {
+                    "view": "ProbesAttack",
+                    "parameters": {
+                        "group": "test",
+                        "protocol": "highdef"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/replay/5.py b/advanced/databases/replay/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b96eceba90e79f96003223e8b0629d7df0aef1e
--- /dev/null
+++ b/advanced/databases/replay/5.py
@@ -0,0 +1,576 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.video
+import bob.db.replay
+
+from bob.db.replay.driver import Interface
+
+INFO = Interface()
+SQLITE_FILE = INFO.files()[0]
+
+
+#----------------------------------------------------------
+
+
class All(View):
    """Outputs:
        - video: "{{ system_user.username }}/array_4d_uint8/1"
        - annotations: "{{ system_user.username }}/bounding_box_video/1"
        - file_id: "{{ system_user.username }}/uint64/1"
        - client_id: "{{ system_user.username }}/uint64/1"
        - attack_support: "{{ system_user.username }}/text/1"
        - class: "{{ system_user.username }}/text/1"

    One "file_id" is associated with a given "video".
    One "annotations" is associated with a given "video".
    Several "video" are associated with a given "client_id".
    Several "client_id" are associated with a given "class".
    Several "attack_support" are associated with a given "class".

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    video    | |    video    | |    video    | |    video    | |    video    | |    video    |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    | annotations | | annotations | | annotations | | annotations | | annotations | | annotations |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ------------------------------- ------------------------------- -------------------------------
    |          client_id          | |          client_id          | |          client_id          |
    ------------------------------- ------------------------------- -------------------------------
    --------------------------------------------------------------- -------------------------------
    |                        attack_support                       | |        attack_support       |
    --------------------------------------------------------------- -------------------------------
    -----------------------------------------------------------------------------------------------
    |                                             class                                           |
    -----------------------------------------------------------------------------------------------
    """

    def __init__(self):
        # BUGFIX: the previous code read `super(All, self)`, which only
        # constructs a super proxy object and throws it away -- the parent
        # View initializer was never actually invoked.  Call it explicitly.
        super(All, self).__init__()

        # 'class' is a Python keyword, so the corresponding entry member is
        # named 'cls' internally and remapped to the 'class' output here.
        self.output_member_map = {'class': 'cls'}


    def index(self, root_folder, parameters):
        """Builds the flat list of entries (one per video file) for this view.

        Entries are grouped by class ('enroll' first when requested, then
        'real', then the 'attack' files split by support) and, within each
        group, ordered by (client_id, file id) for reproducible indexing.
        """
        Entry = namedtuple('Entry', ['cls', 'attack_support', 'client_id', 'file_id',
                                     'annotations', 'video'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.replay.Database()

        objs = []

        # Enrollment data is only included when the protocol asks for it
        if parameters['enroll']:
            objs.extend([ ('enroll', '', x)
                          for x in sorted(db.objects(protocol=parameters['protocol'],
                                                     groups=parameters['group'],
                                                     cls='enroll'),
                                          key=lambda x: (x.client_id, x.id))
                        ])

        # Real accesses carry no attack support (empty string)
        objs.extend([ ('real', '', x)
                      for x in sorted(db.objects(protocol=parameters['protocol'],
                                                 groups=parameters['group'],
                                                 cls='real'),
                                      key=lambda x: (x.client_id, x.id))
                    ])

        # Attacks performed with the device on a fixed support
        objs.extend([ ('attack', 'fixed', x)
                      for x in sorted(db.objects(protocol=parameters['protocol'],
                                                 groups=parameters['group'],
                                                 cls='attack',
                                                 support='fixed'),
                                      key=lambda x: (x.client_id, x.id))
                    ])

        # Attacks performed with the device held in hand
        objs.extend([ ('attack', 'hand', x)
                      for x in sorted(db.objects(protocol=parameters['protocol'],
                                                 groups=parameters['group'],
                                                 cls='attack',
                                                 support='hand'),
                                      key=lambda x: (x.client_id, x.id))
                    ])


        return [ Entry(x[0], x[1], x[2].client_id, x[2].id, x[2].bbx(root_folder),
                       x[2].videofile(root_folder))
                 for x in objs ]


    def get(self, output, index):
        """Returns the data block for `output` at position `index`.

        Videos and annotations are loaded lazily, here, rather than during
        indexing, to keep the index cheap.
        """
        obj = self.objs[index]

        if output == 'class':
            return {
                'text': str(obj.cls)
            }

        elif output == 'attack_support':
            return {
                'text': str(obj.attack_support)
            }

        elif output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'annotations':
            # Each annotation row is (frame index, x, y, width, height)
            annotations_list = []
            for annotation in obj.annotations:
                annotations_list.append({
                    'frame_id': np.uint64(annotation[0]),
                    'top-left-x': np.int32(annotation[1]),
                    'top-left-y': np.int32(annotation[2]),
                    'width': np.int32(annotation[3]),
                    'height': np.int32(annotation[4])
                })

            return {
                'value': annotations_list
            }

        elif output == 'video':
            return {
                'value': bob.io.base.load(obj.video)
            }
+
+
+#----------------------------------------------------------
+
+
class Templates(View):
    """Outputs:
        - video: "{{ system_user.username }}/array_4d_uint8/1"
        - annotations: "{{ system_user.username }}/bounding_box_video/1"
        - file_id: "{{ system_user.username }}/uint64/1"
        - template_id: "{{ system_user.username }}/uint64/1"
        - client_id: "{{ system_user.username }}/uint64/1"

    One "file_id" is associated with a given "video".
    One "annotations" is associated with a given "video".
    Several "video" are associated with a given "template_id".
    Several "template_id" are associated with a given "client_id".

    --------------- --------------- --------------- ---------------
    |    video    | |    video    | |    video    | |    video    |
    --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- ---------------
    | annotations | | annotations | | annotations | | annotations |
    --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- ---------------
    ------------------------------- -------------------------------
    |         template_id         | |         template_id         |
    ------------------------------- -------------------------------
    ---------------------------------------------------------------
    |                          client_id                          |
    ---------------------------------------------------------------

    Note: for this particular database, there is only one "template_id"
    per "client_id".
    """

    def index(self, root_folder, parameters):
        """Indexes the enrollment videos of the requested group/protocol.

        The template identifier doubles as the client identifier, since this
        database defines exactly one template per client.
        """
        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id',
                                     'annotations', 'video'])

        # Query the enrollment files for this protocol/group
        db = bob.db.replay.Database()

        enrollment_files = db.objects(protocol=parameters['protocol'],
                                      groups=[parameters['group']],
                                      cls='enroll')

        entries = []
        for f in sorted(enrollment_files, key=lambda f: (f.client_id, f.id)):
            entries.append(Entry(f.client_id, f.client_id, f.id,
                                 f.bbx(root_folder), f.videofile(root_folder)))

        return entries


    def get(self, output, index):
        """Returns the data block for `output` at position `index`."""
        obj = self.objs[index]

        if output == 'annotations':
            # Each annotation row is (frame index, x, y, width, height)
            boxes = [{
                'frame_id': np.uint64(a[0]),
                'top-left-x': np.int32(a[1]),
                'top-left-y': np.int32(a[2]),
                'width': np.int32(a[3]),
                'height': np.int32(a[4])
            } for a in obj.annotations]

            return {
                'value': boxes
            }

        if output == 'video':
            # Videos are decoded lazily, only when requested
            return {
                'value': bob.io.base.load(obj.video)
            }

        # The remaining outputs are all plain uint64 scalars
        scalar_members = {
            'client_id': obj.client_id,
            'template_id': obj.template_id,
            'file_id': obj.file_id,
        }

        if output in scalar_members:
            return {
                'value': np.uint64(scalar_members[output])
            }
+
+
+#----------------------------------------------------------
+
+
class ProbesReal(View):
    """Outputs:
        - video: "{{ system_user.username }}/array_4d_uint8/1"
        - annotations: "{{ system_user.username }}/bounding_box_video/1"
        - file_id: "{{ system_user.username }}/uint64/1"
        - probe_id: "{{ system_user.username }}/uint64/1"
        - client_id: "{{ system_user.username }}/uint64/1"
        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"

    One "file_id" is associated with a given "video".
    One "annotations" is associated with a given "video".
    One "probe_id" is associated with a given "video".
    Several "video" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".

    --------------- --------------- --------------- ---------------
    |    video    | |    video    | |    video    | |    video    |
    --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- ---------------
    | annotations | | annotations | | annotations | | annotations |
    --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- ---------------
    |  probe_id   | |  probe_id   | |  probe_id   | |  probe_id   |
    --------------- --------------- --------------- ---------------
    ------------------------------- -------------------------------
    |          client_id          | |          client_id          |
    ------------------------------- -------------------------------
    ---------------------------------------------------------------
    |                         template_ids                        |
    ---------------------------------------------------------------

    Note: for this particular database, there is only one "template_ids"
    """

    def index(self, root_folder, parameters):
        """Indexes the real-access probe videos of the requested group.

        Every probe is compared against all the templates of the group, so
        each entry carries the full (sorted) list of template identifiers.
        """
        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id',
                                     'annotations', 'video'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.replay.Database()

        # One template per client in this database, so the template ids are
        # simply the client ids of the group
        template_ids = sorted([c.id for c in db.clients() if c.set == parameters['group']])

        objs = sorted(db.objects(protocol=parameters['protocol'],
                                 groups=[parameters['group']],
                                 cls='real'),
                      key=lambda x: (x.client_id, x.id))


        return [ Entry(template_ids, x.client_id, x.id, x.id, x.bbx(root_folder),
                       x.videofile(root_folder))
                 for x in objs ]


    def get(self, output, index):
        """Returns the data block for `output` at position `index`."""
        obj = self.objs[index]

        if output == 'template_ids':
            # BUGFIX: this output is declared as array_1d_uint64 and
            # `obj.template_ids` is a (usually multi-element) list.
            # `np.uint64(...)` is a scalar constructor and raises a
            # TypeError on such a sequence; build a proper 1-D array instead.
            return {
                'value': np.array(obj.template_ids, dtype=np.uint64)
            }

        elif output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'probe_id':
            return {
                'value': np.uint64(obj.probe_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'annotations':
            # Each annotation row is (frame index, x, y, width, height)
            annotations_list = []
            for annotation in obj.annotations:
                annotations_list.append({
                    'frame_id': np.uint64(annotation[0]),
                    'top-left-x': np.int32(annotation[1]),
                    'top-left-y': np.int32(annotation[2]),
                    'width': np.int32(annotation[3]),
                    'height': np.int32(annotation[4])
                })

            return {
                'value': annotations_list
            }

        elif output == 'video':
            return {
                'value': bob.io.base.load(obj.video)
            }
+
+
+#----------------------------------------------------------
+
+
class ProbesAttack(View):
    """Outputs:
        - video: "{{ system_user.username }}/array_4d_uint8/1"
        - annotations: "{{ system_user.username }}/bounding_box_video/1"
        - file_id: "{{ system_user.username }}/uint64/1"
        - probe_id: "{{ system_user.username }}/uint64/1"
        - client_id: "{{ system_user.username }}/uint64/1"
        - attack_support: "{{ system_user.username }}/text/1"
        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"

    One "file_id" is associated with a given "video".
    One "annotations" is associated with a given "video".
    One "probe_id" is associated with a given "video".
    Several "video" are associated with a given "client_id".
    Several "client_id" are associated with a given "template_ids".
    Several "template_ids" are associated with a given "attack_support".

    --------------- --------------- --------------- --------------- --------------- ---------------
    |    video    | |    video    | |    video    | |    video    | |    video    | |    video    |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    | annotations | | annotations | | annotations | | annotations | | annotations | | annotations |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    --------------- --------------- --------------- --------------- --------------- ---------------
    |  probe_id   | |  probe_id   | |  probe_id   | |  probe_id   | |  probe_id   | |  probe_id   |
    --------------- --------------- --------------- --------------- --------------- ---------------
    ------------------------------- ------------------------------- -------------------------------
    |          client_id          | |          client_id          | |          client_id          |
    ------------------------------- ------------------------------- -------------------------------
    --------------------------------------------------------------- -------------------------------
    |                         template_ids                        | |         template_ids        |
    --------------------------------------------------------------- -------------------------------
    -----------------------------------------------------------------------------------------------
    |                                        attack_support                                       |
    -----------------------------------------------------------------------------------------------
    """

    def index(self, root_folder, parameters):
        """Indexes the attack probe videos of the requested group.

        An attack probe only targets the identity it spoofs, so each entry's
        template id list contains just the attacked client's id.
        """
        Entry = namedtuple('Entry', ['attack_support', 'template_ids', 'client_id',
                                     'probe_id', 'file_id', 'annotations', 'video'])

        # Open the database and load the objects to provide via the outputs
        db = bob.db.replay.Database()

        objs = []

        # Attacks performed with the device on a fixed support
        objs.extend([ ('fixed', x)
                      for x in sorted(db.objects(protocol=parameters['protocol'],
                                                 groups=parameters['group'],
                                                 cls='attack',
                                                 support='fixed'),
                                      key=lambda x: (x.client_id, x.id))
                    ])

        # Attacks performed with the device held in hand
        objs.extend([ ('hand', x)
                      for x in sorted(db.objects(protocol=parameters['protocol'],
                                                 groups=parameters['group'],
                                                 cls='attack',
                                                 support='hand'),
                                      key=lambda x: (x.client_id, x.id))
                    ])

        return [ Entry(x[0], [ x[1].client_id ], x[1].client_id, x[1].id, x[1].id,
                       x[1].bbx(root_folder), x[1].videofile(root_folder))
                 for x in objs ]


    def get(self, output, index):
        """Returns the data block for `output` at position `index`."""
        obj = self.objs[index]

        if output == 'attack_support':
            return {
                'text': str(obj.attack_support)
            }

        elif output == 'template_ids':
            # BUGFIX: this output is declared as array_1d_uint64 while
            # `obj.template_ids` is a (single-element) list; `np.uint64(...)`
            # is a scalar constructor, and scalar conversion of sequences is
            # rejected by modern numpy.  Build a proper 1-D array instead.
            return {
                'value': np.array(obj.template_ids, dtype=np.uint64)
            }

        elif output == 'client_id':
            return {
                'value': np.uint64(obj.client_id)
            }

        elif output == 'probe_id':
            return {
                'value': np.uint64(obj.probe_id)
            }

        elif output == 'file_id':
            return {
                'value': np.uint64(obj.file_id)
            }

        elif output == 'annotations':
            # Each annotation row is (frame index, x, y, width, height)
            annotations_list = []
            for annotation in obj.annotations:
                annotations_list.append({
                    'frame_id': np.uint64(annotation[0]),
                    'top-left-x': np.int32(annotation[1]),
                    'top-left-y': np.int32(annotation[2]),
                    'width': np.int32(annotation[3]),
                    'height': np.int32(annotation[4])
                })

            return {
                'value': annotations_list
            }

        elif output == 'video':
            return {
                'value': bob.io.base.load(obj.video)
            }
+
+
+#----------------------------------------------------------
+
+
def setup_tests():
    """Monkey-patches the I/O-heavy helpers with lightweight stand-ins so
    the self-test below can run without the actual video files on disk."""

    def fake_video_loader(filename):
        # Stand-in for a decoded video: 5 frames, 3 channels, 10x20 pixels
        return np.ndarray((5, 3, 10, 20), dtype=np.uint8)

    def fake_bounding_boxes(obj, directory):
        # Two annotation rows: (frame index, x, y, width, height, ...)
        return np.array([(0, 1, 2, 3, 4, 5), (1, 10, 20, 30, 40, 50)])

    bob.io.base.load = fake_video_loader
    bob.db.replay.File.bbx = fake_bounding_boxes
+
+
+#----------------------------------------------------------
+
+
# Test the behavior of the views (on fake data)
if __name__ == '__main__':

    setup_tests()

    # Exercise every declared output of each view on its first entry

    view = All()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol="grandtest",
            group="train",
            enroll=False,
        )
    )
    for name in ('class', 'attack_support', 'client_id', 'file_id',
                 'annotations', 'video'):
        view.get(name, 0)


    view = Templates()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol="grandtest",
            group="devel",
        )
    )
    for name in ('client_id', 'template_id', 'file_id', 'annotations',
                 'video'):
        view.get(name, 0)


    view = ProbesReal()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol="grandtest",
            group="devel",
        )
    )
    for name in ('template_ids', 'client_id', 'probe_id', 'file_id',
                 'annotations', 'video'):
        view.get(name, 0)


    view = ProbesAttack()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol="grandtest",
            group="devel",
        )
    )
    for name in ('attack_support', 'template_ids', 'client_id', 'probe_id',
                 'file_id', 'annotations', 'video'):
        view.get(name, 0)
diff --git a/advanced/databases/replay/5.rst b/advanced/databases/replay/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb450c05cd275bffca25d1fde72d216a27033522
--- /dev/null
+++ b/advanced/databases/replay/5.rst
@@ -0,0 +1 @@
+The Replay Database
\ No newline at end of file
diff --git a/advanced/databases/utfvp/5.json b/advanced/databases/utfvp/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..88fb1b4a2e648f6918e5c6ebed8306d613e495fb
--- /dev/null
+++ b/advanced/databases/utfvp/5.json
@@ -0,0 +1,313 @@
+{
+    "description": "Finger-Vein database from the University of Twente",
+    "root_folder": "/idiap/resource/database/UTFVP/data",
+    "protocols": [
+        {
+            "name": "1vsall",
+            "template": "simple_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "1vsall"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "1vsall"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "1vsall"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nom",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nom"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nom"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nom"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nom"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nom"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomLeftRing",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomLeftRing"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftRing"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftRing"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftRing"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftRing"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomLeftMiddle",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomLeftMiddle"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftMiddle"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftMiddle"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftMiddle"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftMiddle"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomLeftIndex",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomLeftIndex"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftIndex"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomLeftIndex"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftIndex"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomLeftIndex"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomRightIndex",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomRightIndex"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightIndex"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightIndex"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightIndex"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightIndex"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomRightMiddle",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomRightMiddle"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightMiddle"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightMiddle"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightMiddle"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightMiddle"
+                    }
+                }
+            }
+        },
+        {
+            "name": "nomRightRing",
+            "template": "advanced_fingervein_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "nomRightRing"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightRing"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev",
+                        "protocol": "nomRightRing"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightRing"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval",
+                        "protocol": "nomRightRing"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/utfvp/5.py b/advanced/databases/utfvp/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9c5c883b8904d6f69b74bb0dcf2cf3ea3887fd0
--- /dev/null
+++ b/advanced/databases/utfvp/5.py
@@ -0,0 +1,308 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.utfvp
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.utfvp.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world',
+                                 purposes='train'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.png')) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                         client_id                                           |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.utfvp.Database()
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.png'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'text': str(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_text/1"
+
+    One "file_id" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.utfvp.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        objs = [ (sorted([ model.name for model in obj.models_probe
+                                      if model.sgroup == parameters['group'] ]), obj)
+                 for obj in objs ]
+
+        entries = [ Entry(x[0], x[1].client_id, x[1].id, x[1].id, x[1].make_path(root_folder, '.png'))
+                    for x in objs ]
+
+        return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
+                                              x.client_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'text': obj.template_ids
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(root_folder):
+        return np.ndarray((10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='1vsall',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='nomLeftRing',
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            protocol='nomLeftRing',
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('image', 0)
diff --git a/advanced/databases/utfvp/5.rst b/advanced/databases/utfvp/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..30e6ad41b6b9d2b917f9f66ba03b07e59a0583a8
--- /dev/null
+++ b/advanced/databases/utfvp/5.rst
@@ -0,0 +1 @@
+Finger-Vein database from the University of Twente
\ No newline at end of file
diff --git a/advanced/databases/voxforge/5.json b/advanced/databases/voxforge/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..1365bdf883757485d4b60beb521a0b422b44e103
--- /dev/null
+++ b/advanced/databases/voxforge/5.json
@@ -0,0 +1,41 @@
+{
+    "description": "The VoxForge Database",
+    "root_folder": "/idiap/resource/database/VoxForge/dbase/SpeechCorpus/Trunk/Audio/Main/16kHz_16bit",
+    "protocols": [
+        {
+            "name": "default",
+            "template": "advanced_speaker_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {}
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/voxforge/5.py b/advanced/databases/voxforge/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac3e69855f51457bcc6e937d732f5a08ec81f6a1
--- /dev/null
+++ b/advanced/databases/voxforge/5.py
@@ -0,0 +1,323 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.db.voxforge
+import bob.io.base
+import bob.io.audio
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "speech".
+    Several "speech" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'speech'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.voxforge.Database()
+
+        objs = sorted(db.objects(groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.wav')) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': str(obj.file_id)
+            }
+
+        elif output == 'speech':
+            audio = bob.io.base.load(obj.speech)
+
+            return {
+                'value': np.cast['float'](audio[0] * pow(2, 15))
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - template_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+
+    One "file_id" is associated with a given "speech".
+    Several "speech" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'speech'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.voxforge.Database()
+
+        template_ids = db.model_ids(groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.wav'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'text': str(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': str(obj.file_id)
+            }
+
+        elif output == 'speech':
+            audio = bob.io.base.load(obj.speech)
+
+            return {
+                'value': np.cast['float'](audio[0] * pow(2, 15))
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - speech: "{{ system_user.username }}/array_1d_floats/1"
+        - file_id: "{{ system_user.username }}/text/1"
+        - probe_id: "{{ system_user.username }}/text/1"
+        - client_id: "{{ system_user.username }}/text/1"
+        - template_ids: "{{ system_user.username }}/array_1d_text/1"
+
+    One "file_id" is associated with a given "speech".
+    One "probe_id" is associated with a given "speech".
+    Several "speech" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    Each probe must be matched against a number of templates defined by a list of
+    client identifiers.
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    speech   | |    speech   | |    speech   | |    speech   | |    speech   | |    speech   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'speech'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.voxforge.Database()
+
+        template_ids = db.model_ids(groups=parameters['group'])
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(db.objects(groups=parameters['group'],
+                                     purposes='probe',
+                                     model_ids=[template_id]),
+                          key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+        objs = sorted(db.objects(groups=parameters['group'],
+                                 purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        entries = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+            entries.append( (templates, obj) )
+
+        return sorted([ Entry(x[0], x[1].client_id, x[1].id, x[1].id,
+                              x[1].make_path(root_folder, '.wav'))
+                        for x in entries ],
+                       key=lambda x: (len(x.template_ids), x.template_ids, x.client_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'text': [ str(x) for x in obj.template_ids ]
+            }
+
+        elif output == 'client_id':
+            return {
+                'text': str(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'text': str(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'text': str(obj.file_id)
+            }
+
+        elif output == 'speech':
+            audio = bob.io.base.load(obj.speech)
+
+            return {
+                'value': np.cast['float'](audio[0] * pow(2, 15))
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the audio files (speech)
+    def mock_load(root_folder):
+        return np.ndarray((1, 512), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    view = Train()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+        )
+    )
+    view.get('client_id', 0)
+    view.get('file_id', 0)
+    view.get('speech', 0)
+
+
+    view = Templates()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            group='dev',
+        )
+    )
+    view.get('client_id', 0)
+    view.get('template_id', 0)
+    view.get('file_id', 0)
+    view.get('speech', 0)
+
+
+    view = Probes()
+    view.objs = view.index(
+        root_folder='',
+        parameters=dict(
+            group='dev',
+        )
+    )
+    view.get('template_ids', 0)
+    view.get('client_id', 0)
+    view.get('probe_id', 0)
+    view.get('file_id', 0)
+    view.get('speech', 0)
diff --git a/advanced/databases/voxforge/5.rst b/advanced/databases/voxforge/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1e5de09a78dffa345abfcb982f04385a7a6a239a
--- /dev/null
+++ b/advanced/databases/voxforge/5.rst
@@ -0,0 +1 @@
+The VoxForge Database
\ No newline at end of file
diff --git a/advanced/databases/xm2vts/5.json b/advanced/databases/xm2vts/5.json
new file mode 100644
index 0000000000000000000000000000000000000000..2bc7de01192c73e7b3d1d1689e82288c50486446
--- /dev/null
+++ b/advanced/databases/xm2vts/5.json
@@ -0,0 +1,167 @@
+{
+    "description": "The XM2VTS Database of Faces",
+    "root_folder": "/idiap/resource/database/xm2vtsdb/images",
+    "protocols": [
+        {
+            "name": "lp1",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "lp1"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "lp1",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "lp1",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "lp1",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "lp1",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "lp2",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "lp2"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "lp2",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "lp2",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "lp2",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "lp2",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "darkened-lp1",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "darkened-lp1"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "darkened-lp1",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "darkened-lp1",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "darkened-lp1",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "darkened-lp1",
+                        "group": "eval"
+                    }
+                }
+            }
+        },
+        {
+            "name": "darkened-lp2",
+            "template": "advanced_face_recognition/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "protocol": "darkened-lp2"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "darkened-lp2",
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "darkened-lp2",
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "protocol": "darkened-lp2",
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "protocol": "darkened-lp2",
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/advanced/databases/xm2vts/5.py b/advanced/databases/xm2vts/5.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a084c55499e2a0018e84e4807571572b78e067e
--- /dev/null
+++ b/advanced/databases/xm2vts/5.py
@@ -0,0 +1,382 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.io.image
+import bob.db.xm2vts
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.xm2vts.Database()
+
+        objs = sorted(db.objects(protocol=parameters['protocol'],
+                                 groups='world'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, db.annotations(x), x.make_path(root_folder, '.ppm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'eye_centers', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.xm2vts.Database()
+
+        template_ids = db.model_ids(protocol=parameters['protocol'],
+                                    groups=parameters['group'])
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(protocol=parameters['protocol'],
+                              groups=parameters['group'],
+                              purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, db.annotations(x),
+                                   x.make_path(root_folder, '.ppm'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'eye_centers':
+            return {
+                'left': {
+                    'y': np.int32(obj.eye_centers['leye'][0]),
+                    'x': np.int32(obj.eye_centers['leye'][1]),
+                },
+                'right': {
+                    'y': np.int32(obj.eye_centers['reye'][0]),
+                    'x': np.int32(obj.eye_centers['reye'][1]),
+                }
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
def index(self, root_folder, parameters):
    """Build the list of probe entries for the requested protocol/group.

    Parameters:
      root_folder (str): base path of the image files on disk
      parameters (dict): must contain the keys 'protocol' and 'group'

    Returns a list of ``Entry`` namedtuples sorted by (number of templates,
    template ids, client id, probe id), one entry per probe sample.
    """
    Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id',
                                 'file_id', 'eye_centers', 'image'])

    # Open the database and load the objects to provide via the outputs
    db = bob.db.xm2vts.Database()

    template_ids = sorted(db.model_ids(protocol=parameters['protocol'],
                                       groups=parameters['group']))

    # Map each template to the *set* of its probe file ids: only membership
    # is tested below, and a set makes that O(1) instead of a linear scan
    template_probes = {}
    for template_id in template_ids:
        objs = db.objects(protocol=parameters['protocol'],
                          groups=parameters['group'],
                          purposes='probe',
                          model_ids=[template_id])
        template_probes[template_id] = set(p.id for p in objs)

    objs = sorted(db.objects(protocol=parameters['protocol'],
                             groups=parameters['group'],
                             purposes='probe'),
                  key=lambda x: (x.client_id, x.id))

    entries = []
    for obj in objs:
        # Templates this probe must be compared against; the result stays
        # sorted because template_ids itself is sorted
        templates = [template_id for template_id in template_ids
                     if obj.id in template_probes[template_id]]

        # NOTE: probe_id and file_id are both the database object id here
        entries.append(Entry(templates, obj.client_id, obj.id, obj.id,
                             db.annotations(obj),
                             obj.make_path(root_folder, '.ppm')))

    return sorted(entries, key=lambda x: (len(x.template_ids), x.template_ids,
                                          x.client_id, x.probe_id))
+
+
def get(self, output, index):
    """Return the value of one output for the entry at position ``index``.

    Parameters:
      output (str): name of the requested output
      index (int): position of the entry inside ``self.objs``

    Returns a dictionary in the platform's expected format for that output,
    or None when ``output`` is not a known output name.
    """
    obj = self.objs[index]

    if output == 'template_ids':
        # obj.template_ids is a plain list of ints; build an explicit uint64
        # array — calling np.uint64 on a sequence is deprecated and rejected
        # by modern NumPy releases
        return {
            'value': np.array(obj.template_ids, dtype=np.uint64)
        }

    elif output == 'client_id':
        return {
            'value': np.uint64(obj.client_id)
        }

    elif output == 'probe_id':
        return {
            'value': np.uint64(obj.probe_id)
        }

    elif output == 'file_id':
        return {
            'value': np.uint64(obj.file_id)
        }

    elif output == 'eye_centers':
        # Annotations are stored as (y, x) pairs per eye — TODO confirm
        # against bob.db.xm2vts.Database.annotations
        return {
            'left': {
                'y': np.int32(obj.eye_centers['leye'][0]),
                'x': np.int32(obj.eye_centers['leye'][1]),
            },
            'right': {
                'y': np.int32(obj.eye_centers['reye'][0]),
                'x': np.int32(obj.eye_centers['reye'][1]),
            }
        }

    elif output == 'image':
        # obj.image holds the full path of the image file on disk
        return {
            'value': bob.io.base.load(obj.image)
        }
+
+
+#----------------------------------------------------------
+
+
def setup_tests():
    """Prepare the test environment: replace the real image loader by a mock."""
    def mock_load(root_folder):
        # Return a fixed-size fake color image instead of reading from disk.
        # np.zeros (instead of the raw np.ndarray constructor) guarantees
        # deterministic, initialized contents for the mocked image data.
        return np.zeros((3, 10, 20), dtype=np.uint8)

    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
# Test the behavior of the views (on fake data)
if __name__ == '__main__':

    setup_tests()

    # Exercise every output of each view on its first entry; any exception
    # here means the view or its data layout is broken.
    view = Train()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='lp1',
        )
    )
    for output_name in ('client_id', 'file_id', 'eye_centers', 'image'):
        view.get(output_name, 0)

    view = Templates()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='lp1',
            group='dev',
        )
    )
    for output_name in ('client_id', 'template_id', 'file_id',
                        'eye_centers', 'image'):
        view.get(output_name, 0)

    view = Probes()
    view.objs = view.index(
        root_folder='',
        parameters=dict(
            protocol='lp1',
            group='dev',
        )
    )
    for output_name in ('template_ids', 'client_id', 'probe_id', 'file_id',
                        'eye_centers', 'image'):
        view.get(output_name, 0)
diff --git a/advanced/databases/xm2vts/5.rst b/advanced/databases/xm2vts/5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..437ec0924733ed0e4de37ced402a148752bbb75c
--- /dev/null
+++ b/advanced/databases/xm2vts/5.rst
@@ -0,0 +1 @@
+The XM2VTS Database of Faces
\ No newline at end of file
diff --git a/advanced/experiments/username/username/gpu_test/1/gpu_test.json b/advanced/experiments/username/username/gpu_test/1/gpu_test.json
index 28f60c51afff89bc5fee14b0e6adafdc1ff68616..2f2e158d3965f20f9c1facb83ff783459b5821ac 100644
--- a/advanced/experiments/username/username/gpu_test/1/gpu_test.json
+++ b/advanced/experiments/username/username/gpu_test/1/gpu_test.json
@@ -16,8 +16,8 @@
             "offset": 10
         },
         "environment": {
-            "version": "0.0.1",
-            "name": "Pytorch 0.4.0"
+            "version": "1.0.0",
+            "name": "Deep Learning"
         }
     },
     "analyzers": {
diff --git a/advanced/protocoltemplates/advanced_annotation_benchmark/1.json b/advanced/protocoltemplates/advanced_annotation_benchmark/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..4f13bed19192517db39b139e3e0705f34b791f85
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_annotation_benchmark/1.json
@@ -0,0 +1,14 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "system/array_2d_uint8/1",
+                "vein_annotations": "system/array_2d_uint8/1",
+                "ROI_annotations": "system/array_2d_uint8/1",
+                "alignment_annotations": "system/array_1d_coordinates/1"
+            },
+            "name": "annotations"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_face_antispoofing/1.json b/advanced/protocoltemplates/advanced_face_antispoofing/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..444c99b05cadb8a1cb46686bb4dbf1d15bc435d2
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_face_antispoofing/1.json
@@ -0,0 +1,82 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "attack_support": "system/text/1",
+                "annotations": "system/bounding_box_video/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "class": "system/text/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "annotations": "system/bounding_box_video/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_id": "system/uint64/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "annotations": "system/bounding_box_video/1"
+            },
+            "name": "dev_probes_real"
+        },
+        {
+            "outputs": {
+                "attack_support": "system/text/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "annotations": "system/bounding_box_video/1"
+            },
+            "name": "dev_probes_attack"
+        },
+        {
+            "outputs": {
+                "annotations": "system/bounding_box_video/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_id": "system/uint64/1"
+            },
+            "name": "test_templates"
+        },
+        {
+            "outputs": {
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "annotations": "system/bounding_box_video/1"
+            },
+            "name": "test_probes_real"
+        },
+        {
+            "outputs": {
+                "attack_support": "system/text/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1",
+                "video": "system/array_4d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "annotations": "system/bounding_box_video/1"
+            },
+            "name": "test_probes_attack"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_face_recognition/1.json b/advanced/protocoltemplates/advanced_face_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..478813c3341d8c32e978267e4f96a20c0ca0b1c4
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_face_recognition/1.json
@@ -0,0 +1,56 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "dev_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "dev_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "test_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "test_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_face_recognition_textid/1.json b/advanced/protocoltemplates/advanced_face_recognition_textid/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9bf66c614336b9bab3302392e5797910a91da371
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_face_recognition_textid/1.json
@@ -0,0 +1,56 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "dev_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "dev_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "test_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "test_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_fingervein_recognition/1.json b/advanced/protocoltemplates/advanced_fingervein_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..1f73d2045de4330bdb77a129790697eec87fd4df
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_fingervein_recognition/1.json
@@ -0,0 +1,51 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "test_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "test_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_speaker_recognition/1.json b/advanced/protocoltemplates/advanced_speaker_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..35f89c3f545e8bed59a4417eed2347eecc2c495f
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_speaker_recognition/1.json
@@ -0,0 +1,51 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1"
+            }
+        },
+        {
+            "name": "dev_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1"
+            }
+        },
+        {
+            "name": "dev_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1"
+            }
+        },
+        {
+            "name": "test_templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1"
+            }
+        },
+        {
+            "name": "test_probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_vein_annotations/1.json b/advanced/protocoltemplates/advanced_vein_annotations/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb699de6eab33476728ec0528b4430053fa2f2a3
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_vein_annotations/1.json
@@ -0,0 +1,57 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "vein_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "ROI_annotations": "{{ system_user.username }}/array_2d_uint8/1",
+                "alignment_annotations": "{{ system_user.username }}/array_1d_coordinates/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_vein_recognition/1.json b/advanced/protocoltemplates/advanced_vein_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9db52359d8dc4ce4d5b61f6eb5442f9b288c851c
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_vein_recognition/1.json
@@ -0,0 +1,42 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/advanced_vein_recognition_2/1.json b/advanced/protocoltemplates/advanced_vein_recognition_2/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9db52359d8dc4ce4d5b61f6eb5442f9b288c851c
--- /dev/null
+++ b/advanced/protocoltemplates/advanced_vein_recognition_2/1.json
@@ -0,0 +1,42 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "model_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "client_id": "{{ system_user.username }}/uint64/1"
+            },
+            "name": "eval_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/iris_pad/1.json b/advanced/protocoltemplates/iris_pad/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..b14d93cb78289614d29f1b72b0e9117f11f6c2f0
--- /dev/null
+++ b/advanced/protocoltemplates/iris_pad/1.json
@@ -0,0 +1,13 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "test",
+            "outputs": {
+                "category": "{{ system_user.username }}/integer/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "label": "{{ system_user.username }}/boolean/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_expression_recognition/1.json b/advanced/protocoltemplates/simple_expression_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..4da22d9dfd52fe2b1ce27c6cfbb69a4de9c75e23
--- /dev/null
+++ b/advanced/protocoltemplates/simple_expression_recognition/1.json
@@ -0,0 +1,23 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_4d_uint8/1",
+                "emotion": "{{ system_user.username }}/text/1"
+            }
+        },
+        {
+            "name": "test",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_4d_uint8/1",
+                "emotion": "{{ system_user.username }}/text/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_face_antispoofing/1.json b/advanced/protocoltemplates/simple_face_antispoofing/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..56fca1f89e9b146d3aa8439454cf95997738cf21
--- /dev/null
+++ b/advanced/protocoltemplates/simple_face_antispoofing/1.json
@@ -0,0 +1,38 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "attack_support": "{{ system_user.username }}/text/1",
+                "annotations": "{{ system_user.username }}/bounding_box_video/1",
+                "video": "{{ system_user.username }}/array_4d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "attack_support": "{{ system_user.username }}/text/1",
+                "annotations": "{{ system_user.username }}/bounding_box_video/1",
+                "video": "{{ system_user.username }}/array_4d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "attack_support": "{{ system_user.username }}/text/1",
+                "annotations": "{{ system_user.username }}/bounding_box_video/1",
+                "video": "{{ system_user.username }}/array_4d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "test_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_face_recognition/1.json b/advanced/protocoltemplates/simple_face_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c25c402e3f55b9f00fc75fcdf03acf98da7dd06
--- /dev/null
+++ b/advanced/protocoltemplates/simple_face_recognition/1.json
@@ -0,0 +1,32 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1"
+            }
+        },
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_2d_uint8/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_face_recognition_frgc/1.json b/advanced/protocoltemplates/simple_face_recognition_frgc/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..67c0b56db6d93607e564277426a0259589b35e89
--- /dev/null
+++ b/advanced/protocoltemplates/simple_face_recognition_frgc/1.json
@@ -0,0 +1,35 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_face_recognition_gbu/1.json b/advanced/protocoltemplates/simple_face_recognition_gbu/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..ccdec3a2711461a5ff85ea58759760f64588ccbf
--- /dev/null
+++ b/advanced/protocoltemplates/simple_face_recognition_gbu/1.json
@@ -0,0 +1,35 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/uint64/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "eye_centers": "{{ system_user.username }}/eye_positions/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_face_recognition_textid/1.json b/advanced/protocoltemplates/simple_face_recognition_textid/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4be9712541566d1dd0cb2eaa8d450798c8b437f
--- /dev/null
+++ b/advanced/protocoltemplates/simple_face_recognition_textid/1.json
@@ -0,0 +1,32 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1"
+            }
+        },
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_uint64/1",
+                "image": "{{ system_user.username }}/array_3d_uint8/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_fingerprint_antispoofing/1.json b/advanced/protocoltemplates/simple_fingerprint_antispoofing/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..a1bd042fe2b834c6421db633ff87f4ada580807f
--- /dev/null
+++ b/advanced/protocoltemplates/simple_fingerprint_antispoofing/1.json
@@ -0,0 +1,19 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "spoof": "{{ system_user.username }}/boolean/1"
+            }
+        },
+        {
+            "name": "test",
+            "outputs": {
+                "image": "{{ system_user.username }}/array_3d_uint8/1",
+                "spoof": "{{ system_user.username }}/boolean/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_fingervein_recognition/1.json b/advanced/protocoltemplates/simple_fingervein_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb43bac7c059a5bc605fbbe244e23bb4c55cfc2d
--- /dev/null
+++ b/advanced/protocoltemplates/simple_fingervein_recognition/1.json
@@ -0,0 +1,32 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "{{ system_user.username }}/array_2d_uint8/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "dev_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_keystroke_recognition/1.json b/advanced/protocoltemplates/simple_keystroke_recognition/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..d2399f93637180cadadfc888d48ad16d44516eb7
--- /dev/null
+++ b/advanced/protocoltemplates/simple_keystroke_recognition/1.json
@@ -0,0 +1,24 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "keystroke": "{{ system_user.username }}/atvs_keystroke/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "keystroke": "{{ system_user.username }}/atvs_keystroke/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_keystroke_recognition_kboc16/1.json b/advanced/protocoltemplates/simple_keystroke_recognition_kboc16/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac4ffac326310a22914f671312473c18ddef3295
--- /dev/null
+++ b/advanced/protocoltemplates/simple_keystroke_recognition_kboc16/1.json
@@ -0,0 +1,24 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "templates",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1",
+                "keystroke": "{{ system_user.username }}/kboc16_keystroke/1"
+            }
+        },
+        {
+            "name": "probes",
+            "outputs": {
+                "file_id": "{{ system_user.username }}/uint64/1",
+                "probe_id": "{{ system_user.username }}/uint64/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1",
+                "keystroke": "{{ system_user.username }}/kboc16_keystroke/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/simple_speech_antispoofing/1.json b/advanced/protocoltemplates/simple_speech_antispoofing/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..eef6de8a02dc58b46f87b0817698e9833b112245
--- /dev/null
+++ b/advanced/protocoltemplates/simple_speech_antispoofing/1.json
@@ -0,0 +1,35 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "attack_type": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "attack_type": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "attack_type": "{{ system_user.username }}/text/1",
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "class": "{{ system_user.username }}/text/1"
+            },
+            "name": "eval_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/speaker_recognition_spoof/1.json b/advanced/protocoltemplates/speaker_recognition_spoof/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..543c84a07f7f51644b294391782073ffbe8432e8
--- /dev/null
+++ b/advanced/protocoltemplates/speaker_recognition_spoof/1.json
@@ -0,0 +1,25 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "attack_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "dev_attacks"
+        },
+        {
+            "outputs": {
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "attack_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "test_attacks"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/verify_trainset_speech/1.json b/advanced/protocoltemplates/verify_trainset_speech/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ea7924c4df8b4da9d1e130411a14663fddc0304
--- /dev/null
+++ b/advanced/protocoltemplates/verify_trainset_speech/1.json
@@ -0,0 +1,24 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "template_id": "{{ system_user.username }}/text/1"
+            },
+            "name": "train_templates"
+        },
+        {
+            "outputs": {
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "probe_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "train_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/advanced/protocoltemplates/verify_trainset_speech_spoof/1.json b/advanced/protocoltemplates/verify_trainset_speech_spoof/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..e9028c82bd2faac2ee387cec2a88b2217dc94adb
--- /dev/null
+++ b/advanced/protocoltemplates/verify_trainset_speech_spoof/1.json
@@ -0,0 +1,15 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "speech": "{{ system_user.username }}/array_1d_floats/1",
+                "client_id": "{{ system_user.username }}/text/1",
+                "file_id": "{{ system_user.username }}/text/1",
+                "attack_id": "{{ system_user.username }}/text/1",
+                "template_ids": "{{ system_user.username }}/array_1d_text/1"
+            },
+            "name": "train_attacks"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/test/algorithms/username/integers_rawdata_access/1.json b/test/algorithms/username/integers_rawdata_access/1.json
index 33287700e2290662af32ddcb696c9f32798660a5..cec4dbe0496ddfae6a4e34094c3d3cdaa0228592 100644
--- a/test/algorithms/username/integers_rawdata_access/1.json
+++ b/test/algorithms/username/integers_rawdata_access/1.json
@@ -18,5 +18,11 @@
   ],
   "uses": {
     "lib": "{{ user.username }}/thelib/1"
-  }
+  },
+  "parameters": {
+        "base_path": {
+            "default": "simple_rawdata_access/1",
+            "type": "string"
+        }
+    }
 }
diff --git a/test/algorithms/username/integers_rawdata_access/1.py b/test/algorithms/username/integers_rawdata_access/1.py
index 6c939d0c6b5cb4fc47912531a1e11b6b58298e24..c2bfcd1f874ff4cfd63797f8c1e7693931b76b06 100644
--- a/test/algorithms/username/integers_rawdata_access/1.py
+++ b/test/algorithms/username/integers_rawdata_access/1.py
@@ -31,7 +31,8 @@ class Algorithm:
         self.offset = 1
 
     def setup(self, parameters):
-        with open("/databases/simple_rawdata_access/1/datafile.txt", "rt") as shared_data:
+        base_path = parameters["base_path"]
+        with open(f"/databases/{base_path}/datafile.txt", "rt") as shared_data:
             value = shared_data.read()
             self.offset = int(value)
         return True
diff --git a/test/databases/simple/2.json b/test/databases/simple/2.json
new file mode 100644
index 0000000000000000000000000000000000000000..2dcd3a744530c99753ded2d6bb66b718e6118e0a
--- /dev/null
+++ b/test/databases/simple/2.json
@@ -0,0 +1,55 @@
+{
+    "description": "A test database that emits integers",
+    "root_folder": "/this/database/does/not/require/a/path",
+    "protocols": [
+        {
+            "name": "protocol",
+            "template": "test_integers/1",
+            "views": {
+                "set": {
+                    "view": "View",
+                    "parameters": {}
+                },
+                "set2": {
+                    "view": "View2",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "protocol2",
+            "template": "test_integers/1",
+            "views": {
+                "set": {
+                    "view": "LargeView",
+                    "parameters": {}
+                },
+                "set2": {
+                    "view": "View2",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "10_numbers",
+            "template": "one_block/1",
+            "views": {
+                "numbers": {
+                    "view": "View10",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "duo",
+            "template": "one_block_two_inputs/1",
+            "views": {
+                "numbers": {
+                    "view": "ViewDuo",
+                    "parameters": {}
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/test/databases/simple/2.py b/test/databases/simple/2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d887ae898a2e992ddeb5a9b682bfc7321b06c596
--- /dev/null
+++ b/test/databases/simple/2.py
@@ -0,0 +1,247 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+from beat.backend.python.database import View as BaseView
+
+
+#----------------------------------------------------------
+
+
+class View(BaseView):
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ---------------
+    |     out     |
+    ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(42),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class View2(BaseView):
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ---------------
+    |     out     |
+    ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(53),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class LargeView(BaseView):
+
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    --------------- --------------- --------------- --------------- ---------------
+    |     out     | |     out     | |     out     | |     out     | |     out     |
+    --------------- --------------- --------------- --------------- ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(0),
+            Entry(1),
+            Entry(2),
+            Entry(3),
+            Entry(4),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class View10(BaseView):
+
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ---------------  ---------------       ---------------
+    |     out     |  |     out     |  ...  |     out     |   (10 entries)
+    ---------------  ---------------       ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(0),
+            Entry(1),
+            Entry(2),
+            Entry(3),
+            Entry(4),
+            Entry(5),
+            Entry(6),
+            Entry(7),
+            Entry(8),
+            Entry(9),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class ViewDuo(BaseView):
+
+    """Outputs:
+        - a: "{{ system_user.username }}/integer/1"
+        - b: "{{ system_user.username }}/integer/1"
+
+    -----------------------------------------------
+    |                      a                      |   (x 3: a = 0, 10, 20)
+    -----------------------------------------------
+    --------------- --------------- ---------------
+    |      b      | |      b      | |      b      |   (3 b entries per a)
+    --------------- --------------- ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['a', 'b'])
+
+        return [
+            Entry(0, 0),
+            Entry(0, 1),
+            Entry(0, 2),
+            Entry(10, 3),
+            Entry(10, 4),
+            Entry(10, 5),
+            Entry(20, 6),
+            Entry(20, 7),
+            Entry(20, 8),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'a':
+            return {
+                'value': np.int32(obj.a)
+            }
+
+        elif output == 'b':
+            return {
+                'value': np.int32(obj.b)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    pass
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    from beat.backend.python.database import DatabaseTester
+
+    DatabaseTester('View', View,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    DatabaseTester('View2', View2,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    DatabaseTester('LargeView', LargeView,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
diff --git a/test/databases/simple/2.rst b/test/databases/simple/2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a8928cc50322685142dbb3102b07c55349d702b
--- /dev/null
+++ b/test/databases/simple/2.rst
@@ -0,0 +1 @@
+A test database that emits integers
\ No newline at end of file
diff --git a/test/databases/simple_rawdata_access/2.json b/test/databases/simple_rawdata_access/2.json
new file mode 100644
index 0000000000000000000000000000000000000000..4ed14fb1324581f7be403bc03e65d8978d5e01b6
--- /dev/null
+++ b/test/databases/simple_rawdata_access/2.json
@@ -0,0 +1,60 @@
+{
+    "description": "A test database that emits integers",
+    "root_folder": "/this/database/does/not/require/a/path",
+    "environment": {
+        "name": "Example databases",
+        "version": "1.4.1"
+    },
+    "direct_rawdata_access": true,
+    "protocols": [
+        {
+            "name": "protocol",
+            "template": "test_integers/1",
+            "views": {
+                "set": {
+                    "view": "View",
+                    "parameters": {}
+                },
+                "set2": {
+                    "view": "View2",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "protocol2",
+            "template": "test_integers/1",
+            "views": {
+                "set": {
+                    "view": "LargeView",
+                    "parameters": {}
+                },
+                "set2": {
+                    "view": "View2",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "10_numbers",
+            "template": "one_block/1",
+            "views": {
+                "numbers": {
+                    "view": "View10",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "duo",
+            "template": "one_block_two_inputs/1",
+            "views": {
+                "numbers": {
+                    "view": "ViewDuo",
+                    "parameters": {}
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/test/databases/simple_rawdata_access/2.py b/test/databases/simple_rawdata_access/2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d887ae898a2e992ddeb5a9b682bfc7321b06c596
--- /dev/null
+++ b/test/databases/simple_rawdata_access/2.py
@@ -0,0 +1,247 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+from beat.backend.python.database import View as BaseView
+
+
+#----------------------------------------------------------
+
+
+class View(BaseView):
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ---------------
+    |     out     |
+    ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(42),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class View2(BaseView):
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ---------------
+    |     out     |
+    ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(53),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class LargeView(BaseView):
+
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    --------------- --------------- --------------- --------------- ---------------
+    |     out     | |     out     | |     out     | |     out     | |     out     |
+    --------------- --------------- --------------- --------------- ---------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(0),
+            Entry(1),
+            Entry(2),
+            Entry(3),
+            Entry(4),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class View10(BaseView):
+
+    """Outputs:
+        - out: "{{ system_user.username }}/integer/1"
+
+    ------- ------- ------- ------- ------- ------- ------- ------- ------- -------
+    | out | | out | | out | | out | | out | | out | | out | | out | | out | | out |
+    ------- ------- ------- ------- ------- ------- ------- ------- ------- -------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['out'])
+
+        return [
+            Entry(0),
+            Entry(1),
+            Entry(2),
+            Entry(3),
+            Entry(4),
+            Entry(5),
+            Entry(6),
+            Entry(7),
+            Entry(8),
+            Entry(9),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'out':
+            return {
+                'value': np.int32(obj.out)
+            }
+
+
+#----------------------------------------------------------
+
+
+class ViewDuo(BaseView):
+
+    """Outputs:
+        - a: "{{ system_user.username }}/integer/1"
+        - b: "{{ system_user.username }}/integer/1"
+
+    ----------------- ----------------- -----------------
+    |       a       | |       a       | |       a       |
+    ----------------- ----------------- -----------------
+    ----- ----- ----- ----- ----- ----- ----- ----- -----
+    | b | | b | | b | | b | | b | | b | | b | | b | | b |
+    ----- ----- ----- ----- ----- ----- ----- ----- -----
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['a', 'b'])
+
+        return [
+            Entry(0, 0),
+            Entry(0, 1),
+            Entry(0, 2),
+            Entry(10, 3),
+            Entry(10, 4),
+            Entry(10, 5),
+            Entry(20, 6),
+            Entry(20, 7),
+            Entry(20, 8),
+        ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'a':
+            return {
+                'value': np.int32(obj.a)
+            }
+
+        elif output == 'b':
+            return {
+                'value': np.int32(obj.b)
+            }
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    """Hook for preparing test fixtures.
+
+    Nothing to set up here: the views above emit synthetic data only.
+    """
+    pass
+
+
+#----------------------------------------------------------
+
+
+# Self-test: exercise every view declared in 2.json on its synthetic data
+if __name__ == '__main__':
+
+    setup_tests()
+
+    from beat.backend.python.database import DatabaseTester
+
+    DatabaseTester('View', View,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    DatabaseTester('View2', View2,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    DatabaseTester('LargeView', LargeView,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    # Previously missing: View10 and ViewDuo back the '10_numbers' and 'duo'
+    # protocols in 2.json but were not covered by this self-test.
+    DatabaseTester('View10', View10,
+        [
+            'out',
+        ],
+        parameters=dict(
+        ),
+    )
+
+    DatabaseTester('ViewDuo', ViewDuo,
+        [
+            'a',
+            'b',
+        ],
+        parameters=dict(
+        ),
+    )
diff --git a/test/databases/simple_rawdata_access/2.rst b/test/databases/simple_rawdata_access/2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a8928cc50322685142dbb3102b07c55349d702b
--- /dev/null
+++ b/test/databases/simple_rawdata_access/2.rst
@@ -0,0 +1 @@
+A test database that emits integers
\ No newline at end of file
diff --git a/test/experiments/username/username/double/1/double_db_v2.json b/test/experiments/username/username/double/1/double_db_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a88cbe9372485e3461c8d4183aac48dacf7c9c6
--- /dev/null
+++ b/test/experiments/username/username/double/1/double_db_v2.json
@@ -0,0 +1,44 @@
+{
+  "analyzers": {
+    "analysis": {
+      "algorithm": "{{ user.username }}/integers_echo_analyzer/1",
+      "inputs": {
+        "in_data": "in"
+      }
+    }
+  },
+  "blocks": {
+    "echo1": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    },
+    "echo2": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    }
+  },
+  "datasets": {
+    "set": {
+      "database": "simple/2",
+      "protocol": "protocol",
+      "set": "set"
+    }
+  },
+  "globals": {
+    "queue": "{{ queue }}",
+    "environment": {
+      "name": "{{ environment.name }}",
+      "version": "{{ environment.version }}"
+    }
+  }
+}
diff --git a/test/experiments/username/username/double_triangle/1/double_triangle_db_v2.json b/test/experiments/username/username/double_triangle/1/double_triangle_db_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0b46c47c322d8bc2336c20701d74424467d9a6c
--- /dev/null
+++ b/test/experiments/username/username/double_triangle/1/double_triangle_db_v2.json
@@ -0,0 +1,77 @@
+{
+  "analyzers": {
+    "analysis": {
+      "algorithm": "{{ user.username }}/integers_echo_analyzer/1",
+      "inputs": {
+        "in_data": "in"
+      }
+    }
+  },
+  "blocks": {
+    "echo1": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    },
+    "echo2": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    },
+    "echo3": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    },
+    "echo4": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    },
+    "echo5": {
+      "algorithm": "{{ user.username }}/integers_echo_ignore/1",
+      "inputs": {
+        "in_data": "in",
+        "in_data2": "in2"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    }
+  },
+  "datasets": {
+    "set": {
+      "database": "simple/2",
+      "protocol": "protocol",
+      "set": "set"
+    },
+    "set2": {
+      "database": "simple/2",
+      "protocol": "protocol",
+      "set": "set2"
+    }
+  },
+  "globals": {
+    "queue": "{{ queue }}",
+    "environment": {
+      "name": "{{ environment.name }}",
+      "version": "{{ environment.version }}"
+    }
+  }
+}
diff --git a/test/experiments/username/username/duo/1/split_2_db_v2.json b/test/experiments/username/username/duo/1/split_2_db_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..80c6b968ea806b67f2ffe8e5e719436e4c4a137e
--- /dev/null
+++ b/test/experiments/username/username/duo/1/split_2_db_v2.json
@@ -0,0 +1,37 @@
+{
+  "analyzers": {
+    "analysis": {
+      "algorithm": "{{ user.username }}/integers_echo_analyzer/1",
+      "inputs": {
+        "in_data": "in"
+      }
+    }
+  },
+  "blocks": {
+    "operation": {
+      "algorithm": "{{ user.username }}/integers_sum/1",
+      "nb_slots": 2,
+      "inputs": {
+        "a": "a",
+        "b": "b"
+      },
+      "outputs": {
+        "sum": "out"
+      }
+    }
+  },
+  "datasets": {
+    "set": {
+      "database": "simple/2",
+      "protocol": "duo",
+      "set": "numbers"
+    }
+  },
+  "globals": {
+    "queue": "{{ queue }}",
+    "environment": {
+      "name": "{{ environment.name }}",
+      "version": "{{ environment.version }}"
+    }
+  }
+}
diff --git a/test/experiments/username/username/single/1/single_add_torch_autonomous.json b/test/experiments/username/username/single/1/single_add_torch_autonomous.json
index c0501b423827fcfac11e9c31d5017b892a7e0e89..58f8f6bfeb4e728296b7eb39c6fbb94bb3cbb791 100644
--- a/test/experiments/username/username/single/1/single_add_torch_autonomous.json
+++ b/test/experiments/username/username/single/1/single_add_torch_autonomous.json
@@ -28,8 +28,8 @@
     "globals": {
         "queue": "queue",
         "environment": {
-            "name": "Pytorch 0.4.0",
-            "version": "0.0.1"
+            "name": "Deep Learning",
+            "version": "1.0.0"
         },
         "user/integers_add_torch_autonomous/1": {
             "offset": 3
diff --git a/test/experiments/username/username/single/1/single_add_torch_legacy.json b/test/experiments/username/username/single/1/single_add_torch_legacy.json
index e591242c8dca9b88e1b146b3fc4b5b763ffea097..cf738537999e7404060ff6152007be88cc55558f 100644
--- a/test/experiments/username/username/single/1/single_add_torch_legacy.json
+++ b/test/experiments/username/username/single/1/single_add_torch_legacy.json
@@ -28,8 +28,8 @@
     "globals": {
         "queue": "queue",
         "environment": {
-            "name": "Pytorch 0.4.0",
-            "version": "0.0.1"
+            "name": "Deep Learning",
+            "version": "1.0.0"
         },
         "user/integers_add_torch_legacy/1": {
             "offset": 3
diff --git a/test/experiments/username/username/single/1/single_add_torch_sequential.json b/test/experiments/username/username/single/1/single_add_torch_sequential.json
index bca924c4a77506da6f6c77c2c30df697c7190382..3d277ce76539370a80b107bc4ca1bb8420c8cfd6 100644
--- a/test/experiments/username/username/single/1/single_add_torch_sequential.json
+++ b/test/experiments/username/username/single/1/single_add_torch_sequential.json
@@ -28,8 +28,8 @@
     "globals": {
         "queue": "queue",
         "environment": {
-            "name": "Pytorch 0.4.0",
-            "version": "0.0.1"
+            "name": "Deep Learning",
+            "version": "1.0.0"
         },
         "user/integers_add_torch_sequential/1": {
             "offset": 3
diff --git a/test/experiments/username/username/single/1/single_db_v2.json b/test/experiments/username/username/single/1/single_db_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..e87939fafe0d9615c1644cbeead85007fa662160
--- /dev/null
+++ b/test/experiments/username/username/single/1/single_db_v2.json
@@ -0,0 +1,35 @@
+{
+  "analyzers": {
+    "analysis": {
+      "algorithm": "{{ user.username }}/integers_echo_analyzer/1",
+      "inputs": {
+        "in_data": "in"
+      }
+    }
+  },
+  "blocks": {
+    "echo": {
+      "algorithm": "{{ user.username }}/integers_echo/1",
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    }
+  },
+  "datasets": {
+    "set": {
+      "database": "simple/2",
+      "protocol": "10_numbers",
+      "set": "numbers"
+    }
+  },
+  "globals": {
+    "queue": "{{ queue }}",
+    "environment": {
+      "name": "{{ environment.name }}",
+      "version": "{{ environment.version }}"
+    }
+  }
+}
diff --git a/test/experiments/username/username/single/1/single_rawdata_access.json b/test/experiments/username/username/single/1/single_rawdata_access.json
index 16dd7ce5ccb24d9c010dde2b6527eafe12afd15d..446bef2dc890127e508fa83f680f5c84fc074830 100644
--- a/test/experiments/username/username/single/1/single_rawdata_access.json
+++ b/test/experiments/username/username/single/1/single_rawdata_access.json
@@ -10,6 +10,9 @@
   "blocks": {
     "echo": {
       "algorithm": "{{ user.username }}/integers_rawdata_access/1",
+      "parameters": {
+        "base_path": "simple_rawdata_access/1"
+      },
       "inputs": {
         "in_data": "in"
       },
diff --git a/test/experiments/username/username/single/1/single_rawdata_access_db_v2.json b/test/experiments/username/username/single/1/single_rawdata_access_db_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..6c10e82e7ed4cf2cc491d19e2ed139b8dc60c4cc
--- /dev/null
+++ b/test/experiments/username/username/single/1/single_rawdata_access_db_v2.json
@@ -0,0 +1,38 @@
+{
+  "analyzers": {
+    "analysis": {
+      "algorithm": "{{ user.username }}/integers_echo_analyzer/1",
+      "inputs": {
+        "in_data": "in"
+      }
+    }
+  },
+  "blocks": {
+    "echo": {
+      "algorithm": "{{ user.username }}/integers_rawdata_access/1",
+      "parameters": {
+        "base_path": "simple_rawdata_access/2"
+      },
+      "inputs": {
+        "in_data": "in"
+      },
+      "outputs": {
+        "out_data": "out"
+      }
+    }
+  },
+  "datasets": {
+    "set": {
+      "database": "simple_rawdata_access/2",
+      "protocol": "10_numbers",
+      "set": "numbers"
+    }
+  },
+  "globals": {
+    "queue": "{{ queue }}",
+    "environment": {
+      "name": "{{ environment.name }}",
+      "version": "{{ environment.version }}"
+    }
+  }
+}
diff --git a/test/protocoltemplates/one_block/1.json b/test/protocoltemplates/one_block/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..41e16bbbd73a81d3838752be5984b1cce1306eb8
--- /dev/null
+++ b/test/protocoltemplates/one_block/1.json
@@ -0,0 +1,11 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "numbers",
+            "outputs": {
+                "out": "{{ system_user.username }}/integer/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/test/protocoltemplates/one_block_two_inputs/1.json b/test/protocoltemplates/one_block_two_inputs/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..85fa6d611f9d548168c217b27310d7057ad1a88b
--- /dev/null
+++ b/test/protocoltemplates/one_block_two_inputs/1.json
@@ -0,0 +1,12 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "numbers",
+            "outputs": {
+                "a": "{{ system_user.username }}/integer/1",
+                "b": "{{ system_user.username }}/integer/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/test/protocoltemplates/test_integers/1.json b/test/protocoltemplates/test_integers/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0cea10941d6db9079a8a82411029d5879e5e41b
--- /dev/null
+++ b/test/protocoltemplates/test_integers/1.json
@@ -0,0 +1,17 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "set",
+            "outputs": {
+                "out": "{{ system_user.username }}/integer/1"
+            }
+        },
+        {
+            "name": "set2",
+            "outputs": {
+                "out": "{{ system_user.username }}/integer/1"
+            }
+        }
+    ]
+}
\ No newline at end of file