diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..77aede85764c5c02202c9fdef0de3487d7cdb45f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*~
+prefix/cache/
diff --git a/README.rst b/README.rst
index fb894067e3db65119799284c0609e63186c9e1ab..43910a4ae7f77d89d4fc7e6ab3067bc0759945d3 100644
--- a/README.rst
+++ b/README.rst
@@ -1,12 +1,36 @@
 .. vim: set fileencoding=utf-8 :
 
-======================
- BEAT Tutorial Prefix
-======================
+=====================
+ A Hands-On Tutorial
+=====================
 
-This package contains the prefix folder for BEAT tutorials. Run any
-BEAT commands relating to the prefix in the top-level folder of this
-project, next to the `prefix` folder.
+This repository contains a hands-on BEAT tutorial.
+To get started, clone this repository::
+
+    $ git clone https://gitlab.idiap.ch/beat/beat.tutorial.prefix.git
+    $ cd beat.tutorial.prefix
+
+Run the commands below to install BEAT::
+
+    $ conda create -n beat-tutorial --override-channels \
+        -c https://www.idiap.ch/software/beat/conda \
+        -c defaults \
+        python=3 beat.editor scikit-learn matplotlib ipdb jupyterlab
+    $ conda activate beat-tutorial
+    $ conda config --env --add channels defaults
+    $ conda config --env --add channels https://www.idiap.ch/software/beat/conda
+
+The commands above create a dedicated conda environment for this tutorial,
+keeping its dependencies isolated from your other projects.
+
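+As a quick sanity check, the ``beat`` command-line tool should now be
+available; for example::
+
+    $ beat --help
+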
+Then, launch JupyterLab and follow the instructions in ``tutorial.ipynb``::
+
+    $ jupyter lab
 
 
 Support
diff --git a/img/beat_editor_home.png b/img/beat_editor_home.png
new file mode 100644
index 0000000000000000000000000000000000000000..c0cca484ebf8c2e83e1088ec285b4b366e303903
Binary files /dev/null and b/img/beat_editor_home.png differ
diff --git a/img/iris_db_overview.png b/img/iris_db_overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..070acb8c455f78dbcd0bd79b59ea6dd5520278d1
Binary files /dev/null and b/img/iris_db_overview.png differ
diff --git a/img/iris_flowers.jpg b/img/iris_flowers.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a834deb55552792497ca905876852e2a37701ea8
Binary files /dev/null and b/img/iris_flowers.jpg differ
diff --git a/img/iris_versicolor_measurements.png b/img/iris_versicolor_measurements.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7bce5b209aecc53a30ebd3f16a49d02e9546389
Binary files /dev/null and b/img/iris_versicolor_measurements.png differ
diff --git a/img/toolchain_scale_and_lda.png b/img/toolchain_scale_and_lda.png
new file mode 100644
index 0000000000000000000000000000000000000000..180a69a7bef0411af193362ab7b0248582bd6b68
Binary files /dev/null and b/img/toolchain_scale_and_lda.png differ
diff --git a/prefix/databases/atnt/4.json b/prefix/databases/atnt/4.json
new file mode 100644
index 0000000000000000000000000000000000000000..a80c7618675b8efe04202b8839a4678fd4dfc025
--- /dev/null
+++ b/prefix/databases/atnt/4.json
@@ -0,0 +1,59 @@
+{
+    "root_folder": "/remote/dataset/att_faces",
+    "description": "The AT&T Database of Faces",
+    "protocols": [
+        {
+            "name": "idiap",
+            "template": "idiap/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {}
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {}
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {}
+                }
+            }
+        },
+        {
+            "name": "idiap_test_eyepos",
+            "template": "idiap_test_eyepos/1",
+            "views": {
+                "train": {
+                    "view": "TrainEyePositions",
+                    "parameters": {}
+                },
+                "dev_templates": {
+                    "view": "TemplatesEyePositions",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "dev_probes": {
+                    "view": "ProbesEyePositions",
+                    "parameters": {
+                        "group": "dev"
+                    }
+                },
+                "test_templates": {
+                    "view": "TemplatesEyePositions",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                },
+                "test_probes": {
+                    "view": "ProbesEyePositions",
+                    "parameters": {
+                        "group": "eval"
+                    }
+                }
+            }
+        }
+    ],
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/prefix/databases/atnt/4.py b/prefix/databases/atnt/4.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcc3c3d3dd8826e9d89556e77e35043abb9df2c6
--- /dev/null
+++ b/prefix/databases/atnt/4.py
@@ -0,0 +1,446 @@
+import numpy
+import bob.io.base
+import bob.io.image
+import bob.ip.color
+import bob.db.atnt
+
+
+class Train:
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder = root_folder
+        self.outputs     = outputs
+
+        db               = bob.db.atnt.Database()
+        self.objs        = sorted(db.objects(groups='world', purposes=None), key=lambda x: x.id)
+        self.next_index  = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        if self.force_end_index is not None:
+            self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is not None:
+            self.objs = self.objs[self.force_start_index:]
+            self.next_index = self.force_start_index
+        else:
+            self.force_start_index = 0
+
+        return True
+
+
+    def done(self):
+        return (self.next_index-self.force_start_index >= len(self.objs))
+
+
+    def next(self):
+        obj = self.objs[self.next_index-self.force_start_index]
+
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['client_id'].isConnected():
+            self.outputs['client_id'].write({'value': numpy.uint64(obj.client_id)}, self.next_index)
+
+        if self.outputs['image'].isConnected():
+            data = {
+                'value': bob.io.base.load(obj.make_path(self.root_folder, '.pgm'))
+            }
+            self.outputs['image'].write(data, self.next_index)
+
+        self.next_index += 1
+
+        return True
+
+
+class Templates:
+    # Reasoning: Each client may have a number of models in certain databases.
+    # So, each model receives a unique identifier. Those identifiers are linked
+    # to the client identifier and reference the set of images used to generate
+    # the model.
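+    #
+    # For example (illustrative numbers): with 5 enrollment images per
+    # template, 'image' and 'file_id' are written at indices k..k+4, while
+    # 'template_id' and 'client_id' are each written once for the whole
+    # block (the write call receives the last index of the block, k+4).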
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder            = root_folder
+        self.outputs                = outputs
+
+        self.db                     = bob.db.atnt.Database()
+        self.template_ids           = sorted(self.db.model_ids(groups='dev'))
+        self.objs                   = None
+
+        self.current_template_index = 0
+        self.current_obj_index      = 0
+        self.next_index             = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        #if self.force_end_index is not None:
+        #    self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is None:
+            self.force_start_index = 0
+        while self.next_index < self.force_start_index:
+            self.next()
+
+        return True
+
+
+    def done(self):
+        return (self.current_template_index >= len(self.template_ids)) or (self.force_end_index is not None and self.force_end_index < self.next_index)
+
+
+    def next(self):
+        if self.objs is None:
+            # query the database for the enrollment objects of the current template
+            self.objs = sorted(self.db.objects(model_ids=[self.template_ids[self.current_template_index]],
+                                               groups='dev', purposes='enroll'), key=lambda x: x.id)
+
+            if self.force_start_index <= self.next_index and (self.force_end_index is None or self.force_end_index >= self.next_index):
+                # For this database, 'self.template_ids[self.current_template_index]' corresponds to 'obj.client_id'
+                if self.outputs['template_id'].isConnected():
+                    self.outputs['template_id'].write({'value': numpy.uint64(self.template_ids[self.current_template_index])}, self.next_index+len(self.objs)-1)
+
+                if self.outputs['client_id'].isConnected():
+                    self.outputs['client_id'].write({'value': numpy.uint64(self.objs[0].client_id)}, self.next_index+len(self.objs)-1)
+
+        obj = self.objs[self.current_obj_index]
+
+        if self.outputs['file_id'].isConnected() or self.outputs['image'].isConnected():
+            if self.force_start_index <= self.next_index and (self.force_end_index is None or self.force_end_index >= self.next_index):
+                if self.outputs['file_id'].isConnected():
+                    self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+                if self.outputs['image'].isConnected():
+                    data = {
+                        'value': bob.io.base.load(obj.make_path(self.root_folder, '.pgm'))
+                    }
+                    self.outputs['image'].write(data, self.next_index)
+
+            self.next_index += 1
+            self.current_obj_index += 1
+
+        else:
+            self.next_index += len(self.objs)
+            self.current_obj_index = len(self.objs)
+
+        if self.current_obj_index == len(self.objs):
+            self.objs = None
+            self.current_obj_index = 0
+            self.current_template_index += 1
+
+        return True
+
+
+class Probes:
+    # Reasoning: Each client may have a number of probes. Each probe may be
+    # composed of any number of images. So, each probe receives a unique
+    # identifier. Those identifiers are linked to the client identifier and
+    # reference the images used to generate the match/comparison score. Each
+    # probe must be matched against a number of models defined by a list of
+    # client identifiers.
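+    #
+    # In this database every probe is compared against all 'dev' models, so
+    # 'template_ids' receives the full list of model identifiers each time.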
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder = root_folder
+        self.outputs     = outputs
+
+        self.db          = bob.db.atnt.Database()
+        self.objs        = sorted(self.db.objects(groups='dev', purposes='probe'), key=lambda x: x.id)
+        self.next_index  = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        if self.force_end_index is not None:
+            self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is not None:
+            self.objs = self.objs[self.force_start_index:]
+            self.next_index = self.force_start_index
+        else:
+            self.force_start_index = 0
+
+        return True
+
+
+    def done(self):
+        return (self.next_index-self.force_start_index >= len(self.objs))
+
+
+    def next(self):
+
+        obj = self.objs[self.next_index-self.force_start_index]
+
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['probe_id'].isConnected():
+            self.outputs['probe_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['client_id'].isConnected():
+            self.outputs['client_id'].write({'value': numpy.uint64(obj.client_id)}, self.next_index)
+
+        if self.outputs['template_ids'].isConnected():
+            data = {
+                'value': numpy.array(self.db.model_ids(groups='dev'), dtype='uint64')
+            }
+            self.outputs['template_ids'].write(data, self.next_index)
+
+        if self.outputs['image'].isConnected():
+            data = {
+                'value': bob.io.base.load(obj.make_path(self.root_folder, '.pgm'))
+            }
+            self.outputs['image'].write(data, self.next_index)
+
+        self.next_index += 1
+
+        return True
+
+
+class TrainEyePositions:
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder = root_folder
+        self.outputs     = outputs
+
+        db               = bob.db.atnt.Database()
+        self.objs        = sorted(db.objects(groups='world', purposes=None), key=lambda x: x.id)
+        self.next_index  = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        if self.force_end_index is not None:
+            self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is not None:
+            self.objs = self.objs[self.force_start_index:]
+            self.next_index = self.force_start_index
+        else:
+            self.force_start_index = 0
+
+        return True
+
+
+    def done(self):
+        return (self.next_index-self.force_start_index >= len(self.objs))
+
+
+    def next(self):
+
+        obj = self.objs[self.next_index-self.force_start_index]
+
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['client_id'].isConnected():
+            self.outputs['client_id'].write({'value': numpy.uint64(obj.client_id)}, self.next_index)
+
+        if self.outputs['image'].isConnected():
+            data = {
+                'value': bob.ip.color.gray_to_rgb(bob.io.base.load(obj.make_path(self.root_folder, '.pgm')))
+            }
+
+            self.outputs['image'].write(data, self.next_index)
+
+        if self.outputs['eye_centers'].isConnected():
+            self.outputs['eye_centers'].write({
+                    'left': {
+                        'y': numpy.int32(48),
+                        'x': numpy.int32(63),
+                    },
+                    'right': {
+                        'y': numpy.int32(48),
+                        'x': numpy.int32(27),
+                    }
+                }, self.next_index)
+
+        self.next_index += 1
+
+        return True
+
+
+class TemplatesEyePositions:
+    # Reasoning: Each client may have a number of models in certain databases.
+    # So, each model receives a unique identifier. Those identifiers are linked
+    # to the client identifier and reference the set of images used to generate
+    # the model.
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder            = root_folder
+        self.outputs                = outputs
+
+        self.db                     = bob.db.atnt.Database()
+        self.template_ids           = sorted(self.db.model_ids(groups='dev'))
+        self.objs                   = None
+
+        self.current_template_index = 0
+        self.current_obj_index      = 0
+        self.next_index             = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        #if self.force_end_index is not None:
+        #    self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is None:
+            self.force_start_index = 0
+        while self.next_index < self.force_start_index:
+            self.next()
+
+        return True
+
+
+    def done(self):
+        return (self.current_template_index >= len(self.template_ids)) or (self.force_end_index is not None and self.force_end_index < self.next_index)
+
+
+    def next(self):
+        if self.objs is None:
+            # query the database for the enrollment objects of the current template
+            self.objs = sorted(self.db.objects(model_ids=[self.template_ids[self.current_template_index]],
+                                               groups='dev', purposes='enroll'), key=lambda x: x.id)
+
+            if self.force_start_index <= self.next_index and (self.force_end_index is None or self.force_end_index >= self.next_index):
+                # For this database, 'self.template_ids[self.current_template_index]' corresponds to 'obj.client_id'
+                if self.outputs['template_id'].isConnected():
+                    self.outputs['template_id'].write({'value': numpy.uint64(self.template_ids[self.current_template_index])}, self.next_index+len(self.objs)-1)
+
+                if self.outputs['client_id'].isConnected():
+                    self.outputs['client_id'].write({'value': numpy.uint64(self.objs[0].client_id)}, self.next_index+len(self.objs)-1)
+
+        obj = self.objs[self.current_obj_index]
+
+        if self.outputs['file_id'].isConnected() or self.outputs['image'].isConnected() or self.outputs['eye_centers'].isConnected():
+            if self.force_start_index <= self.next_index and (self.force_end_index is None or self.force_end_index >= self.next_index):
+                if self.outputs['file_id'].isConnected():
+                    self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+                if self.outputs['image'].isConnected():
+                    data = {
+                        'value': bob.ip.color.gray_to_rgb(bob.io.base.load(obj.make_path(self.root_folder, '.pgm')))
+                    }
+                    self.outputs['image'].write(data, self.next_index)
+
+                if self.outputs['eye_centers'].isConnected():
+                    self.outputs['eye_centers'].write({
+                          'left': {
+                              'y': numpy.int32(48),
+                              'x': numpy.int32(63),
+                          },
+                          'right': {
+                              'y': numpy.int32(48),
+                              'x': numpy.int32(27),
+                          }
+                      }, self.next_index)
+
+            self.next_index += 1
+            self.current_obj_index += 1
+        else:
+            self.next_index += len(self.objs)
+            self.current_obj_index = len(self.objs)
+
+        if self.current_obj_index == len(self.objs):
+            self.objs = None
+            self.current_obj_index = 0
+            self.current_template_index += 1
+
+        return True
+
+
+class ProbesEyePositions:
+    # Reasoning: Each client may have a number of probes. Each probe may be
+    # composed of any number of images. So, each probe receives a unique
+    # identifier. Those identifiers are linked to the client identifier and
+    # reference the images used to generate the match/comparison score. Each
+    # probe must be matched against a number of models defined by a list of
+    # client identifiers.
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None, force_end_index=None):
+        self.root_folder = root_folder
+        self.outputs     = outputs
+
+        self.db          = bob.db.atnt.Database()
+        self.objs        = sorted(self.db.objects(groups='dev', purposes='probe'), key=lambda x: x.id)
+        self.next_index  = 0
+
+        self.force_start_index = force_start_index
+        self.force_end_index   = force_end_index
+
+        # Retrieve only 'useful' data
+        ### End index
+        if self.force_end_index is not None:
+            self.objs = self.objs[:self.force_end_index+1]
+        ### Start index
+        if self.force_start_index is not None:
+            self.objs = self.objs[self.force_start_index:]
+            self.next_index = self.force_start_index
+        else:
+            self.force_start_index = 0
+
+        return True
+
+
+    def done(self):
+        return (self.next_index-self.force_start_index >= len(self.objs))
+
+
+    def next(self):
+
+        obj = self.objs[self.next_index-self.force_start_index]
+
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['probe_id'].isConnected():
+            self.outputs['probe_id'].write({'value': numpy.uint64(obj.id)}, self.next_index)
+
+        if self.outputs['client_id'].isConnected():
+            self.outputs['client_id'].write({'value': numpy.uint64(obj.client_id)}, self.next_index)
+
+        if self.outputs['template_ids'].isConnected():
+            data = {
+                'value': numpy.array(self.db.model_ids(groups='dev'), dtype='uint64')
+            }
+            self.outputs['template_ids'].write(data, self.next_index)
+
+        if self.outputs['image'].isConnected():
+            data = {
+                'value': bob.ip.color.gray_to_rgb(bob.io.base.load(obj.make_path(self.root_folder, '.pgm')))
+            }
+            self.outputs['image'].write(data, self.next_index)
+
+        if self.outputs['eye_centers'].isConnected():
+            self.outputs['eye_centers'].write({
+                    'left': {
+                        'y': numpy.int32(48),
+                        'x': numpy.int32(63),
+                    },
+                    'right': {
+                        'y': numpy.int32(48),
+                        'x': numpy.int32(27),
+                    }
+                }, self.next_index)
+
+        self.next_index += 1
+
+        return True
\ No newline at end of file
diff --git a/prefix/databases/atnt/4.rst b/prefix/databases/atnt/4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a960ce119f94bf997aacf90453b162b4ac721f8
--- /dev/null
+++ b/prefix/databases/atnt/4.rst
@@ -0,0 +1 @@
+The AT&T Database of Faces
\ No newline at end of file
diff --git a/prefix/databases/atnt/6.json b/prefix/databases/atnt/6.json
new file mode 100644
index 0000000000000000000000000000000000000000..fcfbbc6b9f99ee740a2680c8ea48a7e841639a76
--- /dev/null
+++ b/prefix/databases/atnt/6.json
@@ -0,0 +1,25 @@
+{
+    "description": "The AT&T Database of Faces",
+    "protocols": [
+        {
+            "name": "idiap",
+            "template": "idiap/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {}
+                },
+                "templates": {
+                    "view": "Templates",
+                    "parameters": {}
+                },
+                "probes": {
+                    "view": "Probes",
+                    "parameters": {}
+                }
+            }
+        }
+    ],
+    "root_folder": "/home/jae/code/atnt_db",
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/prefix/databases/atnt/6.py b/prefix/databases/atnt/6.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab8564df0784526f4fb6c573f3cb9da0be6cc706
--- /dev/null
+++ b/prefix/databases/atnt/6.py
@@ -0,0 +1,241 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software Foundation and   #
+# appearing in the file LICENSE.AGPL included in the packaging of this file.  #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+from collections import namedtuple
+
+from beat.backend.python.database import View
+
+import bob.io.base
+import bob.db.atnt
+
+
+#----------------------------------------------------------
+
+
+class Train(View):
+    """Outputs:
+        - image: "system/array_2d_uint8/1"
+        - file_id: "system/uint64/1"
+        - client_id: "system/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
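+    # Note on the View API: index() runs once, up front, to build the complete
+    # list of entries; the platform later exposes that list to get() as
+    # self.objs and calls get() per output name and per item index.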
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+        objs = sorted(db.objects(groups='world', purposes=None),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(x.client_id, x.id, x.make_path(root_folder, '.pgm')) for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Templates(View):
+    """Outputs:
+        - image: "system/array_2d_uint8/1"
+        - file_id: "system/uint64/1"
+        - template_id: "system/uint64/1"
+        - client_id: "system/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                         client_id                                           |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['client_id', 'template_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        template_ids = db.model_ids(groups='dev')
+
+        entries = []
+
+        for template_id in template_ids:
+            objs = db.objects(groups='dev', purposes='enroll',
+                              model_ids=[template_id])
+
+            entries.extend([ Entry(x.client_id, template_id, x.id, x.make_path(root_folder, '.pgm'))
+                             for x in objs ])
+
+        return sorted(entries, key=lambda x: (x.client_id, x.template_id, x.file_id))
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'template_id':
+            return {
+                'value': np.uint64(obj.template_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
+
+
+#----------------------------------------------------------
+
+
+class Probes(View):
+    """Outputs:
+        - image: "system/array_2d_uint8/1"
+        - file_id: "system/uint64/1"
+        - client_id: "system/uint64/1"
+        - probe_id: "system/uint64/1"
+        - template_ids: "system/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['template_ids', 'client_id', 'probe_id', 'file_id', 'image'])
+
+        # Open the database and load the objects to provide via the outputs
+        db = bob.db.atnt.Database()
+
+        template_ids = np.array(sorted(db.model_ids(groups='dev'),
+                                       key=lambda x: int(x)),
+                                dtype='uint64')
+
+        objs = sorted(db.objects(groups='dev', purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
+        return [ Entry(template_ids, x.client_id, x.id, x.id, x.make_path(root_folder, '.pgm'))
+                 for x in objs ]
+
+
+    def get(self, output, index):
+        obj = self.objs[index]
+
+        if output == 'template_ids':
+            return {
+                'value': np.array([ np.uint64(tid) for tid in obj.template_ids ])
+            }
+
+        elif output == 'client_id':
+            return {
+                'value': np.uint64(obj.client_id)
+            }
+
+        elif output == 'probe_id':
+            return {
+                'value': np.uint64(obj.probe_id)
+            }
+
+        elif output == 'file_id':
+            return {
+                'value': np.uint64(obj.file_id)
+            }
+
+        elif output == 'image':
+            return {
+                'value': bob.io.base.load(obj.image)
+            }
diff --git a/prefix/databases/atnt/6.rst b/prefix/databases/atnt/6.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6a960ce119f94bf997aacf90453b162b4ac721f8
--- /dev/null
+++ b/prefix/databases/atnt/6.rst
@@ -0,0 +1 @@
+The AT&T Database of Faces
\ No newline at end of file
diff --git a/prefix/databases/iris/2.json b/prefix/databases/iris/2.json
new file mode 100644
index 0000000000000000000000000000000000000000..166bc8050567607b4c7c947d6a09e0bdfebc4fc9
--- /dev/null
+++ b/prefix/databases/iris/2.json
@@ -0,0 +1,21 @@
+{
+    "description": "",
+    "protocols": [
+        {
+            "name": "Main",
+            "template": "Main/1",
+            "views": {
+                "training": {
+                    "view": "Training",
+                    "parameters": {}
+                },
+                "testing": {
+                    "view": "Testing",
+                    "parameters": {}
+                }
+            }
+        }
+    ],
+    "root_folder": "/not/needed",
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/prefix/databases/iris/2.py b/prefix/databases/iris/2.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e3746c6a692b9168313455bd7636d84c50319c5
--- /dev/null
+++ b/prefix/databases/iris/2.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# You may import any Python packages that will be available in the environment
+# this database runs in; environments can change with the experiment's settings
+from collections import namedtuple
+
+import numpy as np
+from beat.backend.python.database import View
+import bob.db.iris
+
+class Training(View):
+    """Outputs:
+        - measurements: 1D float array (four values)
+        - species: text label
+    """
+    # build the data for your view
+    # split the raw data into (homogeneous) bits and return an iterable of
+    # keyed entries (e.g. namedtuples, whose fields act as the keys)
+    # the field names must match the output names of the sets that use this view
+    #    root_folder: the path to the root folder of the database's files (not always applicable)
+    #    parameters: parameters passed to the view, defined in the metadata
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['measurements', 'species'])
+        db = bob.db.iris.data()
+        objs = list()
+        for species in db.keys():
+            for measurement in db[species]:
+                objs.append(Entry(measurement, species))
+
+        return objs
+
+
+    # returns a value at a specific index in the iterable for this view
+    #   output: the specific output value requested
+    #   index: the current index of the iterable
+    def get(self, output, index):
+        # to get the current object referenced by the given index:
+        #       obj = self.objs[index]
+        # note that this object is a named tuple, with fields equivalent to your keys from
+        # the objects returned from the index function
+        obj = self.objs[index]
+
+        if output == 'measurements':
+            return {
+                'value': obj.measurements
+            }
+        else:
+            return {
+                'text': obj.species
+            }
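+
+    # Example usage (conceptual): get('measurements', 0) returns the first
+    # measurement vector and get('species', 0) its label, per index() above.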
+
+
+class Testing(View):
+    """Outputs:
+        - measurements: 1D float array (four values)
+        - species: text label
+    """
+    # build the data for your view
+    # split the raw data into (homogeneous) bits and return an iterable of
+    # keyed entries (e.g. namedtuples, whose fields act as the keys)
+    # the field names must match the output names of the sets that use this view
+    #    root_folder: the path to the root folder of the database's files (not always applicable)
+    #    parameters: parameters passed to the view, defined in the metadata
+    def index(self, root_folder, parameters):
+        Entry = namedtuple('Entry', ['measurements', 'species'])
+        db = bob.db.iris.data()
+        objs = list()
+        for species in db.keys():
+            for measurement in db[species]:
+                objs.append(Entry(measurement, species))
+
+        return objs
+
+
+    # returns a value at a specific index in the iterable for this view
+    #   output: the specific output value requested
+    #   index: the current index of the iterable
+    def get(self, output, index):
+        # to get the current object referenced by the given index:
+        #       obj = self.objs[index]
+        # note that this object is a named tuple, with fields equivalent to your keys from
+        # the objects returned from the index function
+        obj = self.objs[index]
+
+        if output == 'measurements':
+            return {
+                'value': obj.measurements
+            }
+        else:
+            return {
+                'text': obj.species
+            }
diff --git a/prefix/databases/mnist/4.json b/prefix/databases/mnist/4.json
new file mode 100644
index 0000000000000000000000000000000000000000..29160c9d81a2f096d5ca852db2df6a6c418d64b6
--- /dev/null
+++ b/prefix/databases/mnist/4.json
@@ -0,0 +1,25 @@
+{
+    "description": "The MNIST Database of Handwritten Digits",
+    "protocols": [
+        {
+            "name": "idiap",
+            "template": "idiap/1",
+            "views": {
+                "train": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "train"
+                    }
+                },
+                "test": {
+                    "view": "View",
+                    "parameters": {
+                        "group": "test"
+                    }
+                }
+            }
+        }
+    ],
+    "root_folder": "/remote/dataset/mnist",
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/prefix/databases/mnist/4.py b/prefix/databases/mnist/4.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac09898a1aa052d04216bdf6fd8025252e2ef358
--- /dev/null
+++ b/prefix/databases/mnist/4.py
@@ -0,0 +1,180 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software Foundation and   #
+# appearing in the file LICENSE.AGPL included in the packaging of this file.  #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import numpy as np
+import bob.db.mnist
+
+
+#----------------------------------------------------------
+
+
+def get_label_end_index(objs, label, label_start_index,
+                        start_index, end_index):
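+    # Scan forward from label_start_index and return the index of the last
+    # consecutive entry carrying the same label (capped at end_index), so a
+    # single 'class_id' block can span the whole run of one label.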
+    label_end_index = label_start_index
+
+    while label_end_index + 1 <= end_index:
+        label_ = objs[label_end_index + 1 - start_index][1]
+
+        if label_ != label:
+            return label_end_index
+
+        label_end_index += 1
+
+    return end_index
+
+
+#----------------------------------------------------------
+
+
+class View:
+    """Outputs:
+        - image: "{{ system_user.username }}/array_2d_uint8/1"
+        - id: "{{ system_user.username }}/uint64/1"
+        - class_id: "{{ system_user.username }}/uint64/1"
+
+    One "id" is associated with a given "image".
+    Several "image" are associated with a given "class_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |     id      | |     id      | |     id      | |     id      | |     id      | |     id      |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   class_id                  | |                   class_id                  |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs     = outputs
+        self.parameters  = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        self.db = bob.db.mnist.Database(data_dir=self.root_folder)
+
+        features, labels = self.db.data(groups=parameters['group'])
+
+        self.objs = sorted([ (features[i], labels[i]) for i in range(len(features)) ],
+                           key=lambda x: x[1])
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        features, label = self.objs[self.next_index - self.start_index]
+
+        # Output: class_id (only provide data when the class_id changes)
+        if self.outputs['class_id'].isConnected() and \
+           self.outputs['class_id'].last_written_data_index < self.next_index:
+
+            label_end_index = get_label_end_index(self.objs, label,
+                                                  self.next_index,
+                                                  self.start_index,
+                                                  self.end_index)
+
+            self.outputs['class_id'].write(
+                {
+                    'value': np.uint64(label)
+                },
+                label_end_index
+            )
+
+        # Output: id (provide data at each iteration)
+        if self.outputs['id'].isConnected():
+            self.outputs['id'].write(
+                {
+                    'value': np.uint64(self.next_index)
+                },
+                self.next_index
+            )
+
+        # Output: image (provide data at each iteration)
+        if self.outputs['image'].isConnected():
+            self.outputs['image'].write(
+                {
+                    'value': features.reshape((28, 28))
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
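+        # (block-written outputs such as 'class_id' may already be ahead, so
+        # resume one past the slowest connected output)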
+        self.next_index = 1 + min([ x.last_written_data_index
+                                    for x in self.outputs
+                                    if x.isConnected() ])
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    pass
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    # Note: This database can't be tested without the actual data, since
+    # the actual files are needed by this implementation
+
+    from beat.backend.python.database import DatabaseTester
+
+    DatabaseTester('View', View,
+        [
+            'class_id',
+            'id',
+            'image',
+        ],
+        parameters=dict(
+            group='train',
+        ),
+    )
diff --git a/prefix/databases/mnist/4.rst b/prefix/databases/mnist/4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2584c54faa06a406f71eb0586e54df25a8f8e659
--- /dev/null
+++ b/prefix/databases/mnist/4.rst
@@ -0,0 +1 @@
+The MNIST Database of Handwritten Digits
\ No newline at end of file
diff --git a/prefix/databases/mobio/4.json b/prefix/databases/mobio/4.json
new file mode 100644
index 0000000000000000000000000000000000000000..c0271d60a39b58c422c51239a3c7c93f2ca3ee93
--- /dev/null
+++ b/prefix/databases/mobio/4.json
@@ -0,0 +1,97 @@
+{
+    "description": "The MOBIO Database of Faces",
+    "protocols": [
+        {
+            "name": "male",
+            "template": "male/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "protocol": "male"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "dev",
+                        "protocol": "male"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "dev",
+                        "protocol": "male"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "eval",
+                        "protocol": "male"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "eval",
+                        "protocol": "male"
+                    }
+                }
+            }
+        },
+        {
+            "name": "female",
+            "template": "female/1",
+            "views": {
+                "train": {
+                    "view": "Train",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "protocol": "female"
+                    }
+                },
+                "dev_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "dev",
+                        "protocol": "female"
+                    }
+                },
+                "dev_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "dev",
+                        "protocol": "female"
+                    }
+                },
+                "test_templates": {
+                    "view": "Templates",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "eval",
+                        "protocol": "female"
+                    }
+                },
+                "test_probes": {
+                    "view": "Probes",
+                    "parameters": {
+                        "annotations": "../IMAGE_ANNOTATIONS",
+                        "group": "eval",
+                        "protocol": "female"
+                    }
+                }
+            }
+        }
+    ],
+    "root_folder": "/remote/dataset/mobio/IMAGES_PNG",
+    "schema_version": 2
+}
\ No newline at end of file
diff --git a/prefix/databases/mobio/4.py b/prefix/databases/mobio/4.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0db0bed9f1ff7b52fe5e807f8d77794482f30e2
--- /dev/null
+++ b/prefix/databases/mobio/4.py
@@ -0,0 +1,611 @@
+###############################################################################
+#                                                                             #
+# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.examples module of the BEAT platform.         #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software Foundation and   #
+# appearing in the file LICENSE.AGPL included in the packaging of this file.  #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import numpy as np
+import bob.io.base
+import bob.io.image
+import bob.db.mobio
+
+
+#----------------------------------------------------------
+
+
+def get_client_end_index(objs, client_id, client_start_index,
+                         start_index, end_index):
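+    # Walk forward from client_start_index and return the index of the last
+    # consecutive object still belonging to client_id (capped at end_index).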
+    client_end_index = client_start_index
+
+    while client_end_index + 1 <= end_index:
+        obj = objs[client_end_index + 1 - start_index]
+
+        if isinstance(obj, tuple):
+            obj = obj[1]
+
+        if obj.client_id != client_id:
+            return client_end_index
+
+        client_end_index += 1
+
+    return end_index
+
+
+#----------------------------------------------------------
+
+
+def get_value_end_index(objs, value, index_in_tuple, value_start_index,
+                        start_index, end_index):
+    value_end_index = value_start_index
+
+    while value_end_index + 1 <= end_index:
+        current = objs[value_end_index + 1 - start_index][index_in_tuple]
+
+        if current != value:
+            return value_end_index
+
+        value_end_index += 1
+
+    return end_index
+
+
+#----------------------------------------------------------
+
+
+class Train:
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    """
+
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs     = outputs
+        self.parameters  = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        self.db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        self.objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                           groups='world',
+                                           purposes='train'),
+                           key=lambda x: (x.client_id, x.id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        obj = self.objs[self.next_index - self.start_index]
+
+        # Output: client_id (only provide data when the client_id changes)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'value': np.uint64(obj.client_id)
+                },
+                client_end_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'value': np.uint64(obj.id)
+                },
+                self.next_index
+            )
+
+        # Output: image (provide data at each iteration)
+        if self.outputs['image'].isConnected():
+            self.outputs['image'].write(
+                {
+                    'value': bob.io.base.load(obj.make_path(self.root_folder, '.png'))
+                },
+                self.next_index
+            )
+
+        # Output: eye_centers (provide data at each iteration)
+        if self.outputs['eye_centers'].isConnected():
+            annotation = self.db.annotations(obj)
+
+            self.outputs['eye_centers'].write({
+                    'left': {
+                        'y': np.int32(annotation['leye'][0]),
+                        'x': np.int32(annotation['leye'][1]),
+                    },
+                    'right': {
+                        'y': np.int32(annotation['reye'][0]),
+                        'x': np.int32(annotation['reye'][1]),
+                    }
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([x.last_written_data_index for x in self.outputs
+                                   if x.isConnected()])
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+class Templates:
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - template_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    Several "image" are associated with a given "template_id".
+    Several "template_id" are associated with a given "client_id".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                  template_id                | |                  template_id                |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                          client_id                                          |
+    -----------------------------------------------------------------------------------------------
+
+    Note: for this particular database, there is only one "template_id"
+    per "client_id".
+    """
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs     = outputs
+        self.parameters  = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        self.db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        template_ids = self.db.model_ids(protocol=parameters['protocol'],
+                                         groups=parameters['group'])
+
+        self.objs = []
+
+        for template_id in template_ids:
+            objs = self.db.objects(protocol=parameters['protocol'],
+                                   groups=parameters['group'],
+                                   purposes='enroll',
+                                   model_ids=[template_id])
+
+            self.objs.extend([ (template_id, obj) for obj in objs ])
+
+        self.objs = sorted(self.objs, key=lambda x: (x[1].client_id, x[0], x[1].id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        (template_id, obj) = self.objs[self.next_index - self.start_index]
+
+        # Output: template_id (only provide data when the template_id changes)
+        if self.outputs['template_id'].isConnected() and \
+           self.outputs['template_id'].last_written_data_index < self.next_index:
+
+            template_end_index = get_value_end_index(self.objs, template_id, 0,
+                                                     self.next_index,
+                                                     self.start_index,
+                                                     self.end_index)
+
+            self.outputs['template_id'].write(
+                {
+                    'value': np.uint64(template_id)
+                },
+                template_end_index
+            )
+
+        # Output: client_id (only provide data when the client_id changes)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'value': np.uint64(obj.client_id)
+                },
+                client_end_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'value': np.uint64(obj.id)
+                },
+                self.next_index
+            )
+
+        # Output: image (provide data at each iteration)
+        if self.outputs['image'].isConnected():
+            self.outputs['image'].write(
+                {
+                    'value': bob.io.base.load(obj.make_path(self.root_folder, '.png'))
+                },
+                self.next_index
+            )
+
+        # Output: eye_centers (provide data at each iteration)
+        if self.outputs['eye_centers'].isConnected():
+            annotation = self.db.annotations(obj)
+
+            self.outputs['eye_centers'].write({
+                    'left': {
+                        'y': np.int32(annotation['leye'][0]),
+                        'x': np.int32(annotation['leye'][1]),
+                    },
+                    'right': {
+                        'y': np.int32(annotation['reye'][0]),
+                        'x': np.int32(annotation['reye'][1]),
+                    }
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([x.last_written_data_index for x in self.outputs
+                                   if x.isConnected()])
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+class Probes:
+    """Outputs:
+        - image: "{{ system_user.username }}/array_3d_uint8/1"
+        - file_id: "{{ system_user.username }}/uint64/1"
+        - eye_centers: "{{ system_user.username }}/eye_positions/1"
+        - probe_id: "{{ system_user.username }}/uint64/1"
+        - client_id: "{{ system_user.username }}/uint64/1"
+        - template_ids: "{{ system_user.username }}/array_1d_uint64/1"
+
+    One "file_id" is associated with a given "image".
+    One "eye_centers" is associated with a given "image".
+    One "probe_id" is associated with a given "image".
+    Several "image" are associated with a given "client_id".
+    Several "client_id" are associated with a given "template_ids".
+
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |    image    | |    image    | |    image    | |    image    | |    image    | |    image    |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   | |   file_id   |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers | | eye_centers |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  | |   probe_id  |
+    --------------- --------------- --------------- --------------- --------------- ---------------
+    ----------------------------------------------- -----------------------------------------------
+    |                   client_id                 | |                   client_id                 |
+    ----------------------------------------------- -----------------------------------------------
+    -----------------------------------------------------------------------------------------------
+    |                                        template_ids                                         |
+    -----------------------------------------------------------------------------------------------
+    """
+
+    def setup(self, root_folder, outputs, parameters, force_start_index=None,
+              force_end_index=None):
+
+        # Initialisations
+        self.root_folder = root_folder
+        self.outputs     = outputs
+        self.parameters  = parameters
+
+        # Open the database and load the objects to provide via the outputs
+        annotations = parameters['annotations']
+        if not os.path.isabs(annotations):
+            annotations = os.path.join(root_folder, annotations)
+
+        self.db = bob.db.mobio.Database(annotation_directory=annotations)
+
+        template_ids = sorted(self.db.model_ids(protocol=parameters['protocol'],
+                                                groups=parameters['group']))
+
+        template_probes = {}
+        for template_id in template_ids:
+            objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                          groups=self.parameters['group'],
+                                          purposes='probe',
+                                          model_ids=[template_id]),
+                           key=lambda x: (x.client_id, x.id))
+
+            template_probes[template_id] = [ p.id for p in objs ]
+
+        objs = sorted(self.db.objects(protocol=parameters['protocol'],
+                                      groups=self.parameters['group'],
+                                      purposes='probe'),
+                      key=lambda x: (x.client_id, x.id))
+
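+        # For each probe, collect the ids of the templates it must be compared
+        # against; probes are later sorted so that those sharing the same list
+        # of template ids are contiguous, allowing 'template_ids' to be written
+        # once per block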
+        self.objs = []
+        for obj in objs:
+            templates = [ template_id for template_id in template_ids
+                                      if obj.id in template_probes[template_id] ]
+            self.objs.append( (templates, obj) )
+
+        self.objs = sorted(self.objs, key=lambda x: (len(x[0]), x[0], x[1].client_id, x[1].id))
+
+        # Determine the range of indices that must be provided
+        self.start_index = force_start_index if force_start_index is not None else 0
+        self.end_index = force_end_index if force_end_index is not None else len(self.objs) - 1
+
+        self.objs = self.objs[self.start_index : self.end_index + 1]
+
+        self.next_index = self.start_index
+
+        return True
+
+
+    def done(self, last_data_index):
+        return last_data_index >= self.end_index
+
+
+    def next(self):
+        (template_ids, obj) = self.objs[self.next_index - self.start_index]
+
+        # Output: template_ids (only provide data when the template_ids value changes)
+        if self.outputs['template_ids'].isConnected() and \
+           self.outputs['template_ids'].last_written_data_index < self.next_index:
+
+            template_ids_end_index = get_value_end_index(self.objs, template_ids, 0,
+                                                         self.next_index,
+                                                         self.start_index,
+                                                         self.end_index)
+
+            self.outputs['template_ids'].write(
+                {
+                    'value': np.uint64(template_ids)
+                },
+                template_ids_end_index
+            )
+
+        # Output: client_id (only provide data when the client_id changes)
+        if self.outputs['client_id'].isConnected() and \
+           self.outputs['client_id'].last_written_data_index < self.next_index:
+
+            client_end_index = get_client_end_index(self.objs, obj.client_id,
+                                                    self.next_index,
+                                                    self.start_index,
+                                                    self.end_index)
+
+            self.outputs['client_id'].write(
+                {
+                    'value': np.uint64(obj.client_id)
+                },
+                client_end_index
+            )
+
+        # Output: probe_id (provide data at each iteration)
+        if self.outputs['probe_id'].isConnected():
+            self.outputs['probe_id'].write(
+                {
+                    'value': np.uint64(obj.id)
+                },
+                self.next_index
+            )
+
+        # Output: file_id (provide data at each iteration)
+        if self.outputs['file_id'].isConnected():
+            self.outputs['file_id'].write(
+                {
+                    'value': np.uint64(obj.id)
+                },
+                self.next_index
+            )
+
+        # Output: image (provide data at each iteration)
+        if self.outputs['image'].isConnected():
+            self.outputs['image'].write(
+                {
+                    'value': bob.io.base.load(obj.make_path(self.root_folder, '.png'))
+                },
+                self.next_index
+            )
+
+        # Output: eye_centers (provide data at each iteration)
+        if self.outputs['eye_centers'].isConnected():
+            annotation = self.db.annotations(obj)
+
+            self.outputs['eye_centers'].write({
+                    'left': {
+                        'y': np.int32(annotation['leye'][0]),
+                        'x': np.int32(annotation['leye'][1]),
+                    },
+                    'right': {
+                        'y': np.int32(annotation['reye'][0]),
+                        'x': np.int32(annotation['reye'][1]),
+                    }
+                },
+                self.next_index
+            )
+
+        # Determine the next data index that must be provided
+        self.next_index = 1 + min([x.last_written_data_index for x in self.outputs
+                                   if x.isConnected()])
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+def setup_tests():
+    # Install a mock load function for the images
+    def mock_load(filename):
+        return np.ndarray((3, 10, 20), dtype=np.uint8)
+
+    bob.io.base.load = mock_load
+
+
+#----------------------------------------------------------
+
+
+# Test the behavior of the views (on fake data)
+if __name__ == '__main__':
+
+    setup_tests()
+
+    # Note: This database can't be fully tested without the actual data, since
+    # the annotation files are needed by this implementation (only the image
+    # loading is mocked above)
+
+    from beat.backend.python.database import DatabaseTester
+
+    DatabaseTester('Train', Train,
+        [
+            'client_id',
+            'file_id',
+            'eye_centers',
+            'image',
+        ],
+        parameters=dict(
+            protocol='male',
+            annotations='../IMAGE_ANNOTATIONS',
+        ),
+    )
+
+    DatabaseTester('Templates', Templates,
+        [
+            'client_id',
+            'template_id',
+            'file_id',
+            'eye_centers',
+            'image',
+        ],
+        parameters=dict(
+            protocol='male',
+            group='dev',
+            annotations='../IMAGE_ANNOTATIONS',
+        ),
+    )
+
+    DatabaseTester('Probes', Probes,
+        [
+            'template_ids',
+            'client_id',
+            'probe_id',
+            'file_id',
+            'eye_centers',
+            'image',
+        ],
+        parameters=dict(
+            protocol='male',
+            group='dev',
+            annotations='../IMAGE_ANNOTATIONS',
+        ),
+        irregular_outputs=[
+            'template_ids',
+            'client_id',
+        ]
+    )
diff --git a/prefix/databases/mobio/4.rst b/prefix/databases/mobio/4.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6e381f9b44ef6b27728faa31bd0f620e50702e94
--- /dev/null
+++ b/prefix/databases/mobio/4.rst
@@ -0,0 +1 @@
+The MOBIO Database of Faces
\ No newline at end of file
diff --git a/prefix/dataformats/user/integers/1.json b/prefix/dataformats/user/integers/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..904c175a53bc98a593f3c174eca299211be9b83e
--- /dev/null
+++ b/prefix/dataformats/user/integers/1.json
@@ -0,0 +1,6 @@
+{
+    "value8": "int8",
+    "value16": "int16",
+    "value32": "int32",
+    "value64": "int64"
+}
diff --git a/prefix/protocoltemplates/Main/1.json b/prefix/protocoltemplates/Main/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..d8ccb4d056aa05546b3ba6aa5457b29cb845e731
--- /dev/null
+++ b/prefix/protocoltemplates/Main/1.json
@@ -0,0 +1,19 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "training",
+            "outputs": {
+                "measurements": "system/array_1d_floats/1",
+                "species": "system/text/1"
+            }
+        },
+        {
+            "name": "testing",
+            "outputs": {
+                "measurements": "system/array_1d_floats/1",
+                "species": "system/text/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/prefix/protocoltemplates/female/1.json b/prefix/protocoltemplates/female/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..3347f818fa0ffd826e734362da248a8d60742d9c
--- /dev/null
+++ b/prefix/protocoltemplates/female/1.json
@@ -0,0 +1,56 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1"
+            }
+        },
+        {
+            "name": "dev_templates",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "template_id": "system/uint64/1"
+            }
+        },
+        {
+            "name": "dev_probes",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1"
+            }
+        },
+        {
+            "name": "test_templates",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "template_id": "system/uint64/1"
+            }
+        },
+        {
+            "name": "test_probes",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/prefix/protocoltemplates/idiap/1.json b/prefix/protocoltemplates/idiap/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..f641fac229b1eb14c21e40609f99f431c78a4726
--- /dev/null
+++ b/prefix/protocoltemplates/idiap/1.json
@@ -0,0 +1,32 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "system/array_2d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "image": "system/array_2d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_id": "system/uint64/1"
+            },
+            "name": "templates"
+        },
+        {
+            "outputs": {
+                "probe_id": "system/uint64/1",
+                "image": "system/array_2d_uint8/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1"
+            },
+            "name": "probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/prefix/protocoltemplates/idiap_test_eyepos/1.json b/prefix/protocoltemplates/idiap_test_eyepos/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f28214c9f318944f70209c573f1eb8cc4ed48e0
--- /dev/null
+++ b/prefix/protocoltemplates/idiap_test_eyepos/1.json
@@ -0,0 +1,56 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "outputs": {
+                "image": "system/array_3d_uint8/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1"
+            },
+            "name": "train"
+        },
+        {
+            "outputs": {
+                "image": "system/array_3d_uint8/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_id": "system/uint64/1"
+            },
+            "name": "dev_templates"
+        },
+        {
+            "outputs": {
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "template_ids": "system/array_1d_uint64/1"
+            },
+            "name": "dev_probes"
+        },
+        {
+            "outputs": {
+                "image": "system/array_3d_uint8/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "template_id": "system/uint64/1"
+            },
+            "name": "test_templates"
+        },
+        {
+            "outputs": {
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "file_id": "system/uint64/1",
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "template_ids": "system/array_1d_uint64/1"
+            },
+            "name": "test_probes"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/prefix/protocoltemplates/male/1.json b/prefix/protocoltemplates/male/1.json
new file mode 100644
index 0000000000000000000000000000000000000000..3347f818fa0ffd826e734362da248a8d60742d9c
--- /dev/null
+++ b/prefix/protocoltemplates/male/1.json
@@ -0,0 +1,56 @@
+{
+    "schema_version": 1,
+    "sets": [
+        {
+            "name": "train",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1"
+            }
+        },
+        {
+            "name": "dev_templates",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "template_id": "system/uint64/1"
+            }
+        },
+        {
+            "name": "dev_probes",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1"
+            }
+        },
+        {
+            "name": "test_templates",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "template_id": "system/uint64/1"
+            }
+        },
+        {
+            "name": "test_probes",
+            "outputs": {
+                "client_id": "system/uint64/1",
+                "eye_centers": "system/eye_positions/1",
+                "file_id": "system/uint64/1",
+                "image": "system/array_3d_uint8/1",
+                "probe_id": "system/uint64/1",
+                "template_ids": "system/array_1d_uint64/1"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/tutorial.ipynb b/tutorial.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..e9271f58eb7c6e72db8d49099a5417528ed8498d
--- /dev/null
+++ b/tutorial.ipynb
@@ -0,0 +1,1546 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# A Hands-On Tutorial\n",
+    "\n",
+    "This tutorial should help you understand how to create and edit BEAT objects locally using `beat.editor` and `beat.cmdline`.\n",
+    "\n",
+    "## Requirements\n",
+    "\n",
+    "* A **BEAT** installation: see the `README.rst` of this repository.\n",
+    "* BEAT familiarity: This guide assumes you are somewhat familiar with what BEAT is and how it works. Please see https://www.idiap.ch/software/beat/documentation and refer to it if you come across an unfamiliar BEAT term or concept.\n",
+    "* A BEAT **prefix**: All the building blocks of BEAT is stored in a directory typically named *prefix* (explained more below). For the purpose of this tutorial we provide this folder.\n",
+    "* To simplify code examples, we will be using [scikit-learn](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat/user.html#prefix), a machine learning toolbox."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Prefix\n",
+    "\n",
+    "The root of the BEAT object installation is commonly referred as a *prefix*. The\n",
+    "prefix is just a path to a known directory to which the user has write access, and it holds all of BEAT object data in a certain format. This directory is commonly named ``prefix`` but it could be named anything. This is the typical directory structure in a *prefix*:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ls prefix"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each of the subdirectories in the *prefix* keeps only objects of a given type.\n",
+    "For example, the ``dataformats`` subdirectory keeps only data format objects,\n",
+    "and so on. Inside each subdirectory, the user will find an organization that\n",
+    "resembles the naming convention of objects in the BEAT framework. For example,\n",
+    "you'd be able to find the data format ``my_dataformat``, belonging to user\n",
+    "``user``, version ``1``, under the directory\n",
+    "``<prefix>/dataformats/user/my_dataformat/1``. Objects are described by a JSON\n",
+    "file, an optional full-length description in reStructuredText format and,\n",
+    "depending on the object type, a program file containing user routines\n",
+    "programmed in one of the supported languages.\n",
+    "\n",
+    "\n",
+    "All the commands from ``beat.editor`` or ``beat.cmdline`` commands should be run it in the parent folder of prefix. Otherwise the system will not be able to access the BEAT objects. For more information about configuring the prefix please see [Command-line configurations](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat.cmdline/doc/configuration.html).\n",
+    "\n",
+    "Let's configure beat.cmdline to use our tutorial prefix and also set our username.\n",
+    "We will use ``myusername`` as our username for the purpose of this tutorial.\n",
+    "We are also configurinb BEAT locally in this folder, these setting changes are effective only when we are in this folder."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "beat config set --local prefix $PWD/prefix\n",
+    "beat config set --local user myusername\n",
+    "beat config show"
+   ]
+  },
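+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a concrete example of the naming convention described above, this tutorial's prefix ships a ``user/integers`` data format; its JSON declaration lives exactly where the convention predicts:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# data format `integers', belonging to user `user', version 1\n",
+    "!ls prefix/dataformats/user/integers"
+   ]
+  },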
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## A Sanity Check\n",
+    "\n",
+    "Let's make sure your installation is working as intended. Run the following in a terminal (in the parent folder of your prefix!):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat exp list"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This lists all the experiments in your prefix, and should not be empty."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Workflow\n",
+    "BEAT objects consist of two main components.\n",
+    "\n",
+    "* A json file that represents the metadata of the object.\n",
+    "* A piece of code in the supported backend language (python or C++) that defines the behaviour of certain type of objects.\n",
+    "\n",
+    "To use BEAT locally you need to be able to create and edit the mentioned components for BEAT objects, test and debug them, visualize and manage them, and finally run an experiment. These can be done using different tools in BEAT.\n",
+    "\n",
+    "* ``beat.editor`` is a graphical web application that enables you to edit metadata (.json files) and manage the objects.\n",
+    "* The codes can be eddited using the eidtor of your choice.\n",
+    "* ``beat.cmdline`` does \"the rest\", letting you run and visualize experiments, manage the cache, debug, and much more. For more information see [BEAT Command-line Client](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat.cmdline/doc/index.html#beat-cmdline-introduction).\n",
+    "\n",
+    "Let's start the beat.editor:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash --bg \n",
+    "beat editor start"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A new application should open up showing this:\n",
+    "![Beat Editor](img/beat_editor_home.png)\n",
+    "\n",
+    "Keep this window open as we will use it to edit BEAT blocks during the tutorial.\n",
+    "\n",
+    "Now that you have your setup ready, let's go through our experiment.\n",
+    "At first, we will run the experiment **without BEAT** and then we\n",
+    "will see how we can run the same experiment on BEAT."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The Iris LDA Experiment\n",
+    "\n",
+    "This tutorial will be based on the classic Iris LDA experiment. We'll start by explaining the problem, designing a solution, and seeing this solution in terms of BEAT. We'll then run the experiment, analyze it, and debug it. After that we start changing parts of the experiment and creating BEAT objects to represent our changes.\n",
+    "\n",
+    "### The Problem: What are we doing?\n",
+    "\n",
+    "Our task will be to discriminate between the 3 types of flowers in Fisher's Iris dataset using LDA. To keep it simple, we will just be discriminating setosa flower samples versus versicolor & virginica flower samples, giving us a 2-class problem.\n",
+    "\n",
+    "\n",
+    "![iris_flowers](img/iris_flowers.jpg)\n",
+    "\n",
+    "Each sample in Fisher's Iris dataset is 4 measurements from a flower:\n",
+    "\n",
+    "![iris_versicolor_measurements](img/iris_versicolor_measurements.png)\n",
+    "\n",
+    "The dataset therefore looks like the following:\n",
+    "\n",
+    "![iris_db_overview](img/iris_db_overview.png)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The experiment using scikit-learn\n",
+    "Below, you can find the complete experiment (from database to the ROC plot) done using common Python libraries.\n",
+    "We will not explain the steps below, refer to scikit-learn's documentation for more information."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from sklearn.datasets import load_iris\n",
+    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "from sklearn.metrics import roc_curve\n",
+    "from sklearn.pipeline import make_pipeline\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load the dataset\n",
+    "iris = load_iris()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(iris.DESCR)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X, y = iris.data, iris.target\n",
+    "y = np.clip(y, 0, 1) # this will convert our problem to a binary classification problem"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# split the data to train and test splits\n",
+    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0, shuffle=True)\n",
+    "X_train.shape, X_test.shape, y_train.shape, y_test.shape"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# construct the classifier\n",
+    "clf = make_pipeline(StandardScaler(), LDA())\n",
+    "clf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# train on the training data\n",
+    "clf.fit(X_train, y_train);"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# test on the test data\n",
+    "y_score = clf.decision_function(X_test)\n",
+    "y_score"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# plot the ROC curve\n",
+    "fpr, tpr , _ = roc_curve(y_test, y_score)\n",
+    "plt.plot(fpr, tpr)\n",
+    "plt.xlabel(\"FPR\")\n",
+    "plt.ylabel(\"TPR\");"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The experiment using BEAT\n",
+    "Now, we will see how can this experiment be done in BEAT.\n",
+    "\n",
+    "### [Database](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat/databases.html)\n",
+    "The very first thing that we want to is to prepare this dataset for BEAT.\n",
+    "First, let's create an empty database (called ``myiris``) in our prefix:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -pv prefix/databases/myiris/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's create two `.csv` files that contain our training and test sets:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# we will stack X and y collumn wise and save them in a .csv file\n",
+    "options = dict(delimiter=\",\", fmt='%.1f')\n",
+    "np.savetxt(\"prefix/databases/myiris/train.csv\", np.hstack((X_train, y_train[:, None])), **options)\n",
+    "np.savetxt(\"prefix/databases/myiris/test.csv\", np.hstack((X_test, y_test[:, None])), **options)\n",
+    "# look into the saved file\n",
+    "!head prefix/databases/myiris/train.csv"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we need a create a [View](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat.backend.python/doc/api.html#beat.backend.python.database.View) for each set of our database in save it in `prefix/databases/myiris/1.py`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/databases/myiris/1.py\n",
+    "# you need to import all the packages that you are using\n",
+    "# this file will run outside this interactive shell once BEAT runs it\n",
+    "from beat.backend.python.database import View\n",
+    "from collections import namedtuple\n",
+    "import inspect\n",
+    "import numpy as np\n",
+    "import os\n",
+    "\n",
+    "# Each entry of our dataset looks like this:\n",
+    "Entry = namedtuple('Entry', ['features', 'label'])\n",
+    "\n",
+    "def csv_to_entries(filename):\n",
+    "    data = np.loadtxt(filename, delimiter=\",\")\n",
+    "    X, y = data[:, :-1], data[:, -1:]\n",
+    "    # print(X.shape, X.dtype, y.shape, y.dtype)\n",
+    "    # create an entry for each sample\n",
+    "    entries = [Entry(x1, bool(y1)) for x1, y1 in zip(X, y)]\n",
+    "    return entries\n",
+    "\n",
+    "class ViewTemplate(View):\n",
+    "    \"\"\"Outputs:\n",
+    "        - features: 1D float array (four values)\n",
+    "        - label: a boolean\n",
+    "    \"\"\"\n",
+    "    csv_file = None\n",
+    "\n",
+    "    # build the data for your view\n",
+    "    # split the raw data into (homogenous) bits and return a keyed iterable\n",
+    "    # (something with `.keys()` available to it, like a dict)\n",
+    "    # the key names must be the same as the output names for sets that use this view\n",
+    "    #    root_folder: the path to the root folder of the database's files (not always applicable)\n",
+    "    #    parameters: parameters passed to the view, defined in the metadata\n",
+    "    def index(self, root_folder, parameters):\n",
+    "\n",
+    "        # load the .csv file and return the entries\n",
+    "        return csv_to_entries(self.csv_file)\n",
+    "\n",
+    "    # returns a value at a specific index in the iterable for this view\n",
+    "    #   output: the specific output value requested\n",
+    "    #   index: the current index of the iterable\n",
+    "    def get(self, output, index):\n",
+    "        # to get the current object referenced by the given index:\n",
+    "        #       obj = self.objs[index]\n",
+    "        # note that this object is a named tuple, with fields equivalent to your keys from\n",
+    "        # the objects returned from the index function\n",
+    "        obj = self.objs[index]\n",
+    "\n",
+    "        # here, you would load the data from disk like images but our database is small and all\n",
+    "        # features are already in the view's index.\n",
+    "        if output == 'features':\n",
+    "            return {\n",
+    "                'value': obj.features\n",
+    "            }\n",
+    "        elif output == \"label\":\n",
+    "            return {\n",
+    "                'value': obj.label\n",
+    "            }\n",
+    "        else:\n",
+    "            raise ValueError(\"Unknown label\")\n",
+    "\n",
+    "# this path will point to this python file\n",
+    "# we will find the .csv files relative to this path\n",
+    "src_file_path = inspect.getfile(lambda: None)\n",
+    "\n",
+    "# Each set inherits from View\n",
+    "class Train(ViewTemplate):\n",
+    "    csv_file = os.path.join(os.path.dirname(src_file_path), \"train.csv\")\n",
+    "\n",
+    "class Test(ViewTemplate):\n",
+    "    csv_file = os.path.join(os.path.dirname(src_file_path), \"test.csv\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run the saved file here so we can interact with it and test it\n",
+    "%run prefix/databases/myiris/1.py\n",
+    "train_view = Train()\n",
+    "# Internally BEAT will call .index method to understand how many samples exists in the database\n",
+    "objs = train_view.index(None, None)\n",
+    "objs[:4]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# when train_view.get is called inside BEAT, it will have an attribute .objs\n",
+    "# this will be a namedtuple of the entries that were returned during .index method.\n",
+    "# Note that .objs attribute will always be a namedtuple irrespective what you returned in .index method.\n",
+    "# Here we assign objs manually to test our interface\n",
+    "train_view.objs = objs\n",
+    "print(train_view.get(\"features\", 0))\n",
+    "print(train_view.get(\"label\", 0))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The reasone that we have to return values in dictionaries, like `{'value': True}`, is that these will be converted to BEAT [dataformats](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat/dataformats.html). In our dataset here we will use two dataformats: `system/array_1d_floats/1` and `system/boolean/1`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "# BEAT can tell you what is available and where things are:\n",
+    "beat dataformats list\n",
+    "beat dataformats path system/boolean/1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# look into one file\n",
+    "!cat prefix/dataformats/system/boolean/1.json"
+   ]
+  },
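+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can inspect the other data format we will use in the same way (this assumes ``system/array_1d_floats/1`` is shipped in the prefix alongside ``system/boolean/1``):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!cat prefix/dataformats/system/array_1d_floats/1.json"
+   ]
+  },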
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's create the json description of our database and save it in `prefix/databases/myiris/1.json`.\n",
+    "The json file is very flexible and verbose but we will only explain a few things below using comments."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "metadata = {\n",
+    "    \"description\": \"The two class iris database\",\n",
+    "    # we don't need to use root_folder here because all the database is included\n",
+    "    # with our interface. For larger databases, you will need to use this option.\n",
+    "    \"root_folder\": \"/nowhere\",\n",
+    "    \"protocols\": [\n",
+    "        {\n",
+    "            # we just have one protocal called iris_two_class.\n",
+    "            \"name\": \"iris_two_class\",\n",
+    "            \"template\": \"iris_two_class\",\n",
+    "            \"sets\": [\n",
+    "                {\n",
+    "                    \"name\": \"train\",\n",
+    "                    \"outputs\": { # here we define the output format of the .get method.\n",
+    "                        \"features\": \"system/array_1d_floats/1\",\n",
+    "                        \"label\": \"system/boolean/1\",\n",
+    "                    },\n",
+    "                    \"parameters\": {},\n",
+    "                    \"template\": \"train\",\n",
+    "                    \"view\": \"Train\", # this maps to our class name in the Python file\n",
+    "                },\n",
+    "                {\n",
+    "                    \"name\": \"test\",\n",
+    "                    \"outputs\": {\n",
+    "                        \"features\": \"system/array_1d_floats/1\",\n",
+    "                        \"label\": \"system/boolean/1\",\n",
+    "                    },\n",
+    "                    \"parameters\": {},\n",
+    "                    \"template\": \"test\",\n",
+    "                    \"view\": \"Test\",\n",
+    "                },\n",
+    "            ],\n",
+    "        }\n",
+    "    ],\n",
+    "}\n",
+    "with open(\"prefix/databases/myiris/1.json\", \"w\") as f:\n",
+    "    json.dump(metadata, f)\n",
+    "# look at what was written\n",
+    "!cat prefix/databases/myiris/1.json"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "# Now let's validate our database interface\n",
+    "beat databases check myiris/1\n",
+    "# index our database (this will be done by BEAT later if we don't do it here)\n",
+    "# This will also test our code inside BEAT\n",
+    "beat databases index myiris/1"
+   ]
+  },
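+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If the checks above passed, the new database should now appear among the databases in the prefix (assuming ``beat databases list`` follows the same pattern as the other listing commands used in this tutorial):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat databases list"
+   ]
+  },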
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### [Toolchain](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat/toolchains.html)\n",
+    "\n",
+    "Designing the toolchain is probably the most difficult part of our experiment.\n",
+    "However, you can look at other people's toolchains for similar problems to get started.\n",
+    "\n",
+    "The main thing that you should know that training a model in BEAT and testing it is usually done\n",
+    "in two isolated algorithms. One algorithm trains the model and outputs it. Another algorithm\n",
+    "accepts the model as input and tests it on data. Our toolchain is going to look like this:\n",
+    "\n",
+    "![toolchain](img/toolchain_scale_and_lda.png)\n",
+    "\n",
+    "but we will not create the toolchain right now. What you should understand from this toolchian is that we have separated our scaler and lda estimators while in the scikit-learn code we used the `make_pipeline` function to create a composite estimator. In BEAT, you want to create algorithms that are modular as much as possible so they can be re-used in many different cases. Let's create the algorithms inside those blocks."
+   ]
+  },
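+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before that, as suggested above, you can browse the toolchains already available in the prefix for inspiration (assuming a ``list`` subcommand exists for toolchains, as it does for the other object types):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat toolchains list"
+   ]
+  },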
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### [Algorithms](https://www.idiap.ch/software/beat/docs/beat/docs/stable/beat/algorithms.html)\n",
+    "\n",
+    "#### Scaler training algorithm\n",
+    "Let's start by creating the `train_scaler` algorithm. We will be using the *Sequential* algorithm API.\n",
+    "Switch to the beat.editor, go to `File->New->Algorithm`. You will be presented with bits to fill in for the JSON declaration of a BEAT algorithm. Edit the declaration so that your raw JSON looks like below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/train_scaler/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Trains and outputs a standard scaler\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\",\n",
+    "            \"outputs\": {\n",
+    "                \"model\": {\n",
+    "                    \"type\": \"system/text/1\"\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"parameters\": {\n",
+    "        \"with_mean\": {\n",
+    "            \"default\": true,\n",
+    "            \"type\": \"bool\"\n",
+    "        },\n",
+    "        \"with_std\": {\n",
+    "            \"default\": true,\n",
+    "            \"type\": \"bool\"\n",
+    "        }\n",
+    "    },\n",
+    "    \"schema_version\": 2,\n",
+    "    \"splittable\": false,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### **Python code**\n",
+    "Now click on edit->code (in the bottom right of the BEAT editor) and edit the Python code so it looks like below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/train_scaler/1.py\n",
+    "import pickle\n",
+    "import base64\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def setup(self, parameters):\n",
+    "        # Retrieve the value of the parameters\n",
+    "        self.estimator = StandardScaler(**parameters)\n",
+    "        return True\n",
+    "\n",
+    "    def __init__(self):\n",
+    "        self.features = []\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, outputs):\n",
+    "        # accumulate features\n",
+    "        self.features.append(inputs[\"features\"].data.value)\n",
+    "\n",
+    "        if not (inputs[\"features\"].hasMoreData()):\n",
+    "            # train the estimator when all data is accumulated\n",
+    "            self.estimator.fit(self.features)\n",
+    "            # pickle and output estimator\n",
+    "            out = pickle.dumps(self.estimator)\n",
+    "            # stringify the output since BEAT does not yet support byte type as output\n",
+    "            out = base64.b64encode(out).decode(\"ascii\")\n",
+    "            outputs[\"model\"].write({\"text\": out})\n",
+    "            self.features = []\n",
+    "\n",
+    "        return True"
+   ]
+  },
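+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before checking the algorithm with BEAT, we can sanity-check the pickle/base64 serialization trick locally. This is a minimal sketch that reuses the ``X_train`` and ``X_test`` arrays from the scikit-learn part above; it is not part of the BEAT algorithm itself:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import base64\n",
+    "import pickle\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "\n",
+    "# fit a scaler, serialize it to text, restore it and compare the outputs\n",
+    "est = StandardScaler().fit(X_train)\n",
+    "text = base64.b64encode(pickle.dumps(est)).decode(\"ascii\")\n",
+    "restored = pickle.loads(base64.b64decode(text))\n",
+    "np.allclose(restored.transform(X_test), est.transform(X_test))"
+   ]
+  },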
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Run the command below to check the algorithm. This will check the validity of the `1.json` file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat algorithm check myusername/train_scaler/1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Scaler transforming algorithm\n",
+    "Now let's create the algorithm that loads this model and applies it on input data.\n",
+    "Unfortunately, because BEAT algorithms are driven using synchronization channels,\n",
+    "the algorithm in the two blocks of `scale_train_data` and `scale_test_data`, cannot\n",
+    "share the same implementation.\n",
+    "These two blocks, though their inputs look similar, their input channels are different.\n",
+    "The `scale_train_data` block, only gets data from the `train` channel while the\n",
+    "`scale_test_data` gets data from both the train and the test channel.\n",
+    "Hence, their implementations will be different. Again, you would use the BEAT editor\n",
+    "to create the algorithms below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/transform_1d_features_train/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Transforms 1D features using a transformer\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                },\n",
+    "                \"model\": {\n",
+    "                    \"type\": \"system/text/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\",\n",
+    "            \"outputs\": {\n",
+    "                \"transformed_features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"schema_version\": 2,\n",
+    "    \"splittable\": false,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/transform_1d_features_train/1.py\n",
+    "import pickle\n",
+    "import base64\n",
+    "\n",
+    "\n",
+    "def load_model(text):\n",
+    "    return pickle.loads(base64.b64decode(text))\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def __init__(self):\n",
+    "        self.transformer = None\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, outputs):\n",
+    "\n",
+    "        if self.transformer is None:\n",
+    "            self.transformer = load_model(inputs[\"model\"].data.text)\n",
+    "            if inputs[\"model\"].hasMoreData():\n",
+    "                raise RuntimeError(\"Something is wrong!\")\n",
+    "\n",
+    "        # N.B.: this will be called for every unit in `features'\n",
+    "        out = self.transformer.transform([inputs[\"features\"].data.value])[0]\n",
+    "\n",
+    "        # Writes the output\n",
+    "        outputs[\"transformed_features\"].write({\"value\": out})\n",
+    "        return True"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat algorithm check myusername/transform_1d_features_train/1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/transform_1d_features_test/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Transforms 1D features using a transformer\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\",\n",
+    "            \"outputs\": {\n",
+    "                \"transformed_features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                }\n",
+    "            }\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"model\": {\n",
+    "                    \"type\": \"system/text/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"model\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"schema_version\": 2,\n",
+    "    \"splittable\": false,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/transform_1d_features_test/1.py\n",
+    "import pickle\n",
+    "import base64\n",
+    "\n",
+    "\n",
+    "def load_model(text):\n",
+    "    return pickle.loads(base64.b64decode(text))\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def prepare(self, data_loaders):\n",
+    "\n",
+    "        # Loads the model at the beginning\n",
+    "        loader = data_loaders.loaderOf(\"model\")\n",
+    "        for i in range(loader.count()):\n",
+    "            view = loader.view(\"model\", i)\n",
+    "            data, _, _ = view[0]\n",
+    "            self.transformer = load_model(data[\"model\"].text)\n",
+    "            if i == 1:\n",
+    "                raise RuntimeError(\"Something is wrong!\")\n",
+    "        return True\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, outputs):\n",
+    "        # N.B.: this will be called for every unit in `features'\n",
+    "        out = self.transformer.transform([inputs[\"features\"].data.value])[0]\n",
+    "\n",
+    "        # Writes the output\n",
+    "        outputs[\"transformed_features\"].write({\"value\": out})\n",
+    "        return True"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat algorithm check myusername/transform_1d_features_test/1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### LDA training algorithm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/train_lda/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Trains and outputs an LDA estimator\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                },\n",
+    "                \"label\": {\n",
+    "                    \"type\": \"system/boolean/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\",\n",
+    "            \"outputs\": {\n",
+    "                \"model\": {\n",
+    "                    \"type\": \"system/text/1\"\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"parameters\": {\n",
+    "        \"n_components\": {\n",
+    "            \"default\": -1,\n",
+    "            \"type\": \"int64\"\n",
+    "        },\n",
+    "        \"shrinkage\": {\n",
+    "            \"default\": -1.0,\n",
+    "            \"type\": \"float64\"\n",
+    "        },\n",
+    "        \"solver\": {\n",
+    "            \"choice\": [\n",
+    "                \"svd\",\n",
+    "                \"lsqr\",\n",
+    "                \"eigen\"\n",
+    "            ],\n",
+    "            \"default\": \"svd\",\n",
+    "            \"type\": \"string\"\n",
+    "        },\n",
+    "        \"store_covariance\": {\n",
+    "            \"default\": false,\n",
+    "            \"type\": \"bool\"\n",
+    "        },\n",
+    "        \"tol\": {\n",
+    "            \"default\": 0.0001,\n",
+    "            \"type\": \"float64\"\n",
+    "        }\n",
+    "    },\n",
+    "    \"schema_version\": 2,\n",
+    "    \"splittable\": false,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/train_lda/1.py\n",
+    "import pickle\n",
+    "import base64\n",
+    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def setup(self, parameters):\n",
+    "\n",
+    "        # Parameters in a BEAT algorithm can only have one type unlike Python parameters\n",
+    "        # Here we translate special integer values to needed Python parameters\n",
+    "        shrinkage = parameters.pop(\"shrinkage\")\n",
+    "        if shrinkage == -1:\n",
+    "            shrinkage = None\n",
+    "        elif shrinkage == 2:\n",
+    "            shrinkage = \"auto\"\n",
+    "\n",
+    "        n_components = parameters.pop(\"n_components\")\n",
+    "        if n_components == -1:\n",
+    "            n_components = None\n",
+    "\n",
+    "        self.estimator = LDA(\n",
+    "            shrinkage=shrinkage, n_components=n_components, **parameters\n",
+    "        )\n",
+    "        return True\n",
+    "\n",
+    "    def __init__(self):\n",
+    "        self.features = []\n",
+    "        self.labels = []\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, outputs):\n",
+    "        # accumulate features and labels\n",
+    "        self.features.append(inputs[\"features\"].data.value)\n",
+    "        self.labels.append(inputs[\"label\"].data.value)\n",
+    "\n",
+    "        if not (inputs[\"features\"].hasMoreData()):\n",
+    "            # train the estimator when all data is accumulated\n",
+    "            self.estimator.fit(self.features, self.labels)\n",
+    "            # pickle and output estimator\n",
+    "            out = pickle.dumps(self.estimator)\n",
+    "            # \"stringify\"\n",
+    "            out = base64.b64encode(out).decode(\"ascii\")\n",
+    "            outputs[\"model\"].write({\"text\": out})\n",
+    "            self.features, self.labels = [], []\n",
+    "\n",
+    "        return True"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat algorithm check myusername/train_lda/1"
+   ]
+  },
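+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Since a BEAT parameter holds a single type, sentinel values stand in for Python's\n",
+    "`None` and the string `'auto'`. A minimal sketch of the mapping used in `setup`\n",
+    "above (`decode_shrinkage` is a hypothetical helper, not part of the BEAT API):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def decode_shrinkage(value):\n",
+    "    # -1.0 means None (no shrinkage), 2.0 means \"auto\"; valid shrinkage\n",
+    "    # values lie in [0, 1], so both sentinels are unambiguous\n",
+    "    if value == -1:\n",
+    "        return None\n",
+    "    if value == 2:\n",
+    "        return \"auto\"\n",
+    "    return value\n",
+    "\n",
+    "\n",
+    "assert decode_shrinkage(-1.0) is None\n",
+    "assert decode_shrinkage(2.0) == \"auto\"\n",
+    "assert decode_shrinkage(0.5) == 0.5"
+   ]
+  },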
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### LDA decision algorithm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/decision_1d_features/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Outputs a decision number for 1D features using an estimator\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"features\": {\n",
+    "                    \"type\": \"system/array_1d_floats/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\",\n",
+    "            \"outputs\": {\n",
+    "                \"score\": {\n",
+    "                    \"type\": \"system/float/1\"\n",
+    "                }\n",
+    "            }\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"model\": {\n",
+    "                    \"type\": \"system/text/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"model\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"schema_version\": 2,\n",
+    "    \"splittable\": false,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/decision_1d_features/1.py\n",
+    "import pickle\n",
+    "import base64\n",
+    "\n",
+    "\n",
+    "def load_model(text):\n",
+    "    return pickle.loads(base64.b64decode(text))\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def prepare(self, data_loaders):\n",
+    "\n",
+    "        # Loads the model at the beginning\n",
+    "        loader = data_loaders.loaderOf(\"model\")\n",
+    "        for i in range(loader.count()):\n",
+    "            view = loader.view(\"model\", i)\n",
+    "            data, _, _ = view[0]\n",
+    "            self.transformer = load_model(data[\"model\"].text)\n",
+    "            if i == 1:\n",
+    "                raise RuntimeError(\"Something is wrong!\")\n",
+    "        return True\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, outputs):\n",
+    "        # N.B.: this will be called for every unit in `features'\n",
+    "        out = self.transformer.decision_function([inputs[\"features\"].data.value])[0]\n",
+    "\n",
+    "        # Writes the output\n",
+    "        outputs[\"score\"].write({\"value\": out})\n",
+    "        return True"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat algorithm check myusername/decision_1d_features/1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Analyzer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/binary_classification_analyzer/1.json\n",
+    "{\n",
+    "    \"api_version\": 2,\n",
+    "    \"description\": \"Analyzes the results of a binary classification problem\",\n",
+    "    \"groups\": [\n",
+    "        {\n",
+    "            \"inputs\": {\n",
+    "                \"label\": {\n",
+    "                    \"type\": \"system/boolean/1\"\n",
+    "                },\n",
+    "                \"score\": {\n",
+    "                    \"type\": \"system/float/1\"\n",
+    "                }\n",
+    "            },\n",
+    "            \"name\": \"main\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"language\": \"python\",\n",
+    "    \"results\": {\n",
+    "        \"roc\": {\n",
+    "            \"display\": true,\n",
+    "            \"type\": \"plot/isoroc/1\"\n",
+    "        }\n",
+    "    },\n",
+    "    \"schema_version\": 2,\n",
+    "    \"type\": \"sequential\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/algorithms/myusername/binary_classification_analyzer/1.py\n",
+    "import numpy as np\n",
+    "from sklearn.metrics import roc_curve\n",
+    "\n",
+    "\n",
+    "class Algorithm:\n",
+    "    def __init__(self):\n",
+    "        self.scores = []\n",
+    "        self.labels = []\n",
+    "\n",
+    "    def process(self, inputs, data_loaders, output):\n",
+    "        # accumulate features and labels\n",
+    "        self.scores.append(inputs[\"score\"].data.value)\n",
+    "        self.labels.append(inputs[\"label\"].data.value)\n",
+    "\n",
+    "        if not (inputs[\"score\"].hasMoreData()):\n",
+    "            y_true = np.asarray(self.labels)\n",
+    "            y_score = np.asarray(self.scores)\n",
+    "            fpr, tpr, _ = roc_curve(y_true, y_score)\n",
+    "            fnr = 1 - tpr\n",
+    "            number_of_positives = y_true.sum()\n",
+    "            number_of_negatives = len(y_true) - number_of_positives\n",
+    "            output.write(\n",
+    "                {\n",
+    "                    \"roc\": {\n",
+    "                        \"data\": [\n",
+    "                            {\n",
+    "                                \"false_negatives\": fnr,\n",
+    "                                \"false_positives\": fpr,\n",
+    "                                \"number_of_negatives\": np.uint64(number_of_positives),\n",
+    "                                \"number_of_positives\": np.uint64(number_of_negatives),\n",
+    "                                \"label\": \"roc\",\n",
+    "                            }\n",
+    "                        ]\n",
+    "                    }\n",
+    "                }\n",
+    "            )\n",
+    "            self.scores, self.labels = [], []\n",
+    "\n",
+    "        return True"
+   ]
+  },
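+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see what the analyzer computes, here is a small local example of\n",
+    "`sklearn.metrics.roc_curve` on toy scores (illustrative only; the labels and\n",
+    "scores are made up):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from sklearn.metrics import roc_curve\n",
+    "\n",
+    "y_true = np.array([0, 0, 1, 1])\n",
+    "y_score = np.array([0.1, 0.4, 0.35, 0.8])\n",
+    "\n",
+    "# false positive and true positive rates at each score threshold\n",
+    "fpr, tpr, thresholds = roc_curve(y_true, y_score)\n",
+    "fnr = 1 - tpr  # miss rate, as written by the analyzer\n",
+    "print(fpr, fnr)"
+   ]
+  },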
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Toolchain\n",
+    "Go ahead and create the toolchain that we showed earlier using BEAT editor. Call the toolchain `scale_and_lda_binary_1d_features`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/toolchains/myusername/scale_and_lda_binary_1d_features/1.json\n",
+    "{\n",
+    "    \"analyzers\": [\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"label\",\n",
+    "                \"score\"\n",
+    "            ],\n",
+    "            \"name\": \"roc_analyzer\",\n",
+    "            \"synchronized_channel\": \"test\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"blocks\": [\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"features\"\n",
+    "            ],\n",
+    "            \"name\": \"train_scaler\",\n",
+    "            \"outputs\": [\n",
+    "                \"model\"\n",
+    "            ],\n",
+    "            \"synchronized_channel\": \"train\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"features\",\n",
+    "                \"label\"\n",
+    "            ],\n",
+    "            \"name\": \"train_lda\",\n",
+    "            \"outputs\": [\n",
+    "                \"model\"\n",
+    "            ],\n",
+    "            \"synchronized_channel\": \"train\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"features\",\n",
+    "                \"model\"\n",
+    "            ],\n",
+    "            \"name\": \"scale_train_data\",\n",
+    "            \"outputs\": [\n",
+    "                \"transformed_features\"\n",
+    "            ],\n",
+    "            \"synchronized_channel\": \"train\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"features\",\n",
+    "                \"model\"\n",
+    "            ],\n",
+    "            \"name\": \"scale_test_data\",\n",
+    "            \"outputs\": [\n",
+    "                \"transformed_features\"\n",
+    "            ],\n",
+    "            \"synchronized_channel\": \"test\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"inputs\": [\n",
+    "                \"features\",\n",
+    "                \"model\"\n",
+    "            ],\n",
+    "            \"name\": \"score_with_lda\",\n",
+    "            \"outputs\": [\n",
+    "                \"score\"\n",
+    "            ],\n",
+    "            \"synchronized_channel\": \"test\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"connections\": [\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train.features\",\n",
+    "            \"to\": \"train_scaler.features\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train.features\",\n",
+    "            \"to\": \"scale_train_data.features\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train_scaler.model\",\n",
+    "            \"to\": \"scale_train_data.model\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train_scaler.model\",\n",
+    "            \"to\": \"scale_test_data.model\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"test\",\n",
+    "            \"from\": \"test.features\",\n",
+    "            \"to\": \"scale_test_data.features\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"scale_train_data.transformed_features\",\n",
+    "            \"to\": \"train_lda.features\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train.label\",\n",
+    "            \"to\": \"train_lda.label\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"test\",\n",
+    "            \"from\": \"test.label\",\n",
+    "            \"to\": \"roc_analyzer.label\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"test\",\n",
+    "            \"from\": \"score_with_lda.score\",\n",
+    "            \"to\": \"roc_analyzer.score\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"test\",\n",
+    "            \"from\": \"scale_test_data.transformed_features\",\n",
+    "            \"to\": \"score_with_lda.features\"\n",
+    "        },\n",
+    "        {\n",
+    "            \"channel\": \"train\",\n",
+    "            \"from\": \"train_lda.model\",\n",
+    "            \"to\": \"score_with_lda.model\"\n",
+    "        }\n",
+    "    ],\n",
+    "    \"datasets\": [\n",
+    "        {\n",
+    "            \"name\": \"test\",\n",
+    "            \"outputs\": [\n",
+    "                \"features\",\n",
+    "                \"label\"\n",
+    "            ]\n",
+    "        },\n",
+    "        {\n",
+    "            \"name\": \"train\",\n",
+    "            \"outputs\": [\n",
+    "                \"features\",\n",
+    "                \"label\"\n",
+    "            ]\n",
+    "        }\n",
+    "    ],\n",
+    "    \"description\": \"scales data and trains an LDA and tests it\",\n",
+    "    \"representation\": {\n",
+    "        \"blocks\": {\n",
+    "            \"roc_analyzer\": {\n",
+    "                \"col\": 306,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -105,\n",
+    "                \"width\": 122\n",
+    "            },\n",
+    "            \"scale_test_data\": {\n",
+    "                \"col\": -236,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -210,\n",
+    "                \"width\": 274\n",
+    "            },\n",
+    "            \"scale_train_data\": {\n",
+    "                \"col\": -243,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -383,\n",
+    "                \"width\": 274\n",
+    "            },\n",
+    "            \"score_with_lda\": {\n",
+    "                \"col\": 93,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -210,\n",
+    "                \"width\": 153\n",
+    "            },\n",
+    "            \"test\": {\n",
+    "                \"col\": -541,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -126,\n",
+    "                \"width\": 110\n",
+    "            },\n",
+    "            \"train\": {\n",
+    "                \"col\": -541,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -274,\n",
+    "                \"width\": 110\n",
+    "            },\n",
+    "            \"train_lda\": {\n",
+    "                \"col\": 80,\n",
+    "                \"height\": 128,\n",
+    "                \"row\": -382,\n",
+    "                \"width\": 157\n",
+    "            },\n",
+    "            \"train_scaler\": {\n",
+    "                \"col\": -441,\n",
+    "                \"height\": 98,\n",
+    "                \"row\": -424,\n",
+    "                \"width\": 157\n",
+    "            }\n",
+    "        },\n",
+    "        \"channel_colors\": {\n",
+    "            \"test\": \"#005500\",\n",
+    "            \"train\": \"#aa0000\"\n",
+    "        },\n",
+    "        \"connections\": {}\n",
+    "    }\n",
+    "}"
+   ]
+  },
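+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As with algorithms, you can validate the toolchain declaration (this assumes the\n",
+    "`beat toolchain check` subcommand mirrors `beat algorithm check`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat toolchain check myusername/scale_and_lda_binary_1d_features/1"
+   ]
+  },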
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Experiment\n",
+    "Create a new experiment using BEAT editor and call it `iris_scale_lda_binary`. Use the toolchain that we just created in the experiment."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%writefile prefix/experiments/myusername/myusername/scale_and_lda_binary_1d_features/1/iris_scale_lda_binary.json\n",
+    "{\n",
+    "    \"analyzers\": {\n",
+    "        \"roc_analyzer\": {\n",
+    "            \"algorithm\": \"myusername/binary_classification_analyzer/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"label\": \"label\",\n",
+    "                \"score\": \"score\"\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "    \"blocks\": {\n",
+    "        \"scale_test_data\": {\n",
+    "            \"algorithm\": \"myusername/transform_1d_features_test/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"features\": \"features\",\n",
+    "                \"model\": \"model\"\n",
+    "            },\n",
+    "            \"outputs\": {\n",
+    "                \"transformed_features\": \"transformed_features\"\n",
+    "            }\n",
+    "        },\n",
+    "        \"scale_train_data\": {\n",
+    "            \"algorithm\": \"myusername/transform_1d_features_train/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"features\": \"features\",\n",
+    "                \"model\": \"model\"\n",
+    "            },\n",
+    "            \"outputs\": {\n",
+    "                \"transformed_features\": \"transformed_features\"\n",
+    "            }\n",
+    "        },\n",
+    "        \"score_with_lda\": {\n",
+    "            \"algorithm\": \"myusername/decision_1d_features/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"features\": \"features\",\n",
+    "                \"model\": \"model\"\n",
+    "            },\n",
+    "            \"outputs\": {\n",
+    "                \"score\": \"score\"\n",
+    "            }\n",
+    "        },\n",
+    "        \"train_lda\": {\n",
+    "            \"algorithm\": \"myusername/train_lda/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"features\": \"features\",\n",
+    "                \"label\": \"label\"\n",
+    "            },\n",
+    "            \"outputs\": {\n",
+    "                \"model\": \"model\"\n",
+    "            }\n",
+    "        },\n",
+    "        \"train_scaler\": {\n",
+    "            \"algorithm\": \"myusername/train_scaler/1\",\n",
+    "            \"inputs\": {\n",
+    "                \"features\": \"features\"\n",
+    "            },\n",
+    "            \"outputs\": {\n",
+    "                \"model\": \"model\"\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "    \"datasets\": {\n",
+    "        \"test\": {\n",
+    "            \"database\": \"myiris/1\",\n",
+    "            \"protocol\": \"iris_two_class\",\n",
+    "            \"set\": \"test\"\n",
+    "        },\n",
+    "        \"train\": {\n",
+    "            \"database\": \"myiris/1\",\n",
+    "            \"protocol\": \"iris_two_class\",\n",
+    "            \"set\": \"train\"\n",
+    "        }\n",
+    "    },\n",
+    "    \"description\": \"An experiment to test LDA on Iris dataset\",\n",
+    "    \"globals\": {\n",
+    "        \"environment\": {\n",
+    "            \"name\": \"dummy\",\n",
+    "            \"version\": \"0.0.0\"\n",
+    "        },\n",
+    "        \"myusername/train_lda/1\": {\n",
+    "            \"n_components\": -1,\n",
+    "            \"shrinkage\": -1.0,\n",
+    "            \"solver\": \"svd\",\n",
+    "            \"store_covariance\": false,\n",
+    "            \"tol\": 0.0001\n",
+    "        },\n",
+    "        \"myusername/train_scaler/1\": {\n",
+    "            \"with_mean\": true,\n",
+    "            \"with_std\": true\n",
+    "        },\n",
+    "        \"queue\": \"queue\"\n",
+    "    },\n",
+    "    \"schema_version\": 1\n",
+    "}"
+   ]
+  },
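+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, validate the experiment declaration before running it (again assuming a\n",
+    "`check` subcommand analogous to the ones used above):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat experiment check myusername/myusername/scale_and_lda_binary_1d_features/1/iris_scale_lda_binary"
+   ]
+  },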
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Execution\n",
+    "Finally, we are ready to run our experiment:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!beat exp run myusername/myusername/scale_and_lda_binary_1d_features/1/iris_scale_lda_binary"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# get the plots of the experiment\n",
+    "!beat exp plot myusername/myusername/scale_and_lda_binary_1d_features/1/iris_scale_lda_binary"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# look into the saved plot\n",
+    "plt.figure(figsize=(20,20))\n",
+    "plt.imshow(plt.imread(\"prefix/myusername_myusername_scale_and_lda_binary_1d_features_1_iris_scale_lda_binary_roc.png\"));"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}