diff --git a/.gitignore b/.gitignore
index 367b4086b41407632965200667743ee0e593f056..9e2e4b7212f6034228cbf6e1a7fe1415067a6478 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,7 +18,8 @@ sphinx/
 .DS_Store
 beat/web/settings/settings.py
 src/
-web_dynamic_data*
+prefix
+test_prefix
 .nfs*
 beat/web/static/
 doc/api/api/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index fca859d154fcec160124aa2c6d25daf96f6e4b8d..41bead64fb43b4e37a5d2bafd862c2623f360554 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -6,15 +6,20 @@ py27-linux:
   - ./bin/buildout
   - ./bin/python --version
   - unset TMPDIR
-  - export NOSE_WITH_COVERAGE=1
-  - export NOSE_COVER_PACKAGE=beat.web
+  - cd src/cpulimit && make && cd -
+  - cd bin && ln -s ../src/cpulimit/src/cpulimit . && cd -
   - export COVERAGE_FILE=.coverage.django
-  - ./bin/django test --settings=beat.web.settings.test -v 2
+  - export BEAT_TEST_PREFIX=/var/tmp/test_prefix
+  - rm -rf $BEAT_TEST_PREFIX
+  - ./bin/coverage run --source='./beat/web' ./bin/django test --settings=beat.web.settings.test -v 2
   - export BEAT_CMDLINE_TEST_PLATFORM=django://beat.web.settings.test
   - export COVERAGE_FILE=.coverage.cmdline
+  - export NOSE_WITH_COVERAGE=1
+  - export NOSE_COVER_PACKAGE=beat.web
   - ./bin/nosetests -sv beat.cmdline
   - unset COVERAGE_FILE
-  - ./bin/coverage combine
+  - unset BEAT_TEST_PREFIX
+  - ./bin/coverage combine .coverage.django .coverage.cmdline
   - ./bin/coverage report
   - ./bin/sphinx-apidoc --separate -d 2 --output=doc/api/api beat beat/web/*/migrations beat/web/*/tests
   - ./bin/sphinx-build doc/api html/api
@@ -30,15 +35,18 @@ py27-macosx:
   - /Users/buildbot/work/environments/beat/py27/bin/python bootstrap-buildout.py --setuptools-version=`/Users/buildbot/work/environments/beat/py27/bin/python -c 'import setuptools; print(setuptools.__version__)'`
   - ./bin/buildout
   - ./bin/python --version
-  - export NOSE_WITH_COVERAGE=1
-  - export NOSE_COVER_PACKAGE=beat.web
+  - cd src/cpulimit && make && cd -
+  - cd bin && ln -s ../src/cpulimit/src/cpulimit . && cd -
   - export COVERAGE_FILE=.coverage.django
-  - ./bin/django test --settings=beat.web.settings.test -v 2
+  - rm -rf ./test_prefix
+  - ./bin/coverage run --source='./beat/web' ./bin/django test --settings=beat.web.settings.test -v 2
   - export BEAT_CMDLINE_TEST_PLATFORM=django://beat.web.settings.test
   - export COVERAGE_FILE=.coverage.cmdline
+  - export NOSE_WITH_COVERAGE=1
+  - export NOSE_COVER_PACKAGE=beat.web
   - ./bin/nosetests -sv beat.cmdline
   - unset COVERAGE_FILE
-  - ./bin/coverage combine
+  - ./bin/coverage combine .coverage.django .coverage.cmdline
   - ./bin/coverage report
   - ./bin/sphinx-apidoc --separate -d 2 --output=doc/api/api beat beat/web/*/migrations beat/web/*/tests
   - ./bin/sphinx-build doc/api html/api
diff --git a/MANIFEST.in b/MANIFEST.in
index 1171d23cad68cee555447aff5ac66b6257031fab..01085dd22e1ae11d113ac4c4ac015f0bb88ac503 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include LICENSE.AGPL README.rst bootstrap-buildout.py buildout.cfg
 recursive-include beat/web *.css *.png *.gif *.svg *.ico *.js *.html *.rst *.txt
 recursive-include doc conf.py *.rst *.png *.svg *.ico *.pdf
+recursive-include beat/web/backend/environments/default/bin execute describe
diff --git a/README.rst b/README.rst
index 404019df02fbdb94791db0f03f748ef6c4ba71b6..eb95b9f621291b0b4cd0b3d1fbc046067e3fc0b8 100644
--- a/README.rst
+++ b/README.rst
@@ -25,8 +25,12 @@
  Biometrics Evaluation and Testing Platform
 ============================================
 
-This package contains the source code for the web components of the BEAT
-platform.
+The BEAT platform is written as a set of Python packages. This package
+(beat.web), in particular, constitutes the central deployment pillar of a
+BEAT platform instance. It is built on top of the Django_ web framework. If
+you are unfamiliar with this framework but wish to deploy or develop the
+BEAT platform, it is recommended that you familiarize yourself with it
+first.
 
 
 Installation
@@ -52,388 +56,15 @@ get you a fully operational test and development environment.
   machinery.
 
 
-.. tip::
-
-  If you'd like to **speed-up** the installation, it is strongly advised you
-  prepare a preset virtual environment (see the virtualenv_ package) with all
-  required dependencies, so that ``./bin/buildout`` does not download and
-  installs all of them every time you cleanup. This technique should allow you
-  to quickly clean-up and re-start your working environment which is useful
-  during development.
-
-  In order to fetch currently needed dependencies, run::
-
-    $ ./bin/buildout #to setup once
-    $ ./bin/pip freeze > requirements.txt
-
-  Examine the file ``requirements.txt`` and remove packages you are either
-  developing locally (e.g., all that are under ``src``) or that you think you
-  don't need. The command ``pip freeze`` reports all installed packages and not
-  only those which are needed by your project. If the Python prompt you used
-  for bootstrapping already had a good set of packages installed, you may see
-  them there.
-
-  Once you have a satisfying ``requirements.txt`` file, you may proceed to
-  recreate a virtualenv_ you'll use for your development. Just call::
-
-    $ virtualenv ~/work/beat-env #--system-site-packages
-
-  To create the virtual environment. This new environment does not contain
-  system packages by default. You may override that by specifying
-  ``--system-site-packages`` as suggested above. Then, install the required
-  packages on your new virtual environment::
-
-    $ ~/work/beat-env/bin/pip install -r requirements.txt
-
-  After that step is done, your virtual environment is ready for deployment.
-  You may now start from scratch to develop ``beat.web`` taking as base the
-  Python interpreter on your virtualenv_::
-
-    $ cd beat.web
-    $ git clean -fdx #full clean-up
-    $ ~/work/beat-env/bin/python bootstrap-buildout.py
-    $ ./bin/buildout
-
-  You'll realize the buildout step now takes considerably less time and you may
-  repeat this last step as much as needed. ``pip`` is a very flexible tool and
-  you may use it to manage the virtualenv_ installing and removing packages as
-  needed.
-
-
 Documentation
 -------------
 
-Our documentation project is divided in 3 parts. The user guide is the only one
-which is automatically built as part of the ``buildout`` procedure. The API and
-administrators guide needs to be manually compiled if required.
-
-To build the API documentation, just do::
-
-  $ ./bin/sphinx-apidoc --separate -d 2 --output=doc/api/api beat beat/web/*/migrations beat/web/*/tests
-  $ ./bin/sphinx-build doc/api html/api
-
-
 To build the administrator guide, just do::
 
   $ ./bin/sphinx-build doc/admin html/admin
 
-
-The above commands will build the stated guides, in HTML format, and dump
-results into your local directory ``html``. You may navigate then to that
-directory and, with your preferred web browser, open the file ``index.html`` to
-browse the available documentation.
-
-The basic user guide which includes information for users of the platform, is
-built automatically upon ``buildout``. If you wish to build it and place it
-alongside the other guides, you may do it as well like this::
-
-  $ ./bin/sphinx-build doc/user html/user
-
-
-Instantiating a BEAT web server
--------------------------------
-
-For a simple (development) web server, the default settings on
-``beat/web/settings/settings.py`` should work out of the box. These settings:
-
-  * Instantiate the web service on the local host under port 8000 (the address
-    will be ``http://127.0.0.1:8000``
-  * Use an SQLITE3 database named ``django.sql3`` located on the current
-    working directory
-  * Run with full debug output
-  * It sets the working BEAT prefix to ``./prefix``
-  * A single user, called ``admin`` will be setup into the system
-
-If you need to tweak these settings, just edit the file
-``beat/web/settings/settings.py``. You may consult the `Django documentation`_
-for detailed information on other settings.
-
-Once the Django settings are in place, you can run a single command to fully
-populate a development webserver::
-
-  $ ./bin/django install -v1
-
-.. note::
-
-   Concerning databases installed by this command, we only explain the platform
-   how to **access** their data. It does not download the raw data for the
-   databases that you must procure yourself through the relevant web sites
-   (checkout the database pages on the Idiap instance of the BEAT platform for
-   details).
-
-.. note::
-
-  If you need to specify your own path to the directories containing the
-  databases, you could just create a simple JSON file as follows::
-
-    {
-      "atnt/1": "/remote/databases/atnt",
-      "banca/2": "/remote/databases/banca"
-    }
-
-  Then just use the previous script with the option ``--database-root-file``::
-
-    $ ./bin/django install -v1 --database-root-file=MYFILE.json
-
-  By default, paths to the root of all databases are set to match the Idiap
-  Research Institute filesystem organisation.
-
-.. note::
-
-  For every installed database, you'll need to generate their data indices,
-  which allows the platform to correctly parallelize algorithms. To do so, for
-  every combination of database and version you wish to support, run the
-  following command::
-
-    $ ./bin/beat -p web_dynamic_data db index <name>/<version>
-
-  Replacing the strings ``<name>`` by the name of the database you wish to dump
-  the indices for, together with the version in ``<version>``. For example, to
-  dump the indices for the AT&T database, version 1, do the following::
-
-    $ ./bin/beat -p web_dynamic_data db index atnt/1
-
-Once the contributions and users are in place, you're ready to start the test
-server::
-
-  $ ./bin/django runserver -v3
-
-At this point, your platform can be accessed by typing the URL
-``http://127.0.0.1:8000`` in a web browser on the machine the server is
-running.
-
-.. _localhost:
-
-Localhost
----------
-
-To effectively use your new server and test all aspects of it, you'll also need
-a scheduler with at least one attached worker that can execute experiments. For
-most development purposes, a simple 3-node system, with all components running
-on the current (local) host is sufficient.
-
-Here is a recipe to start a simple 3-node system in which the local worker uses
-the system-wide installed Python interpreter to execute the algorithms.
-
-First, make sure the program ``cpulimit`` is available on your system. The BEAT
-platform uses this program to control slot usage on the scheduling/worker
-level::
-
-  $ cpulimit -h
-
-If that is not the case, then you need to install it. Either install a package
-that is native to your system (e.g. on Debian or Ubuntu platforms) or compile
-the checked-out version available at ``src/cpulimit``::
-
-  $ cd src/cpulimit;
-  $ make
-  $ ./src/cpulimit -h #to test it
-  $ cd ../../bin #go back to the root of beat.web and the into the `bin' dir
-  $ ln -s ../src/cpulimit/src/cpulimit
-  $ cd .. #go back to the root of beat.web
-
-Now start the localhost system::
-
-  $ ./bin/localhost.py -v
-  ...
-
-You may inspect this programs help message for details on its usage and
-options.
-
-Once the localhost system is started and the scheduler is properly configured,
-you may open a browser window to your `localhost, port 8000
-<http://127.0.0.1:8000>`_, to get started with your locally installed platform.
-
-
-Localhost with ``DEBUG=False``
-==============================
-
-If you need to test the RESTful API, it is better to do it without Django
-throwing you HTML error pages. For that, you'll need to start the Django
-development server with slightly different settings::
-
-  $ ./bin/localhost.py -v --settings=beat.web.settings.nodebug
-
-
-Triggering a Scheduler Reconfiguration
-======================================
-
-If you modify the queue configuration on the Django administrative panel,
-you'll need to notify the scheduler of those changes. You can trigger a
-scheduler (hot) re-configuration using the following command-line program::
-
-  $ ./bin/django qconf
-
-.. note::
-
-   Optionally, you may also visit `your local scheduler page
-   <http://127.0.0.1:8000/backend/scheduler>`, and hit the (green) button that
-   says "Send configuration to Scheduler". It has the same effect.
-
-
-Unit Testing
-------------
-
-After installation, it is possible to run our suite of unit tests. To do so,
-use::
-
-  $ ./bin/django test --settings=beat.web.settings.test -v 2
-
-You may pass filtering criteria to just launch tests for a particular set of
-``beat.web`` applications. For example, to run tests only concerning
-``beat.web.toolchains``, run::
-
-  $ ./bin/django test --settings=beat.web.settings.test -v 2 beat.web.toolchains.tests
-
-To measure coverage, you must set an environment variable for nose::
-
-  $ NOSE_WITH_COVERAGE=1 NOSE_COVER_PACKAGE=beat.web ./bin/django test --settings=beat.web.settings.test -v 2
-
-.. _snapshot:
-
-Local Development Server
-------------------------
-
-It is easy to quickly setup a local system for development, taking as base the
-current state of a production system.
-
-
-1. Before starting, make sure you have gone through, at least once, over the
-   localhost_ instructions above. It explains the very basic setup required for
-   a complete development environment.
-
-
-2. Dump and back-up your current **production** BEAT database::
-
-     [production]$ ./bin/django backup
-
-
-3. [Optional] If you have made important modifications between the contents
-   available at your production server and your currently checked-out source,
-   you'll need to run Django migrations on data imported from the production
-   server. If you need to do this, make sure you don't have unapplied commits
-   to your local **development** package and reset it to the production tag::
-
-     [development]$ git checkout <production-tag>
-
-   .. note::
-
-      You can figure you the production tag by looking at the footer of the
-      BEAT website. The corresponding tag name is found by prefixing a ``v``
-      before the version number. For example, the tag for version ``0.8.2`` of
-      the platform is ``v0.8.2``.
-
-
-   Also make sure to revert all dependent packages, so as to recreate the state
-   of the database schema as on the production site.
-
-
-4. Remove the current local development database so that the restore operation
-   can start from scratch::
-
-     [development]$ rm -rf django.sql3 web_dynamic_data
-
-
-5. Copy the backup tarball from the production server and restore it locally::
-
-     [development]$ scp root@beatweb:backups/<backup-filename>.tar.bz2
-     [development]$ ./bin/django restore <backup-filename>.tar.bz2
-
-   At this point, you have recreated a copy of your production system locally,
-   on your SQLite3 database.
-
-
-6. Reset queue configuration to allow for local running.
-
-   You may, optionally, reset the queue configuration of your installation so
-   that the environment you have is compatible with your development machine,
-   so that you can immediately run experiments locally. To do so, use the
-   ``qsetup`` Django command::
-
-     [development]$ ./bin/django qsetup --reset
-
-
-7. Apply migrations::
-
-   $ ./bin/django migrate
-
-
-At this point, you should have a complete development setup with all elements
-available on the production system installed locally. This system is fully
-capable of running experiments locally using your machine. Start a full system
-using ``localhost.py`` as explained on the localhost_ section above.
-
-
-Testing Django Migrations
--------------------------
-
-Django migrations, introduced in version 1.7, is a useful feature for
-automatically migrating your database to new model schemas, if you get it
-right. Here is a recipe to make sure your migrations will work on your
-production system, allowing for quick and repetitive test/fix cycles.
-
-The key idea is that we follow the setup for the snapshot_ and then,
-locally backup our database and prefix so that we can quickly reproduce the
-migration test loop.
-
-
-1. Make sure you go through the snapshot_ instructions above (**up to
-   step 6 only**).
-
-
-2. Make a copy of the SQLite3 database::
-
-     $ cp -a django.sql3 django.sql3.backup
-
-   This backup will allow you to quickly test the migrations w/o having to
-   checkout the production version anymore.
-
-   Also, create a temporary git repository of ``web_dynamic_data``, so you can
-   cross-check changes and reset it in case of problems::
-
-     $ cd web_dynamic_data
-     $ git init .
-     $ git add .
-     $ git commit -m "Initial commit"
-     $ cd ..
-
-
-3. Go back to the HEAD or branch you were developping before::
-
-     $ git checkout HEAD
-
-
-4. Here is how to test/fix your migrations:
-
-   a. Run "django migrate"::
-
-        $ ./bin/django migrate
-
-   b. Check your database by visually inspecting it on the django web admin or
-      by manually dumping it.
-
-   c. If a problem is detected, fix it and revert the state::
-
-        $ cp -af django.sql3.backup django.sql3
-        $ cd web_dynamic_data && git reset --hard HEAD && git clean -fdx . & cd ..
-
-      .. note::
-
-         Tip: Write the above lines in a shell script so it is easy to repeat.
-
-      Go back to a. and restart.
-
-
-Javascript Management with Node.js/Bower
-----------------------------------------
-
-We manage javascript external packages with the help of Bower_. If you'd like
-to include more packages that will statically served with the Django web app,
-please consider including them at the appropriate section of ``buildout.cfg``.
-
-The included recipes will also download and install executables for
-``uglifyjs``, ``grunt``, ``csslint`` and ``jshint``, which can be useful for JS
-development.
+Once the guide is built, continue reading the "Installation" section above
+for further deployment instructions.
 
 
 Issues
@@ -443,13 +74,9 @@ If you find problems concerning this package, please post a message to our
 `group mailing list`_. Currently open issues can be tracked at `our gitlab
 page`_.
 
+
 .. Place here references to all citations in lower case
 
-.. _django documentation: https://docs.djangoproject.com
-.. _pip: http://pypi.python.org/pypi/pip
-.. _easy_install: http://pypi.python.org/pypi/setuptools
-.. _zc.buildout: http://pypi.python.org/pypi/zc.buildout
-.. _virtualenv: http://pypi.python.org/pypi/virtualenv
+.. _django: https://www.djangoproject.com/
 .. _group mailing list: https://groups.google.com/d/forum/beat-devel
-.. _our gitlab page: https://gitlab.idiap.ch/biometric/beat.web/issues
-.. _bower: http://bower.io
+.. _our gitlab page: https://gitlab.idiap.ch/beat/beat.web/issues
diff --git a/beat/web/accounts/apps.py b/beat/web/accounts/apps.py
index 4f87ff979e400842728fbd30a959b01fabc7795c..537ec0b2830d57b364f98c3cfa18767f0c4e07b3 100644
--- a/beat/web/accounts/apps.py
+++ b/beat/web/accounts/apps.py
@@ -27,8 +27,6 @@
 
 from django.apps import AppConfig
 from django.utils.translation import ugettext_lazy as _
-from django.contrib.auth.models import User
-from actstream import registry
 
 class AccountsConfig(AppConfig):
 
@@ -36,4 +34,8 @@ class AccountsConfig(AppConfig):
     verbose_name = _('Accounts')
 
     def ready(self):
+        super(AccountsConfig, self).ready()
+        from django.contrib.auth.models import User
+        from actstream import registry
         registry.register(User)
+        from .signals import setup_user
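
Moving the ``User``/``actstream`` imports (and the ``signals`` import) from
module level into ``AppConfig.ready()``, as above, follows Django's
application-loading rules: models cannot be imported before the app registry
is fully populated. A minimal sketch of the same pattern, for a hypothetical
``example`` application::

    from django.apps import AppConfig

    class ExampleConfig(AppConfig):  # hypothetical app configuration
        name = 'example'

        def ready(self):
            super(ExampleConfig, self).ready()
            # model imports are only safe once the app registry is ready
            from django.contrib.auth.models import User
            from actstream import registry
            registry.register(User)
            # importing the signals module connects its receivers
            from . import signals  # noqa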
diff --git a/beat/web/backend/signals.py b/beat/web/accounts/signals.py
similarity index 98%
rename from beat/web/backend/signals.py
rename to beat/web/accounts/signals.py
index a718679e6f893bad698528e7442554861be50134..828afcbf0d39ecbec7b8a5b14958426e244e768f 100644
--- a/beat/web/backend/signals.py
+++ b/beat/web/accounts/signals.py
@@ -35,7 +35,7 @@ from django.conf import settings
 
 from rest_framework.authtoken.models import Token
 
-from ..accounts.models import AccountSettings
+from .models import AccountSettings
 
 
 @receiver(post_save, sender=User)
diff --git a/beat/web/accounts/urls.py b/beat/web/accounts/urls.py
index fb476580afdf47ecc6aa1d0d05bb4a7322d9f8bd..dd7c08297f36c58639736e9ab65e09004c900c4d 100644
--- a/beat/web/accounts/urls.py
+++ b/beat/web/accounts/urls.py
@@ -28,8 +28,6 @@
 from django.conf.urls import patterns, url
 from . import views
 
-urlpatterns = patterns('',
-    url(r'^settings/$',
-        views.account_settings,
-        name='settings')
-)
+urlpatterns = [
+    url(r'^settings/$', views.account_settings, name='settings'),
+]
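
The ``patterns()`` helper replaced above was deprecated in Django 1.8 and
removed in 1.10, so a plain Python list of ``url()`` entries is the
forward-compatible form. A hypothetical sketch of wiring such a module into a
root URL configuration::

    from django.conf.urls import url, include

    urlpatterns = [
        # route all /accounts/ URLs to the application's urls module
        url(r'^accounts/', include('beat.web.accounts.urls')),
    ]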
diff --git a/beat/web/algorithms/apps.py b/beat/web/algorithms/apps.py
index d0ee608006b0ab43204eda3e7080045f399e6c31..3f57549932aa9c67c5dfbe092b62d609393df449 100644
--- a/beat/web/algorithms/apps.py
+++ b/beat/web/algorithms/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class AlgorithmsConfig(CommonAppConfig):
     name = 'beat.web.algorithms'
@@ -39,4 +38,5 @@ class AlgorithmsConfig(CommonAppConfig):
         from .signals import (create_endpoints, delete_endpoints,
             auto_delete_file_on_delete, auto_delete_file_on_change)
 
+        from actstream import registry
         registry.register(self.get_model('Algorithm'))
diff --git a/beat/web/algorithms/models.py b/beat/web/algorithms/models.py
index c5a8993a8b6afab5b00f375dfe512e7ec2656bc2..28864f02222654f25b90b1e62861c7af1f0b67a4 100644
--- a/beat/web/algorithms/models.py
+++ b/beat/web/algorithms/models.py
@@ -35,8 +35,6 @@ import beat.core.library
 
 from beat.core.utils import NumpyJSONEncoder
 
-from ..backend.models import Environment
-
 from ..dataformats.models import DataFormat
 
 from ..common.storage import OverwriteStorage
@@ -190,6 +188,8 @@ class Algorithm(Code):
 
         from ..experiments.models import Block
         from ..experiments.models import Experiment
+        from ..backend.models import Environment
 
         # Tries to figure through a maximum if an algorithm has been
         # successfuly used inside an environment.
diff --git a/beat/web/algorithms/tests/core.py b/beat/web/algorithms/tests/core.py
index 151fa20a9d8b05cfaa5f1ec5428322845474d17c..2f3c375bba9fa3262a42a298b7c97a599bf3382a 100644
--- a/beat/web/algorithms/tests/core.py
+++ b/beat/web/algorithms/tests/core.py
@@ -34,10 +34,9 @@ import simplejson as json
 from django.contrib.auth.models import User
 from django.conf import settings
 
-from beat.web.dataformats.models import DataFormat
-
-from beat.web.common.testutils import BaseTestCase
-from beat.web.team.models import Team
+from ...dataformats.models import DataFormat
+from ...common.testutils import BaseTestCase, tearDownModule
+from ...team.models import Team
 
 from ..models import Algorithm
 
diff --git a/beat/web/algorithms/tests/tests.py b/beat/web/algorithms/tests/tests.py
index 634e2ef8af79898fffe7cf4a11a4f147881c85bc..3b0edf3cb1bc045150b952c835dc377fdf60a265 100644
--- a/beat/web/algorithms/tests/tests.py
+++ b/beat/web/algorithms/tests/tests.py
@@ -27,7 +27,8 @@
 
 from django.contrib.auth.models import User
 
-from beat.web.libraries.models import Library
+from ...libraries.models import Library
+from ...common.testutils import tearDownModule
 
 from ..models import Algorithm
 
diff --git a/beat/web/algorithms/tests/tests_api.py b/beat/web/algorithms/tests/tests_api.py
index ceccce34ece9edaf30fbba35c9d609fe4b0ea8b4..749fd77b85519cc9d8327615dd02e9e0b6341fde 100644
--- a/beat/web/algorithms/tests/tests_api.py
+++ b/beat/web/algorithms/tests/tests_api.py
@@ -32,7 +32,8 @@ from django.contrib.auth.models import User
 from django.conf import settings
 from django.core.urlresolvers import reverse
 
-from beat.web.dataformats.models import DataFormat
+from ...dataformats.models import DataFormat
+from ...common.testutils import tearDownModule
 
 import beat.core.algorithm
 
diff --git a/beat/web/algorithms/tests/tests_team.py b/beat/web/algorithms/tests/tests_team.py
index 68ef3b7167ca37faf0f0105dc19165b993c9d44d..173946850bd646eee9929ddee635b0aa8088c3f3 100644
--- a/beat/web/algorithms/tests/tests_team.py
+++ b/beat/web/algorithms/tests/tests_team.py
@@ -27,8 +27,9 @@
 
 from django.contrib.auth.models import User
 
-from beat.web.team.models import Team
-from beat.web.dataformats.models import DataFormat
+from ...team.models import Team
+from ...dataformats.models import DataFormat
+from ...common.testutils import tearDownModule
 
 from ..models import Algorithm
 
diff --git a/beat/web/algorithms/tests/tests_user.py b/beat/web/algorithms/tests/tests_user.py
index 71114d37e907b1ecc862414ea67c35caf7040d76..5c02aef9b2a2f0e05fe01e573ed1f4bedc0a9036 100644
--- a/beat/web/algorithms/tests/tests_user.py
+++ b/beat/web/algorithms/tests/tests_user.py
@@ -29,6 +29,7 @@
 from ..models import Algorithm
 
 from .core import AlgorithmsAccessibilityFunctionsBase
+from ...common.testutils import tearDownModule
 
 class NotSharedAlgorithm_CheckAccessibilityFunction(AlgorithmsAccessibilityFunctionsBase):
 
diff --git a/beat/web/attestations/apps.py b/beat/web/attestations/apps.py
index a0a13e4352ba6d914efb7145dda191e5212dbafe..fd30640f9db8ca6069e12ddf38eda40f82bf05f5 100644
--- a/beat/web/attestations/apps.py
+++ b/beat/web/attestations/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class AttestationsConfig(CommonAppConfig):
     name = 'beat.web.attestations'
@@ -35,7 +34,6 @@ class AttestationsConfig(CommonAppConfig):
 
     def ready(self):
         super(AttestationsConfig, self).ready()
-
         from .signals.handlers import on_unlocked
-
+        from actstream import registry
         registry.register(self.get_model('Attestation'))
diff --git a/beat/web/attestations/tests.py b/beat/web/attestations/tests.py
index 28e0e2cb0d9b104a4934219a4617fb0278a6629b..d2ea57a1ca3b421ae1d37d1a2534c0c580ab06fe 100644
--- a/beat/web/attestations/tests.py
+++ b/beat/web/attestations/tests.py
@@ -46,7 +46,7 @@ from ..dataformats.models import DataFormat
 from ..toolchains.models import Toolchain
 from ..databases.models import Database
 
-from ..common.testutils import BaseTestCase
+from ..common.testutils import BaseTestCase, tearDownModule
 
 class AttestationsAPIBase(BaseTestCase):
 
@@ -221,7 +221,7 @@ class AttestationsAPIBase(BaseTestCase):
         environment = Environment(name='env1', version='1.0')
         environment.save()
 
-        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, nb_cores_per_slot=1, max_slots_per_user=10)
+        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
 
         queue.environments.add(environment)
@@ -280,18 +280,6 @@ class AttestationsAPIBase(BaseTestCase):
         experiment.end_date   = datetime.now()
         experiment.save()
 
-        block            = Block()
-        block.experiment = experiment
-        block.name       = 'block1'
-        block.algorithm  = algorithm1
-        block.save()
-
-        block            = Block()
-        block.experiment = experiment
-        block.name       = 'analyzer1'
-        block.algorithm  = algorithm2
-        block.save()
-
 
     def tearDown(self):
         if os.path.exists(settings.TOOLCHAINS_ROOT):
diff --git a/beat/web/backend/admin.py b/beat/web/backend/admin.py
index 22cafa5ef69bfffbc420c7b51b9e28db293d7829..def8b5b508d20f935b39b64407fb16bb2c6312cf 100644
--- a/beat/web/backend/admin.py
+++ b/beat/web/backend/admin.py
@@ -31,7 +31,9 @@ from django import forms
 from .models import Environment as EnvironmentModel
 from .models import Worker as WorkerModel
 from .models import Queue as QueueModel
-from .models import QueueWorkerSlot as QueueWorkerSlotModel
+from .models import Slot as SlotModel
+from .models import Job as JobModel
+from .models import JobSplit as JobSplitModel
 
 from ..ui.forms import CodeMirrorRSTCharField
 from ..common.texts import Messages
@@ -142,7 +144,7 @@ deactivate_workers.short_description = 'Deactivate selected workers'
 
 class Worker(admin.ModelAdmin):
 
-    list_display        = ('id', 'name', 'nb_cores', 'active')
+    list_display        = ('id', 'name', 'cores', 'memory', 'active')
     search_fields       = ['name']
     list_display_links  = ('id', 'name')
 
@@ -157,8 +159,8 @@ admin.site.register(WorkerModel, Worker)
 #----------------------------------------------------------
 
 
-class QueueWorkerSlotInline(admin.TabularInline):
-    model = QueueWorkerSlotModel
+class SlotInline(admin.TabularInline):
+    model = SlotModel
 
 
 #----------------------------------------------------------
@@ -166,9 +168,45 @@ class QueueWorkerSlotInline(admin.TabularInline):
 
 class Queue(Django18ProofGuardedModelAdmin):
 
-    list_display        = ('id', 'name', 'memory_limit', 'time_limit', 'nb_cores_per_slot', 'max_slots_per_user')
+    list_display        = ('id', 'name', 'memory_limit', 'time_limit', 'cores_per_slot', 'max_slots_per_user')
     search_fields       = ['name']
     list_display_links  = ('id', 'name')
-    inlines             = [QueueWorkerSlotInline]
+    inlines             = [SlotInline]
 
 admin.site.register(QueueModel, Queue)
+
+
+#----------------------------------------------------------
+
+
+class JobSplitInline(admin.TabularInline):
+    model = JobSplitModel
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
+class Job(admin.ModelAdmin):
+
+    list_display        = ('id', 'status', 'runnable_date', 'block', 'splits')
+    search_fields       = ['block__name', 'block__experiment__name']
+    list_display_links  = ('id', 'block')
+    ordering            = ('runnable_date', 'id')
+
+    # to avoid very slow loading of cached files
+    raw_id_fields = ('block',)
+
+    def splits(self, obj):
+        return obj.splits.count()
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+admin.site.register(JobModel, Job)
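
The ``splits`` entry in ``list_display`` above is a computed column: any
``ModelAdmin`` method taking the object may be listed there and is rendered
read-only in the change list. A minimal sketch of the same technique, using a
hypothetical ``OrderAdmin``::

    from django.contrib import admin

    class OrderAdmin(admin.ModelAdmin):  # hypothetical model admin
        list_display = ('id', 'item_count')

        def item_count(self, obj):
            # rendered as a read-only column in the admin change list
            return obj.items.count()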
diff --git a/beat/web/backend/api.py b/beat/web/backend/api.py
index ada56354fb684a3a8e4db58e28f4918949c0de57..258472e8c79f2a6ef52deb04bcdc01e5226f3528 100644
--- a/beat/web/backend/api.py
+++ b/beat/web/backend/api.py
@@ -25,637 +25,12 @@
 #                                                                             #
 ###############################################################################
 
-from django.conf import settings
-from django.db import transaction
-from django.db.models import Q
-from django.utils import six
-
 from rest_framework.decorators import api_view, permission_classes
 from rest_framework.response import Response
 from rest_framework import permissions
 
-from ..common.permissions import IsSuperuser
-from ..common.responses import BadRequestResponse
-from .permissions import IsScheduler
-
-from ..utils.api import send_email_to_administrators
-from ..utils import scheduler as scheduler_api
-
-from .models import Worker
-from .models import Queue
 from .models import Environment
 
-from ..experiments.models import Experiment
-from ..experiments.models import Block
-from ..experiments.models import Result
-from ..experiments.models import CachedFile
-
-from ..statistics.utils import updateStatistics
-
-import beat.core.stats
-import beat.core.hash
-import beat.core.experiment
-import beat.core.toolchain
-import beat.core.algorithm
-import beat.core.data
-import beat.core.utils
-
-import simplejson as json
-from datetime import datetime
-import os
-
-import logging
-logger = logging.getLogger(__name__)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['GET'])
-@permission_classes([IsSuperuser])
-def scheduler(request):
-
-    # Determine if we want the latest known status or a fresh one
-    refresh = False
-    if request.GET.has_key('refresh'):
-        refresh = (request.GET['refresh'] == '1')
-
-    if refresh:
-        timeout = request.GET.get('timeout', '120') #seconds
-        status_and_data = scheduler_api.getMessage(
-            '/state?refresh=1&timeout=%s' % timeout)
-    else:
-        status_and_data = scheduler_api.getMessage('/state?refresh=0')
-
-    if status_and_data is None:
-        return Response('ERROR: Could not connect to the scheduler', status=500)
-
-    status, data = status_and_data
-    if status == 500:
-        send_email_to_administrators('Scheduler internal error', data)
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        'An administrator has been notified about this problem.'
-        return Response(error_message, status=status)
-
-    elif status != 200:
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        '    REASON: %s' % data
-        return Response(error_message, status=status)
-
-
-    # Setup worker information
-    data = json.loads(data)
-
-    workers = Worker.objects.all()
-    known_node_names = [k.name for k in workers]
-    active_node_names = [k.name for k in workers if k.active]
-    reported_names = data['workers'].keys()
-
-    for name in reported_names: #for all nodes described by the scheduler
-
-        if name not in known_node_names:
-            data['workers'][name]['db_status'] = 'Unknown'
-            data['workers'][name]['info'] = 'Worker not described'
-        else:
-            worker = Worker.objects.get(name=name)
-            data['workers'][name]['id'] = worker.id
-
-            if name not in active_node_names:
-                data['workers'][name]['db_status'] = 'Inactive'
-                data['workers'][name]['info'] = 'Worker temporarily deactivated'
-
-            elif data['workers'][name]['cores'] != worker.nb_cores:
-                data['workers'][name]['db_status'] = 'Mismatch'
-                data['workers'][name]['info'] = \
-                        'Number of cores differs (db: %d, scheduler: %d)' % \
-                        (data['workers'][name]['cores'], worker.nb_cores)
-            else:
-                data['workers'][name]['db_status'] = 'Active'
-                data['workers'][name]['info'] = 'Worker is declared and active'
-
-    # for nodes in the database, but not described by the scheduler
-    for name in [x for x in known_node_names if x not in reported_names]:
-        if name not in active_node_names:
-            data['workers'][name] = {
-                    'db_status': 'Inactive',
-                    'info': 'Worker not reported by the scheduler',
-                    }
-        else:
-            data['workers'][name] = {
-                    'db_status': 'Active',
-                    'info': 'Worker not reported by the scheduler',
-                    }
-        data['workers'][name]['id'] = Worker.objects.get(name=name).id
-
-    # Amend the queue statistics
-    queues = Queue.objects.all()
-    known_queue_names = [k.name for k in queues]
-    reported_names = data['scheduler']['queues'].keys()
-
-    for name in reported_names:
-        data['scheduler']['queues'][name]['status'] = 'Active'
-        if name not in known_queue_names:
-            data['scheduler']['queues'][name]['db_status'] = 'Missing'
-            data['scheduler']['queues'][name]['info'] = 'Queue is missing ' \
-                    'on the database'
-        else:
-            data['scheduler']['queues'][name]['total-slots'] = \
-                    sum(data['scheduler']['queues'][name]['slots'].values())
-            data['scheduler']['queues'][name]['db_status'] = 'Active'
-            data['scheduler']['queues'][name]['info'] = 'Queue is declared ' \
-                    'and active'
-            db_queue = Queue.objects.get(name=name)
-
-            slots = {}
-            for slot in db_queue.slots.iterator():
-                if slot.worker.active:
-                    slots[slot.worker.name] = slot.nb_slots
-
-            environments = []
-            for environment in db_queue.environments.filter(active=True):
-                environments.append({
-                    'name':    environment.name,
-                    'version': environment.version,
-                })
-
-            compare = {
-                'memory-in-megabytes':   db_queue.memory_limit,
-                'time-limit-in-minutes': db_queue.time_limit,
-                'nb-cores-per-slot':     db_queue.nb_cores_per_slot,
-                'max-slots-per-user':    db_queue.max_slots_per_user,
-                'slots':                 slots,
-                'environments':          environments,
-            }
-
-            sched = data['scheduler']['queues'][name]
-            for key in compare:
-                same = True
-                if isinstance(compare[key], list):
-                    #compare environments
-                    comp_key = set(['%s-%s' % (k['name'], k['version']) for k in compare[key]])
-                    sched_key = set(['%s-%s' % (k['name'], k['version']) for k in sched[key]])
-                    same = comp_key == sched_key
-                else:
-                    same = compare[key] == sched[key]
-                if not same:
-                    sched['db_status'] = 'Mismatch'
-                    if 'info' in sched:
-                        sched['info'] += '; mismatch at "%s" (%s != %s)' \
-                            % (key, compare[key], sched[key])
-                    else:
-                        sched['info'] += 'Mismatch at "%s" (%s != %s)' \
-                            % (key, compare[key], sched[key])
-
-
-    # attach existing database ids to the queues
-    for name in filter(lambda x: x in reported_names, known_queue_names):
-        db_queue = filter(lambda x: x.name == name, queues)[0]
-        data['scheduler']['queues'][name]['id'] = db_queue.id
-
-    for name in filter(lambda x: x not in reported_names, known_queue_names):
-        db_queue = filter(lambda x: x.name == name, queues)[0]
-
-        slots = {}
-        for slot in db_queue.slots.iterator():
-            if slot.worker.active:
-                slots[slot.worker.name] = slot.nb_slots
-
-        environments = []
-        for environment in db_queue.environments.filter(active=True):
-            environments.append({
-                'name':    environment.name,
-                'version': environment.version,
-            })
-
-        data['scheduler']['queues'][name] = {
-            'db_status': 'Inactive',
-            'info': 'Queue described at database, but not reported by scheduler',
-            'id':                    db_queue.id,
-            'status':                'Active',
-            'memory-in-megabytes':   db_queue.memory_limit,
-            'time-limit-in-minutes': db_queue.time_limit,
-            'nb-cores-per-slot':     db_queue.nb_cores_per_slot,
-            'max-slots-per-user':    db_queue.max_slots_per_user,
-            'slots':                 slots,
-            'total-slots':           db_queue.total_slots(),
-            'environments':          environments,
-            }
-
-
-    # Send the response
-    return Response(data)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['POST'])
-@permission_classes([IsSuperuser])
-def cancel_all_experiments(request):
-
-    # Send the command to the Scheduler
-    status_and_data = scheduler_api.postMessage('/cancel-all-experiments')
-
-    if status_and_data is None:
-        return Response('ERROR: Could not connect to the scheduler', status=500)
-
-    status, data = status_and_data
-    if status == 500:
-        send_email_to_administrators('Scheduler internal error', data)
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        'An administrator has been notified about this problem.\n\n' \
-                        '    REASON:\n%s' % data
-        return Response(error_message, status=status)
-
-    elif status != 200:
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        '    REASON: %s' % data
-        return Response(error_message, status=status)
-
-    return Response(status=200)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['POST'])
-@permission_classes([IsSuperuser])
-def scheduler_configuration(request):
-
-    configuration = {}
-    for queue in Queue.objects.all():
-        configuration[queue.name] = queue.as_json()
-
-    # Send the configuration to the Scheduler
-    status_and_data = scheduler_api.putMessage('/queue-configuration', data=configuration)
-
-    if status_and_data is None:
-        return Response('ERROR: Could not connect to the scheduler', status=500)
-
-    status, data = status_and_data
-    if status == 500:
-        send_email_to_administrators('Scheduler internal error', data)
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        'An administrator has been notified about this problem.\n\n' \
-                        '    REASON:\n%s' % data
-        return Response(error_message, status=status)
-
-    elif status != 200:
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        '    REASON: %s' % data
-        return Response(error_message, status=status)
-
-    return Response(status=200)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['POST'])
-@permission_classes([IsSuperuser])
-def cache_cleanup(request):
-
-    data = request.data
-
-    if data.has_key('olderthan'):
-        olderthan = data['olderthan']
-    else:
-        olderthan = 0
-
-
-    # Send the configuration to the Scheduler
-    params = {
-        'olderthan': olderthan,
-        'nolist':    0,
-    }
-
-    status_and_data = scheduler_api.postMessage('/cache-cleanup', params=params)
-    if status_and_data is None:
-        return Response('ERROR: Could not connect to the scheduler', status=500)
-
-    status, data = status_and_data
-    if status == 500:
-        send_email_to_administrators('Scheduler internal error', data)
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        'An administrator has been notified about this problem.\n\n' \
-                        '    REASON:\n%s' % data
-        return Response(error_message, status=status)
-
-    elif status != 200:
-        error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                        '    REASON: %s' % data
-        return Response(error_message, status=status)
-
-
-    # Reset the DB representation of the cache
-    data = json.loads(data)
-    if len(data) > 0:
-        blocks = Block.objects.filter(hashes__hash__in=data)
-        for block in blocks:
-            block.status = Block.NOT_CACHED
-            block.save()
-
-    return Response(data)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['PUT'])
-@permission_classes([permissions.IsAuthenticated, IsScheduler])
-@transaction.atomic
-def block_started(request):
-
-    # Check the validity of the request
-    data = request.data
-
-    if not(data.has_key('experiment-name')) or \
-        not(isinstance(data['experiment-name'], six.string_types)):
-        return BadRequestResponse('ERROR: Experiment name not provided')
-
-    if not(data.has_key('block-name')) or \
-        not(isinstance(data['block-name'], six.string_types)):
-        return BadRequestResponse('ERROR: Block name not provided')
-
-
-    # Retrieve the experiment
-    try:
-        configuration_id = beat.core.experiment.Storage(settings.PREFIX,
-                data['experiment-name'])
-        toolchain_id = beat.core.toolchain.Storage(settings.PREFIX,
-                configuration_id.toolchain)
-
-        experiment = Experiment.objects.select_for_update().get(
-                author__username=configuration_id.username,
-                toolchain__author__username=toolchain_id.username,
-                toolchain__name=toolchain_id.name,
-                toolchain__version=toolchain_id.version,
-                name=configuration_id.name,
-                )
-    except Exception:
-        return Response(status=404)
-
-
-    if experiment.status == Experiment.PENDING:
-        return BadRequestResponse("ERROR: The experiment '%s' is still marked as 'pending' in the database" % data['experiment-name'])
-    elif experiment.status not in (Experiment.SCHEDULED, Experiment.RUNNING):
-        return BadRequestResponse("ERROR: The experiment '%s' is already marked as '%s' in the database" % (data['experiment-name'], experiment.get_status_display()))
-
-
-    # Retrieve the block
-    try:
-        block = experiment.blocks.get(name=data['block-name'])
-    except Exception:
-        return Response(status=404)
-
-
-    # Modify the status of the experiment (if necessary)
-    if experiment.status == Experiment.SCHEDULED:
-        experiment.start_date = datetime.now()
-        experiment.status = Experiment.RUNNING
-        experiment.save()
-
-
-    # Modify the status of the block
-    block.status = Block.PROCESSING
-    block.save()
-
-
-    # Update all the other similar not-cached blocks and associated scheduled
-    # experiments. Note we don't updated failed blocks or unscheduled
-    # experiments as not to reset experiments that have already been run.
-    similar_blocks = Block.objects.filter(hashes__in=block.hashes.all()).exclude(pk=block.pk).order_by('pk').distinct()
-    similar_blocks.filter(status=Block.NOT_CACHED).update(status=Block.PROCESSING)
-    Experiment.objects.filter(blocks__in=similar_blocks, status=Experiment.SCHEDULED).update(start_date=datetime.now(), status=Experiment.RUNNING)
-
-    # Send the response
-    return Response(status=204)
-
-
-#----------------------------------------------------------
-
-
-@api_view(['PUT'])
-@permission_classes([permissions.IsAuthenticated, IsScheduler])
-@transaction.atomic
-def block_finished(request):
-    # Check the validity of the request
-    data = request.data
-
-    if not(data.has_key('experiment-name')) or \
-        not(isinstance(data['experiment-name'], six.string_types)):
-        return BadRequestResponse('ERROR: Experiment name not provided')
-
-    if not(data.has_key('block-name')) or \
-        not(isinstance(data['block-name'], six.string_types)):
-        return BadRequestResponse('ERROR: Block name not provided')
-
-    if not(data.has_key('state')) or \
-        not(isinstance(data['state'], six.string_types)):
-        return BadRequestResponse('ERROR: Block state not provided')
-
-    if not(data.has_key('outputs')) or \
-        not(isinstance(data['outputs'], (list, tuple))):
-        return BadRequestResponse('ERROR: Block outputs not provided')
-
-    # Alert system administrators if weird errors occur
-    if data.has_key('system-message') and \
-        isinstance(data['system-message'], six.string_types) and \
-        len(data['system-message'].strip()) != 0:
-        send_email_to_administrators('Worker environment error', data['system-message'])
-
-    block_state = data['state']
-    if block_state not in ['processed', 'failed', 'cancelled']:
-        return BadRequestResponse('ERROR: Invalid block state value: ' + block_state)
-
-    # Retrieve the experiment, block all other experiment retrieval operations
-    try:
-        configuration_id = beat.core.experiment.Storage(settings.PREFIX,
-                data['experiment-name'])
-        toolchain_id = beat.core.toolchain.Storage(settings.PREFIX,
-                configuration_id.toolchain)
-
-        experiment = Experiment.objects.select_for_update().get(
-            author__username=configuration_id.username,
-            toolchain__author__username=toolchain_id.username,
-            toolchain__name=toolchain_id.name,
-            toolchain__version=toolchain_id.version,
-            name=configuration_id.name,
-            )
-    except Exception as e:
-        logger.error("Could not retrieve experiment '%s/%s/%s/%s/%s' from " \
-            "database: %s", configuration_id.username,
-            toolchain_id.username, toolchain_id.name, toolchain_id.version,
-            configuration_id.name, str(e))
-        return Response(status=404)
-
-
-    if experiment.status == Experiment.PENDING:
-        return BadRequestResponse("ERROR: The experiment '%s' is still marked as 'pending' in the database" % data['experiment-name'])
-
-
-    # Retrieve the block
-    try:
-        block = experiment.blocks.get(name=data['block-name'])
-    except Exception:
-        return Response(status=404)
-
-
-    if (block.status == Block.NOT_CACHED) and (block_state != 'processed'):
-        if block_state == 'cancelled' or block_state == 'failed':
-            pass
-        else:
-            return BadRequestResponse("ERROR: The block '%s' isn't marked as 'running' in the database" % data['block-name'])
-    elif block.status == Block.FAILED:
-        return BadRequestResponse("ERROR: The block '%s' is already marked as 'failed' in the database" % data['block-name'])
-
-
-    # Create or retrieve cached files -- attach to block
-    all_cached_files = []
-    for hash in data['outputs']:
-        cache, created = CachedFile.objects.get_or_create(hash=hash)
-        if created:
-            cache.hash = hash
-            cache.save()
-        cache.blocks.add(block)
-        all_cached_files.append(cache)
-
-    # Updates all sorts of statistics on these caches (typically only one)
-    statistics = None
-    if block_state != 'cancelled':
-        if not(data.has_key('statistics')):
-            return BadRequestResponse('ERROR: Invalid statistics')
-
-        if data['statistics']:
-            statistics = beat.core.stats.Statistics(data['statistics'])
-
-    if statistics is not None:
-        for cache in all_cached_files:
-            cache.cpu_time = statistics.cpu['user'] + statistics.cpu['system']
-            cache.max_memory = statistics.memory['rss']
-            cache.data_read_size = statistics.data['volume']['read']
-            cache.data_read_nb_blocks = statistics.data['blocks']['read']
-            cache.data_read_time = statistics.data['time']['read']
-            cache.data_written_size = statistics.data['volume']['write']
-            cache.data_written_nb_blocks = statistics.data['blocks']['write']
-            cache.data_written_time = statistics.data['time']['write']
-
-    if data.has_key('execution_info') and \
-        (data['execution_info'] is not None):
-
-        execution_infos = data['execution_info']
-
-        for cache in all_cached_files:
-
-            if execution_infos.has_key('linear_execution_time') and \
-                (execution_infos['linear_execution_time'] is not None):
-                cache.linear_execution_time = \
-                    execution_infos['linear_execution_time']
-
-            if execution_infos.has_key('speed_up_real') and \
-                (execution_infos['speed_up_real'] is not None):
-                cache.speed_up_real = execution_infos['speed_up_real']
-
-            if execution_infos.has_key('speed_up_maximal') and \
-                (execution_infos['speed_up_maximal'] is not None):
-                cache.speed_up_maximal = execution_infos['speed_up_maximal']
-
-            if execution_infos.has_key('queuing_time') and \
-                (execution_infos['queuing_time'] is not None):
-                cache.queuing_time = execution_infos['queuing_time']
-
-    # Logged messages
-    if data.has_key('stdout') and isinstance(data['stdout'], six.string_types):
-        for cache in all_cached_files: cache.stdout = data['stdout']
-    if data.has_key('stderr') and isinstance(data['stderr'], six.string_types):
-        for cache in all_cached_files: cache.stderr = data['stderr']
-    if data.has_key('error-message') and \
-        isinstance(data['error-message'], six.string_types):
-        for cache in all_cached_files:
-            cache.error_report = data['error-message']
-
-    # Saves all cached files to the database
-    for cache in all_cached_files: cache.save()
-
-    # Save the results in the database (if applicable)
-    if block.analyzer and (block_state == 'processed'):
-        data_source = beat.core.data.CachedDataSource()
-        data_source.setup(os.path.join(settings.CACHE_ROOT,
-                beat.core.hash.toPath(block.hashes.all()[0].hash)),
-                settings.PREFIX)
-        output_data = data_source.next()[0]
-        if output_data is not None:
-            algorithm = beat.core.algorithm.Algorithm(settings.PREFIX,
-                    block.algorithm.fullname())
-            for field, value in output_data.as_dict().items():
-                result_entry         = Result()
-                result_entry.block   = block
-                result_entry.primary = algorithm.results[field]['display']
-                result_entry.name    = field
-                result_entry.type    = algorithm.results[field]["type"]
-
-                if result_entry.type in ['int32', 'float32', 'bool', 'string']:
-                    result_entry.data_value = str(value)
-                else:
-                    result_entry.data_value = json.dumps(value, indent=4, cls=beat.core.utils.NumpyJSONEncoder)
-
-                result_entry.save()
-
-        data_source.close()
-
-    # Modify the status of the block
-    if block_state == 'processed':
-        block.status = Block.CACHED
-        block.save()
-
-        if (experiment.status in [Experiment.SCHEDULED, Experiment.RUNNING]) and \
-           (experiment.blocks.filter(analyzer=True).exclude(status=Block.CACHED).count() == 0):
-            experiment.end_date = datetime.now()
-            experiment.status = Experiment.DONE
-            experiment.save()
-        elif experiment.status == Experiment.SCHEDULED:
-            experiment.start_date = datetime.now()
-            experiment.status = Experiment.RUNNING
-            experiment.save()
-
-    elif block_state == 'failed':
-        block.status = Block.FAILED
-        block.save()
-
-        if experiment.status != Experiment.FAILED:
-            experiment.end_date = datetime.now()
-            experiment.status = Experiment.FAILED
-            experiment.save()
-
-    elif block_state == 'cancelled':
-        block.status = Block.NOT_CACHED
-        block.statistics = None
-        block.linear_execution_time = None
-        block.speed_up_real = None
-        block.speed_up_maximal = None
-        block.queuing_time = None
-        block.save()
-
-        if experiment.status not in [Experiment.CANCELING, Experiment.FAILED]:
-            experiment.end_date = datetime.now()
-            experiment.status = Experiment.CANCELING
-            experiment.save()
-
-        if experiment.status == Experiment.CANCELING:
-            if experiment.blocks.filter(Q(status=Block.PROCESSING)).count() == 0:
-                experiment.end_date = datetime.now()
-                experiment.status = Experiment.FAILED
-                experiment.save()
-
-    # Update central statistics
-    if statistics: updateStatistics(statistics)
-
-    # Send the response
-    return Response(status=204)
-
-
-#----------------------------------------------------------
-
 
 @api_view(['GET'])
 @permission_classes([permissions.AllowAny])
@@ -663,7 +38,8 @@ def accessible_environments_list(request):
     """Returns all accessible environments for a given user"""
 
     # Retrieve the list of environments
-    environments = Environment.objects.filter(active=True).order_by('name', 'version')
+    environments = Environment.objects.filter(active=True).order_by('name',
+        'version')
 
     result = []
     for environment in environments.iterator():
@@ -677,10 +53,10 @@ def accessible_environments_list(request):
         for queue in environment.queues.iterator():
             if request.user.has_perm('can_access', queue):
                 queues[queue.name] = {
-                    'nb_slots':          queue.total_slots(),
+                    'nb_slots':          queue.number_of_slots(),
                     'memory_limit':      queue.memory_limit,
                     'time_limit':        queue.time_limit,
-                    'nb_cores_per_slot': queue.nb_cores_per_slot,
+                    'nb_cores_per_slot': queue.cores_per_slot,
                     'max_slots_per_user':queue.max_slots_per_user,
                 }
 
diff --git a/beat/web/backend/api_urls.py b/beat/web/backend/api_urls.py
index 3e76dd472a521a21499effc41826a773b6dc1654..e5faf3d8cf06d313978ed12103d6347416d398a5 100644
--- a/beat/web/backend/api_urls.py
+++ b/beat/web/backend/api_urls.py
@@ -29,41 +29,6 @@ from django.conf.urls import url
 from . import api
 
 urlpatterns = [
-    url(
-        r'^scheduler/$',
-        api.scheduler,
-        name='backend-api-scheduler',
-        ),
-
-    url(
-        r'^cancel-all-experiments/$',
-        api.cancel_all_experiments,
-        name='backend-api-cancel-all-experiments',
-        ),
-
-    url(
-        r'^scheduler-configuration/$',
-        api.scheduler_configuration,
-        name='backend-api-scheduler-configuration',
-        ),
-
-    url(
-        r'^cache-cleanup/$',
-        api.cache_cleanup,
-        name='backend-api-cache-cleanup',
-        ),
-
-    url(
-        r'^block-started/$',
-        api.block_started,
-        name='backend-api-block-started',
-        ),
-
-    url(
-        r'^block-finished/$',
-        api.block_finished,
-        name='backend-api-block-finished',
-        ),
 
     url(
         r'^environments/$',
diff --git a/beat/web/backend/apps.py b/beat/web/backend/apps.py
index a974899e8f03a69edeffc071dea5b5ff4fd19baa..07e67f57e25f3dfa84d570f8df537c3989a107ef 100644
--- a/beat/web/backend/apps.py
+++ b/beat/web/backend/apps.py
@@ -32,6 +32,3 @@ class BackendConfig(AppConfig):
 
     name = 'beat.web.backend'
     verbose_name = _('Backend')
-
-    def ready(self):
-        from .signals import setup_user
diff --git a/beat/web/backend/environments/default/bin/describe b/beat/web/backend/environments/default/bin/describe
new file mode 120000
index 0000000000000000000000000000000000000000..db1e579b6fac2b7505c56448d65ee9ee494ee2ce
--- /dev/null
+++ b/beat/web/backend/environments/default/bin/describe
@@ -0,0 +1 @@
+../../../../../../bin/describe
\ No newline at end of file
diff --git a/beat/web/backend/environments/default/bin/execute b/beat/web/backend/environments/default/bin/execute
new file mode 120000
index 0000000000000000000000000000000000000000..98c7b63c88a7c21b630cb7309d6d9e3db1914e2e
--- /dev/null
+++ b/beat/web/backend/environments/default/bin/execute
@@ -0,0 +1 @@
+../../../../../../bin/execute
\ No newline at end of file
diff --git a/beat/web/backend/management/__init__.py b/beat/web/backend/management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/beat/web/backend/management/commands/__init__.py b/beat/web/backend/management/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/beat/web/backend/management/commands/cleanup_cache.py b/beat/web/backend/management/commands/cleanup_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..2adec94db072f0188eeaa97449ba3ddd355b6938
--- /dev/null
+++ b/beat/web/backend/management/commands/cleanup_cache.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import os
+import logging
+logger = logging.getLogger('beat.web')
+
+from django.core.management.base import BaseCommand
+from django.conf import settings
+
+from ...utils import cleanup_cache
+from .... import __version__
+
+
+class Command(BaseCommand):
+
+    help = 'Cleans up the cache, removing old files'
+
+
+    def add_arguments(self, parser):
+
+        parser.add_argument('--olderthan', type=int, metavar='MINUTES',
+            default=0, help='All files which are older than this value ' \
+                'in *minutes* and are not locked or being used by active ' \
+                'experiments (running or scheduled) will be deleted ' \
+                '[default: %(default)s]')
+
+        parser.add_argument('--delete', action='store_true', default=False,
+            help='By default, we only list the cache files that would ' \
+                'be erased. Pass this flag to actually erase them')
+
+        parser.add_argument('--path', default=settings.CACHE_ROOT,
+            help='By default, we operate on the CACHE_ROOT path set in ' \
+                'your settings. Use this flag to operate on a different ' \
+                'path [default: %(default)s]')
+
+
+    def handle(self, *ignored, **arguments):
+
+        # Setup this command's logging level
+        global logger
+        arguments['verbosity'] = int(arguments['verbosity'])
+        if arguments['verbosity'] >= 1:
+            if arguments['verbosity'] == 1: logger.setLevel(logging.INFO)
+            elif arguments['verbosity'] >= 2: logger.setLevel(logging.DEBUG)
+
+        deleted = cleanup_cache(arguments['path'],
+            age_in_minutes=arguments['olderthan'],
+            delete=arguments['delete'])
+
+        if not arguments['delete']:
+            print("%d cache files can be deleted" % len(deleted))
+            for k in deleted:
+                print(os.path.join(arguments['path'], k))
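+
+# Example invocations (a sketch; flags as defined in add_arguments() above):
+#
+#   ./bin/django cleanup_cache --olderthan=120           # dry-run: list files
+#   ./bin/django cleanup_cache --olderthan=120 --delete  # really erase them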
diff --git a/beat/web/backend/management/commands/qsetup.py b/beat/web/backend/management/commands/qsetup.py
new file mode 100644
index 0000000000000000000000000000000000000000..080d869647301e62cc08f5b61cb68893a66aa140
--- /dev/null
+++ b/beat/web/backend/management/commands/qsetup.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+import sys
+import logging
+logger = logging.getLogger('beat.web')
+
+import simplejson
+
+from django.core.management.base import BaseCommand, CommandError
+
+from ...models import Environment, Queue, Worker, Slot
+from ...utils import setup_backend, dump_backend
+
+# Default configuration to start the state with
+import psutil
+import socket
+
+CORES = psutil.cpu_count()
+RAM = psutil.virtual_memory().total/(1024*1024)
+ENVIRONMENT = {'name': 'environment', 'version': '1'}
+ENVKEY = '%(name)s (%(version)s)' % ENVIRONMENT
+HOSTNAME = socket.gethostname()
+
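+# Default setup: a single queue on this host, one slot per core, with the
+# machine RAM split evenly among slots (see values below)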
+DEFAULT_CONFIGURATION = {
+    "queues": {
+      "queue": {
+        "memory-limit": RAM/CORES,
+        "time-limit": 1440, #1 day
+        "cores-per-slot": 1,
+        "max-slots-per-user": CORES,
+        "environments": [ENVKEY],
+        "slots": {
+          HOSTNAME: {
+            "quantity": CORES,
+            "priority": 0
+          }
+        },
+        "groups": [
+          "Default",
+          ],
+      }
+    },
+    "environments": {
+      ENVKEY: {
+        "name": ENVIRONMENT['name'],
+        "version": ENVIRONMENT['version'],
+        "short_description": "Local python interpreter",
+        "description": "Automatically generated local python " \
+            "interpreter environment",
+        },
+      },
+    "workers": {
+      HOSTNAME: {
+        "cores": CORES,
+        "memory": RAM
+      }
+    }
+  }
+
+RESET_CONFIGURATION = {
+    "queues": {},
+    "environments": {},
+    "workers": {}
+  }
+
+
+class Command(BaseCommand):
+
+    help = 'Sets and resets queue configurations'
+
+
+    def add_arguments(self, parser):
+
+        parser.add_argument('--dump', action='store_true', dest='dump',
+                default=False, help='Dumps the complete environment/worker/' \
+                        'queue configuration and exits')
+
+        parser.add_argument('--reset', action='store_true', dest='reset',
+                default=False, help='Deletes all environments/workers/queues ' \
+                        'before setting the given configuration')
+
+        parser.add_argument('config', type=str, nargs='?',
+                help='Optional custom queue configuration to use. If not ' \
+                        'passed, uses an internal default with a single ' \
+                        'queue/worker/%d slots' % CORES)
+
+
+    def handle(self, *ignored, **arguments):
+
+        # Setup this command's logging level
+        global logger
+        arguments['verbosity'] = int(arguments['verbosity'])
+        if arguments['verbosity'] >= 1:
+            if arguments['verbosity'] == 1: logger.setLevel(logging.INFO)
+            elif arguments['verbosity'] >= 2: logger.setLevel(logging.DEBUG)
+
+        if arguments['dump']:
+            d = dump_backend()
+            print(simplejson.dumps(d, indent=2))
+            sys.exit(0)
+
+        if arguments['reset']:
+            setup_backend(RESET_CONFIGURATION)
+
+        config = None
+        if arguments['config']:
+            with open(arguments['config'], 'rb') as f:
+                config = simplejson.load(f)
+
+        setup_backend(config or DEFAULT_CONFIGURATION)
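+
+# Example invocations (a sketch; arguments as defined in add_arguments()
+# above, `myconf.json' being a hypothetical file that follows the layout of
+# DEFAULT_CONFIGURATION):
+#
+#   ./bin/django qsetup                      # install the default local setup
+#   ./bin/django qsetup --dump               # print the current configuration
+#   ./bin/django qsetup --reset myconf.json  # wipe, then install a custom one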
diff --git a/beat/web/backend/migrations/0002_scheduler_addons.py b/beat/web/backend/migrations/0002_scheduler_addons.py
new file mode 100644
index 0000000000000000000000000000000000000000..76176108f8809551b9648c228f84478ed3264b74
--- /dev/null
+++ b/beat/web/backend/migrations/0002_scheduler_addons.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('backend', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.RenameModel('QueueWorkerSlot', 'Slot'),
+        migrations.RenameField(
+            model_name='slot',
+            old_name='nb_slots',
+            new_name='quantity',
+        ),
+        migrations.AddField(
+            model_name='slot',
+            name='priority',
+            field=models.PositiveIntegerField(default=0, help_text=b'Priority of these slots on the defined queue'),
+        ),
+        migrations.AlterUniqueTogether(
+            name='slot',
+            unique_together=set([('queue', 'worker')]),
+        ),
+        migrations.RenameField(
+            model_name='queue',
+            old_name='nb_cores_per_slot',
+            new_name='cores_per_slot',
+        ),
+        migrations.AlterField(
+            model_name='worker',
+            name='active',
+            field=models.BooleanField(default=False, help_text='If this worker is usable presently'),
+        ),
+        migrations.RenameField(
+            model_name='worker',
+            old_name='nb_cores',
+            new_name='cores',
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='info',
+            field=models.TextField(help_text=b'Informative message from the worker', null=True, blank=True),
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='memory',
+            field=models.PositiveIntegerField(default=0, help_text=b'In megabytes'),
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='used_cores',
+            field=models.PositiveIntegerField(default=0, help_text=b'In %'),
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='used_memory',
+            field=models.PositiveIntegerField(default=0, help_text=b'In %'),
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='update',
+            field=models.BooleanField(default=False, help_text='If this worker state must be updated at the next cycle'),
+        ),
+        migrations.AddField(
+            model_name='worker',
+            name='updated',
+            field=models.DateTimeField(auto_now=True, null=True),
+        ),
+        migrations.AlterField(
+            model_name='queue',
+            name='name',
+            field=models.CharField(help_text=b'The name for this object (space-like characters will be automatically replaced by dashes)', unique=True, max_length=100),
+        ),
+        migrations.AlterField(
+            model_name='queue',
+            name='cores_per_slot',
+            field=models.PositiveIntegerField(),
+        ),
+        migrations.AlterField(
+            model_name='queue',
+            name='max_slots_per_user',
+            field=models.PositiveIntegerField(),
+        ),
+        migrations.AlterField(
+            model_name='queue',
+            name='memory_limit',
+            field=models.PositiveIntegerField(help_text=b'In megabytes'),
+        ),
+        migrations.AlterField(
+            model_name='queue',
+            name='time_limit',
+            field=models.PositiveIntegerField(help_text=b'In minutes'),
+        ),
+        migrations.AlterField(
+            model_name='slot',
+            name='quantity',
+            field=models.PositiveIntegerField(help_text=b'Number of processing slots to dedicate in this worker for a given queue', verbose_name=b'Number of slots'),
+        ),
+        migrations.AlterField(
+            model_name='worker',
+            name='cores',
+            field=models.PositiveIntegerField(),
+        ),
+        migrations.AlterField(
+            model_name='environment',
+            name='previous_version',
+            field=models.ForeignKey(related_name='next_versions', on_delete=models.deletion.SET_NULL, blank=True, to='backend.Environment', null=True),
+        ),
+        migrations.CreateModel(
+            name='Result',
+            fields=[
+                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+                ('status', models.IntegerField()),
+                ('stdout', models.TextField(null=True, blank=True)),
+                ('stderr', models.TextField(null=True, blank=True)),
+                ('usrerr', models.TextField(null=True, blank=True)),
+                ('syserr', models.TextField(null=True, blank=True)),
+                ('_stats', models.TextField(null=True, blank=True)),
+                ('timed_out', models.BooleanField(default=False)),
+                ('cancelled', models.BooleanField(default=False)),
+            ],
+        ),
+        migrations.CreateModel(
+            name='Job',
+            fields=[
+                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+                ('status', models.CharField(default=b'N', max_length=1, choices=[(b'N', b'Queued'), (b'P', b'Processing'), (b'C', b'Completed'), (b'F', b'Failed'), (b'S', b'Skipped'), (b'L', b'Cancelled'), (b'K', b'Cancel')])),
+                ('runnable_date', models.DateTimeField(null=True, blank=True)),
+                ('start_date', models.DateTimeField(null=True, blank=True)),
+                ('end_date', models.DateTimeField(null=True, blank=True)),
+                ('split_errors', models.PositiveIntegerField(default=0)),
+                ('block', models.OneToOneField(related_name='job', on_delete=models.deletion.CASCADE, to='experiments.Block', null=True)),
+                ('parent', models.OneToOneField(related_name='child', to='backend.Job', null=True, on_delete=models.deletion.SET_NULL)),
+                ('result', models.OneToOneField(to='backend.Result', null=True, on_delete=models.deletion.CASCADE, related_name='job')),
+            ],
+        ),
+        migrations.CreateModel(
+            name='JobSplit',
+            fields=[
+                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+                ('split_index', models.PositiveIntegerField()),
+                ('start_index', models.PositiveIntegerField(null=True)),
+                ('end_index', models.PositiveIntegerField(null=True)),
+                ('cache_errors', models.PositiveIntegerField(default=0)),
+                ('status', models.CharField(default=b'N', max_length=1, choices=[(b'N', b'Queued'), (b'P', b'Processing'), (b'C', b'Completed'), (b'F', b'Failed'), (b'S', b'Skipped'), (b'L', b'Cancelled'), (b'K', b'Cancel')])),
+                ('start_date', models.DateTimeField(null=True)),
+                ('end_date', models.DateTimeField(null=True)),
+                ('process_id', models.PositiveIntegerField(null=True)),
+                ('job', models.ForeignKey(related_name='splits', to='backend.Job', null=True)),
+                ('worker', models.ForeignKey(related_name='splits', on_delete=models.deletion.SET_NULL, to='backend.Worker', null=True)),
+                ('result', models.OneToOneField(to='backend.Result', related_name='split', null=True, on_delete=models.deletion.CASCADE)),
+            ],
+        ),
+        migrations.AlterUniqueTogether(
+            name='jobsplit',
+            unique_together=set([('job', 'split_index')]),
+        ),
+    ]
diff --git a/beat/web/backend/models.py b/beat/web/backend/models.py
index 7e3be5ae9772e8421302f29e87981538a3fc9164..052390ffb302f53c2f368dca741e8f3ac5b39bca 100644
--- a/beat/web/backend/models.py
+++ b/beat/web/backend/models.py
@@ -25,19 +25,52 @@
 #                                                                             #
 ###############################################################################
 
+import os
+import time
+import signal
+import datetime
+import operator
+import traceback
+import subprocess
 
+import logging
+logger = logging.getLogger(__name__)
+
+import psutil
+import simplejson
+
+from django.db import utils
 from django.db import models
+from django.db import transaction
 from django.conf import settings
 from django.core.urlresolvers import reverse
 from django.utils.translation import ugettext_lazy as _
+from django.contrib.auth.models import Group
+
+from guardian.shortcuts import get_perms
 
-from ..common.models import Shareable
+import beat.core.stats
+import beat.core.data
+import beat.core.execution
+
+from ..common.models import Shareable, ShareableManager
 from ..common.texts import Messages
+from ..statistics.utils import updateStatistics
 
 
 #----------------------------------------------------------
 
 
+class EnvironmentManager(ShareableManager):
+
+    def get_by_natural_key(self, key):
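+        # keys look like "name (version)", the format produced by
+        # Environment.fullname() below; [1:-1] strips the parentheses
+        # around the version token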
+        name, version = key.rsplit(' ', 1)
+        return self.get(
+            name=name,
+            version=version[1:-1],
+            )
+
+
 class Environment(Shareable):
     """Defines a software environment to run algorithms"""
 
@@ -77,9 +110,12 @@ class Environment(Shareable):
     previous_version = models.ForeignKey('self',
         related_name='next_versions',
         null=True,
-        blank=True
+        blank=True,
+        on_delete=models.SET_NULL,
         )
 
+    objects = EnvironmentManager()
+
     #_____ Meta parameters __________
 
     class Meta:
@@ -91,15 +127,14 @@ class Environment(Shareable):
         return self.fullname()
 
 
+    def natural_key(self):
+        return self.fullname()
+
+
     #_____ Utilities __________
 
     def fullname(self):
-        retval = '%s (%s)' % (
-            self.name,
-            self.version,
-            )
-        if not self.active: retval += ' [INACTIVE]'
-        return retval
+        return '%s (%s)' % (self.name, self.version)
 
     def get_absolute_url(self):
 
@@ -108,6 +143,9 @@ class Environment(Shareable):
             args=(self.name, self.version,),
             )
 
+    def get_admin_change_url(self):
+        return reverse('admin:backend_environment_change', args=(self.id,))
+
     def queues_for(self, user):
         """Returns all queues associated to this environment for which the user
         has the 'can_access' permission"""
@@ -115,9 +153,38 @@ class Environment(Shareable):
         return [q for q in self.queues.all() if user.has_perm('backend.can_access', q)]
 
 
+    def as_dict(self):
+        '''Returns a representation as a dictionary'''
+
+        return dict(
+            name=self.name,
+            version=self.version,
+            short_description=self.short_description,
+            description=self.description,
+            )
+
+
 #----------------------------------------------------------
 
 
+def _cleanup_zombies():
+    '''Cleans up any zombie subprocesses launched by the worker'''
+
+    for child in psutil.Process().children(recursive=True):
+        try:
+            if child.status() == psutil.STATUS_ZOMBIE:
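+                # wait() collects the exit status, removing the zombie entry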
+                child.wait()
+        except psutil.NoSuchProcess:
+            # process is gone meanwhile, which is ok
+            pass
+
+
+class WorkerManager(models.Manager):
+
+    def get_by_natural_key(self, name):
+        return self.get(name=name)
+
+
 class Worker(models.Model):
 
     name = models.CharField(
@@ -128,42 +195,322 @@ class Worker(models.Model):
 
     active = models.BooleanField(
         help_text=u'If this worker is usable presently',
-        default=True,
+        default=False,
         )
 
-    nb_cores = models.IntegerField(
+    update = models.BooleanField(
+        help_text=u'If this worker state must be updated at the next cycle',
+        default=False,
         )
 
+    updated = models.DateTimeField(null=True, auto_now=True)
+
+    cores = models.PositiveIntegerField()
+
+    memory = models.PositiveIntegerField(default=0, help_text='In megabytes')
+
+    used_cores = models.PositiveIntegerField(default=0, help_text='In %')
+
+    used_memory = models.PositiveIntegerField(default=0, help_text='In %')
+
+    info = models.TextField(null=True, blank=True,
+        help_text='Informative message from the worker')
+
+    objects = WorkerManager()
+
+
     #_____ Overrides __________
 
+
     def __str__(self):
-        retval = '%s (%d cores)' % (self.name, self.nb_cores)
-        if not self.active: retval += ' [DEACTIVATED]'
+        retval = '%s (%d cores, %d Mb)' % (self.name, self.cores, self.memory)
+        if not self.active: retval += ' [INACTIVE]'
         return retval
 
 
+    def natural_key(self):
+        return self.name
+
+
+    def get_admin_change_url(self):
+        return reverse('admin:backend_worker_change', args=(self.id,))
+
+
+    def load(self):
+        '''Calculates the number of cores in use or to be used in the future'''
+        return sum([j.job.block.queue.cores_per_slot for j in self.splits.all()])
+
+
+    def current_load(self):
+        '''Calculates the number of cores being used currently'''
+        return sum([j.job.block.queue.cores_per_slot for j in self.splits.filter(status=Job.PROCESSING)])
+
+
+    def available_cores(self):
+        '''Calculates the number of available cores considering current load'''
+
+        return max(self.cores - self.load(), 0)
+
+
+    def deactivate(self, reason):
+        '''Deactivates the current worker, registering the reason'''
+
+        self.info = reason
+        self.active = False
+
+
+    def activate(self, reason=None):
+        '''Reactivates the worker, deletes any associated information'''
+
+        self.info = reason
+        self.active = True
+
+
+    def as_dict(self):
+        '''Returns a dictionary-like representation'''
+
+        return dict(cores=self.cores, memory=self.memory)
+
+
+    def check_environments(self, environments):
+        '''Checks that this worker has access to all environments it needs
+
+        This method will check if the found set of environments (in the
+        dictionary ``environments``) contains, at least, one environment for
+        each environment object this worker is supposed to be able to execute
+        user algorithms for.
+
+
+        Parameters:
+
+          environments (dict): A dictionary of environments found by using
+            :py:func:`utils.find_environments`, in which keys represent the
+            natural keys of Django database environments.
+
+
+        Returns:
+
+          list: A list of missing environments this worker can be assigned to
+              work with, but which were not found
+
+          list: A list of unused environments this worker cannot be assigned
+              to work with, but which were nevertheless found
+
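+        Example (a sketch, assuming ``found`` was produced by
+        :py:func:`utils.find_environments`)::
+
+          missing, unused = worker.check_environments(found)
+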
+        '''
+
+        slots = Slot.objects.filter(worker=self)
+        queues = Queue.objects.filter(slots__in=slots)
+        wishlist = Environment.objects.filter(queues__in=queues, active=True)
+        wishlist = wishlist.order_by('id').distinct()
+
+        required = [k.natural_key() for k in wishlist]
+        missing = [k for k in required if k not in environments]
+        unused = [k for k in environments if k not in required]
+
+        return missing, unused
+
+
+    def update_state(self):
+        '''Updates state on the database based on current machine readings'''
+
+        # check I have at least all cores and memory I'm supposed to have
+        cores = psutil.cpu_count()
+        ram = psutil.virtual_memory().total/(1024*1024)
+        self.info = ''
+
+        if cores < self.cores:
+            logger.warn("Worker `%s' only has %d cores which is less then " \
+                "the value declared on the database - it's not a problem, " \
+                "but note this self may get overloaded", self, cores)
+            self.info += 'only %d cores;' % cores
+
+        if ram < self.memory:
+            logger.warn("Worker `%s' only has %d Mb of RAM which is less " \
+                "then the value declared on the database - it's not a " \
+                "problem, but note this self may get overloaded", self,
+                ram)
+            self.info += 'only %d Mb of RAM;' % ram
+
+        with transaction.atomic():
+            self_ = Worker.objects.select_for_update().get(pk=self.pk) #lock
+
+            # update process and memory usage
+            self.used_cores = int(psutil.cpu_percent())
+            self.used_memory = int(psutil.virtual_memory().percent)
+
+            # save current self state
+            self.active = True
+            self.update = False
+            self.save()
+
+
+    def terminate(self):
+        '''Cleanly terminates a particular worker on the database
+
+        .. note::
+
+           This method does not destroy processes that may be running or
+           assigned to this worker. This is implemented in this way to allow
+           for a clean replacement of the worker program w/o an interruption
+           of the backend service.
+
+        '''
+
+        # disables worker, so no more splits can be assigned to it
+        with transaction.atomic():
+            self_ = Worker.objects.select_for_update().get(pk=self.pk)
+            self_.active = False
+            self_.used_cores = 0
+            self_.used_memory = 0
+            self_.info = 'Worker deactivated by system administrator'
+            self_.save()
+
+        # cancel job splits which should be cancelled anyways
+        for j in JobSplit.objects.filter(worker=self, status=Job.CANCEL,
+            end_date__isnull=True, process_id__isnull=False):
+            if psutil.pid_exists(j.process_id):
+                os.kill(j.process_id, signal.SIGTERM)
+
+        # cleans up zombie processes that may linger
+        _cleanup_zombies()
+
+
+    def shutdown(self):
+        '''Removes all running/assigned jobs from the queue, shuts down
+
+        This method should be used with care as it may potentially cancel all
+        assigned splits for the current worker.
+
+        '''
+
+        self.terminate()
+
+        message = 'Cancelled on forced worker shutdown (maintenance)' \
+            ' - you may retry submitting your experiment shortly'
+
+        # cancel job splits which were not yet started
+        for j in JobSplit.objects.filter(worker=self, status=Job.QUEUED,
+            start_date__isnull=True, process_id__isnull=True):
+            j.end(Result(status=1, usrerr=message))
+
+        # cancel job splits which are running
+        for j in JobSplit.objects.filter(worker=self, status=Job.PROCESSING,
+            end_date__isnull=True, process_id__isnull=False):
+            j._cancel()
+
+
+
+    def work(self, environments, cpulimit, process):
+        '''Launches user code on isolated processes
+
+        This function is supposed to be called asynchronously, by a
+        scheduled agent, every few seconds. It examines job splits assigned
+        to the current host and launches an individual process to handle
+        these splits. The process is started locally and the process ID
+        stored with the split.
+
+        Job split cancelling is executed by setting the split state as
+        ``CANCEL`` and waiting for this function to handle it.
+
+
+        Parameters:
+
+          environments (dict): A dictionary containing installed
+            environments, their description and execute-file paths.
+
+          cpulimit (str): The path to the ``cpulimit`` program to use for
+            limiting the CPU usage of user code. If set to ``None``, it is
+            not used, even if the selected user queue imposes limits.
+
+          process (str): The path to the ``process.py`` program to use for
+            running the user code on isolated processes.
+
+        '''
+        from .utils import pick_execute
+
+        # refresh state from database and update state if required
+        self.refresh_from_db()
+        if self.update: self.update_state()
+
+        # cancel job splits by killing associated processes
+        for j in JobSplit.objects.filter(worker=self, status=Job.CANCEL,
+            end_date__isnull=True, process_id__isnull=False):
+            if psutil.pid_exists(j.process_id):
+                os.kill(j.process_id, signal.SIGTERM)
+
+        # cmdline base argument
+        cmdline = [process]
+        if cpulimit is not None: cmdline += ['--cpulimit=%s' % cpulimit]
+        if settings.DEBUG: cmdline += ['-vv']
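+        # per split, the final command looks like this (see the Popen call
+        # below): <process> [--cpulimit=<path>] [-vv] <execute> <split_pk>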
+
+        # start newly assigned job splits
+        with transaction.atomic():
+            splits = JobSplit.objects.select_for_update().filter(worker=self,
+                status=Job.QUEUED, start_date__isnull=True,
+                process_id__isnull=True)
+            for split in splits:
+                execute = pick_execute(split, environments)
+                if execute is None:
+                    message = "Environment `%s' is not available for split " \
+                        "%d/%d running at worker `%s', for block `%s' of " \
+                        "experiment `%s': %s" % \
+                        (split.job.block.environment,
+                            split.split_index+1,
+                            split.job.block.required_slots,
+                            self,
+                            split.job.block.name,
+                            split.job.block.experiment.fullname(),
+                            "Available environments are `%s'" % \
+                                '|'.join(environments.keys()),
+                                )
+                    split.end(Result(status=1,
+                      usrerr=settings.DEFAULT_USER_ERROR, syserr=message))
+                    continue
+
+                # if we get to this point, then we launch the user process
+                # -> see settings.WORKER_DETACH_CHILDREN for more info
+                kwargs = dict()
+                if settings.WORKER_DETACH_CHILDREN:
+                    kwargs['preexec_fn'] = os.setpgrp
+                subprocess.Popen(cmdline + [execute, str(split.pk)], **kwargs)
+                split.status = Job.PROCESSING #avoids re-running
+                split.save()
+
+        # cleans up zombie processes that may linger
+        _cleanup_zombies()
+
+
+    def __enter__(self):
+        self.update_state()
+        return self
+
+
+    def __exit__(self, *exc):
+        self.terminate()
+        return False #propagate exceptions
+
+
 #----------------------------------------------------------
 
 
+class QueueManager(models.Manager):
+
+    def get_by_natural_key(self, name):
+        return self.get(name=name)
+
+
 class Queue(models.Model):
-    name = models.CharField(
-        max_length=100,
-        help_text=Messages['name'],
-        )
 
-    memory_limit = models.IntegerField(
-        help_text='In megabytes',
-        )
+    name = models.CharField(max_length=100, help_text=Messages['name'],
+        unique=True)
 
-    time_limit = models.IntegerField(
-        help_text='In minutes',
-        )
+    memory_limit = models.PositiveIntegerField(help_text='In megabytes')
 
-    nb_cores_per_slot = models.IntegerField(
-        )
+    time_limit = models.PositiveIntegerField(help_text='In minutes')
 
-    max_slots_per_user = models.IntegerField(
-        )
+    cores_per_slot = models.PositiveIntegerField()
+
+    max_slots_per_user = models.PositiveIntegerField()
 
     environments = models.ManyToManyField(
         Environment,
@@ -185,58 +532,953 @@ class Queue(models.Model):
             self.name,
             self.time_limit,
             self.memory_limit,
-            self.nb_cores_per_slot,
+            self.cores_per_slot,
             self.max_slots_per_user
             )
 
+
+    def natural_key(self):
+        return self.name
+
+
+    def get_admin_change_url(self):
+        return reverse('admin:backend_queue_change', args=(self.id,))
+
     #_____ Utilities __________
 
-    def total_slots(self):
-        return sum([k.nb_slots for k in self.slots.all() if k.worker.active])
+    def number_of_slots(self):
+        '''Total number of slots considering all assigned worker/slots'''
+
+        r = self.slots.filter(worker__active=True)
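+        # aggregate() yields None on an empty queryset, hence the "or 0"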
+        return r.aggregate(nslots=models.Sum('quantity'))['nslots'] or 0
+
+
+    def availability(self):
+        '''Returns the availability for this queue in terms of number of slots
+
+        This method does not take into consideration the occupation of this
+        queue's slots caused by jobs on other queues. It only looks at its
+        own occupancy and reports on that.
+
+        Returns an integer between 0 and :py:meth:`Queue.number_of_slots`.
+        '''
+
+        running = JobSplit.objects.filter(job__block__in=self.blocks.all(),
+            status=Job.PROCESSING).count()
+        return max(self.number_of_slots() - running, 0)
+
+
+    def worker_availability(self):
+        '''Returns the active workers serving this queue, ordered by their
+           suitability to receive new job splits.
+
+        The returned value is a list of :py:class:`Worker` objects, best
+        candidate first.
 
+        The order of workers is sorted by:
+
+          * slot priority (the higher, the better)
+          * load (the lower, the better)
+          * name (alphabetically)
+
+        '''
+
+        workers = [(k.worker, -k.priority, k.worker.load(), k.worker.name) \
+            for k in self.slots.filter(worker__active=True)]
+
+        workers = sorted(workers, key=operator.itemgetter(1,2,3))
+
+        return [w[0] for w in workers]
+
+
+    def splits(self):
+        '''Lists all job splits currently associated to this queue'''
+
+        return JobSplit.objects.filter(job__block__queue=self)
+
+
+    def as_dict(self):
+        '''Returns a representation as a dictionary'''
 
-    def as_json(self):
         return {
-            'memory-in-megabytes': self.memory_limit,
-            'time-limit-in-minutes': self.time_limit,
-            'nb-cores-per-slot':     self.nb_cores_per_slot,
-            'max-slots-per-user':    self.max_slots_per_user,
-            'environments': map(lambda x: {
-                    'name':    x.name,
-                    'version': x.version,
-                }, self.environments.filter(active=True)),
-            'slots': dict([(x.worker.name, x.nb_slots) for x in self.slots.iterator() if x.worker.active]),
-        }
+            'memory-limit': self.memory_limit,
+            'time-limit': self.time_limit,
+            'cores-per-slot': self.cores_per_slot,
+            'max-slots-per-user': self.max_slots_per_user,
+            'environments': [k.natural_key() for k in self.environments.all()],
+            'slots': dict([(s.worker.name, dict(quantity=s.quantity,
+              priority=s.priority)) for s in self.slots.all()]),
+            'groups': [k.name for k in Group.objects.all() if 'can_access' in get_perms(k, self)]
+            }
 
-#----------------------------------------------------------
 
+class SlotManager(models.Manager):
 
-class QueueWorkerSlot(models.Model):
+    def get_by_natural_key(self, queue_name, worker_name):
+        return self.get(queue__name=queue_name, worker__name=worker_name)
 
-    queue = models.ForeignKey(
-        Queue,
-        related_name='slots',
-        )
 
-    worker = models.ForeignKey(
-        Worker,
-        related_name='slots',
-        )
+class Slot(models.Model):
+
+    queue = models.ForeignKey(Queue, related_name='slots',
+        on_delete=models.CASCADE)
+
+    worker = models.ForeignKey(Worker, related_name='slots',
+        on_delete=models.CASCADE)
 
-    nb_slots = models.IntegerField(
+    quantity = models.PositiveIntegerField(
         'Number of slots',
         help_text='Number of processing slots to dedicate in this worker for a given queue'
         )
 
+    priority = models.PositiveIntegerField(
+        default=0,
+        help_text='Priority of these slots on the defined queue'
+        )
+
+    objects = SlotManager()
+
     #_____ Meta parameters __________
 
     class Meta:
-        unique_together = ('queue', 'worker', 'nb_slots')
+        unique_together = ('queue', 'worker')
 
     #_____ Overrides __________
 
     def __str__(self):
-        return '%s - %s (slots: %d)' % (self.queue, self.worker, self.nb_slots)
+        return '%s - %s (slots: %d, priority: %d)' % (self.queue, self.worker, self.quantity, self.priority)
+
+
+    def natural_key(self):
+        return (self.queue.name, self.worker.name)
+
+
+#----------------------------------------------------------
+
+
+def _merge_strings(s):
+    if len(s) == 1: return s[0]
+    s = [k.strip() for k in s]
+    if any(s):
+        return '\n'.join(['Process %d: %s' % (i,k) for i,k in enumerate(s)])
+    else:
+        return ''
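+
+# e.g.: _merge_strings(['ok', 'ko']) == 'Process 0: ok\nProcess 1: ko'
+#       _merge_strings(['', '']) == ''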
+
+
+#----------------------------------------------------------
+
+
+class Result(models.Model):
+    '''Logging and status information concerning block or job execution.
+    '''
+
+    # exit status code
+    status = models.IntegerField()
+    stdout = models.TextField(null=True, blank=True)
+    stderr = models.TextField(null=True, blank=True)
+    usrerr = models.TextField(null=True, blank=True)
+    syserr = models.TextField(null=True, blank=True)
+    _stats = models.TextField(null=True, blank=True)
+    timed_out = models.BooleanField(default=False)
+    cancelled = models.BooleanField(default=False)
+
+
+    def __str__(self):
+        status = 'success' if self.status == 0 else 'failed'
+        retval = 'Result(%s' % status
+        if self.stdout: retval += ', stdout=' + self.stdout
+        if self.stderr: retval += ', stderr=' + self.stderr
+        if self.usrerr: retval += ', usrerr=' + self.usrerr
+        if self.syserr: retval += ', syserr=' + self.syserr
+        retval += ')'
+        return retval
+
+
+    def _get_stats(self):
+        if self._stats is not None:
+            return beat.core.stats.Statistics(simplejson.loads(self._stats))
+        else:
+            return beat.core.stats.Statistics()
+
+
+    def _set_stats(self, v):
+        self._stats = simplejson.dumps(v.as_dict())
+
+
+    stats = property(_get_stats, _set_stats)
+
+
+#----------------------------------------------------------
+
+
+class Job(models.Model):
+    '''Class describing the execution of a Job on the backend'''
+
+    QUEUED     = 'N' #Block.NOT_CACHED
+    PROCESSING = 'P' #Block.PROCESSING
+    COMPLETED  = 'C' #Block.COMPLETED
+    FAILED     = 'F' #Block.FAILED
+    SKIPPED    = 'S' #Block.SKIPPED
+    CANCELLED  = 'L' #Block.CANCELLED
+    CANCEL     = 'K' #Job was asked to be killed
+
+    STATUS = (
+        (QUEUED,     'Queued'),
+        (PROCESSING, 'Processing'),
+        (COMPLETED,  'Completed'),
+        (FAILED,     'Failed'),
+        (SKIPPED,    'Skipped'),
+        (CANCELLED,  'Cancelled'),
+        (CANCEL,     'Cancel'),
+    )
+
+    block = models.OneToOneField('experiments.Block', null=True,
+        on_delete=models.CASCADE, related_name='job')
+
+    status = models.CharField(max_length=1, choices=STATUS, default=QUEUED)
+
+    result = models.OneToOneField(Result, null=True, on_delete=models.CASCADE,
+        related_name='job')
+
+    runnable_date = models.DateTimeField(null=True, blank=True)
+
+    start_date = models.DateTimeField(null=True, blank=True)
+
+    end_date = models.DateTimeField(null=True, blank=True)
+
+    parent = models.OneToOneField('self', related_name='child', null=True,
+        on_delete=models.SET_NULL)
+
+    split_errors = models.PositiveIntegerField(default=0)
+
+
+    def _get_child(self):
+        return self.child if hasattr(self, 'child') else None
+
+    def _set_child(self, val):
+        val.parent = self
+        val.save()
+
+    child_ = property(_get_child, _set_child)
+
+
+    def __str__(self):
+
+        return "Job(%s, %s, splits=%d, status=%s, cores=%d)" % \
+            (self.block.name, self.block.experiment.name,
+                self.block.required_slots, self.status,
+                self.block.queue.cores_per_slot)
+
+
+    def done(self):
+        '''Says whether the job has finished or not'''
+
+        return self.status in (Job.COMPLETED, Job.SKIPPED, Job.FAILED,
+            Job.CANCELLED)
+
+
+
+    def _copy(self, other):
+        '''Copy state from another block'''
+
+        self.refresh_from_db()
+
+        if self.done(): return
+
+        self.start_date = other.start_date
+        self.end_date = other.end_date
+        self.status = other.status
+
+        # update status of parent jobs
+        self.save()
+        self._cascade_updates()
+        self.block._update_state(None)
+
+
+    def _make_runnable(self):
+        '''Tries to make this job runnable - if it is cached, we skip it'''
+
+        # lock self - avoids concurrent update from scheduler/worker subsystem
+        self_ = Job.objects.select_for_update().get(pk=self.pk)
+
+        # checks for the presence of output caches - if they exist and
+        # checksum, skip and update related blocks
+        if all([k.exists() for k in self.block.outputs.all()]):
+            if all([k.index_checksums() for k in self.block.outputs.all()]):
+                self.status = Job.SKIPPED
+                self.split_errors = 0
+                self.end_date = datetime.datetime.now()
+                self.save()
+                self._cascade_updates() #to similar blocks
+                self.block._update_state()
+                return
+            else:
+                logger.warning("Trying to make block `%s' runnable, but " \
+                    "indexes do not checksum - waiting...", self.block)
+                self.split_errors += 1
+                self.save()
+                return
+
+        # else, flag it as runnable
+        self.runnable_date = datetime.datetime.now()
+
+        # runs index splitting once, for all created splits
+        self._split_indices()
+
+
+    def _split_indices(self):
+        '''Runs the index splitting machinery once for all associated splits'''
+
+        # no index splitting is required
+        if self.block.required_slots == 1:
+            self.save()
+            s = JobSplit(job=self, split_index=0)
+            s.save()
+            return
+
+        indices = []
+
+        conf = simplejson.loads(self.block.command)
+
+        try:
+
+            # For all synchronized inputs with the current block, append the
+            # list of generated object indices. This is necessary for an
+            # informed decision on where to split the processing
+            sync = [conf['inputs'][i] for i in conf['inputs'] if conf['inputs'][i]['channel']==conf['channel']]
+            for i in sync:
+                indices.append(beat.core.data.load_data_index(
+                  settings.CACHE_ROOT, str(i['path'])))
+
+            # Determine N splits using the possible indices for split:
+            indices = beat.core.data.foundSplitRanges(indices,
+                self.block.required_slots)
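+            # each entry of `indices' is expected to be a (start, end)
+            # object-index range, one range per split (used below)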
+
+            self.split_errors = 0
+            self.save()
+
+            if len(indices) == 0:
+                message = "Index splitting for block `%s' of experiment " \
+                    "`%s' could not be completed: not splittable!" % \
+                    (self.block.name, self.block.experiment.fullname())
+                logger.warn(message)
+                self._cancel(usrerr=settings.DEFAULT_USER_ERROR, syserr=message)
+
+            # if you get to this point, the splitting has succeeded,
+            # create the necessary splits and assign the ranges
+            for i, split_indices in enumerate(indices):
+                s = JobSplit(
+                    job=self,
+                    split_index=i,
+                    start_index=split_indices[0],
+                    end_index=split_indices[1],
+                    )
+                s.save()
+
+        except Exception as e:
+
+            self.split_errors += 1
+            self.save()
+
+            if self.split_errors > settings.MAXIMUM_SPLIT_ERRORS: #stop
+                message = "Index splitting for block `%s' of experiment " \
+                    "`%s' could not be completed due to an index split " \
+                    "error: %s" % (self.block.name,
+                        self.block.experiment.fullname(),
+                        traceback.format_exc())
+                logger.warn(message)
+                self._cancel(usrerr=settings.DEFAULT_USER_ERROR, syserr=message)
+
+
+    def _cascade_updates(self):
+        '''Cascades updates to the child job before this one is deleted.
+        '''
+
+        if hasattr(self, 'child'):
+            if self.status == Job.CANCELLED:
+                if self.parent: #I have a parent, so must give to child
+                    parent = self.parent
+                    self.parent = None
+                    self.child.parent = parent
+                else: #child is the new parent
+                    child = self.child
+                    self.child.parent = None
+                    # does this unblock the child to run?
+                    if child.block.is_runnable(): child._make_runnable()
+
+            else:
+                self.child._copy(self)
+
+        if self.parent and self.status == Job.CANCELLED:
+            self.parent = None
+
+
+    def _update_state(self):
+        '''Updates this job's state based on its splits' states
+
+        This method is not part of the Job's public API. It is supposed to be
+        called by this job's splits or by the job itself.
+        '''
+
+        # lock
+        self_ = Job.objects.select_for_update().get(pk=self.pk)
+
+        if self_.done(): return
+
+        # If this process has a parent, then don't try to get split
+        # statuses
+        if not self.parent:
+
+            split_statuses = self.splits.values_list('status', flat=True)
+
+            if self.start_date is None:
+                qs = self.splits.filter(start_date__isnull=False).\
+                    order_by('start_date')
+                if qs:
+                    self.start_date = qs.first().start_date
+                else:
+                    self.start_date = datetime.datetime.now()
+
+            # Process main status and status from job results
+            if Job.FAILED in split_statuses:
+                self.status = Job.FAILED
+
+            elif Job.CANCELLED in split_statuses:
+                self.status = Job.CANCELLED
+
+            elif (Job.PROCESSING in split_statuses) or \
+                (Job.QUEUED in split_statuses and \
+                Job.COMPLETED in split_statuses) or \
+                (Job.CANCEL in split_statuses):
+                self.status = Job.PROCESSING
+
+            elif all([s == Job.SKIPPED for s in split_statuses]):
+                self.status = Job.SKIPPED
+
+            elif Job.QUEUED not in split_statuses:
+                self.status = Job.COMPLETED
+
+            else:
+                self.status = Job.QUEUED
+
+        # if required, erase dangling files, update own results
+        timings = None
+        if self.done() and self.status != Job.CANCELLED:
+            # compute final timings and update parent block
+            if self.status != Job.SKIPPED:
+                diff_timings = self._merge_results()
+                # delete all splits w/o results (still queued)
+                self.splits.filter(result__isnull=True).delete()
+                self.end_date = self.splits.order_by('-end_date').\
+                    first().end_date
+                updateStatistics(self.result.stats, self.end_date)
+                Result.objects.filter(split__in=self.splits.all()).delete()
+                seqtime = sum(diff_timings)
+                if self.start_date is None:
+                    queuing = 0
+                else:
+                    queuing = (self.start_date - \
+                        self.runnable_date).total_seconds()
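+                # speed-up metrics: "real" compares the summed split times
+                # against the observed wall-clock time; "maximal" against
+                # the longest single split (the best achievable wall-clock)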
+                if not seqtime:
+                    speed_up_real = 1.0
+                    speed_up_maximal = 1.0
+                else:
+                    speed_up_real = float(seqtime) / \
+                        (self.end_date - self.start_date).total_seconds()
+                    speed_up_maximal = float(seqtime) / max(diff_timings)
+                timings = dict(
+                    queuing = queuing,
+                    linear_execution = seqtime,
+                    speed_up_real = speed_up_real,
+                    speed_up_maximal = speed_up_maximal,
+                    )
+                self.runnable_date = None
+                self.erase_dangling_files()
+
+        # updates the dependents and child state
+        self.save()
+
+        self._cascade_updates()
+        self.block._update_state(timings)
+
+
+    def erase_dangling_files(self):
+        '''Erase dangling files produced by this job in case of errors'''
+
+        paths = []
+        failed = self.status in (Job.FAILED, Job.CANCELLED)
+
+        if failed:
+            for o in self.block.outputs.all(): paths += o.files()
+
+        for f in paths:
+            logger.info("Erasing output file `%s' because Job `%s' failed",
+                f, self)
+            os.remove(f)
+
+
+    def _cancel(self, usrerr=None, syserr=None):
+        '''Cancel the execution of this job
+
+        As a consequence: delete all associated jobs, mark end_date and set
+        cancelled state. This method should only be called by the owning Block.
+        '''
+
+        # lock
+        self_ = Job.objects.select_for_update().get(pk=self.pk)
+
+        if self_.done(): return
+
+        logger.info("Marking job `%s' as 'cancelled'", self)
+        self.runnable_date = None
+        self.start_date = None
+        if self.splits.count():
+            for s in self.splits.all(): s._cancel()
+        else:
+            self.status = Job.CANCELLED
+            if usrerr or syserr:
+                r = Result(status=1, usrerr=usrerr, syserr=syserr)
+                r.save()
+                self.result = r
+            self.save()
+            self.block._update_state()
+            self._cascade_updates()
+
+
+    def _merge_results(self):
+        '''Merge results from jobs, if any exist'''
+
+        # update results
+        job_results = Result.objects.filter(pk__in=self.splits.filter(result__isnull=False).values_list('result', flat=True))
+
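+        # per-split wall-clock durations (in seconds), for all splits that
+        # actually started and finished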
+        diff_timings = [(k[0]-k[1]).total_seconds() for k in \
+            self.splits.filter(end_date__isnull=False,
+              start_date__isnull=False).values_list('end_date', 'start_date')]
+
+        status = sum([k.status for k in job_results])
+        stdout = _merge_strings([k.stdout for k in job_results])
+        stderr = _merge_strings([k.stderr for k in job_results])
+        usrerr = _merge_strings([k.usrerr for k in job_results])
+        syserr = _merge_strings([k.syserr for k in job_results])
+
+        # merge beat.core statistics
+        if job_results:
+            stats = job_results[0].stats
+            for k in job_results[1:]: stats += k.stats
+        else:
+            stats = beat.core.stats.Statistics()
+
+        cancelled = any([k.cancelled for k in job_results])
+        timed_out = any([k.timed_out for k in job_results])
+
+        r = Result(status=status, stdout=stdout, stderr=stderr, usrerr=usrerr,
+            syserr=syserr, timed_out=timed_out, cancelled=cancelled)
+        r.stats = stats
+        r.save()
+        self.result = r
+
+        return diff_timings
 
 
 #----------------------------------------------------------
+
+
+class JobSplit(models.Model):
+    '''Class describing part of a job of an experiment'''
+
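+    # A JobSplit is the share of a Job's work assigned to a single worker
+    # slot: ``split_index`` (0-based) locates it within the job, while
+    # ``start_index``/``end_index``, when set, delimit the input data range
+    # it processes (see ``process()`` below).
+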
+    worker = models.ForeignKey(Worker, null=True, on_delete=models.SET_NULL,
+        related_name='splits')
+
+    job = models.ForeignKey(Job, null=True, on_delete=models.CASCADE,
+        related_name='splits')
+
+    split_index = models.PositiveIntegerField()
+
+    start_index = models.PositiveIntegerField(null=True)
+
+    end_index = models.PositiveIntegerField(null=True)
+
+    cache_errors = models.PositiveIntegerField(default=0)
+
+    status = models.CharField(max_length=1, choices=Job.STATUS,
+        default=Job.QUEUED)
+
+    result = models.OneToOneField(Result, null=True, on_delete=models.CASCADE,
+        related_name='split')
+
+    start_date = models.DateTimeField(null=True)
+
+    end_date = models.DateTimeField(null=True)
+
+    process_id = models.PositiveIntegerField(null=True)
+
+
+    class Meta:
+        unique_together = ('job', 'split_index')
+
+
+    def __str__(self):
+
+        return "JobSplit(%s, index=%d, state=%s)%s" % \
+            (self.job, self.split_index, self.status,
+                ('@%s' % self.worker) if self.worker else '')
+
+
+    def done(self):
+        '''Tells whether this job split has finished or not'''
+
+        return self.status in (
+            Job.COMPLETED,
+            Job.SKIPPED,
+            Job.FAILED,
+            Job.CANCELLED,
+            )
+
+
+    @transaction.atomic
+    def schedule(self, worker):
+        '''Schedules this split to be executed on a given worker
+
+        Parameters:
+
+            worker (:py:class:`Worker`): The worker this job split was
+              actually submitted to, if there is one.
+
+        '''
+
+        # lock self - avoids concurrent update from scheduler/worker
+        # subsystem
+        self_ = JobSplit.objects.select_for_update().get(pk=self.pk)
+        worker_ = Worker.objects.select_for_update().get(pk=worker.pk)
+
+        self.worker = worker
+        self.save()
+
+        logger.info("Job split %s scheduled at `%s' was assigned to `%s'",
+            self, self.job.block.queue, self.worker)
+
+
+    def signal_io_error(self):
+        '''Marks the split as having an IOError (cache sync issues, likely)
+        '''
+
+        tries = 0
+
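+        # concurrent updates from the scheduler/worker subsystem can make the
+        # database engine raise OperationalError (e.g., "database is locked"
+        # on SQLite); in that case, retry a bounded number of times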
+        while True:
+
+            try:
+
+                with transaction.atomic():
+
+                    # lock self - avoids concurrent update from
+                    # scheduler/worker subsystem
+                    self_ = JobSplit.objects.select_for_update().get(pk=self.pk)
+
+                    if self_.start_date is not None: return
+
+                    self.cache_errors += 1
+                    self.save()
+
+                    break
+
+            except utils.OperationalError:
+                tries += 1
+                if tries > settings.MAXIMUM_SPLIT_SAVE_RETRIES:
+                    raise
+                else:
+                    logger.info("Database error caught signalling I/O " \
+                        "error on `%s': retrying in 1 second (%d/%d)...",
+                        self, tries, settings.MAXIMUM_SPLIT_SAVE_RETRIES)
+                    # wait a second and retry
+                    time.sleep(1)
+
+
+    def start(self):
+        '''Marks the job split as started, acknowledging scheduler assignment
+
+        Once this function is called, a second call no longer alters anything.
+        '''
+
+        tries = 0
+
+        while True:
+
+            try:
+
+                with transaction.atomic():
+
+                    # lock self - avoids concurrent update from
+                    # scheduler/worker subsystem
+                    self_ = JobSplit.objects.select_for_update().get(pk=self.pk)
+
+                    if self_.start_date is not None: return
+
+                    self.start_date = datetime.datetime.now()
+                    self.process_id = os.getpid()
+
+                    self.status = Job.PROCESSING
+
+                    self.save()
+
+                    logger.info("Job split `%s' was just started.", self)
+
+                    self.job._update_state()
+
+                    break
+
+            except utils.OperationalError:
+                tries += 1
+                if tries > settings.MAXIMUM_SPLIT_SAVE_RETRIES:
+                    raise
+                else:
+                    logger.info("Database error caught starting `%s': " \
+                        "retrying in 1 second (%d/%d)...", self, tries,
+                        settings.MAXIMUM_SPLIT_SAVE_RETRIES)
+                    # wait a second and retry
+                    time.sleep(1)
+
+
+    def _cancel(self):
+        '''Marks this job as cancelled.'''
+
+        # If this split is running, then wait
+        if self.status == Job.PROCESSING:
+            with transaction.atomic():
+                # lock self - avoids concurrent update from scheduler/worker
+                # subsystem
+                self_ = JobSplit.objects.select_for_update().get(pk=self.pk)
+                self.status = Job.CANCEL
+                self.save()
+
+            logger.info("Job split `%s' is currently processing. Waiting " \
+                "for worker to cancel split remotely.", self)
+
+        else: #just end it
+            self.end(None, Job.CANCELLED)
+
+
+    def end(self, result, status=None):
+        '''Marks the job split as finished in the state database
+
+        Disassociates the worker from this job. Once this function is called, a
+        second call no longer alters anything.
+
+
+        Parameters:
+
+        result (:py:class:`Result`): The result of the task
+
+        status (str): One of the possible (single character) Job statuses, in
+          case ``result`` is not provided. Notice that, if ``result`` is
+          provided, this variable is **ignored** and the state
+          (``Job.COMPLETED`` or ``Job.FAILED``) is filled in from
+          ``result.status``. A ``result.status`` of 0 (zero) indicates a
+          successful task (the job status is set to ``Job.COMPLETED``),
+          whereas a non-zero status sets the job status to ``Job.FAILED``.
+
+        '''
+
+        tries = 0
+
+        while True:
+
+            try:
+
+                with transaction.atomic():
+
+                    # lock self - avoids concurrent update from
+                    # scheduler/worker subsystem
+                    self_ = JobSplit.objects.select_for_update().get(pk=self.pk)
+
+                    if self_.done(): return
+
+                    if status:
+                        logger.info("Marking job split `%s' as '%s'", self,
+                            status)
+
+                    if self.start_date is None:
+                        self.start_date = datetime.datetime.now()
+                    self.end_date = datetime.datetime.now()
+                    self.worker = None #frees worker slot
+
+                    if result:
+                        # special condition to handle cancelled jobs:
+                        # they should be marked as cancelled, unless they
+                        # finished before we could act on them - in that
+                        # case, preserve the result as the caches are
+                        # already set up
+                        if result.status != 0 and self_.status == Job.CANCEL:
+                            self.status = Job.CANCELLED
+                            if result.id is not None: result.delete()
+                        else:
+                            self.status = Job.COMPLETED if \
+                                result.status == 0 else Job.FAILED
+                            if result.status in (-15, 15, -9, 9):
+                                # job received a term/kill signal
+                                if not result.usrerr:
+                                    result.usrerr = 'User process was ' \
+                                        'terminated by an external agent'
+                            if result.id is None: result.save()
+                            self.result = result
+
+                    else:
+                        self.status = status
+
+                    self.save()
+
+                    logger.info("Job split `%s' finished executing", self)
+
+                    self.job._update_state()
+
+                    break
+
+            except utils.OperationalError:
+                tries += 1
+                if tries > settings.MAXIMUM_SPLIT_SAVE_RETRIES:
+                    raise
+                else:
+                    logger.info("Database error caught ending `%s': retrying " \
+                        "in 1 second (%d/%d)...", self, tries,
+                        settings.MAXIMUM_SPLIT_SAVE_RETRIES)
+                    # wait a second and retry
+                    time.sleep(1)
+
+
+    def try_end(self, result):
+        '''Tries to end the split - ignores if the split was deleted'''
+
+        try:
+            self.refresh_from_db()
+        except JobSplit.DoesNotExist:
+            logger.warn("Job split(pk=%d) does not exist. Likely cancelled, " \
+                "so ignoring result `%s'", self.pk, result)
+            return
+        self.end(result)
+
+
+    def process(self, execute, cpulimit=None, cache=settings.CACHE_ROOT):
+        '''Process assigned job splits using beat.core
+
+        This task executes the user algorithm on a subprocess. It also serves
+        the data to the user process so it works like an I/O daemon.
+
+        If ``required_slots == 1``, then this job takes care of the whole data
+        set.  Otherwise, it takes care of a subset of the input data that is
+        synchronised with this block, determined by ``split_index``.
+
+        Two processes are spawned from the current work process:
+
+        * The process for executing the user code
+        * A process to limit the CPU usage (with ``cpulimit``), if these
+          conditions are respected:
+
+          1. The program ``cpulimit`` is available on the current machine
+          2. The configuration requests a CPU usage greater than 0 (``nb_cores
+             > 0``). (N.B.: a value of zero means not to limit on CPU).
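+
+          When limiting is active, the number of cores is converted into a
+          CPU percentage (e.g., ``nb_cores = 2`` yields a 200% CPU limit
+          via ``max_cpu_percent`` in the implementation below).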
+
+
+        Parameters:
+
+          execute (str): The path to the ``execute`` program to use for running
+            the user code associated with this job split.
+
+          cpulimit (str, Optional): The path to the ``cpulimit`` program to use
+            for limiting the user code in CPU usage. If not set, then don't use
+            it, even if the selected queue has limits.
+
+          cache (str, Optional): The path leading to the root of the cache to
+            use for this run. If not set, use the global default at
+            ``settings.CACHE_ROOT``.
+
+        '''
+
+        config = simplejson.loads(self.job.block.command)
+
+        # setup range if necessary
+        if self.job.block.required_slots > 1:
+
+            if (self.start_index is None) or (self.end_index is None):
+                message = "The split %d/%d (pid=%d) running on worker `%s' " \
+                    "for block `%s' of experiment `%s' could not " \
+                    "be completed: indexes are missing!" % \
+                    (self.split_index+1, self.job.block.required_slots,
+                      os.getpid(), self.worker, self.job.block.name,
+                      self.job.block.experiment.fullname())
+                logger.warn(message)
+                self.try_end(Result(status=1, syserr=message,
+                    usrerr=settings.DEFAULT_USER_ERROR))
+                return
+
+            config['range'] = [self.start_index, self.end_index]
+
+        # For reference, this bit of code should match (or be very similar) to
+        # the one at beat.cmdline.experiments:run_experiment()
+
+        try:
+
+            executor = beat.core.execution.Executor(settings.PREFIX, config,
+                cache)
+
+            if not executor.valid:
+                err = ''
+                for e in executor.errors: err += '  * %s\n' % e
+                message = "Failed to load execution information for split " \
+                    "%d/%d running at worker `%s', for block `%s' of " \
+                    "experiment `%s': %s" % (self.split_index+1,
+                            self.job.block.required_slots,
+                            self.worker, self.job.block.name,
+                            self.job.block.experiment.fullname(), err)
+                raise RuntimeError(message)
+
+            queue = self.job.block.queue
+            nb_cores = queue.cores_per_slot
+            if (nb_cores > 0) and (cpulimit is None):
+                logger.warn("Job requires limiting CPU usage to %g (cores), " \
+                    "but you have not set the path to the program " \
+                    "`cpulimit'. Continuing without CPU limiting...", nb_cores)
+                nb_cores = 0
+
+            logger.info("Running `%s' on worker request",
+                executor.algorithm.name)
+
+            # n.b.: entering the executor context may raise if the database
+            # view setup fails
+            with executor:
+                self.start()
+                result = executor.process(
+                    execute_path=execute,
+                    virtual_memory_in_megabytes=queue.memory_limit,
+                    max_cpu_percent=int(100*float(nb_cores)), #allows for 150%
+                    cpulimit_path=cpulimit,
+                    timeout_in_minutes=queue.time_limit,
+                    daemon=0,
+                    )
+
+            self.try_end(Result(
+                status=result['status'],
+                stdout=result['stdout'],
+                stderr=result['stderr'],
+                usrerr=result['user_error'],
+                syserr=result['system_error'],
+                _stats=simplejson.dumps(result['statistics'], indent=2),
+                ))
+            logger.info("Split `%s' (pid=%d) ended gracefully", self,
+                os.getpid())
+
+        except IOError:
+            logger.warn("Split `%s' (pid=%d) execution raised an IOError: %s",
+                self, os.getpid(), traceback.format_exc())
+            self.signal_io_error()
+            if self.cache_errors > settings.MAXIMUM_IO_ERRORS:
+                self.try_end(Result(status=1,
+                    usrerr=settings.DEFAULT_USER_ERROR,
+                    syserr=traceback.format_exc(),))
+            else:
+                logger.info("Split `%s' will be retried (%d/%d)",
+                    self, self.cache_errors, settings.MAXIMUM_IO_ERRORS)
+
+        except Exception:
+            logger.warn("Split `%s' (pid=%d) ended with an error: %s",
+                self, os.getpid(), traceback.format_exc())
+            self.try_end(Result(status=1, usrerr=settings.DEFAULT_USER_ERROR,
+                syserr=traceback.format_exc(),))
diff --git a/beat/web/backend/schedule.py b/beat/web/backend/schedule.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8fc2fa71b2d23e9add96c5a9b72ac95e5d1b345
--- /dev/null
+++ b/beat/web/backend/schedule.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+'''Scheduling functions and utilities'''
+
+import logging
+logger = logging.getLogger(__name__)
+
+from django.db import transaction
+
+from .models import Job, JobSplit, Queue, Worker
+
+
+def _select_splits_for_queue(queue):
+    '''Returns a list of job splits that can run now, at a certain queue
+
+    Here is the work done:
+
+    1. Find the queue availability. This is a bit tricky as queues are only
+       allowed to consume a limited (configurable) number of slots in each
+       worker, per user
+
+    2. Calculate runnable job splits
+
+    3. TODO: Calculate the list of job splits that can potentially run now
+       (for which there is space in the current queue being analyzed), taking
+       into consideration the relative use for every user and giving more
+       priority to users with less load
+
+    4. Return such a list, clipping it so that the number of job splits
+       returned does not exceed the queue availability
+
+    All work is done within the "queue" domain: it does not take into
+    consideration job splits from other queues that share slots on the same
+    machines this queue has slots on.
+
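+    For example (hypothetical numbers): with ``max_slots_per_user = 4`` and
+    a user already running 3 splits on this queue, at most 1 extra split
+    from that user is considered; the resulting list is then clipped to
+    ``queue.availability()``.
+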
+    '''
+
+    splits = JobSplit.objects.filter(job__block__queue=queue,
+        worker__isnull=True, job__split_errors=0,
+        status=Job.QUEUED).order_by('job__runnable_date')
+
+    # set of all users with queued job splits on this queue
+    users = set(splits.values_list('job__block__experiment__author', flat=True))
+
+    # number of splits (== slots) running on this queue for each user
+    user_slots = [JobSplit.objects.filter(job__block__experiment__author=k,
+      job__block__queue=queue, status=Job.PROCESSING).count() for k in users]
+
+    allowance = [queue.max_slots_per_user - k for k in user_slots]
+    allowance = dict(zip(users, allowance))
+
+    # limit runnable splits so we reach a maximum of allowed user slots
+    splits_to_consider = []
+    for s in splits:
+        author = s.job.block.experiment.author.id
+        if allowance[author] > 0:
+            splits_to_consider.append(s)
+            allowance[author] -= 1
+
+    # TODO: Sort splits taking into consideration current user load
+
+    # we now have a list of job splits, within each user's allowance, that we
+    # could submit; clip it by the total number of available slots on the
+    # queue (the list is ordered with the oldest job splits first)
+    return splits_to_consider[:queue.availability()]
+
+
+def schedule():
+    '''Schedules job splits that can run now, respecting user/queue usage
+
+    The priorities are:
+
+    1. job splits that require more cores to run (more resource-intensive)
+    2. job splits that are older (runnable since a long time)
+
+    Reasoning: job splits which require more cores take priority over job
+    splits that need fewer resources. The reason for this is practical: nodes
+    associated to queues with more cores may be used by queues that require
+    fewer cores whenever no core-intensive job splits are present, which will
+    happen most of the time if more slots are available with fewer resources.
+    This will not work if priorities are not established for the use of each
+    node, or if you only have one machine hosting all queues. In these cases,
+    you may face a situation in which job splits requesting more cores block
+    the execution of job splits requesting fewer cores because of the lack of
+    farm availability.
+
+    For example, a good setup could be like this (note priorities in this case
+    are not useful, so they are all set to 0):
+
+    Queue A (1 core/4Gb RAM/60 minutes/1 job per user):
+      - Host 1 (1 core/4Gb): 1 slot (priority 0)
+      - Host 2 (2 cores/8Gb): 2 slots (priority 0)
+
+    Queue B (2 cores/8Gb RAM/120 minutes/1 job per user):
+      - Host 2 (2 cores/8Gb): 1 slot (priority 0)
+
+    This way, job splits for Queue B have priority over job splits for Queue
+    A.
+
+    If no job splits requiring 2 cores are scheduled, then we have 3 slots
+    available to run simpler job splits. In case a job requiring 2 cores pops
+    up, then we must make sure 1-core job splits running at ``Host 2`` are
+    freed so that job can run. To do that, we must first check the more
+    resource-intensive job splits and then order those by age, processing one
+    after the other.
+
+    You may further setup a queue for "special" users that can run more job
+    splits in parallel, like this:
+
+    Queue A (1 core/4Gb RAM/60 minutes/1 job per user):
+      - Host 1 (1 core/4Gb): 1 slot (priority 0)
+      - Host 2 (2 cores/8Gb): 2 slots (priority 0)
+
+    Queue A* (1 core/4Gb RAM/60 minutes/1 job per user):
+      - Host 1 (1 core/4Gb): 1 slot (priority 0)
+      - Host 2 (2 cores/8Gb): 2 slots (priority 1)
+
+    Queue B (2 cores/8Gb RAM/120 minutes/1 job per user):
+      - Host 2 (2 cores/8Gb): 1 slot (priority 2)
+
+    Queue A* may be setup so that some "special" users can run more job splits
+    than others, having the same configuration as Queue A, except for the
+    priority on Host 2, which is higher. Users that have access to Queue A or
+    Queue A* can use Host 1 on a "first come first served" basis (as they have
+    the same priority). Host 2 is primarily used for 2-core job splits. If
+    job splits requiring a single core are to run on Host 2 (from either
+    Queue A or A*), then job splits from Queue A* have priority.
+
+    Here is a summary of the work done:
+
+    1. Use :py:func:`_select_splits_for_queue` to figure out which splits can
+       actually run on a queue basis, respecting user limits
+
+    2. For those splits, prioritize execution by the number of cores required
+       per job split and then by age.
+
+    3. Following that order, remove splits that produce the same output
+
+    4. Attribute more resource-intensive job splits to the slots that are
+       free. If no slots are free, then virtually block free slots on hosts
+       that can process job splits requiring more resources. This guarantees
+       the remaining cores will be freed in the future so that these
+       resource-intensive job splits can run at a certain point.
+
+    5. For each job split on the list, check the queue virtual availability
+       (taking into consideration blocking as executed on step 4) and leave
+       the job split on the list if there is a free slot, otherwise remove it.
+
+
+    Returns:
+
+      list: The list of splits assigned at this scheduling iteration
+
+    '''
+
+    # update jobs with split errors and cancel experiments if problems persist
+    for j in Job.objects.filter(split_errors__gt=0):
+        j._split_indices()
+
+    # get queues in a good order
+    sorted_queues = Queue.objects.order_by('-cores_per_slot',
+        'max_slots_per_user')
+
+    splits_to_consider = \
+        [_select_splits_for_queue(q) for q in sorted_queues]
+
+    if not any(splits_to_consider): return []
+
+    logger.debug('Considering splits: %s', splits_to_consider)
+
+    # decides which split to run considering the 'on-the-fly' availability
+
+    # map active workers that can run job splits to their free core counts
+    whitelist = {}
+    for worker in Worker.objects.filter(active=True):
+        availability = worker.available_cores()
+        if availability <= 0: continue
+        whitelist[worker] = availability
+
+    assigned_splits = []
+
+    logger.debug('Worker availability: %s', whitelist)
+
+    for batch in splits_to_consider:
+
+        if not batch: continue #empty list
+        candidates = batch[0].job.block.queue.worker_availability()
+
+        for split in batch:
+
+            assigned = False
+            required_cores = split.job.block.queue.cores_per_slot
+
+            for c in candidates:
+                avail = whitelist.get(c, 0)
+                if not avail: continue #should not use this worker
+                if avail >= required_cores:
+                    logger.debug("Assigning `%s' to worker `%s'", split, c)
+                    split.schedule(c) #assign job split to worker
+                    assigned_splits.append(split)
+                    whitelist[c] -= required_cores
+                    logger.debug("`%s' cores available: %d", c, whitelist[c])
+                    assigned = True
+                    break
+
+            if not assigned and required_cores > 1:
+                # remove the most promising worker from the whitelist so that
+                # smaller splits don't take its cores - otherwise this
+                # resource-intensive split would never be picked
+                c0 = candidates[0]
+                if c0 in whitelist:
+                    logger.info("Could not assign `%s' so blacklisting `%s'",
+                        split, c0)
+                    del whitelist[c0]
+
+    return assigned_splits
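+
+
+# A minimal usage sketch (hypothetical driver loop, not part of this
+# module): a periodic scheduler, like the one the scheduler page's helper
+# panel triggers, could look roughly like this:
+#
+#     import time
+#     while keep_running:        # hypothetical stop flag
+#         schedule()             # assign runnable splits to free workers
+#         time.sleep(period)     # hypothetical operator-chosen period, in s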
diff --git a/beat/web/backend/state.py b/beat/web/backend/state.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b37ea6843cb8524abaa7ddb09fa7e82cd126cfa
--- /dev/null
+++ b/beat/web/backend/state.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+'''Utilities for summarizing scheduler state'''
+
+
+import os
+import logging
+logger = logging.getLogger(__name__)
+
+import psutil
+
+from django.conf import settings
+
+from .models import Job, JobSplit
+
+from ..experiments.models import Experiment
+
+
+def cache():
+    '''Returns a current cache state'''
+
+    if not os.path.exists(settings.CACHE_ROOT):
+        os.makedirs(settings.CACHE_ROOT)
+        mesg = "Created cache directory (%s)" % settings.CACHE_ROOT
+        logger.info(mesg)
+
+    df = psutil.disk_usage(settings.CACHE_ROOT)
+    MB = 1024 * 1024
+
+    return {
+        'size-in-megabytes': df.used / MB,
+        'capacity-in-megabytes': df.total / MB,
+        'free': df.free / MB,
+        'percent-used': df.percent,
+        }
+
+
+def jobs():
+    '''Counts job splits per status'''
+
+    return dict(
+        total=JobSplit.objects.count(),
+        running=JobSplit.objects.filter(status=Job.PROCESSING).count(),
+        queued=JobSplit.objects.filter(status=Job.QUEUED).count(),
+        cancelled=JobSplit.objects.filter(status=Job.CANCELLED).count(),
+        skipped=JobSplit.objects.filter(status=Job.SKIPPED).count(),
+        completed=JobSplit.objects.filter(status=Job.COMPLETED).count(),
+        failed=JobSplit.objects.filter(status=Job.FAILED).count(),
+        )
+
+
+def experiments():
+    '''Counts running/scheduled experiments and lists them'''
+
+    return dict(
+        running=Experiment.objects.filter(status=Experiment.RUNNING).count(),
+        scheduled=Experiment.objects.filter(status=Experiment.SCHEDULED).count(),
+        ls=Experiment.objects.filter(status__in=(Experiment.RUNNING,
+          Experiment.SCHEDULED)).order_by('author__username',
+            'toolchain__author__username', 'toolchain__name',
+            'toolchain__version', 'name'),
+        )
diff --git a/beat/web/backend/templates/backend/environment.html b/beat/web/backend/templates/backend/environment.html
index 53f21713226d13fc33aa777a3893223fbd1723fc..a8a1079f41274292ea3eb07c9d0e894519f599c5 100644
--- a/beat/web/backend/templates/backend/environment.html
+++ b/beat/web/backend/templates/backend/environment.html
@@ -2,21 +2,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -75,7 +75,7 @@
 
     {# Navigation Tabs #}
     <ul id="object-tabs" class="nav nav-tabs" role="tablist">
-      <li role="presentation active"><a {% if not environment.description %}title="No documentation available" {% endif %}href="#doc" role="tab" data-toggle="tab" aria-controls="doc">Documentation{% if not environment.description %} <i class="fa fa-warning"></i>{% endif %}</a></li>
+      <li role="presentation active" class="active"><a {% if not environment.description %}title="No documentation available" {% endif %}href="#doc" role="tab" data-toggle="tab" aria-controls="doc">Documentation{% if not environment.description %} <i class="fa fa-warning"></i>{% endif %}</a></li>
       <li role="presentation"><a href="#queues" role="tab" data-toggle="tab" aria-controls="queues">Queues <span class="badge">{{ queues|length }}</span></a></li>
     </ul>
 
diff --git a/beat/web/backend/templates/backend/scheduler.html b/beat/web/backend/templates/backend/scheduler.html
index 2e1b407ff322333930325e0772273db4d3bbd3e1..1dbbb34ff02b38d170771efefa5e3cf84e52a940 100644
--- a/beat/web/backend/templates/backend/scheduler.html
+++ b/beat/web/backend/templates/backend/scheduler.html
@@ -2,27 +2,28 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
 
 {% load fingerprint %}
-{% load ui_tags %}
+{% load backend_tags %}
+{% load humanize %}
 
 {% block title %}{{ block.super }} - Scheduler{% endblock %}
 
@@ -37,25 +38,67 @@
 <div class="row">
   <div class="col-sm-12">
     <div class="alert alert-success" role="alert" style="text-align: center;">
-      <i class="fa fa-check fa-2x vertical-center"></i> Scheduler Version: {{ data.scheduler.beat_version }} (updated: {% now "H:i O, jS F Y" %})
+      <i class="fa fa-clock-o fa-2x vertical-center"></i> Updated: {% now "H:i:s O, jS F Y" %}
       <div class="pull-right action-buttons">
-        <a id="clean-cache-button" class="btn btn-default" data-toggle="tooltip" data-placement="bottom" title="Clean-up the cache"><i class="fa fa-trash"></i> Wipe cache</a>
-        <a id="reconfig-button" class="btn btn-default btn-share" data-toggle="tooltip" data-placement="bottom" title="Re-configure the scheduler"><i class="fa fa-refresh"></i> Re-configure</a>
-        <a id="cancel-experiments-button" class="btn btn-default btn-delete" data-toggle="tooltip" data-placement="bottom" title="Cancel all running experiments"><i class="fa fa-times"></i> Cancel Experiments</a>
+        <a id="update-workers-button" class="btn btn-default btn-info" data-toggle="tooltip" data-placement="bottom" title="Request all workers to update their state when possible" href="{% url 'backend:update-workers' %}"><i class="fa fa-gears"></i> Update Workers</a>
+        <a id="cancel-experiments-button" class="btn btn-default btn-delete" data-toggle="tooltip" data-placement="bottom" title="Cancel all running experiments" href="{% url 'backend:cancel-experiments' %}"><i class="fa fa-times"></i> Cancel Experiments</a>
       </div>
     </div>
   </div>
 </div>
 
+{% if helper_panel %}
+<div class="row">
+  <div class="col-sm-12">
+    <div class="panel panel-warning">
+      <div class="panel-heading">
+        <h3 class="panel-title">Helper Panel</h3>
+      </div>
+      <div class="panel-body">
+        <p class="help">Use this panel to <strong>locally</strong> launch scheduling activity. This functionality is intended as a <em>test</em> scheduler and worker replacement that can be used to run local experiments or debug. <strong>Don't use this in a production system.</strong> Every time you launch an activity, the page will reload to trigger this action. Scheduling happens in the context of the Django server running on the background. Worker processes are managed using subprocesses and don't block the web server.</p>
+
+        <div class="form-inline">
+          <div id="activity-group" class="form-group">
+            <label class="sr-only" for="activity">Activity</label>
+            <select id="activity" class="form-control">
+              <option value="both">Schedule &amp; Work</option>
+              <option value="schedule">Schedule</option>
+              <option value="work">Work</option>
+            </select>
+          </div>
+          <div id="periodically-group" class="form-group">
+            <div class="checkbox">
+              <label>
+                <input id="periodically" type="checkbox" checked="checked"> periodically
+              </label>
+            </div>
+          </div>
+          <div id="period-group" class="form-group">
+            <label class="sr-only" for="period">Period</label>
+            <div class="input-group">
+              <div class="input-group-addon">every</div>
+              <input type="text" class="form-control" id="period" value="{{ scheduling_period }}">
+              <div class="input-group-addon">s</div>
+            </div>
+          </div>
+          <button id="start" type="submit" class="btn btn-success"><i class="fa fa-play"></i> <span id="start">Start</span></button>
+          <button id="stop" type="submit" class="btn btn-danger disabled"><i class="fa fa-stop"></i> <span id="stop">Stop</span></button>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
+{% endif %}
+
 <div class="row">
   <div class="col-sm-12">
 
 
     <ul id="object-tabs" class="nav nav-tabs nav-justified" role="tablist">
-      <li role="presentation" class="active"><a id="status-tab" href="#status" role="tab" data-toggle="tab" aria-controls="status">Status <span class="badge">{{ data.scheduler.jobs.running }}</a></li>
-      <li role="presentation"><a href="#experiments" role="tab" data-toggle="tab" aria-controls="experiments">Experiments <span class="badge">{{ data.scheduler.experiments.list|length }}</span></a></li>
-      <li role="presentation"><a href="#workers" role="tab" data-toggle="tab" aria-controls="workers">Workers <span class="badge">{{ data.workers|length }}</span></a></li>
-      <li role="presentation"><a href="#queues" role="tab" data-toggle="tab" aria-controls="queues">Queues <span class="badge">{{ data.scheduler.queues|length }}</span></a></li>
+      <li role="presentation" class="active"><a id="status-tab" href="#status" role="tab" data-toggle="tab" aria-controls="status">Status <span class="badge">{{ jobs.running }}</a></li>
+      <li role="presentation"><a href="#experiments" role="tab" data-toggle="tab" aria-controls="experiments">Experiments <span class="badge">{{ experiments.ls|length }}</span></a></li>
+      <li role="presentation"><a href="#workers" role="tab" data-toggle="tab" aria-controls="workers">Workers <span class="badge">{{ workers|length }}</span></a></li>
+      <li role="presentation"><a href="#queues" role="tab" data-toggle="tab" aria-controls="queues">Queues <span class="badge">{{ queues|length }}</span></a></li>
     </ul>
 
     <!-- Tab contents -->
@@ -67,19 +110,19 @@
         <div class="col-sm-4">
           <h3>Experiments</h3>
           <ul class="list-group">
-            <li class="list-group-item">Running <span class="badge">{{ data.scheduler.experiments.running }}</span></li>
-            <li class="list-group-item">Completed <span class="badge">{{ data.scheduler.experiments.completed }}</span></li>
+            <li class="list-group-item">Running <span class="badge">{{ experiments.running }}</span></li>
+            <li class="list-group-item">Scheduled <span class="badge">{{ experiments.scheduled }}</span></li>
           </ul>
         </div>
 
         <div class="col-sm-4">
           <h3>Jobs</h3>
           <ul class="list-group">
-            <li class="list-group-item">Queued <span class="badge">{{ data.scheduler.jobs.queued }}</span></li>
-            <li class="list-group-item">Running <span class="badge">{{ data.scheduler.jobs.running }}</span></li>
-            <li class="list-group-item">Completed <span class="badge">{{ data.scheduler.jobs.completed }}</span></li>
-            <li class="list-group-item">Failed <span class="badge">{{ data.scheduler.jobs.failed }}</span></li>
-            <li class="list-group-item">Cancelled <span class="badge">{{ data.scheduler.jobs.cancelled }}</span></li>
+            <li class="list-group-item">Queued <span class="badge">{{ jobs.queued }}</span></li>
+            <li class="list-group-item">Running <span class="badge">{{ jobs.running }}</span></li>
+            <li class="list-group-item">Completed <span class="badge">{{ jobs.completed }}</span></li>
+            <li class="list-group-item">Failed <span class="badge">{{ jobs.failed }}</span></li>
+            <li class="list-group-item">Cancelled <span class="badge">{{ jobs.cancelled }}</span></li>
           </ul>
         </div>
 
@@ -89,19 +132,19 @@
           <canvas id="cache-chart" style="width: 80%; height: auto;"></canvas>
           <div id="cache-legend" class="chart-legend"></div>
         </div>
-
       </div>
 
       <!-- Experiments tab -->
       <div role="tabpanel" class="tab-pane" id="experiments">
-        {% if data.scheduler.experiments.list %}
+        {% if experiments.ls %}
         <div class="scrollable table-responsive">
           <table class="table table-hover table-condensed">
             <thead>
               <tr>
                 <th>Name</th>
-                <th>Blocks</th>
-                <th>Jobs</th>
+                <th>Blocks/Jobs</th>
+                <th>Job Splits</th>
+                <th>Assigned</th>
                 <th>Running</th>
                 <th>Completed</th>
                 <th>Failed</th>
@@ -110,20 +153,28 @@
               </tr>
             </thead>
             <tbody>
-              {% for name,obj in data.scheduler.experiments.list.items %}
+              {% for obj in experiments.ls %}
               <tr>
-                {% with name|split_fullname as xp %}
-                <td><a href="{% url 'experiments:view' xp.0 xp.1 xp.2 xp.3 xp.4 %}">{{ name }}</a></td>
-                {% endwith %}
-                <td>{{ obj.blocks }}</td>
-                <td>{{ obj.jobs }}</td>
-                <td>{{ obj|getkey:"jobs-running" }}</td>
-                <td>{{ obj|getkey:"jobs-completed" }}</td>
-                <td>{{ obj|getkey:"jobs-failed" }}</td>
-                <td>{{ obj|getkey:"jobs-cancelled" }}</td>
-                <td>{{ obj|getkey:"jobs-skipped" }}</td>
+                <td><a href="{{ obj.get_admin_change_url }}">{{ obj.fullname }}</a></td>
+                <td>{{ obj.blocks.count }}</td>
+                <td>{{ obj|count_job_splits }}</td>
+                <td>{{ obj|count_job_splits:"A" }}</td>
+                <td>{{ obj|count_job_splits:"P" }}</td>
+                <td>{{ obj|count_job_splits:"C" }}</td>
+                <td>{{ obj|count_job_splits:"F" }}</td>
+                <td>{{ obj|count_job_splits:"L" }}</td>
+                <td>{{ obj|count_job_splits:"S" }}</td>
+              </tr>
+              {% for split in obj.job_splits %}
+              <tr class="job-split">
+                <td class="job-split-empty"></td>
+                <td>{{ split.job.block.name }} (<strong>{{ split.get_status_display }}</strong>)</td>
+                <td>{{ split.split_index|add:"1" }}/{{ split.job.block.required_slots }}</td>
+                <td colspan="3">{% if split.worker.name %} at {{ split.worker.name }} (consuming {{ split.job.block.queue.cores_per_slot }} core{{ split.job.block.queue.cores_per_slot|pluralize }}){% endif %}</td>
+                <td colspan="3">{% if split.start_date %} <i class="fa fa-clock-o"></i> {{ split.start_date|naturaltime }}{% endif %}</td>
               </tr>
               {% endfor %}
+              {% endfor %}
             </tbody>
           </table>
         </div>
@@ -133,36 +184,24 @@
       <!-- Workers tab -->
       <div role="tabpanel" class="tab-pane" id="workers">
 
-        {% if data.workers %}
+        {% if workers %}
         <div class="scrollable table-responsive">
           <table class="table table-hover object-list">
             <thead>
               <tr>
-                <th class="status"></th>
                 <th class="status"></th>
                 <th>Name</th>
-                <th>Available/Total Cores</th>
-                <th>Used Memory (in Gb)</th>
-                <th>Worker Version</th>
-                <th>Environments</th>
+                <th>Available/Total Cores (Load)</th>
+                <th>Memory (in Gb) (Load)</th>
+                <th>Last Updated</th>
+                <th>Info</th>
               </tr>
             </thead>
             <tbody>
-              {% for name, obj in data.workers.items %}
+              {% for obj in workers %}
               <tr{% if obj.available_cores != obj.cores %} class="{% if obj.available_cores == 0 %}danger{% else %}warning{% endif %}"{% endif %}>
                 <td class="status">
-                  <a title="{{ obj.db_status }}: {{ obj.info }}" data-toggle="tooltip" data-placement="top">
-                    {% if obj.db_status == 'Active' %}
-                    <i class="fa fa-check fa-2x" style="color:green"></i>
-                    {% elif obj.db_status == 'Inactive' %}
-                    <i class="fa fa-power-off fa-2x" style="color:gray"></i>
-                    {% elif obj.db_status == 'Unknown' %}
-                    <i class="fa fa-bug fa-2x" style="color:red"></i>
-                    {% endif %}
-                  </a>
-                </td>
-                <td class="status">
-                  <a title="{% if obj.active %}On{% else %}Off{% endif %}" data-toggle="tooltip" data-placement="top">
+                  <a title="{% if obj.active %}Active{% else %}Off{% endif %}" data-toggle="tooltip" data-placement="top">
                     {% if obj.active %}
                     <i class="fa fa-check fa-2x" style="color:green"></i>
                     {% else %}
@@ -170,11 +209,11 @@
                     {% endif %}
                   </a>
                 </td>
-                <td>{% if obj.id %}<a title="Click to admin" data-toggle="tooltip" data-placement="top" href="{% url 'admin:backend_worker_change' obj.id %}">{{ name }}</a>{% else %}{{ name }}{% endif %}</td>
-                <td>{{ obj.available_cores }} / {{ obj.cores }}</td>
-                <td>{{ obj.virtual_memory.percent }}% ({% widthratio obj.memory_in_megabytes 1024 1 %})</td>
-                <td>{{ obj|getkey:'beat_version' }}</td>
-                <td>{{ obj|getkey:'environments'|length }}</td>
+                <td><a title="Click to admin" data-toggle="tooltip" data-placement="top" href="{{ obj.get_admin_change_url }}">{{ obj.name }}</a></td>
+                <td>{{ obj.available_cores }} / {{ obj.cores }} ({{ obj.used_cores }}%)</td>
+                <td>{% widthratio obj.memory 1024 1 %} ({{ obj.used_memory }}%)</td>
+                <td>{{ obj.updated|date:"H:i:s O, jS F Y" }}</td>
+                <td>{{ obj.info }}</td>
                 {% endfor %}
             </tbody>
           </table>
@@ -186,44 +225,32 @@
       <!-- Queues tab -->
       <div role="tabpanel" class="tab-pane" id="queues">
 
-        {% if data.scheduler.queues %}
+        {% if queues %}
         <div class="scrollable table-responsive">
           <table class="table table-hover object-list">
             <thead>
               <tr>
-                <th class="status"></th>
                 <th>Name</th>
                 <th>Memory (in Mb)</th>
                 <th>Total Slots</th>
                 <th>Cores/Slot</th>
                 <th>Max Slots/User</th>
+                <th>Availability</th>
                 <th>Time Limit (minutes)</th>
                 <th>Environments</th>
               </tr>
             </thead>
             <tbody>
-              {% for name, obj in data.scheduler.queues.items %}
+              {% for obj in queues %}
               <tr>
-                <td class="status">
-                  <a title="{{ obj.info }}" data-toggle="tooltip" data-placement="top">
-                    {% if obj.db_status == 'Active' %}
-                    <i class="fa fa-check fa-2x" style="color:green"></i>
-                    {% elif obj.db_status == 'Inactive' %}
-                    <i class="fa fa-power-off fa-2x" style="color:gray"></i>
-                    {% elif obj.db_status == 'Mismatch' %}
-                    <i class="fa fa-bug fa-2x" style="color:red"></i>
-                    {% elif obj.db_status == 'Missing' %}
-                    <i class="fa fa-question fa-2x" style="color:orange"></i>
-                    {% endif %}
-                  </a>
-                </td>
-                <td>{% if obj.id %}<a title="Click to admin" data-toggle="tooltip" data-placement="top" href="{% url 'admin:backend_queue_change' obj.id %}">{{ name }}</a>{% else %}{{ name }}{% endif %}</td>
-                <td>{{ obj|getkey:'memory-in-megabytes' }}</td>
-                <td>{{ obj|getkey:'total-slots' }}</td>
-                <td>{{ obj|getkey:'nb-cores-per-slot' }}</td>
-                <td>{{ obj|getkey:'max-slots-per-user' }}</td>
-                <td>{{ obj|getkey:'time-limit-in-minutes' }}</td>
-                <td>{{ obj|getkey:'environments'|length }}</td>
+                <td><a title="Click to admin" data-toggle="tooltip" data-placement="top" href="{{ obj.get_admin_change_url }}">{{ obj.name }}</a></td>
+                <td>{{ obj.memory_limit }}</td>
+                <td>{{ obj.number_of_slots }}</td>
+                <td>{{ obj.cores_per_slot }}</td>
+                <td>{{ obj.max_slots_per_user }}</td>
+                <td>{{ obj.availability }}</td>
+                <td>{{ obj.time_limit }}</td>
+                <td>{{ obj.environments.count }}</td>
                 {% endfor %}
             </tbody>
           </table>
@@ -241,13 +268,10 @@
 <script type="text/javascript">
 $(document).ready(function() {
 
-  $.ajaxSetup({
-    beforeSend: function(xhr, settings) {
-      var csrftoken = $.cookie('csrftoken');
-      xhr.setRequestHeader('X-CSRFToken', csrftoken);
-    }
-  });
-
+  /**
+   * This bit of code here is to manage the normal parts of the scheduler
+   * page, like the tags and the cache chart which is displayed.
+   */
   manage_tabs('ul#object-tabs');
 
   var data = {{ cache_chart_data|safe }};
@@ -271,95 +295,98 @@ $(document).ready(function() {
     display_cache_chart();
   });
 
-  function reload_on_success(data) {
-    var message = 'Command successfully sent to scheduler.';
-    if (Array.isArray(data)) { //cache feedback
-      if (data.length == 0) message += ' No cache files erased.';
-      else if (data.length == 1) message += ' One cache file erased.';
-      else message += ' ' + data.length + ' cache files erased.';
-    }
-    BootstrapDialog.show({
-      title: '<i class="fa fa-info-circle"></i> Information',
-      message: message,
-      buttons: [{
-        label: 'Close',
-        cssClass: 'btn-primary',
-        action: function(dialog) {dialog.close();}
-      }],
-      onhide: function() {
-        location.reload();
-      },
-    });
+  {% if helper_panel %}
+  /**
+   * This bit of code here is to manage helper panel, only included if that
+   * shows up.
+   */
+
+  function display_periodic(period) {
+    $("input#periodically").prop("checked", true);
+    $("#period-group").show();
+    $("button#start > span#start").text("Start");
+    $("button#stop > span#stop").text("Stop");
+    $("button#stop").show();
+    $("input#period").val(period);
+    $("button#stop").disable();
   }
 
-  //rig buttons so to perform actions
-  $('a#reconfig-button').click(function() {
-    $this = $(this);
-    $this.disable();
-    $this.find('i').addClass('fa-spin');
-
-    var d = $.ajax({
-      type: 'POST',
-      url: '{% url "api_backend:backend-api-scheduler-configuration" %}',
-    });
-
-    d.done(reload_on_success);
+  function display_single_shot() {
+    $("input#periodically").prop("checked", false);
+    $("#period-group").hide();
+    $("button#start > span#start").text("Go");
+    $("button#stop > span#stop").text("Reset");
+    $("button#start").enable();
+    $("button#stop").enable();
+  }
 
-    d.fail(function(data, status_text) {
-      process_error(data, status_text);
-      $this.find('i').removeClass('fa-spin');
-      $this.enable();
-    });
+  /* controls button display */
+  $("input#periodically").click(function() {
+    if($(this).is(":checked")) {
+      display_periodic({{ scheduling_period }});
+    } else {
+      display_single_shot();
+    }
   });
 
-  $('a#cancel-experiments-button').click(function() {
-    $this = $(this);
-    $this.disable();
-    $this.find('i').addClass('fa-spin');
-
-    var d = $.ajax({
-      type: 'POST',
-      url: '{% url 'api_backend:backend-api-cancel-all-experiments' %}',
-    });
+  /* get url parameters */
+  function get_parameters() {
+    var vars = {};
+    var parts = location.search.replace(/[?&]+([^=&]+)=([^&]*)/gi,
+        function(m,key,value) {
+          vars[key] = value;
+        });
+    return vars;
+  }
 
-    d.done(reload_on_success);
+  /* controls the initial appearance */
+  var params = get_parameters();
+  if (location.search !== "") {
+    if ("period" in params) {
+      var period = parseInt(params.period, 10);
+      display_periodic(period);
+      $("button#start").disable();
+      $("button#stop").enable();
+      $("select#activity").disable();
+      $("input#period").disable();
+      $("input#periodically").disable();
+      setTimeout("location.reload(true);", period * 1000);
+    }
+    else {
+      display_single_shot();
+    }
+    if ("activity" in params) {
+      $("select#activity").val(params.activity);
+    }
+  } else {
+    display_periodic({{ scheduling_period }});
+    $("button#start").enable();
+    $("button#stop").disable();
+    $("select#activity").enable();
+    $("input#period").enable();
+    $("input#periodically").enable();
+  }
 
-    d.fail(function(data, status_text) {
-      process_error(data, status_text);
-      $this.find('i').removeClass('fa-spin');
-      $this.enable();
-    });
+  /* controls form submission buttons */
+  $("button#start").click(function() {
+    var params = '?activity=' + $("select#activity").val();
+    if ($("input#periodically").is(":checked")) {
+      params += '&period=' + $("input#period").val();
+    }
+    if (location.search === params) {
+      location.reload(true);
+    }
+    else {
+      location.search = params;
+    }
   });
 
-  $('a#clean-cache-button').click(function() {
-    $this = $(this);
-    $this.disable();
-    $this.find('i').addClass('fa-spin');
-
-    //TODO: missing date picker to choose time
-
-    data = {
-      'olderthan': 0, //in minutes (0 means from now backwards)
-      'nolist': false, //wheter we expect a list of hashes erased (negated)
-    };
-
-    var d = $.ajax({
-      type: "POST",
-      url: '{% url 'api_backend:backend-api-cache-cleanup' %}',
-      data: JSON.stringify(data),
-      contentType: "application/json; charset=utf-8",
-      dataType: "json",
-    });
-
-    d.done(reload_on_success);
-
-    d.fail(function(data, status_text) {
-      process_error(data, status_text);
-      $this.find('i').removeClass('fa-spin');
-      $this.enable();
-    });
+  $("button#stop").click(function() {
+    location.search = "";
   });
 
+  {% endif %}
+
 });
 </script>
 {% endblock %}
diff --git a/beat/web/backend/templatetags/backend_tags.py b/beat/web/backend/templatetags/backend_tags.py
index dc44d65edc1f1967a1c19e491db25a915b384f58..b3be2c7db7a7938274d55aafde560b40a57a7243 100644
--- a/beat/web/backend/templatetags/backend_tags.py
+++ b/beat/web/backend/templatetags/backend_tags.py
@@ -29,6 +29,8 @@
 from django import template
 from django.contrib.auth.models import User
 
+from ..models import Job
+
 
 register = template.Library()
 
@@ -81,3 +83,11 @@ def environment_actions(context, object, display_count):
 def visible_queues(context, object):
     '''Calculates the visible queues for an environment and requestor'''
     return object.queues_for(context['request'].user)
+
+
+@register.filter
+def count_job_splits(xp, status=None):
+    """Returns job splits for an experiment in a certain state"""
+    if status == 'A':
+        return xp.job_splits(status=Job.QUEUED).filter(worker__isnull=False).count()
+    return xp.job_splits(status=status).count()
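+
+
+# illustrative template usage (assuming this tag library is loaded):
+#   {{ xp|count_job_splits:"A" }}   {# number of assigned splits #}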
diff --git a/beat/web/backend/tests.py b/beat/web/backend/tests.py
index be1c7f3d4b48fbb0a2c6e305e0010964353be7a5..977a2384276731419b4e2343b3fc4575142f1d0b 100644
--- a/beat/web/backend/tests.py
+++ b/beat/web/backend/tests.py
@@ -26,1081 +26,2735 @@
 ###############################################################################
 
 import os
-import simplejson as json
+import sys
+import time
 import shutil
-from datetime import datetime
+import tempfile
+import collections
 
 from django.conf import settings
-from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
+from django.core import management
+from django.contrib.auth.models import User, Group
+from django.test import TestCase, TransactionTestCase
 
+from guardian.shortcuts import get_perms
 
-# Override the Scheduler API
-from ..utils import scheduler
+from ..common.testutils import BaseTestCase as APITestCase, tearDownModule
+from ..experiments.models import Experiment, Block
+from ..algorithms.models import Algorithm
+from ..utils.management.commands import install
+from ..statistics.models import HourlyStatistics
+
+from .models import Queue, Worker, Slot, Environment, Job, JobSplit, Result
+from .utils import cleanup_cache, dump_backend, setup_backend
+from .management.commands import qsetup
+from .schedule import schedule
+
+
+def _sleep(tries, condition):
+    """time.sleep() is not reliable on this test unit: it may wake up early.
+    This helper re-sleeps until the full interval has elapsed, checking the
+    condition between tries."""
+
+    seconds = 1.0 #between tries
+    for i in range(tries):
+        if condition(): return
+        slept = 0
+        while slept < seconds:
+            start = time.time()
+            time.sleep(seconds - slept)
+            slept += time.time() - start
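+
+# illustrative call (the condition is hypothetical):
+#   _sleep(20, lambda: xp.blocks.first().done())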
+
+
+# Example configuration with 3 queues with an increasing amount of resources
+# running on the same host
+QUEUES_WITHOUT_PRIORITY = {
+  "queues": collections.OrderedDict([
+    ("q1", {
+      "memory-limit": 4*1024,
+      "time-limit": 180, #3 hours
+      "cores-per-slot": 1,
+      "max-slots-per-user": 4,
+      "environments": ['environment (1)'],
+      "groups": [
+        "Default",
+        ],
+      "slots": {
+        "node1": {
+          "quantity": 4,
+          "priority": 0
+          }
+        }
+      }
+    ),
+    ("q2", {
+      "memory-limit": 8*1024,
+      "time-limit": 360, #6 hours
+      "cores-per-slot": 2,
+      "max-slots-per-user": 2,
+      "environments": ['environment (1)'],
+      "groups": [
+        "Default",
+        ],
+      "slots": {
+        "node1": {
+          "quantity": 2,
+          "priority": 0
+          },
+        }
+      }
+    ),
+    ("q4", {
+      "memory-limit": 16*1024,
+      "time-limit": 720, #12 hours
+      "cores-per-slot": 4,
+      "max-slots-per-user": 1,
+      "environments": ['environment (1)'],
+      "groups": [
+        "Default",
+        ],
+      "slots": {
+        "node1": {
+          "quantity": 1,
+          "priority": 0
+          },
+        }
+      }
+    )
+  ]),
+  "workers": {
+    "node1": {
+      "cores": 4,
+      "memory": 16*1024,
+      }
+    },
+  "environments": {
+    'environment (1)': {
+      "name": 'environment',
+      "version": '1',
+      "short_description": "Test",
+      "description": "Test environment",
+      },
+    },
+  }
 
-def mockPutMessage(url, params=None, data=None):
-    return (200, None)
+# Example configuration with 3 queues sharing slots on 2 hosts
+PRIORITY_QUEUES = {
+    "queues": collections.OrderedDict([
+      ("q1", {
+        "memory-limit": 4*1024,
+        "time-limit": 180, #3 hours
+        "cores-per-slot": 1,
+        "max-slots-per-user": 2,
+        "environments": ['environment (1)'],
+        "groups": [
+          "Default",
+          ],
+        "slots": {
+          "node1": {
+            "quantity": 4,
+            "priority": 5
+            },
+          "node2": {
+            "quantity": 4,
+            "priority": 0
+            },
+          }
+        },
+      ),
+      ("q2", {
+        "memory-limit": 8*1024,
+        "time-limit": 360, #6 hours
+        "cores-per-slot": 2,
+        "max-slots-per-user": 1,
+        "environments": ['environment (1)'],
+        "groups": [
+          "Default",
+          ],
+        "slots": {
+          "node1": {
+            "quantity": 2,
+            "priority": 0
+            },
+          "node2": {
+            "quantity": 2,
+            "priority": 10
+            }
+          }
+        },
+      ),
+      ("q1_special", {
+        "memory-limit": 4*1024,
+        "time-limit": 180, #3 hours
+        "cores-per-slot": 1,
+        "max-slots-per-user": 8,
+        "environments": ['environment (1)'],
+        "groups": [
+          "Default",
+          ],
+        "slots": {
+          "node1": {
+            "quantity": 4,
+            "priority": 0
+            },
+          "node2": {
+            "quantity": 4,
+            "priority": 5
+            }
+          }
+        }
+      ),
+    ]),
+    "workers": collections.OrderedDict([
+      ("node1", {
+        "cores": 4,
+        "memory": 32*1024,
+        }
+      ),
+      ("node2", {
+        "cores": 4,
+        "memory": 16*1024,
+        }
+      )
+    ]),
+    "environments": {
+        'environment (1)': {
+          "name": 'environment',
+          "version": '1',
+          "short_description": "Test",
+          "description": "Test environment",
+          },
+        },
+  }
 
-def mockPostMessage(url, params=None, data=None):
-    return (200, None)
 
-scheduler.putMessage  = mockPutMessage
-scheduler.postMessage = mockPostMessage
+class CancelAllExperimentsAPI(APITestCase):
 
+    def setUp(self):
+        self.url = reverse('backend:cancel-experiments')
 
-from ..experiments.models import Experiment
-from ..experiments.models import Block
-from ..dataformats.models import DataFormat
-from ..algorithms.models import Algorithm
-from ..toolchains.models import Toolchain
-from ..databases.models import Database
 
-from ..backend.models import Environment
-from ..backend.models import Queue
+    def test_no_access_for_anonymous_user(self):
+        response = self.client.get(self.url)
+        self.checkResponse(response, 302) #redirects to login page
 
-from ..common.testutils import BaseTestCase
 
-class BackendAPIBase(BaseTestCase):
+    def test_no_access_for_non_superuser(self):
+        User.objects.create_user('johndoe', 'johndoe@test.org', '1234')
+        self.client.login(username='johndoe', password='1234')
+        response = self.client.get(self.url)
+        self.checkResponse(response, 403)
+
+
+class CacheCleanUp(TestCase):
 
-    DECLARATION1 = {
-        "globals": {
-        },
-        "blocks": {
-            "addition1": {
-                "algorithm": "johndoe/sum/1",
-                "parameters": {
-                },
-                "inputs": {
-                    "a": "a",
-                    "b": "b"
-                },
-                "outputs": {
-                    "sum": "sum"
-                }
-            },
-            "addition2": {
-                "algorithm": "johndoe/sum/1",
-                "parameters": {
-                },
-                "inputs": {
-                    "a": "a",
-                    "b": "b"
-                },
-                "outputs": {
-                    "sum": "sum"
-                }
-            }
-        },
-        "datasets": {
-            "dataset1": {
-                "database": "integers/1",
-                "protocol": "triple",
-                "set": "default",
-            }
-        },
-        "analyzers": {
-            "analysis": {
-                "algorithm": "johndoe/analysis/1",
-                "parameters": {
-                },
-                "inputs": {
-                    "in": "input"
-                }
-            }
-        },
-        "globals": {
-            "environment": {
-                "name": "env1",
-                "version": "1.0"
-            },
-            "queue": "queue1"
-        }
-    }
-
-    DATABASE = {
-            "root_folder": "/path/to/root_folder",
-            "protocols": [
-                {
-                    "name": "triple",
-                    "template": "test",
-                    "sets": [
-                        {
-                            "name": "default",
-                            "template": "set",
-                            "view": "dummy",
-                            "outputs": {
-                                "output1": "johndoe/single_integer/1",
-                                "output2": "johndoe/single_integer/1",
-                                "output3": "johndoe/single_integer/1"
-                                }
-                            }
-                        ]
-                    }
-                ]
-            }
 
     def setUp(self):
-        for path in [settings.TOOLCHAINS_ROOT, settings.EXPERIMENTS_ROOT,
-                     settings.DATAFORMATS_ROOT, settings.ALGORITHMS_ROOT,
-                     settings.CACHE_ROOT]:
-            if os.path.exists(path):
-                shutil.rmtree(path)
-
-        user = User.objects.create_user('johndoe', 'johndoe@test.org', '1234')
-
-
-        # Create an environment and queue
-        environment = Environment(name='env1', version='1.0')
-        environment.save()
-
-        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, nb_cores_per_slot=1, max_slots_per_user=10)
-        queue.save()
-
-        queue.environments.add(environment)
-
-
-        DataFormat.objects.create_dataformat(
-                author=user,
-                name='single_integer',
-                short_description='description',
-                declaration={
-                    "value": "int32"
-                    },
-                )
-
-        db, errors = Database.objects.create_database('integers', declaration=self.DATABASE)
-        assert not errors, 'Database errors: %s' % errors
-        db.sharing = Database.PUBLIC
-        db.save()
-
-        (self.toolchain1, errors) = Toolchain.objects.create_toolchain(user,
-                'toolchain1', 'short description 1',
-                declaration={
-                    "blocks": [ {
-                        "name": "addition1",
-                        "inputs": [
-                            "a",
-                            "b"
-                            ],
-                        "outputs": [
-                            "sum"
-                            ],
-                        "synchronized_channel": "dataset1"
-                        },
-                        {
-                            "name": "addition2",
-                            "inputs": [
-                                "a",
-                                "b"
-                                ],
-                            "outputs": [
-                                "sum"
-                                ],
-                            "synchronized_channel": "dataset1"
-                            }
-                        ],
-                    "datasets": [ {
-                        "name": "dataset1",
-                        "outputs": [
-                            "output1",
-                            "output2",
-                            "output3"
-                            ]
-                        }
-                        ],
-                    "connections": [ {
-                        "from": "dataset1.output1",
-                        "to": "addition1.a",
-                        "channel": "dataset1"
-                        },
-                        {
-                            "from": "dataset1.output2",
-                            "to": "addition1.b",
-                            "channel": "dataset1"
-                            },
-                        {
-                            "from": "addition1.sum",
-                            "to": "addition2.a",
-                            "channel": "dataset1"
-                            },
-                        {
-                            "from": "dataset1.output3",
-                            "to": "addition2.b",
-                            "channel": "dataset1"
-                            },
-                        {
-                            "to": "analysis.input",
-                            "from": "addition2.sum",
-                            "channel": "dataset1"
-                            }
-                        ],
-                    "analyzers": [
-                            {
-                                "inputs": [
-                                    "input"
-                                    ],
-                                "synchronized_channel": "dataset1",
-                                "name": "analysis"
-                                }
-                            ],
-                    "representation": {
-                            "connections": {},
-                            "blocks": {},
-                            "channel_colors": {},
-                            },
-                    })
-        assert not errors, 'Toolchain errors: %s' % errors
-
-        (self.algorithm, errors) = Algorithm.objects.create_algorithm(
-            author=user,
-            name='sum',
-            short_description='description',
-            declaration="""{
-  "language": "python",
-  "splittable": false,
-  "groups": [
-    {
-      "inputs": {
-        "a": { "type": "johndoe/single_integer/1" },
-        "b": { "type": "johndoe/single_integer/1" }
-      },
-      "outputs": {
-        "sum": { "type": "johndoe/single_integer/1" }
-      }
-    }
-  ],
-  "parameters": {
-  }
-}""",
-
-            code="""class Algorithm:
-
-    def process(self, inputs, outputs):
-        data = outputs['sum'].createData()
-        data.value = inputs['a'].data.value + inputs['b'].data.value
-        outputs['sum'].write(data)
-        return True
-""")
-        assert not errors, 'Algorithm errors: %s' % errors
-
-        system_user = User.objects.create_user(settings.SYSTEM_ACCOUNT, 'system@test.org', '1234')
-
-        (dataformat, errors) = DataFormat.objects.create_dataformat(
-            author=system_user,
-            name='float',
-            short_description='description',
-            declaration={
-              "value": "float64",
-              },
+        self.cache = tempfile.mkdtemp(prefix='beat_')
+
+
+    def tearDown(self):
+        shutil.rmtree(self.cache)
+
+
+    def touch(self, f, times=None):
+        """Replicates the `touch' command-line utility"""
+        with open(f, 'a'): os.utime(f, times)
+
+
+    def J(self, *args):
+        return os.path.join(*((self.cache,) + args))
+
+
+    def prepare_cleanup_full(self):
+
+        # creates a temporary directory structure
+        os.makedirs(self.J('a', 'b', 'c'))
+        os.makedirs(self.J('a', 'c', 'd'))
+        os.makedirs(self.J('a', 'c', 'e'))
+        self.touch(self.J('a', 'b', 'c', 'd.json'))
+        self.touch(self.J('a', 'c', 'd', 'e.json'))
+
+
+    def check_cleanup_full(self):
+
+        assert not os.listdir(self.cache)
+
+
+    def test_cache_cleanup_full(self):
+
+        self.prepare_cleanup_full()
+        cleanup_cache(self.cache, delete=True)
+        self.check_cleanup_full()
+
+
+    def test_cmd_cleanup_full(self):
+
+        self.prepare_cleanup_full()
+        management.call_command('cleanup_cache', path=self.cache,
+            verbosity=0, delete=True)
+        self.check_cleanup_full()
+
+
+    def prepare_cleanup_aged(self):
+
+        two_min_ago = time.time() - 60*2
+
+        # creates a temporary directory structure
+        os.makedirs(self.J('a', 'b', 'c'))
+        os.makedirs(self.J('a', 'c', 'd'))
+        os.makedirs(self.J('a', 'c', 'e'))
+        self.touch(self.J('a', 'b', 'c', 'd.json'), (two_min_ago, two_min_ago))
+        self.touch(self.J('a', 'c', 'd', 'e.json'))
+
+
+    def check_cleanup_aged(self):
+
+        assert os.path.exists(self.J('a', 'c', 'd', 'e.json'))
+        assert not os.path.exists(self.J('a', 'b', 'c'))
+        assert not os.path.exists(self.J('a', 'b', 'c', 'd.json'))
+        assert not os.path.exists(self.J('a', 'b', 'e'))
+
+
+    def test_cache_cleanup_aged(self):
+
+        self.prepare_cleanup_aged()
+        cleanup_cache(self.cache, age_in_minutes=2, delete=True)
+        self.check_cleanup_aged()
+
+
+    def test_cmd_cleanup_aged(self):
+
+        self.prepare_cleanup_aged()
+        management.call_command('cleanup_cache', path=self.cache,
+            verbosity=0, olderthan=2, delete=True)
+        self.check_cleanup_aged()
+
+
+    def prepare_cleanup_lock(self):
+
+        two_min_ago = time.time() - 60*2
+        ten_min_ago = time.time() - 60*10
+
+        # creates a temporary directory structure
+        os.makedirs(self.J('a', 'b', 'c'))
+        os.makedirs(self.J('a', 'c', 'd'))
+        os.makedirs(self.J('a', 'c', 'e'))
+        self.touch(self.J('a', 'b', 'c', 'd.json'), (two_min_ago, two_min_ago))
+        self.touch(self.J('a', 'c', 'd', 'e.json'), (ten_min_ago, ten_min_ago))
+
+        self.touch(self.J('a', 'c', 'd', 'e.lock')) #create a lock
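+        # the lock is expected to shield e.json from deletion even though it
+        # is older than the age limit (see check_cleanup_lock below)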
+
+
+    def check_cleanup_lock(self):
+
+        assert os.path.exists(self.J('a', 'c', 'd', 'e.json'))
+        assert not os.path.exists(self.J('a', 'b', 'c'))
+        assert not os.path.exists(self.J('a', 'b', 'c', 'd.json'))
+        assert not os.path.exists(self.J('a', 'b', 'e'))
+
+
+    def test_cache_cleanup_lock(self):
+
+        self.prepare_cleanup_lock()
+        cleanup_cache(self.cache, delete=True)
+        self.check_cleanup_lock()
+
+
+    def test_cmd_cleanup_lock(self):
+
+        self.prepare_cleanup_lock()
+        management.call_command('cleanup_cache', path=self.cache,
+            verbosity=0, delete=True)
+        self.check_cleanup_lock()
+
+
+class BaseBackendTestCase(TestCase):
+
+
+    @classmethod
+    def setUpTestData(cls):
+        install.create_sites()
+        system_user, plot_user, user = install.create_users('user', 'user')
+        install.add_group('Default')
+
+        setup_backend(qsetup.DEFAULT_CONFIGURATION)
+
+        Worker.objects.update(active=True)
+        env = Environment.objects.first()
+        queue = Queue.objects.first()
+
+        template_data = dict(
+            system_user = system_user,
+            plot_user = plot_user,
+            user = user,
+            private = False,
+            queue = queue.name,
+            environment = dict(name=env.name, version=env.version),
+            )
+        prefix = os.path.join(
+              os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))),
+              'src',
+              'beat.examples',
+              )
+        install.install_contributions(prefix, 'system', template_data)
+        install.install_contributions(prefix, 'test', template_data)
+
+
+    def check_single(self, xp):
+        '''Checks user/user/single/1/single'''
+
+        self.assertEqual(xp.blocks.count(), 2)
+
+        b0 = xp.blocks.all()[0]
+
+        self.assertEqual(b0.name, 'echo')
+        self.assertEqual(b0.status, Block.NOT_CACHED)
+        self.assertEqual(b0.algorithm,
+            Algorithm.objects.get(name='integers_echo'))
+        self.assertEqual(b0.dependencies.count(), 0)
+        self.assertEqual(b0.dependents.count(), 1)
+        self.assertEqual(b0.job.status, Job.QUEUED)
+        self.assertEqual(b0.job.parent, None)
+        self.assertEqual(b0.job.child_, None)
+        self.assertEqual(b0.queue.name, 'queue')
+        self.assertEqual(b0.environment.name, 'environment')
+        self.assertEqual(b0.required_slots, 1)
+        self.assertEqual(b0.inputs.count(), 1)
+        self.assertEqual(b0.outputs.count(), 1)
+        self.assertEqual(b0.job.splits.count(), 1)
+        self.assertEqual(b0.job.splits.get().status, Job.QUEUED)
+        assert b0.job.splits.get().worker is None
+        assert not b0.done()
+
+        b1 = xp.blocks.all()[1]
+
+        self.assertEqual(b1.name, 'analysis')
+        self.assertEqual(b1.status, Block.NOT_CACHED)
+        self.assertEqual(b1.algorithm,
+            Algorithm.objects.get(name='integers_echo_analyzer'))
+        self.assertEqual(b1.dependencies.count(), 1)
+        self.assertEqual(b1.dependents.count(), 0)
+        self.assertEqual(b1.job.status, Job.QUEUED)
+        self.assertEqual(b1.job.parent, None)
+        self.assertEqual(b1.job.child_, None)
+        self.assertEqual(b1.queue.name, 'queue')
+        self.assertEqual(b1.environment.name, 'environment')
+        self.assertEqual(b1.required_slots, 1)
+        self.assertEqual(b1.inputs.count(), 1)
+        self.assertEqual(b1.outputs.count(), 1)
+        self.assertEqual(b1.job.splits.count(), 0) #not scheduled yet
+
+        assert not b1.done()
+
+
+
+class BackendSetup(BaseBackendTestCase):
+
+
+    def check_default_config(self):
+
+        # checks all is there
+        self.assertEqual(dump_backend(), qsetup.DEFAULT_CONFIGURATION)
+
+        worker = Worker.objects.get()
+        queue = Queue.objects.get()
+        Worker.objects.update(active=True)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+        self.assertEqual(list(worker.slots.values_list('id', flat=True)),
+            list(queue.slots.values_list('id', flat=True)))
+
+        # worker has no job splits assigned to it
+        self.assertEqual(worker.splits.count(), 0)
+
+        self.assertEqual(queue.availability(), qsetup.CORES)
+        self.assertEqual(queue.number_of_slots(), qsetup.CORES)
+        self.assertEqual(queue.worker_availability(), [worker])
+
+        # checks the single slot and priority
+        slot = queue.slots.get()
+        self.assertEqual(slot.quantity, qsetup.CORES)
+        self.assertEqual(slot.priority, 0)
+        self.assertEqual(slot.worker, worker)
+
+        # checks no orphan slots exist
+        self.assertEqual(Slot.objects.filter(queue=None).count(), 0)
+        self.assertEqual(Slot.objects.filter(worker=None).count(), 0)
+
+
+    def test_setup(self):
+
+        self.check_default_config()
+
+
+    def test_cmd_reset(self):
+
+        # installs the default configuration command
+        management.call_command('qsetup', verbosity=0, reset=True)
+        self.check_default_config()
+
+
+    def check_noprior_config(self):
+
+        qs = Queue.objects.all()
+
+        self.assertEqual(qs.count(), 3)
+
+        q1, q2, q3 = qs.order_by('name')
+
+        self.assertEqual(q1.name, 'q1')
+        self.assertEqual(q2.name, 'q2')
+        self.assertEqual(q3.name, 'q4')
+        self.assertEqual(q1.splits().count(), 0)
+        self.assertEqual(q2.splits().count(), 0)
+        self.assertEqual(q3.splits().count(), 0)
+
+        self.assertEqual(q1.number_of_slots(), 4)
+        self.assertEqual(q2.number_of_slots(), 2)
+        self.assertEqual(q3.number_of_slots(), 1)
+        self.assertEqual(q1.availability(), 4)
+        self.assertEqual(q2.availability(), 2)
+        self.assertEqual(q3.availability(), 1)
+        self.assertEqual(q1.environments.count(), 1)
+        self.assertEqual(q2.environments.count(), 1)
+        self.assertEqual(q3.environments.count(), 1)
+
+        self.assertEqual(q1.environments.first(), q2.environments.first())
+        self.assertEqual(q2.environments.first(), q3.environments.first())
+
+        env = q1.environments.first()
+
+        self.assertEqual(env.name, 'environment')
+        self.assertEqual(env.version, '1')
+
+        self.assertEqual(q1.slots.count(), 1)
+        self.assertEqual(q2.slots.count(), 1)
+        self.assertEqual(q3.slots.count(), 1)
+
+        slot1 = q1.slots.first()
+        slot2 = q2.slots.first()
+        slot3 = q3.slots.first()
+
+        self.assertEqual(slot1.quantity, 4)
+        self.assertEqual(slot1.priority, 0)
+        self.assertEqual(slot1.queue, q1)
+        self.assertEqual(slot2.quantity, 2)
+        self.assertEqual(slot2.priority, 0)
+        self.assertEqual(slot2.queue, q2)
+        self.assertEqual(slot3.quantity, 1)
+        self.assertEqual(slot3.priority, 0)
+        self.assertEqual(slot3.queue, q3)
+
+        worker1 = slot1.worker
+        worker2 = slot2.worker
+        worker3 = slot3.worker
+
+        self.assertEqual(worker1, worker2)
+        self.assertEqual(worker2, worker3)
+
+        self.assertEqual(worker1.name, 'node1')
+        self.assertEqual(list(worker1.splits.all()), [])
+        self.assertEqual(worker1.memory, 16*1024)
+        self.assertEqual(worker1.cores, 4)
+        self.assertEqual(worker1.available_cores(), 4)
+
+        self.assertEqual(worker1.slots.count(), 3)
+
+        self.assertEqual(set(worker1.slots.all()),
+            set(list(q1.slots.all()) + list(q2.slots.all()) + \
+                list(q3.slots.all())))
+
+        avail1 = q1.worker_availability()
+        self.assertEqual(avail1, [worker1])
+
+        avail2 = q2.worker_availability()
+        self.assertEqual(avail2, [worker1])
+
+        avail3 = q3.worker_availability()
+        self.assertEqual(avail3, [worker1])
+
+        # checks no orphan slots exist
+        self.assertEqual(Slot.objects.filter(queue=None).count(), 0)
+        self.assertEqual(Slot.objects.filter(worker=None).count(), 0)
+
+
+    def test_reconfigure_noprior(self):
+
+        setup_backend(QUEUES_WITHOUT_PRIORITY)
+        Worker.objects.update(active=True)
+        self.check_noprior_config()
+
+
+    def test_reconfigure_fail_qenv_used(self):
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        try:
+            setup_backend(QUEUES_WITHOUT_PRIORITY)
+        except RuntimeError as e:
+            assert str(e).find('on the following queue/environment combinations') != -1
+        else:
+            assert False, 'Queue re-configuration worked with q/env in use'
+
+
+    def check_prior_config(self):
+
+        qs = Queue.objects.all()
+
+        self.assertEqual(qs.count(), 3)
+
+        q1, q1_special, q2 = qs.order_by('name')
+
+        self.assertEqual(q1.name, 'q1')
+        self.assertEqual(q2.name, 'q2')
+        self.assertEqual(q1_special.name, 'q1_special')
+        self.assertEqual(q1.splits().count(), 0)
+        self.assertEqual(q2.splits().count(), 0)
+        self.assertEqual(q1_special.splits().count(), 0)
+
+        self.assertEqual(q1.number_of_slots(), 8)
+        self.assertEqual(q2.number_of_slots(), 4)
+        self.assertEqual(q1_special.number_of_slots(), 8)
+        self.assertEqual(q1.availability(), 8)
+        self.assertEqual(q2.availability(), 4)
+        self.assertEqual(q1_special.availability(), 8)
+        self.assertEqual(q1.environments.count(), 1)
+        self.assertEqual(q2.environments.count(), 1)
+        self.assertEqual(q1_special.environments.count(), 1)
+
+        self.assertEqual(q1.environments.first(), q2.environments.first())
+        self.assertEqual(q2.environments.first(),
+            q1_special.environments.first())
+
+        env = q1.environments.first()
+
+        self.assertEqual(env.name, 'environment')
+        self.assertEqual(env.version, '1')
+
+        self.assertEqual(q1.slots.count(), 2)
+        self.assertEqual(q1_special.slots.count(), 2)
+        self.assertEqual(q2.slots.count(), 2)
+
+        slot11, slot12 = q1.slots.all()
+        slot1_special1, slot1_special2 = q1_special.slots.all()
+        slot21, slot22 = q2.slots.all()
+
+        self.assertEqual(slot11.quantity, 4)
+        self.assertEqual(slot11.priority, 5)
+        self.assertEqual(slot12.quantity, 4)
+        self.assertEqual(slot12.priority, 0)
+        self.assertEqual(slot11.queue, q1)
+        self.assertEqual(slot12.queue, q1)
+
+        self.assertEqual(slot21.quantity, 2)
+        self.assertEqual(slot21.priority, 0)
+        self.assertEqual(slot22.quantity, 2)
+        self.assertEqual(slot22.priority, 10)
+        self.assertEqual(slot21.queue, q2)
+        self.assertEqual(slot22.queue, q2)
+
+        self.assertEqual(slot1_special1.quantity, 4)
+        self.assertEqual(slot1_special1.priority, 0)
+        self.assertEqual(slot1_special2.quantity, 4)
+        self.assertEqual(slot1_special2.priority, 5)
+        self.assertEqual(slot1_special1.queue, q1_special)
+        self.assertEqual(slot1_special2.queue, q1_special)
+
+        worker1 = slot11.worker
+        worker2 = slot12.worker
+        worker21 = slot21.worker
+        worker22 = slot22.worker
+        worker1_special1 = slot1_special1.worker
+        worker1_special2 = slot1_special2.worker
+
+        self.assertEqual(worker1, worker21)
+        self.assertEqual(worker1, worker1_special1)
+        self.assertEqual(worker2, worker22)
+        self.assertEqual(worker2, worker1_special2)
+
+        self.assertEqual(worker1.name, 'node1')
+        self.assertEqual(worker1.splits.count(), 0)
+        self.assertEqual(worker1.memory, 32*1024)
+        self.assertEqual(worker1.cores, 4)
+        self.assertEqual(worker1.available_cores(), 4)
+
+        self.assertEqual(worker2.name, 'node2')
+        self.assertEqual(worker2.splits.count(), 0)
+        self.assertEqual(worker2.memory, 16*1024)
+        self.assertEqual(worker2.cores, 4)
+        self.assertEqual(worker2.available_cores(), 4)
+
+        self.assertEqual(worker1.slots.count(), 3)
+        self.assertEqual(worker2.slots.count(), 3)
+
+        avail1 = q1.worker_availability()
+        self.assertEqual(avail1, [worker1, worker2])
+
+        avail2 = q2.worker_availability()
+        self.assertEqual(avail2, [worker2, worker1])
+
+        avail1_special = q1_special.worker_availability()
+        self.assertEqual(avail1_special, [worker2, worker1])
+
+        # checks no orphan slots exist
+        self.assertEqual(Slot.objects.filter(queue=None).count(), 0)
+        self.assertEqual(Slot.objects.filter(worker=None).count(), 0)
+
+
+    def test_reconfigure_priors(self):
+
+        setup_backend(PRIORITY_QUEUES)
+        Worker.objects.update(active=True)
+        self.check_prior_config()
+
+
+
+
+class Scheduling(BaseBackendTestCase):
+
+
+    def check_stats_success(self, split):
+
+        assert abs(split.job.block.speed_up_real() - 1.0) < 0.1
+        assert abs(split.job.block.speed_up_maximal() - 1.0) < 0.1
+        assert split.job.block.linear_execution_time() > 0.0
+        assert split.job.block.queuing_time() > 0.0
+        assert split.job.block.stdout() is None
+        assert split.job.block.stderr() is None
+        assert split.job.block.error_report() is None
+
+
+    def test_success(self):
+
+        # tests a simple successful experiment scheduling and execution
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+        assigned_splits = schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since this job was successful, the next one should be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.DONE)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_does_not_reassign(self):
+
+        # tests that the scheduling routine never re-assigns splits that are
+        # already assigned.
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+
+    def test_worker_activation(self):
+
+        # tests that scheduling depends on worker activation
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # de-activates worker
+        Worker.objects.update(active=False)
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # re-activate the worker, show it now schedules fine
+        Worker.objects.update(active=True)
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 1)
+
+        # the rest would continue like with test_success
+
+
+    def test_fails_on_first_block(self):
+
+        # tests that, if we fail on the first block, the experiment fails and
+        # everything stops as foreseen
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal, failure
+        split.end(Result(status=1))
+        self.assertEqual(split.job.status, Job.FAILED)
+        self.assertEqual(split.job.block.status, Block.FAILED)
+        split.job.block.experiment.refresh_from_db()
+        self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_fails_on_last_block(self):
+
+        # tests that a failure on the last block fails the whole experiment
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since this job was successful, the next one should be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=1))
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.assertEqual(split.job.status, Job.FAILED)
+        self.assertEqual(split.job.block.status, Block.FAILED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_cancel_before_starting(self):
+
+        # tests experiment cancellation before the experiment is started
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+
+        xp.cancel()
+
+        self.assertEqual(
+            [str(k) for k in xp.blocks.values_list('status', flat=True)],
+            [Block.CANCELLED, Block.CANCELLED]
             )
-        assert not errors, 'Data format errors: %s' % errors
-
-        (dataformat, errors) = DataFormat.objects.create_dataformat(
-            author=system_user,
-            name='text',
-            short_description='description',
-            declaration={
-              "text": "string",
-              },
+        xp.refresh_from_db()
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        worker = Worker.objects.get()
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_cancel_after_success(self):
+
+        # tests experiment cancellation while the experiment is running
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since this job was successful, the next one should be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        xp.cancel()
+
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status', flat=True)],
+            [Block.CACHED, Block.CANCELLED]
             )
-        assert not errors, 'Data format errors: %s' % errors
-
-
-        (self.algorithm, errors) = Algorithm.objects.create_algorithm(
-            author=user,
-            name='analysis',
-            short_description='description',
-            declaration="""{
-  "language": "python",
-  "groups": [
-    {
-      "inputs": {
-        "in": { "type": "johndoe/single_integer/1" }
-      }
-    }
-  ],
-  "results": {
-    "out_float": { "type": "float32" },
-    "out_text": { "type": "string" }
-  },
-  "parameters": {
-  }
-}""",
+        self.assertEqual(xp.status, Experiment.FAILED)
 
-            code="""class Algorithm:
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
-    def process(self, inputs, output):
-        # We don't really care
-        return True
-""")
-        assert not errors, 'Algorithm errors: %s' % errors
+        worker = Worker.objects.get()
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        super_user = User.objects.create_user('superuser', 'superuser@test.org', '1234')
-        super_user.is_superuser = True
-        super_user.save()
 
+    def test_cancel_while_running(self):
 
-    def tearDown(self):
-        for path in [settings.TOOLCHAINS_ROOT, settings.EXPERIMENTS_ROOT,
-                     settings.DATAFORMATS_ROOT, settings.ALGORITHMS_ROOT,
-                     settings.CACHE_ROOT]:
-            if os.path.exists(path):
-                shutil.rmtree(path)
+        # tests experiment cancellation while a block is running
 
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-#----------------------------------------------------------
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
 
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assigned_splits = schedule()
 
-class SchedulerAPI(BackendAPIBase):
-    def setUp(self):
-        super(SchedulerAPI, self).setUp()
-        self.url  = reverse('api_backend:backend-api-scheduler')
+        worker = Worker.objects.get()
 
-    def test_no_access_for_anonymous_user(self):
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
 
-    def test_no_access_for_non_superuser(self):
-        self.client.login(username='johndoe', password='1234')
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
 
-#----------------------------------------------------------
+        xp.cancel()
 
+        # simulate worker cancelling
+        split.refresh_from_db()
+        self.assertEqual(split.status, Job.CANCEL)
+        split.end(None, Job.CANCELLED)
 
-class CancelAllExperimentsAPI(BackendAPIBase):
-    def setUp(self):
-        super(CancelAllExperimentsAPI, self).setUp()
-        self.url  = reverse('api_backend:backend-api-cancel-all-experiments')
+        xp.refresh_from_db()
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status', flat=True)],
+            [Block.CANCELLED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
 
-    def test_no_access_for_anonymous_user(self):
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
+        worker = Worker.objects.get()
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-    def test_no_access_for_non_superuser(self):
-        self.client.login(username='johndoe', password='1234')
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
 
+    def test_cancel_after_failure(self):
 
-#----------------------------------------------------------
+        # tests that, if we fail on the first block, the experiment fails and
+        # a cancellation that comes after that is a no-op
 
+        current_stats = HourlyStatistics.objects.count()
 
-class SchedulerConfigurationAPI(BackendAPIBase):
-    def setUp(self):
-        super(SchedulerConfigurationAPI, self).setUp()
-        self.url  = reverse('api_backend:backend-api-scheduler-configuration')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-    def test_no_access_for_anonymous_user(self):
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        # schedules the experiment and check it
+        xp.schedule()
+        self.check_single(xp)
 
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assigned_splits = schedule()
 
-    def test_no_access_for_non_superuser(self):
-        self.client.login(username='johndoe', password='1234')
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        worker = Worker.objects.get()
 
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-#----------------------------------------------------------
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-class CacheCleanupAPI(BackendAPIBase):
-    def setUp(self):
-        super(CacheCleanupAPI, self).setUp()
-        self.url  = reverse('api_backend:backend-api-cache-cleanup')
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
 
-    def test_no_access_for_anonymous_user(self):
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        # simulate end job signal, failure
+        split.end(Result(status=1))
+        self.assertEqual(split.job.status, Job.FAILED)
+        self.assertEqual(split.job.block.status, Block.FAILED)
+        split.job.block.experiment.refresh_from_db()
+        self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
 
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-    def test_no_access_for_non_superuser(self):
-        self.client.login(username='johndoe', password='1234')
-        response = self.client.get(self.url)
-        self.checkResponse(response, 403)
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-#----------------------------------------------------------
+        xp.cancel()
+        self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
 
 
-class BlockStartedAPI(BackendAPIBase):
+    def test_blocking_success(self):
 
-    def setUp(self):
-        super(BlockStartedAPI, self).setUp()
+        # tests that two similar experiments can be scheduled at the same
+        # time and that we optimise correctly, running only one of them. The
+        # other is updated as the blocking experiment executes.
 
-        User.objects.create_user('scheduler',
-                'scheduler@test.org', '1234')
+        current_stats = HourlyStatistics.objects.count()
 
-        self.client.login(username='johndoe', password='1234')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        url = reverse('api_experiments:list_create', args=['johndoe'])
-        response = self.client.post(url,
-            json.dumps({
-                'toolchain': 'johndoe/toolchain1/1',
-                'declaration': BackendAPIBase.DECLARATION1,
-                'name': 'experiment1',
-            }), content_type='application/json')
+        xpc = xp.fork(name='single_copy')
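+        # the fork is an identical experiment under a new name; since it
+        # would compute the same data, the scheduler should run only one of
+        # the two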
 
-        self.checkResponse(response, 201, content_type='application/json')
+        # schedules the experiment and checks it
+        xp.schedule()
+        xpc.schedule()
 
-        url = reverse('api_experiments:start', args=['johndoe', 'toolchain1', 1, 'experiment1'])
-        response = self.client.post(url)
-        self.checkResponse(response, 200, content_type='application/json')
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assert xpc.blocks.first().job.runnable_date is None
+        assert xpc.blocks.last().job.runnable_date is None
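+        # (only the blocking experiment's first block is runnable; the fork's
+        # jobs wait as children of their blocking counterparts)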
 
-        self.client.logout()
+        assigned_splits = schedule()
 
-        self.experiment = Experiment.objects.get(author__username='johndoe',
-                                                 toolchain__name='toolchain1',
-                                                 name='experiment1',
-                                                )
+        worker = Worker.objects.get()
 
-        self.url  = reverse('api_backend:backend-api-block-started')
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
+        # checks that the jobs are connected to each other across experiments
+        self.assertEqual(xp.blocks.first().job.child.block.experiment, xpc)
+        self.assertEqual(xp.blocks.last().job.child.block.experiment, xpc)
 
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status, Experiment.RUNNING)
 
-    def test_no_notification_for_anonymous_user(self):
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-            }), content_type='application/json')
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-        self.checkResponse(response, 403)
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
 
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.COMPLETED)
+        self.assertEqual(split.job.child.block.status, Block.CACHED)
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.RUNNING)
 
-    def test_no_notification_for_non_scheduler_user(self):
-        self.client.login(username='johndoe', password='1234')
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-            }), content_type='application/json')
+        self.check_stats_success(split)
 
-        self.checkResponse(response, 403)
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(
+            Job.objects.filter(block=split.job.child.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job.child).count(),
+            0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since this job was successful, the next one should be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.DONE)
+        self.assertEqual(split.job.child.status, Job.COMPLETED)
+        self.assertEqual(split.job.child.block.status, Block.CACHED)
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.DONE)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_blocking_failure(self):
+
+        # tests that two similar experiments can be scheduled at the same
+        # time and that we optimise correctly, running only one of them. If
+        # the blocking experiment fails, so does the blocked one.
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        xpc = xp.fork(name='single_copy')
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xpc.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assert xpc.blocks.first().job.runnable_date is None
+        assert xpc.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # checks that the jobs are connected to each other across experiments
+        self.assertEqual(xp.blocks.first().job.child.block.experiment, xpc)
+        self.assertEqual(xp.blocks.last().job.child.block.experiment, xpc)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=1))
+        self.assertEqual(split.job.status, Job.FAILED)
+        self.assertEqual(split.job.block.status, Block.FAILED)
+        split.job.block.experiment.refresh_from_db()
+        self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
+        self.assertEqual(split.job.child.status, Job.FAILED)
+        self.assertEqual(split.job.child.block.status, Block.FAILED)
+        split.job.child.block.experiment.refresh_from_db()
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.FAILED)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_blocking_cancel_after_success(self):
+
+        # tests that two similar experiments can be scheduled at the same
+        # time and that we optimise correctly, running only one of them. If
+        # the first experiment is cancelled, the second one proceeds normally.
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        xpc = xp.fork(name='single_copy')
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xpc.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assert xpc.blocks.first().job.runnable_date is None
+        assert xpc.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # checks that the jobs are connected to each other across experiments
+        self.assertEqual(xp.blocks.first().job.child.block.experiment, xpc)
+        self.assertEqual(xp.blocks.last().job.child.block.experiment, xpc)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.COMPLETED)
+        self.assertEqual(split.job.child.block.status, Block.CACHED)
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.RUNNING)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(split)
+
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(
+            Job.objects.filter(block=split.job.child.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job.child).count(),
+            0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # cancels the blocking experiment - the blocked one must continue
+        xp.cancel()
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status', flat=True)],
+            [Block.CACHED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.filter(block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job__block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since the first job was successful, the second block of the
+        # previously blocked experiment must be ready to run
+
+        # schedules the last block of the experiment
+        assert xpc.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xpc)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # the rest would continue normally
+
+
+    def test_blocking_cancel_while_running(self):
+
+        # tests that two similar experiments can be scheduled at the same
+        # time and that we optimise correctly, running only one of them. If
+        # the first experiment is cancelled while one of its blocks is
+        # running, the second one proceeds normally.
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        xpc = xp.fork(name='single_copy')
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xpc.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assert xpc.blocks.first().job.runnable_date is None
+        assert xpc.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # checks that the jobs are connected to each other across experiments
+        self.assertEqual(xp.blocks.first().job.child.block.experiment, xpc)
+        self.assertEqual(xp.blocks.last().job.child.block.experiment, xpc)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status, Experiment.RUNNING)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-    def test_bad_notification_request_with_unknown_experiment(self):
-        self.client.login(username='scheduler', password='1234')
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/unknown',
-                'block-name': 'addition1',
-            }), content_type='application/json')
+        # cancels the blocking experiment - the blocked one must continue
+        xp.cancel()
 
-        self.checkResponse(response, 404)
+        # simulate worker cancelling
+        split.refresh_from_db()
+        self.assertEqual(split.status, Job.CANCEL)
+        split.end(None, Job.CANCELLED)
 
+        xp.refresh_from_db()
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status', flat=True)],
+            [Block.CANCELLED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+        # assert we have no database traces after the last block is cancelled
+        self.assertEqual(Job.objects.filter(block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job__block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since the blocking job was cancelled, the first block of the
+        # previously blocked experiment must now be ready to run on its own
+
+        # schedules the first block of the blocked experiment
+        assert xpc.blocks.first().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        assigned_splits = JobSplit.objects.filter(worker__isnull=False)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits.first()
+        self.assertEqual(split.job.block.experiment, xpc)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # the rest would continue normally
+
+
+    def test_blocking_cancel_blocked(self):
+
+        # tests that two similar experiments can be scheduled at the same
+        # time and that we optimise correctly, running only one of them. If
+        # the blocked experiment is cancelled, this does not affect the
+        # running experiment.
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        xpc = xp.fork(name='single_copy')
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xpc.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+        assert xpc.blocks.first().job.runnable_date is None
+        assert xpc.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # checks that the jobs are connected to each other across experiments
+        self.assertEqual(xp.blocks.first().job.child.block.experiment, xpc)
+        self.assertEqual(xp.blocks.last().job.child.block.experiment, xpc)
+
+        # simulate job start on worker
+        split.start()
+        self.assertEqual(split.job.status, Job.PROCESSING)
+        self.assertEqual(split.job.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.PROCESSING)
+        self.assertEqual(split.job.child.block.status, Block.PROCESSING)
+        self.assertEqual(split.job.child.block.experiment.status, Experiment.RUNNING)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # no job can be run right now
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
+
+        # simulate end job signal
+        split.end(Result(status=0))
+        self.assertEqual(split.job.status, Job.COMPLETED)
+        self.assertEqual(split.job.block.status, Block.CACHED)
+        self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
+        self.assertEqual(split.job.child.status, Job.COMPLETED)
+        self.assertEqual(split.job.child.block.status, Block.CACHED)
+        self.assertEqual(split.job.child.block.experiment.status,
+            Experiment.RUNNING)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(split)
+
+        # cancels the blocked experiment - the blocking one must continue
+        xpc.cancel()
+        self.assertEqual(
+            [str(k) for k in xpc.blocks.order_by('id').values_list('status',
+              flat=True)],
+            [Block.CACHED, Block.CANCELLED]
+            )
+        self.assertEqual(xpc.status, Experiment.FAILED)
+
+        # assert we have no database traces after the last cancel
+        self.assertEqual(Job.objects.filter(block__in=xpc.blocks.all()).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job__block__in=xpc.blocks.all()).count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since the first job was successful, the second block of the
+        # running experiment must be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        assigned_splits = JobSplit.objects.filter(worker__isnull=False)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits.first()
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # the rest would continue normally
+
+
+    def test_schedule_without_queue(self):
+
+        # tests that an experiment whose queue has disappeared is correctly
+        # aborted at scheduling time
+        setup_backend(QUEUES_WITHOUT_PRIORITY)
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
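+        # the new backend configuration no longer defines the queue referenced
+        # by this experiment, so scheduling must raise RuntimeError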
+        self.assertRaises(RuntimeError, xp.schedule)
+
+
+    def test_split_no_index(self):
+
+        # tests a simple experiment with splitting and shows it can fail
+        # gracefully
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single_large'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        worker = Worker.objects.get()
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xp.refresh_from_db()
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+        self.assertEqual(xp.blocks.first().status, Block.CANCELLED)
+        assert xp.blocks.first().error_report().find(settings.DEFAULT_USER_ERROR) == 0
+        self.assertEqual(xp.blocks.last().status, Block.CANCELLED)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.filter(block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job__block__in=xp.blocks.all()).count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_schedules_two_jobs(self):
+
+        # tests a simple scheduling activity in which two jobs of the same
+        # experiment must be scheduled concurrently, provided there is enough
+        # space
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/triangle/1/triangle'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        worker = Worker.objects.get()
+
+        # schedules the experiment and checks it
+        xp.schedule()
+        xp.refresh_from_db()
+        self.assertEqual(xp.status, Experiment.SCHEDULED)
+
+        assigned_splits = schedule()
 
-    def test_bad_notification_request_with_unknown_block(self):
-        self.client.login(username='scheduler', password='1234')
+        self.assertEqual(len(assigned_splits), 2)
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp)
+        self.assertEqual(assigned_splits[1].job.block.experiment, xp)
+        self.assertNotEqual(assigned_splits[0], assigned_splits[1])
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'unknown',
-            }), content_type='application/json')
 
-        self.checkResponse(response, 404)
+    def test_cancel_concurrent_job(self):
 
+        # tests a simple scheduling activity in which two jobs of the same
+        # experiment must be scheduled concurrently, provided there is enough
+        # space. Then, it fails one of them and waits for the experiment to
+        # fail completely. Jobs still processing must be cancelled.
 
-    def test_bad_notification_request_without_json_content(self):
-        self.client.login(username='scheduler', password='1234')
-        response = self.client.put(self.url)
-        self.checkResponse(response, 400, content_type='application/json')
+        current_stats = HourlyStatistics.objects.count()
 
+        fullname = 'user/user/triangle/1/triangle'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-    def test_bad_notification_request_with_invalid_json_content1(self):
-        self.client.login(username='scheduler', password='1234')
+        worker = Worker.objects.get()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'block-name': 'addition1',
-            }), content_type='application/json')
+        # schedules the experiment and checks it
+        xp.schedule()
+        xp.refresh_from_db()
+        self.assertEqual(xp.status, Experiment.SCHEDULED)
 
-        self.checkResponse(response, 400, content_type='application/json')
+        assigned_splits = schedule()
 
+        self.assertEqual(len(assigned_splits), 2)
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp)
+        self.assertEqual(assigned_splits[1].job.block.experiment, xp)
+        self.assertNotEqual(assigned_splits[0], assigned_splits[1])
 
-    def test_bad_notification_request_with_invalid_json_content2(self):
-        self.client.login(username='scheduler', password='1234')
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-            }), content_type='application/json')
+        # simulate job start on worker
+        assigned_splits[0].start()
+        assigned_splits[1].start()
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # now fail one of the jobs, the end result is the experiment fails
+        assigned_splits[1].end(Result(status=15)) #simulated sigterm sent
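+        # (status 15 mirrors a process killed by SIGTERM; any non-zero status
+        # marks the job as failed)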
+        self.assertEqual(assigned_splits[1].job.status, Job.FAILED)
 
+        # cancels the job that is marked for cancelling and checks that the
+        # final experiment state is as expected (this step is normally the
+        # worker's job)
+        self.assertEqual(assigned_splits[0].job.splits.first().status,
+            Job.CANCEL)
+        assigned_splits[0].job.splits.first().end(None, Job.CANCELLED)
 
-    def test_first_block_of_experiment(self):
-        self.client.login(username='scheduler', password='1234')
+        xp.refresh_from_db()
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status',
+              flat=True)],
+            [Block.CANCELLED, Block.FAILED, Block.CANCELLED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+
+
+class SchedulingPriority(BaseBackendTestCase):
+
+
+    def set_globals(self, xp, queue, environment):
+        '''Sets the global queue and environment of the experiment'''
+
+        decl = xp.declaration
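+        # shape assumed for the declaration (a sketch inferred from the
+        # accesses below): {'globals': {'queue': ..., 'environment':
+        # {'name': ..., 'version': ...}}, 'blocks': {...}}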
+        decl['globals']['queue'] = queue.name
+        decl['globals']['environment']['name'] = environment.name
+        decl['globals']['environment']['version'] = environment.version
+        xp.declaration = decl
+        xp.save() #reloads all blocks
+
+
+    def reset_slots(self, xp):
+        '''Only use one slot in all blocks'''
+
+        decl = xp.declaration
+        for b in decl['blocks']:
+            if 'nb_slots' in decl['blocks'][b]:
+                del decl['blocks'][b]['nb_slots']
+        xp.declaration = decl
+        xp.save() #reloads all blocks
+
+
+    def test_priority_multicore(self):
+
+        # tests that, in a heterogeneous backend setup, priority is correctly
+        # given to jobs that require more cores.
+
+        setup_backend(QUEUES_WITHOUT_PRIORITY)
+        Worker.objects.update(active=True)
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_add'
+        xp_add = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_add2'
+        xp_add2 = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_large'
+        xp_large = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        q1 = Queue.objects.get(name='q1')
+        q2 = Queue.objects.get(name='q2')
+        env = Environment.objects.get()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-            }), content_type='application/json')
+        # reset queue and environment to new backend configuration
+        self.set_globals(xp, q1, env)
+        self.set_globals(xp_add, q1, env)
+        self.set_globals(xp_add2, q1, env)
+        self.set_globals(xp_large, q2, env) #notice different queue
+        self.reset_slots(xp_large) #one slot per block only
 
-        self.checkResponse(response, 204)
+        xp.schedule()
+        xp_add.schedule()
+        xp_add2.schedule()
+        xp_large.schedule()
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        assigned_splits = schedule()
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is None)
-        self.assertEqual(experiment.status, Experiment.RUNNING)
-        self.assertEqual(experiment.blocks.get(name='addition1').status, Block.PROCESSING)
-        self.assertEqual(experiment.blocks.get(name='addition2').status, Block.NOT_CACHED)
-        self.assertEqual(experiment.blocks.get(name='analysis').status, Block.NOT_CACHED)
+        self.assertEqual(len(assigned_splits), 3)
 
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp_large)
+        # then, the scheduling order is respected
+        self.assertEqual(assigned_splits[1].job.block.experiment, xp)
+        self.assertEqual(assigned_splits[2].job.block.experiment, xp_add)
+        # notice that the last experiment is not assigned
 
-    def test_other_block_of_running_experiment(self):
-        self.client.login(username='scheduler', password='1234')
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+    def test_priority_multicore_delayed(self):
 
-        experiment.start_date = datetime.now()
-        experiment.status     = Experiment.RUNNING
-        experiment.save()
+        # tests that, in a heterogeneous backend setup, priority is correctly
+        # given to jobs that require more cores. In this test, specifically,
+        # we verify that, if the farm is busy, new jobs that require more
+        # resources block other jobs from running, even if free cores are
+        # available.
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.CACHED
-        block.save()
+        setup_backend(QUEUES_WITHOUT_PRIORITY)
+        Worker.objects.update(active=True)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition2',
-            }), content_type='application/json')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_add'
+        xp_add = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_add2'
+        xp_add2 = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        fullname = 'user/user/single/1/single_large'
+        xp_large = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        self.checkResponse(response, 204)
+        q1 = Queue.objects.get(name='q1')
+        q4 = Queue.objects.get(name='q4')
+        env = Environment.objects.get()
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # reset queue and environment to new backend configuration
+        self.set_globals(xp, q1, env)
+        self.set_globals(xp_add, q1, env)
+        self.set_globals(xp_large, q4, env) #notice different queue
+        self.reset_slots(xp_large) #one slot per block only
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is None)
-        self.assertEqual(experiment.status, Experiment.RUNNING)
+        xp.schedule()
+        assigned_splits = schedule()
 
-        addition1 = experiment.blocks.get(name='addition1')
-        self.assertEqual(addition1.status, Block.CACHED)
-        self.assertTrue(addition1.start_date is None) #no start received
-        self.assertTrue(addition1.end_date is not None)
+        self.assertEqual(len(assigned_splits), 1)
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp)
+        split = assigned_splits[0]
 
-        addition2 = experiment.blocks.get(name='addition2')
-        self.assertEqual(addition2.status, Block.PROCESSING)
-        self.assertTrue(addition2.start_date is not None)
-        self.assertTrue(addition2.end_date is None)
+        xp_large.schedule() #will now block anything else from running
+        xp_add.schedule()
 
-        analysis = experiment.blocks.get(name='analysis')
-        self.assertEqual(analysis.status, Block.NOT_CACHED)
-        self.assertTrue(analysis.start_date is None)
-        self.assertTrue(analysis.end_date is None)
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 0)
 
+        # start/end the xp block and schedule again
+        split.start()
+        split.end(Result(0))
 
-#----------------------------------------------------------
+        # now, the job with more cores should be scheduled first
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 1)
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp_large)
 
 
-class BlockFinishedAPI(BackendAPIBase):
+    def test_priorities(self):
+
+        # tests that, in a heterogeneous backend setup, priority is given to
+        # different computers based on their priority settings
+
+        setup_backend(PRIORITY_QUEUES)
+        Worker.objects.update(active=True)
+
+        q1 = Queue.objects.get(name='q1')
+        env = Environment.objects.get()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        self.set_globals(xp, q1, env)
+
+        q1_special = Queue.objects.get(name='q1_special')
+        fullname = 'user/user/single/1/single_add'
+        xp_add = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        self.set_globals(xp_add, q1_special, env)
+
+        q2 = Queue.objects.get(name='q2')
+        fullname = 'user/user/single/1/single_large'
+        xp_large = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+        self.set_globals(xp_large, q2, env) #notice different queue
+        self.reset_slots(xp_large) #one slot per block only
+
+        node1 = Worker.objects.get(name='node1')
+        node2 = Worker.objects.get(name='node2')
+
+        # verify that xp_large has priority; the other jobs go to the queues
+        # q1 and q1_special
+        xp.schedule()
+        xp_add.schedule()
+        xp_large.schedule()
+
+        assigned_splits = schedule()
+        self.assertEqual(len(assigned_splits), 3)
+
+        self.assertEqual(assigned_splits[0].job.block.experiment, xp_large)
+        self.assertEqual(assigned_splits[0].job.block.name, 'echo')
+        self.assertEqual(assigned_splits[0].job.splits.first().worker, node2)
+
+        self.assertEqual(assigned_splits[1].job.block.experiment, xp)
+        self.assertEqual(assigned_splits[1].job.block.name, 'echo')
+        self.assertEqual(assigned_splits[1].job.splits.first().worker, node1)
+
+        self.assertEqual(assigned_splits[2].job.block.experiment, xp_add)
+        self.assertEqual(assigned_splits[2].job.block.name, 'echo')
+        self.assertEqual(assigned_splits[2].job.splits.first().worker, node2)
+
+
+class Working(BaseBackendTestCase):
+
 
     def setUp(self):
-        super(BlockFinishedAPI, self).setUp()
 
-        User.objects.create_user('scheduler', 'scheduler@test.org', '1234')
+        from beat.core.async import resolve_cpulimit_path
+        self.cpulimit = resolve_cpulimit_path(None)
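+        # cpulimit is an external helper binary used to cap the CPU usage of
+        # block processes; resolve_cpulimit_path() locates the executable to
+        # be used by the tests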
 
-        self.client.login(username='johndoe', password='1234')
+        from . import utils
+        self.process = utils.resolve_process_path()
+        self.environments = utils.find_environments(None)
+        self.env1_execute = self.environments['environment (1)']['execute']
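+        # find_environments() maps environment names to their helper scripts;
+        # the 'execute' entry is the script used to run a block inside the
+        # environment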
 
-        url = reverse('api_experiments:list_create', args=['johndoe'])
-        response = self.client.post(url,
-            json.dumps({
-                'toolchain': 'johndoe/toolchain1/1',
-                'declaration': BackendAPIBase.DECLARATION1,
-                'name': 'experiment1',
-            }), content_type='application/json')
+        if not os.path.exists(settings.CACHE_ROOT):
+            os.makedirs(settings.CACHE_ROOT)
 
-        self.checkResponse(response, 201, content_type='application/json')
 
-        url = reverse('api_experiments:start', args=['johndoe', 'toolchain1', 1, 'experiment1'])
-        response = self.client.post(url)
-        self.checkResponse(response, 200, content_type='application/json')
+    def tearDown(self):
+        if os.path.exists(settings.CACHE_ROOT):
+            shutil.rmtree(settings.CACHE_ROOT)
 
-        self.client.logout()
 
-        self.experiment = Experiment.objects.get(author__username='johndoe',
-                                                 toolchain__name='toolchain1',
-                                                 name='experiment1',
-                                                )
+    def check_stats_success(self, block):
 
-        self.url  = reverse('api_backend:backend-api-block-finished')
+        assert abs(block.speed_up_real() - 1.0) < 0.1
+        assert abs(block.speed_up_maximal() - 1.0) < 0.1
+        assert block.linear_execution_time() > 0.0
+        assert block.queuing_time() > 0.0
+        assert block.stdout() == ''
+        assert block.stderr() == ''
+        assert block.error_report() == ''
 
 
-    def start_block(self, block_name):
-        response = self.client.put(reverse('api_backend:backend-api-block-started'),
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': block_name,
-            }), content_type='application/json')
+    def test_success(self):
 
-        self.checkResponse(response, 204)
+        # tests an experiment can actually be run
 
+        current_stats = HourlyStatistics.objects.count()
 
-    def test_no_notification_for_anonymous_user(self):
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'statistics': None,
-            }), content_type='application/json')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        self.checkResponse(response, 403)
+        # schedules the experiment and checks it
+        xp.schedule()
 
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
 
-    def test_no_notification_for_non_scheduler_user(self):
-        self.client.login(username='johndoe', password='1234')
+        assigned_splits = schedule()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'statistics': None,
-            }), content_type='application/json')
+        worker = Worker.objects.get()
 
-        self.checkResponse(response, 403)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
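+        # process() follows, synchronously, the code path a real worker would
+        # take: it executes the block inside the given environment, under
+        # cpulimit, and records the outcome in the database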
+
+        # at this point, job should have been successful
+        xp.refresh_from_db()
+        block = xp.blocks.first()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(xp.status, Experiment.RUNNING)
+
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
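+        # (index_checksums() is assumed to validate the checksum files written
+        # alongside each cached output)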
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(block)
+
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # since this job was successful, the next one should be ready to run
+
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
+
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        xp.refresh_from_db()
+        block = xp.blocks.last()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(xp.status, Experiment.DONE)
+
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
+
+        self.check_stats_success(block)
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+
+    def test_failure(self):
+
+        # tests an experiment can fail and we can handle it fine
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single_error'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and checks it
+        xp.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
 
+        # at this point, job should have failed
+        xp.refresh_from_db()
+        block = xp.blocks.first()
+        self.assertEqual(block.status, Block.FAILED)
+        self.assertEqual(block.experiment.status, Experiment.FAILED)
 
-    def test_bad_notification_request_with_unknown_experiment(self):
-        self.client.login(username='scheduler', password='1234')
+        # no cache must have been generated
+        assert all([not k.exists() for k in block.outputs.all()])
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/unknown',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        self.checkResponse(response, 404)
+        assert abs(block.speed_up_real() - 1.0) < 0.1
+        assert abs(block.speed_up_maximal() - 1.0) < 0.1
+        assert block.linear_execution_time() > 0.0
+        assert block.queuing_time() > 0.0
+        assert block.stdout() == ''
+        assert block.stderr() == ''
+        assert block.error_report().find('Error') != -1
 
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
 
-    def test_bad_notification_request_with_unknown_block(self):
-        self.client.login(username='scheduler', password='1234')
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'unknown',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
 
-        self.checkResponse(response, 404)
+    def test_skip(self):
 
+        # tests an experiment can actually be completely skipped if all files
+        # are already cached
 
-    def test_bad_notification_request_with_invalid_state(self):
-        self.client.login(username='scheduler', password='1234')
+        current_stats = HourlyStatistics.objects.count()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'unknown',
-                'statistics': None,
-            }), content_type='application/json')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # schedules the experiment and checks it
+        xp.schedule()
 
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
 
-    def test_bad_notification_request_without_json_content(self):
-        self.client.login(username='scheduler', password='1234')
-        response = self.client.put(self.url)
-        self.checkResponse(response, 400, content_type='application/json')
+        assigned_splits = schedule()
 
+        worker = Worker.objects.get()
 
-    def test_bad_notification_request_with_invalid_json_content1(self):
-        self.client.login(username='scheduler', password='1234')
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'block-name': 'addition1',
-                'state': 'processed',
-                'statistics': None,
-            }), content_type='application/json')
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # at this point, job should have been successful
+        xp.refresh_from_db()
+        block = xp.blocks.first()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(block.experiment.status, Experiment.RUNNING)
 
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
 
-    def test_bad_notification_request_with_invalid_json_content2(self):
-        self.client.login(username='scheduler', password='1234')
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'state': 'processed',
-                'statistics': None,
-            }), content_type='application/json')
+        self.check_stats_success(block)
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-    def test_bad_notification_request_with_invalid_json_content3(self):
-        self.client.login(username='scheduler', password='1234')
+        # since this job was successful, the next one should be ready to run
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'statistics': None,
-            }), content_type='application/json')
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
 
-        self.checkResponse(response, 400, content_type='application/json')
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
 
-    def test_bad_notification_request_with_invalid_json_content4(self):
-        self.client.login(username='scheduler', password='1234')
+        # checks the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'statistics': None,
-            }), content_type='application/json')
+        xp.refresh_from_db()
+        block = xp.blocks.last()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(block.experiment.status, Experiment.DONE)
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
 
+        self.check_stats_success(block)
 
-    def test__not_cached_block__processed(self):
-        self.client.login(username='scheduler', password='1234')
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        self.checkResponse(response, 204)
+        # now we fork and re-run the same experiment
+        xpc = xp.fork(name='single_copy')
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # schedules the experiment (it should immediately load from the db)
+        xpc.schedule()
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is None)
-        self.assertEqual(experiment.status, Experiment.RUNNING)
+        self.assertEqual(xpc.status, Experiment.DONE)
 
-        addition1 = experiment.blocks.get(name='addition1')
-        self.assertEqual(addition1.status, Block.CACHED)
-        self.assertTrue(addition1.start_date is None) #no start received!
-        self.assertTrue(addition1.end_date is not None)
 
-        addition2 = experiment.blocks.get(name='addition2')
-        self.assertEqual(addition2.status, Block.NOT_CACHED)
-        self.assertTrue(addition2.start_date is None)
-        self.assertTrue(addition2.end_date is None)
+    def test_does_not_skip(self):
 
-        analysis = experiment.blocks.get(name='analysis')
-        self.assertEqual(analysis.status, Block.NOT_CACHED)
-        self.assertTrue(analysis.start_date is None)
-        self.assertTrue(analysis.end_date is None)
+        # tests that an experiment can be partially skipped if some cache
+        # files are already available
 
-    def test__not_cached_block__failed(self):
-        self.client.login(username='scheduler', password='1234')
+        current_stats = HourlyStatistics.objects.count()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'failed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        self.checkResponse(response, 204)
+        # schedules the experiment and checks it
+        xp.schedule()
 
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
 
-    def test__not_cached_block__cancelled(self):
-        self.client.login(username='scheduler', password='1234')
+        assigned_splits = schedule()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'cancelled',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        worker = Worker.objects.get()
 
-        self.checkResponse(response, 204)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
 
-    def test__cached_block__processed(self):
-        self.client.login(username='scheduler', password='1234')
+        # at this point, job should have been successful
+        xp.refresh_from_db()
+        block = xp.blocks.first()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(block.experiment.status, Experiment.RUNNING)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.CACHED
-        block.save()
+        # checks that the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        self.check_stats_success(block)
 
-        self.checkResponse(response, 204)
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-    def test__cached_block__failed(self):
-        self.client.login(username='scheduler', password='1234')
+        # now we cancel the experiment
+        xp.cancel()
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # we fork it and re-run it - only the last block will run again
+        xpc = xp.fork(name='single_copy')
+        xpc.schedule()
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.CACHED
-        block.save()
+        # the first block requires no job - it is already cached
+        assert not hasattr(xpc.blocks.first(), 'job')
+        assert xpc.blocks.first().status == Block.CACHED
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'failed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        # since this job was successful, the next one should be ready to run
 
-        self.checkResponse(response, 204)
+        # schedules the last block of the experiment
+        assert xpc.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
 
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xpc)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-    def test__cached_block__cancelled(self):
-        self.client.login(username='scheduler', password='1234')
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # checks that the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.CACHED
-        block.save()
+        xpc.refresh_from_db()
+        block = xpc.blocks.last()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(block.experiment.status, Experiment.DONE)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'cancelled',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
 
-        self.checkResponse(response, 204)
+        self.check_stats_success(block)
 
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
-    def test__failed_block__processed(self):
-        self.client.login(username='scheduler', password='1234')
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # asserts the old experiment is still in a failed state
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status',
+              flat=True)],
+            [Block.CACHED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
+
+
+    def test_partially_blocking(self):
+
+        # tests that an experiment can be partially skipped if some files are
+        # already available on the cache - blocking occurs as expected on the
+        # blocks that still need to run
+
+        current_stats = HourlyStatistics.objects.count()
+
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
+
+        # schedules the experiment and checks it
+        xp.schedule()
+
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
+
+        assigned_splits = schedule()
+
+        worker = Worker.objects.get()
+
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
+
+        # actually runs the job (blocking)
+        split.process(self.env1_execute, self.cpulimit)
+
+        # at this point, job should have been successful
+        xp.refresh_from_db()
+        block = xp.blocks.first()
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(block.experiment.status, Experiment.RUNNING)
+
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in block.outputs.all()])
+
+        # checks that the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+
+        self.check_stats_success(block)
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.FAILED
-        block.save()
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # now we fork and wait for the second experiment to hook into the
+        # last block
+        xpc = xp.fork(name='single_copy')
+        xpc.schedule()
 
+        self.assertEqual([k.status for k in xpc.blocks.all()],
+            [Block.CACHED, Block.NOT_CACHED])
+        assert xpc.blocks.last().job.parent == xp.blocks.last().job
 
-    def test__failed_block__failed(self):
-        self.client.login(username='scheduler', password='1234')
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+class WorkingExternally(TransactionTestCase):
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.FAILED
-        block.save()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'failed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+    def setUp(self):
+
+        from beat.core.async import resolve_cpulimit_path
+        self.cpulimit = resolve_cpulimit_path(None)
+
+        from . import utils
+        self.process = utils.resolve_process_path()
+        self.environments = utils.find_environments(None)
+
+        if not os.path.exists(settings.CACHE_ROOT):
+            os.makedirs(settings.CACHE_ROOT)
+
+        install.create_sites()
+        system_user, plot_user, user = install.create_users('user', 'user')
+        install.add_group('Default')
+
+        setup_backend(qsetup.DEFAULT_CONFIGURATION)
+
+        Worker.objects.update(active=True)
+        env = Environment.objects.first()
+        queue = Queue.objects.first()
+
+        template_data = dict(
+            system_user = system_user,
+            plot_user = plot_user,
+            user = user,
+            private = False,
+            queue = queue.name,
+            environment = dict(name=env.name, version=env.version),
+            )
+        prefix = os.path.join(
+              os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))),
+              'src',
+              'beat.examples',
+              )
+        install.install_contributions(prefix, 'system', template_data)
+        install.install_contributions(prefix, 'test', template_data)
+
+
+    def tearDown(self):
+        if os.path.exists(settings.CACHE_ROOT):
+            shutil.rmtree(settings.CACHE_ROOT)
+        if os.path.exists(settings.PREFIX):
+            shutil.rmtree(settings.PREFIX)
 
-        self.checkResponse(response, 400, content_type='application/json')
 
+    def test_success(self):
 
-    def test__failed_block__cancelled(self):
-        self.client.login(username='scheduler', password='1234')
+        # tests that an experiment can actually be run
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        current_stats = HourlyStatistics.objects.count()
 
-        block = experiment.blocks.get(name='addition1')
-        block.status = Block.FAILED
-        block.save()
+        fullname = 'user/user/single/1/single'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'cancelled',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        # schedules the experiment and checks it
+        xp.schedule()
 
-        self.checkResponse(response, 400, content_type='application/json')
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
 
+        assigned_splits = schedule()
 
-    def test__processing_block__processed(self):
-        self.client.login(username='scheduler', password='1234')
+        worker = Worker.objects.get()
 
-        self.start_block('addition1')
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'processed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        # actually runs the job (non-blocking)
+        worker.work(self.environments, self.cpulimit, self.process)
 
-        self.checkResponse(response, 204)
+        def condition():
+            xp.refresh_from_db()
+            block = xp.blocks.first()
+            return block.status == Block.CACHED
+        _sleep(20, condition)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # at this point, split should have been successful which shall
+        # trigger job deletion and block update
+        xp.refresh_from_db()
+        block = xp.blocks.first()
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is None)
-        self.assertEqual(experiment.status, Experiment.RUNNING)
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(xp.status, Experiment.RUNNING)
 
-        addition1 = experiment.blocks.get(name='addition1')
-        self.assertEqual(addition1.status, Block.CACHED)
-        self.assertTrue(addition1.start_date is not None)
-        self.assertTrue(addition1.end_date is not None)
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in split.job.block.outputs.all()])
 
-        addition2 = experiment.blocks.get(name='addition2')
-        self.assertEqual(addition2.status, Block.NOT_CACHED)
-        self.assertTrue(addition2.start_date is None)
-        self.assertTrue(addition2.end_date is None)
+        # checks that the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        analysis = experiment.blocks.get(name='analysis')
-        self.assertEqual(analysis.status, Block.NOT_CACHED)
-        self.assertTrue(analysis.start_date is None)
-        self.assertTrue(analysis.end_date is None)
+        # assert we have no database traces after the block is done
+        self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
+        self.assertEqual(JobSplit.objects.filter(job=split.job).count(), 0)
+        self.assertEqual(Result.objects.filter(job__isnull=True).count(), 0)
 
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-    def test__processing_block__failed(self):
-        self.client.login(username='scheduler', password='1234')
+        # since this job was successful, the next one should be ready to run
 
-        self.start_block('addition1')
+        # schedules the last block of the experiment
+        assert xp.blocks.last().job.runnable_date is not None
+        assigned_splits = schedule()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'failed',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'analysis')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-        self.checkResponse(response, 204)
+        # actually runs the job (non-blocking)
+        worker.work(self.environments, self.cpulimit, self.process)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        def condition():
+            xp.refresh_from_db()
+            return xp.status == Experiment.DONE
+        _sleep(20, condition) # waits for job completion
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is not None)
-        self.assertEqual(experiment.status, Experiment.FAILED)
+        # checks that the number of statistics objects has increased by 1
+        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
 
-        addition1 = experiment.blocks.get(name='addition1')
-        self.assertEqual(addition1.status, Block.FAILED)
-        self.assertTrue(addition1.start_date is not None)
-        self.assertTrue(addition1.end_date is not None)
+        # at this point, split should have been successful which shall
+        # trigger job deletion and block update
+        xp.refresh_from_db()
+        block = xp.blocks.last()
 
-        addition2 = experiment.blocks.get(name='addition2')
-        self.assertEqual(addition2.status, Block.NOT_CACHED)
-        self.assertTrue(addition2.start_date is None)
-        self.assertTrue(addition2.end_date is None)
+        self.assertEqual(block.status, Block.CACHED)
+        self.assertEqual(xp.status, Experiment.DONE)
 
-        analysis = experiment.blocks.get(name='analysis')
-        self.assertEqual(analysis.status, Block.NOT_CACHED)
-        self.assertTrue(analysis.start_date is None)
-        self.assertTrue(analysis.end_date is None)
+        # all caches must have been generated
+        assert all([k.index_checksums() for k in split.job.block.outputs.all()])
 
-    def test__unique_processing_block__cancelled(self):
-        self.client.login(username='scheduler', password='1234')
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
 
-        self.start_block('addition1')
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'cancelled',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
 
-        self.checkResponse(response, 204)
+    def test_cancel_running(self):
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # tests that an experiment can be cancelled while running
 
-        self.assertTrue(experiment.creation_date is not None)
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is not None)
-        self.assertEqual(experiment.status, Experiment.FAILED)
+        current_stats = HourlyStatistics.objects.count()
 
-        self.assertEqual(experiment.blocks.count(), 3)
+        fullname = 'user/user/single/1/single_sleep'
+        xp = Experiment.objects.get(name=fullname.split(os.sep)[-1])
 
+        # schedules the experiment and checks it
+        xp.schedule()
 
-    def test__processing_block__cancelled(self):
-        self.client.login(username='scheduler', password='1234')
+        # schedules the first runnable block
+        assert xp.blocks.first().job.runnable_date is not None
+        assert xp.blocks.last().job.runnable_date is None
 
-        self.start_block('addition1')
-        self.start_block('addition2')
+        assigned_splits = schedule()
 
-        response = self.client.put(self.url,
-            json.dumps({
-                'experiment-name': 'johndoe/toolchain1/1/experiment1',
-                'block-name': 'addition1',
-                'state': 'cancelled',
-                'outputs': ['deadbeef'],
-                'statistics': None,
-            }), content_type='application/json')
+        worker = Worker.objects.get()
 
-        self.checkResponse(response, 204)
+        self.assertEqual(len(assigned_splits), 1)
+        split = assigned_splits[0]
+        self.assertEqual(split.job.block.experiment, xp)
+        self.assertEqual(split.job.block.name, 'echo')
+        self.assertEqual(split.worker, worker)
+        self.assertEqual(worker.name, qsetup.HOSTNAME)
+        self.assertEqual(worker.available_cores(), qsetup.CORES-1)
 
-        experiment = Experiment.objects.get(id=self.experiment.id)
+        # actually runs the job (non-blocking)
+        worker.work(self.environments, self.cpulimit, self.process)
 
-        self.assertTrue(experiment.start_date is not None)
-        self.assertTrue(experiment.end_date is not None)
-        self.assertEqual(experiment.status, Experiment.CANCELING)
+        def condition():
+            xp.refresh_from_db()
+            return xp.status == Experiment.RUNNING
+        _sleep(20, condition)
 
-        addition1 = experiment.blocks.get(name='addition1')
-        self.assertEqual(addition1.status, Block.NOT_CACHED)
-        self.assertTrue(addition1.start_date is not None)
-        self.assertTrue(addition1.end_date is None)
+        # cancels the experiment
+        xp.cancel()
+        split.refresh_from_db()
+        self.assertEqual(split.status, Job.CANCEL)
 
-        addition2 = experiment.blocks.get(name='addition2')
-        self.assertEqual(addition2.status, Block.PROCESSING)
-        self.assertTrue(addition2.start_date is not None)
-        self.assertTrue(addition2.end_date is None)
+        # launches another work cycle to kill the process
+        worker.work(self.environments, self.cpulimit, self.process)
 
-        analysis = experiment.blocks.get(name='analysis')
-        self.assertEqual(analysis.status, Block.NOT_CACHED)
-        self.assertTrue(analysis.start_date is None)
-        self.assertTrue(analysis.end_date is None)
+        def condition():
+            xp.refresh_from_db()
+            return xp.status == Experiment.FAILED and Job.objects.count() == 0
+        _sleep(20, condition)
+        xp.refresh_from_db()
+
+        # assert we have no database traces after the last block is done
+        self.assertEqual(Job.objects.count(), 0)
+        self.assertEqual(JobSplit.objects.count(), 0)
+        self.assertEqual(Result.objects.count(), 0)
+
+        self.assertEqual(worker.available_cores(), qsetup.CORES)
+
+        # asserts the experiment is left in a failed state
+        self.assertEqual(
+            [str(k) for k in xp.blocks.order_by('id').values_list('status',
+              flat=True)],
+            [Block.CANCELLED, Block.CANCELLED]
+            )
+        self.assertEqual(xp.status, Experiment.FAILED)
diff --git a/beat/web/backend/urls.py b/beat/web/backend/urls.py
index 7fbbf79db02cd8da6e4cee59d1d9ba3bf650c346..e09f3f99036173ff494088275ccd84a8a297e8a2 100644
--- a/beat/web/backend/urls.py
+++ b/beat/web/backend/urls.py
@@ -36,6 +36,18 @@ urlpatterns = [
       name='scheduler',
       ),
 
+    url(
+      r'^cancel-experiments/$',
+      views.cancel_all_experiments,
+      name='cancel-experiments',
+      ),
+
+    url(
+      r'^update-workers/$',
+      views.update_workers,
+      name='update-workers',
+      ),
+
     url(
         r'^environments/(?P<name>[-\.\w\s]+)/(?P<version>[-\.\w]+)/$',
         views.environment,
diff --git a/beat/web/backend/utils.py b/beat/web/backend/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..02c91a99181955bcbe617adc5da97823403ec6f9
--- /dev/null
+++ b/beat/web/backend/utils.py
@@ -0,0 +1,411 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+'''Utilities for backend management'''
+
+import os
+import sys
+import fnmatch
+import glob
+import time
+
+import logging
+logger = logging.getLogger(__name__)
+
+import psutil
+
+from django.db import transaction
+from django.contrib.auth.models import Group
+from guardian.shortcuts import assign_perm
+
+from ..common.models import Shareable
+from ..experiments.models import CachedFile, Block, Experiment
+from .models import Queue, Worker, Job, Environment, Slot
+
+
+def cleanup_cache(path, age_in_minutes=0, delete=False):
+    """Removes files which are older than a certain threshold from a directory
+
+    This function can be used for cache maintenance. It allows the user to
+    remove all files under a certain directory which are older than a number
+    of minutes. It also saves some files from being erased (if they belong to
+    experiments which are running or scheduled).
+
+
+    Parameters:
+
+      path (str): The path to the cache root (typically,
+        ``settings.CACHE_ROOT``)
+
+      age_in_minutes (int, Optional): The minimum age (in minutes, measured
+        from the last access time) of files that will be erased from the
+        disk. All files older than this cut-off value will be erased. All
+        others will be kept.
+
+      delete (bool, Optional): If set (defaults to False), then we really
+        delete the marked files. Otherwise, we just return the list of files
+        which we would delete.
+
+
+    Returns:
+
+      list: A list of filenames that will be/were removed
+
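+    Example (a sketch - the cache path below is hypothetical)::
+
+      # dry-run: lists files not accessed for more than one day
+      candidates = cleanup_cache('/var/cache/beat', age_in_minutes=1440)
+
+      # actually removes them
+      removed = cleanup_cache('/var/cache/beat', age_in_minutes=1440,
+          delete=True)
+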
+    """
+
+    cutoff_access_time = time.time() - (60*age_in_minutes)
+
+    logger.info("Running `%s' clean-up: set file-access cutoff time to `%s'",
+        path, time.ctime(cutoff_access_time))
+
+    # Gets a list of cache files for active experiments:
+    blocks = Block.objects.filter(experiment__in=Experiment.objects.filter(status__in=(Experiment.SCHEDULED, Experiment.RUNNING)))
+    save_list = [k.path() + '*' for k in CachedFile.objects.filter(blocks__in=blocks)]
+
+    # Finds the files with an associated '.lock' file
+    for root, dirnames, filenames in os.walk(path):
+        for filename in fnmatch.filter(filenames, '*.lock'):
+            save_list += glob.glob(os.path.splitext(os.path.join(root, filename))[0] + '*')
+
+    removed_files = []
+
+    for p, dirs, files in os.walk(path, topdown=False):
+
+        files = [f for f in files if not f.startswith('.')]
+        dirs[:] = [d for d in dirs if not d.startswith('.')] #note: in-place
+
+        for f in files:
+            fullpath = os.path.join(p, f)
+
+            save_it = sum([fullpath.endswith(k) for k in save_list])
+            if save_it:
+                logger.debug("[skip] `%s' (user list)", fullpath)
+                continue
+
+            # if you get to this point and the file ends in '.part', erase it
+            ext = os.path.splitext(fullpath)
+            if len(ext) > 1 and ext[1] == '.part':
+                if delete:
+                    logger.info("[rm] `%s' (dangling)", fullpath)
+                    os.remove(fullpath)
+                removed_files.append(fullpath)
+
+                continue
+
+            # if you get to this point, check file access time
+            if os.path.getatime(fullpath) < cutoff_access_time:
+                if delete:
+                    logger.info("[rm] `%s'", fullpath)
+                    os.remove(fullpath)
+                removed_files.append(fullpath)
+            else:
+                logger.debug("[skip] `%s' (%f >= %f)", fullpath,
+                    os.path.getatime(fullpath), cutoff_access_time)
+
+        for d in dirs: #also remove empty directories
+            fullpath = os.path.join(p, d)
+            if not os.listdir(fullpath) and delete:
+                os.rmdir(fullpath)
+
+    return removed_files
+
+
+@transaction.atomic
+def setup_backend(d):
+    '''Configures or re-configures the internal queue setup
+
+    This method is called to re-configure the current backend architecture. It
+    is guaranteed to be called only if no experiments are currently running on
+    resources that will disappear (this is checked). This restriction stems
+    from the difficulty of handling "disappearing" queues and/or
+    environments.
+
+
+    Parameters:
+
+      d (dict): A JSON-generated dictionary that defines the queues and slots
+        available at the backend farm.
+
+
+    Raises:
+
+      RuntimeError: If an error is detected and the re-configuration cannot
+        take place. In this case, it is safe to assume nothing has changed.
+
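+    A minimal configuration dictionary is sketched below (all names and
+    values are illustrative only)::
+
+      d = {
+          'environments': {
+              'Python 2.7 (1.1.0)': {'name': 'Python 2.7', 'version': '1.1.0'},
+              },
+          'workers': {
+              'node01': {'cores': 4, 'memory': 8192},
+              },
+          'queues': {
+              'default': {
+                  'memory-limit': 1024,
+                  'time-limit': 60,
+                  'cores-per-slot': 1,
+                  'max-slots-per-user': 2,
+                  'groups': ['Default'],
+                  'environments': ['Python 2.7 (1.1.0)'],
+                  'slots': {'node01': {'quantity': 4, 'priority': 0}},
+                  },
+              },
+          }
+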
+    '''
+
+    # 1. We get a list of all current queue/environment combinations
+    q_envs = set([(q.name, str(e)) \
+        for q in Queue.objects.all() for e in q.environments.all()])
+
+    # 2. We get a list of new queue/environment combinations
+    config_q_envs = set([(qname, envkey) \
+        for qname, qpar in d['queues'].items() \
+        for envkey in qpar['environments']])
+
+    # 3. We figure out which combinations of queue/environment's need to be
+    #    deleted.
+    delete_q_envs = q_envs.difference(config_q_envs)
+
+    # 4. We figure out which combinations of queue/environment's are currently
+    #    used by running or queued jobs.
+    used_q_envs = set([(job.block.queue.name, str(job.block.environment)) \
+        for job in Job.objects.filter(status__in=(Job.PROCESSING, Job.QUEUED))])
+
+    # 5. We request that no jobs should be either executing or scheduled for
+    #    execution on queue/environment combinations that need to be deleted.
+    used_to_be_deleted = used_q_envs.intersection(delete_q_envs)
+    if len(used_to_be_deleted) != 0:
+        qenv_names = ['/'.join(k) for k in used_to_be_deleted]
+        reason = 'There are jobs currently running or scheduled to run on ' \
+            'the following queue/environment combinations which are ' \
+            'supposed to be deleted: %s. Aborting reconfiguration.'
+        raise RuntimeError(reason % ', '.join(qenv_names))
+
+    # 6. Request that no worker that is being used will disappear
+    existing_workers = set(Worker.objects.values_list('name', flat=True))
+    workers_to_be_deleted = existing_workers - set(d['workers'])
+    if workers_to_be_deleted:
+        wobjects_to_be_deleted = \
+            Worker.objects.filter(name__in=workers_to_be_deleted)
+    else:
+        wobjects_to_be_deleted = []
+
+    for w in wobjects_to_be_deleted:
+        if w.load() != 0:
+            reason = 'There are jobs currently running or scheduled to run ' \
+                'on some of the workers that would disappear: %s. ' \
+                'Aborting reconfiguration.'
+            raise RuntimeError(reason % ', '.join(workers_to_be_deleted))
+
+    # 7. Create new environments
+    config_envs = set(d['environments'].keys())
+    current_envs = set([str(k) for k in Environment.objects.all()])
+    new_envs = config_envs.difference(current_envs)
+
+    for envkey in new_envs:
+        attrs = d['environments'][envkey]
+        env = Environment(
+            name=attrs['name'],
+            version=attrs['version'],
+            short_description=attrs.get('short_description'),
+            description=attrs.get('description'),
+            sharing=Shareable.PUBLIC,
+            )
+        logger.info("Creating `%s'...", env)
+        env.save()
+
+    # 8.1 Create new workers
+    config_workers = set(d['workers'].keys())
+    current_workers = set(Worker.objects.values_list('name', flat=True))
+    new_workers = config_workers - current_workers
+
+    for name in new_workers:
+        attrs = d['workers'][name]
+        worker = Worker(
+            name=name,
+            active=False,
+            cores=attrs['cores'],
+            memory=attrs['memory'],
+            )
+        logger.info("Creating `%s'...", worker)
+        worker.save()
+
+    # 8.2 Update existing workers
+    update_workers = current_workers.intersection(config_workers)
+    for name in update_workers:
+        attrs = d['workers'][name]
+        worker = Worker.objects.select_for_update().get(name=name)
+        worker.cores = attrs['cores']
+        worker.memory = attrs['memory']
+        logger.info("Updating `%s'...", worker)
+        worker.save()
+
+    # 9. Create new queues
+    config_qnames = set(d['queues'].keys())
+    current_qnames = set(Queue.objects.values_list('name', flat=True))
+    new_qnames = config_qnames.difference(current_qnames)
+
+    for name in new_qnames:
+        attrs = d['queues'][name]
+        queue = Queue(
+            name=name,
+            memory_limit=attrs['memory-limit'],
+            time_limit=attrs['time-limit'],
+            cores_per_slot=attrs['cores-per-slot'],
+            max_slots_per_user=attrs['max-slots-per-user'],
+            )
+        logger.info("Creating `%s'...", queue)
+        queue.save()
+
+        for gname in attrs['groups']:
+            group = Group.objects.get(name=gname)
+            logger.info("Allowing group `%s' access to `%s'...", group, queue)
+            assign_perm('can_access', group, queue)
+
+        for hostname, par in attrs['slots'].items():
+            worker = Worker.objects.get(name=hostname)
+            priority = par.get('priority', 0)
+            slot = Slot(
+                queue=queue,
+                worker=worker,
+                priority=priority,
+                quantity=par['quantity'],
+                )
+            logger.info("Creating `%s'...", slot)
+            slot.save()
+
+        # Associates environments with queues
+        for envkey in attrs['environments']:
+            env = Environment.objects.get_by_natural_key(envkey)
+            logger.info("Appending `%s' to `%s'...", env, queue)
+            queue.environments.add(env)
+
+    # 10. Update existing queues
+    update_qnames = current_qnames.intersection(config_qnames)
+    for name in update_qnames:
+
+        queue = Queue.objects.select_for_update().get(name=name)
+        attrs = d['queues'][name]
+
+        # 10.1 Update queue parameterization: running jobs will be unaffected
+        #     whereas queued jobs will be subject to the new settings.
+        queue.memory_limit = attrs['memory-limit']
+        queue.time_limit = attrs['time-limit']
+        queue.cores_per_slot = attrs['cores-per-slot']
+        queue.max_slots_per_user = attrs['max-slots-per-user']
+        logger.info("Updating `%s'...", queue)
+        queue.save()
+
+        # 10.2 Update the queue-slot allocation
+        queue.slots.all().delete()
+        for hostname, par in attrs['slots'].items():
+            worker = Worker.objects.get(name=hostname)
+            priority = par.get('priority', 0)
+            slot = Slot(
+                worker=worker,
+                queue=queue,
+                priority=priority,
+                quantity=par['quantity'],
+                )
+            logger.info("Creating `%s'...", slot)
+            slot.save()
+
+        # 10.3 Associate and dissociate environments
+        queue.environments.clear()
+        for e in attrs['environments']:
+            env = Environment.objects.get_by_natural_key(e)
+            logger.info("Appending `%s' to `%s'...", env, queue)
+            queue.environments.add(env)
+
+    # 11. Delete queues not mentioned in the new configuration
+    delete_qnames = current_qnames.difference(config_qnames)
+    for name in delete_qnames:
+        q = Queue.objects.select_for_update().get(name=name)
+        logger.info("Deleting `%s'...", q)
+        q.delete() # slots are deleted on cascade
+
+    # 12. Delete workers not mentioned in the new configuration
+    for w in wobjects_to_be_deleted:
+        logger.info("Deleting `%s'...", w)
+        w.delete()
+
+
+def dump_backend():
+    '''Returns a dictionary that represents the current backend configuration'''
+
+    return dict(
+        queues=dict([(k.name, k.as_dict()) for k in Queue.objects.all()]),
+        environments=dict([(str(k), k.as_dict()) for k in Environment.objects.all()]),
+        workers=dict([(k.name, k.as_dict()) for k in Worker.objects.all()]),
+        )
+
+
+def resolve_process_path():
+    '''Returns the path to the ``process`` program'''
+
+    basedir = os.path.dirname(os.path.realpath(sys.argv[0]))
+    r = os.path.join(basedir, 'process')
+
+    if not os.path.exists(r):
+        raise RuntimeError("Cannot find `process' at `%s' - please check " \
+            "your installation" % basedir)
+
+    return r
+
+
+def find_environments(paths=None):
+    '''Finds the list of known environments
+
+    Parameters:
+
+      paths (list, Optional): A list of paths in which to search for
+        environments. If not set, then the default environments are loaded,
+        if possible.
+
+
+    Returns:
+
+      dict: A dictionary of available environments, using as key the
+        environment's natural key (i.e., ``name (version)``) and as value
+        another dictionary with these keys:
+
+          * name: The environment name (str)
+          * version: The environment version (str)
+          * os: The output of ``uname -a`` (list):
+            1. Operating system (str)
+            2. Hostname (str)
+            3. Kernel version (str)
+            4. Kernel compilation details (str)
+            5. Platform (``x86_64`` for 64-bits or ``i386`` for 32-bits) (str)
+          * execute: The path to the ``execute`` script to be used for
+            running user jobs (str)
+          * directory: The path leading to the root of this environment (str)
+
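+    Example (illustrative only - the available natural keys depend on the
+    environments actually installed)::
+
+      envs = find_environments()
+      info = envs.get('Python 2.7 (1.1.0)')  # hypothetical natural key
+      if info is not None:
+          print(info['execute'])
+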
+    '''
+
+    from beat.core.execution import discover_environments
+
+    if paths is not None:
+        logger.debug("Search for environments at `%s'", os.pathsep.join(paths))
+        retval = discover_environments(paths)
+        logger.debug("Found %d environment(s)", len(retval))
+        return retval
+
+    else:
+        import pkg_resources
+        path = pkg_resources.resource_filename(__name__, 'environments')
+        logger.debug("Search for environments at `%s'", path)
+        retval = discover_environments([path])
+        logger.debug("Found %d environment(s)", len(retval))
+        return retval
+
+
+def pick_execute(split, environments):
+    """Resolves the path to the ``execute`` program to use for the split"""
+
+    # Check we have a compatible environment to execute the user algorithm
+    envinfo = environments.get(split.job.block.environment.natural_key())
+    return envinfo['execute'] if envinfo else None
diff --git a/beat/web/backend/views.py b/beat/web/backend/views.py
index dde215fb781e8091d51799cae49c4a8b4527dfae..e954598b6b5fc2e962ac70a0db862681ceab4c26 100644
--- a/beat/web/backend/views.py
+++ b/beat/web/backend/views.py
@@ -25,11 +25,16 @@
 #                                                                             #
 ###############################################################################
 
-import collections
+import os
+import socket
+import logging
+logger = logging.getLogger(__name__)
+
 import simplejson
 
-from django.http import Http404
+from django.http import Http404, HttpResponseRedirect
 from django.conf import settings
+from django.core.urlresolvers import reverse
 from django.shortcuts import get_object_or_404
 from django.shortcuts import render_to_response
 from django.template import RequestContext
@@ -37,55 +42,111 @@ from django.contrib.auth.decorators import login_required
 from django.http import HttpResponseForbidden
 from django.contrib import messages
 
-from .models import Environment
+from beat.core.async import resolve_cpulimit_path
+
+from ..experiments.models import Experiment
+
+from .models import Environment, Worker, Queue
+from . import state
+from . import utils
+from . import schedule
 
 
 #------------------------------------------------
 
 
-@login_required
-def scheduler(request):
-    if not(request.user.is_superuser):
-        return HttpResponseForbidden()
+class Work:
+    '''Helper to do the required worker job for local scheduling'''
+
+    cpulimit = None
+    process = None
+    environments = None
+    worker = None
+
+    def __setup__(self):
+
+        Work.cpulimit = resolve_cpulimit_path(None)
+        logger.debug("(path) cpulimit: `%s'", Work.cpulimit)
+        Work.process = utils.resolve_process_path()
+        logger.debug("(path) process: `%s'", Work.process)
+        Work.environments = utils.find_environments(None)
+        logger.debug("Environments: %s", ", ".join(Work.environments))
+
+        # load worker, check environments, activate it
+        w = Worker.objects.get(name=socket.gethostname()) \
+            if Worker.objects.count() != 1 else Worker.objects.get()
+        missing, unused = w.check_environments(Work.environments)
+        if unused:
+            logger.info("The following environments where found on your " \
+                "setup, but will not be used with the current queue " \
+                "configuration: %s" % ", ".join(unused))
+        if missing:
+            raise RuntimeError("The following environments are currently " \
+                "missing from your setup: %s" % ", ".join(missing))
+        else:
+            logger.info("All required software environments were found")
+
+        w.activate()
+        w.save()
+        Work.worker = w
+
+    def __call__(self):
 
-    from .api import scheduler as scheduler_api
+        if Work.worker is None: self.__setup__()
 
-    response = scheduler_api(request)
+        # Regular work
+        Work.worker.work(Work.environments, Work.cpulimit, Work.process)
 
-    if response.status_code != 200:
-        messages.error(request, "Error contacting the scheduler at {}:{} " \
-                "[{}/{}] {}".format(settings.SCHEDULER_ADDRESS,
-                    settings.SCHEDULER_PORT, response.status_text,
-                    response.status_code, response.data))
-        return render_to_response('backend/scheduler.html', dict(),
-                context_instance=RequestContext(request))
+
+#------------------------------------------------
+
+
+@login_required
+def scheduler(request):
+
+    if not(request.user.is_superuser): return HttpResponseForbidden()
 
     # data for the cache plot
+    cache = state.cache()
     cache_chart_data = [
             dict(
-                label= 'Used (%d%%)' % round(100*float(response.data['cache']['size-in-megabytes'])/response.data['cache']['capacity-in-megabytes']),
-                value= response.data['cache']['size-in-megabytes'],
+                label= 'Used (%d%%)' % round(100*float(cache['size-in-megabytes'])/cache['capacity-in-megabytes']),
+                value= cache['size-in-megabytes'],
                 color= '#F7464A',
                 highlight= '#FF5A5E',
                 ),
             dict(
-                label= 'Free (%d%%)' % round(100*float(response.data['cache']['capacity-in-megabytes'] - response.data['cache']['size-in-megabytes'])/response.data['cache']['capacity-in-megabytes']),
-                value= response.data['cache']['capacity-in-megabytes'] - response.data['cache']['size-in-megabytes'],
+                label= 'Free (%d%%)' % round(100*float(cache['capacity-in-megabytes'] - cache['size-in-megabytes'])/cache['capacity-in-megabytes']),
+                value= cache['capacity-in-megabytes'] - cache['size-in-megabytes'],
                 color= '#46BFBD',
                 highlight= '#5AD3D1',
                 ),
             ]
 
-    cache_gb = int(response.data['cache']['capacity-in-megabytes'] / 1024.0)
+    cache_gb = int(cache['capacity-in-megabytes'] / 1024.0)
+
+    # do scheduling and/or worker activity if required
+    if settings.SCHEDULING_PANEL and request.GET.has_key('activity'):
+        activity = request.GET['activity']
 
-    # Organize the data a bit
-    response.data['workers'] = collections.OrderedDict(sorted([(k,v) for k,v in response.data['workers'].items()], key=lambda x: (not x[1].has_key('active'), x[1]['db_status']!='Active', x[0])))
+        if activity in ('both', 'schedule'):
+            splits = schedule.schedule()
+            if splits:
+                logger.info("Scheduler assigned %d splits", len(splits))
+
+        if activity in ('both', 'work'):
+            Work()()
 
     return render_to_response('backend/scheduler.html',
             dict(
-                data=response.data,
+                jobs=state.jobs(),
+                experiments=state.experiments(),
+                workers=Worker.objects.order_by('-active', 'name'),
+                queues=Queue.objects.order_by('memory_limit', 'max_slots_per_user'),
                 cache_chart_data=simplejson.dumps(cache_chart_data),
                 cache_gb=cache_gb,
+                helper_panel=getattr(settings, 'SCHEDULING_PANEL', False),
+                scheduling_period=getattr(settings, 'SCHEDULING_INTERVAL', 5),
                 ),
             context_instance=RequestContext(request))
 
@@ -126,3 +187,39 @@ def list_environments(request):
                 ),
             context_instance=RequestContext(request),
             )
+
+
+
+#----------------------------------------------------------
+
+
+@login_required
+def cancel_all_experiments(request):
+
+    if not(request.user.is_superuser): return HttpResponseForbidden()
+
+    qs = Experiment.objects.filter(status__in=(Experiment.RUNNING, Experiment.SCHEDULED))
+    counter = qs.count()
+
+    for xp in qs: xp.cancel()
+
+    messages.success(request, "Successfuly cancelled %d experiments" % counter)
+
+    return HttpResponseRedirect(reverse('backend:scheduler'))
+
+
+#----------------------------------------------------------
+
+
+@login_required
+def update_workers(request):
+
+    if not(request.user.is_superuser): return HttpResponseForbidden()
+
+    qs = Worker.objects.all()
+    counter = qs.count()
+    qs.update(update=True)
+
+    messages.success(request, "Requested %d workers for updates" % counter)
+
+    return HttpResponseRedirect(reverse('backend:scheduler') + '#workers')
diff --git a/beat/web/code/models.py b/beat/web/code/models.py
index 0c8a999a085aa4ec880d9f3008fc8c7f788fc9bf..b76b514ed27d0a653551e48c7939ea2eabe5712c 100644
--- a/beat/web/code/models.py
+++ b/beat/web/code/models.py
@@ -224,8 +224,8 @@ class Code(StoredContribution):
     usable_by = models.ManyToManyField(User, related_name='usable_%(class)ss',
                                        blank=True)
 
-    usable_by_team = models.ManyToManyField(Team, related_name='usable_%(class)ss',
-                                            blank=True)
+    usable_by_team = models.ManyToManyField(Team,
+        related_name='usable_%(class)ss', blank=True)
 
     language = models.CharField(max_length=1, choices=CODE_LANGUAGE,
                                 default=PYTHON)
diff --git a/beat/web/common/models.py b/beat/web/common/models.py
index 99d3720a5119336fd8c780213c840b75e702710e..66e680a071844d6d34f3280d3cfd6e300e104701 100644
--- a/beat/web/common/models.py
+++ b/beat/web/common/models.py
@@ -99,8 +99,8 @@ class Shareable(models.Model):
     shared_with = models.ManyToManyField(User, related_name='shared_%(class)ss',
                                          blank=True)
 
-    shared_with_team = models.ManyToManyField(Team, related_name='shared_%(class)ss',
-                                              blank=True)
+    shared_with_team = models.ManyToManyField(Team,
+        related_name='shared_%(class)ss', blank=True)
 
     objects = ShareableManager()
 
@@ -364,13 +364,13 @@ class Versionable(Shareable):
     previous_version  = models.ForeignKey('self',
                                           related_name='next_versions',
                                           null=True,
-                                          blank=True
+                                          blank=True,
                                          )
 
     fork_of           = models.ForeignKey('self',
                                           related_name='forks',
                                           null=True,
-                                          blank=True
+                                          blank=True,
                                          )
 
 
@@ -542,7 +542,8 @@ class Contribution(Versionable):
 
     #_____ Fields __________
 
-    author = models.ForeignKey(User, related_name='%(class)ss')
+    author = models.ForeignKey(User, related_name='%(class)ss',
+        on_delete=models.CASCADE)
 
     objects = ContributionManager()
 
diff --git a/beat/web/common/storage.py b/beat/web/common/storage.py
index 01d8111bd75c1e2cc34c2ae3d3888b77da75d39d..c28f8514cd0fd3d6daeab49511919070b1237dbb 100755
--- a/beat/web/common/storage.py
+++ b/beat/web/common/storage.py
@@ -38,7 +38,7 @@ class OverwriteStorage(FileSystemStorage):
     def __init__(self, *args, **kwargs):
         super(OverwriteStorage, self).__init__(*args, **kwargs)
 
-    def get_available_name(self, name):
+    def get_available_name(self, name, max_length=None):
         # If the filename already exists, remove it
         if self.exists(name): self.delete(name)
         return name
diff --git a/beat/web/common/testutils.py b/beat/web/common/testutils.py
index 286ff29f178e6bd468f30be9810bb64587032ff7..5b9553691fc606bf36c1326a69b3655c1942cac5 100644
--- a/beat/web/common/testutils.py
+++ b/beat/web/common/testutils.py
@@ -25,7 +25,11 @@
 #                                                                             #
 ###############################################################################
 
+import os
+import shutil
+
 from django.test import TestCase
+from django.conf import settings
 
 from urlparse import urlparse
 
@@ -34,10 +38,19 @@ import simplejson as json
 from .models import Shareable
 
 
+def tearDownModule():
+
+    if os.path.exists(settings.CACHE_ROOT):
+        shutil.rmtree(settings.CACHE_ROOT)
+    if os.path.exists(settings.PREFIX):
+        shutil.rmtree(settings.PREFIX)
+
+
 class BaseTestCase(TestCase):
     class Meta:
         model = None
 
+
     def checkObjectDBSharingPreferences(self, contribution, reference):
         if not(reference.has_key('status')):
             reference['status'] = 'private'
diff --git a/beat/web/databases/__init__.py b/beat/web/databases/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..93a15eb13994a4273d1626fb9e0113696b6fb263 100644
--- a/beat/web/databases/__init__.py
+++ b/beat/web/databases/__init__.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+default_app_config = 'beat.web.databases.apps.DatabasesConfig'
diff --git a/beat/web/databases/admin.py b/beat/web/databases/admin.py
index dfd9f4c8afff9cca17ca82b2a20b9556357d444a..06d55ad34a83d5bcc268766c19dcf6d778d4b479 100644
--- a/beat/web/databases/admin.py
+++ b/beat/web/databases/admin.py
@@ -33,8 +33,9 @@ from django.utils import six
 from .models import Database as DatabaseModel
 from .models import DatabaseProtocol as DatabaseProtocolModel
 from .models import DatabaseSet as DatabaseSetModel
+from .models import DatabaseSetOutput as DatabaseSetOutputModel
 from .models import DatabaseSetTemplate as DatabaseSetTemplateModel
-from .models import DatabaseOutput as DatabaseOutputModel
+from .models import DatabaseSetTemplateOutput as DatabaseSetTemplateOutputModel
 from .models import validate_database
 
 from ..ui.forms import CodeMirrorJSONFileField, CodeMirrorRSTFileField, \
@@ -234,11 +235,18 @@ admin.site.register(DatabaseModel, Database)
 #------------------------------------------------
 
 
-class DatabaseOutputInline(admin.TabularInline):
+class DatabaseSetTemplateOutputInline(admin.TabularInline):
 
-    model           = DatabaseOutputModel
+    model           = DatabaseSetTemplateOutputModel
     extra           = 0
     ordering        = ('name',)
+    readonly_fields = ('name', 'dataformat')
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
 
 
 class DatabaseSetTemplate(admin.ModelAdmin):
@@ -246,26 +254,58 @@ class DatabaseSetTemplate(admin.ModelAdmin):
     list_display        = ('id', 'name')
     search_fields       = ['name']
     list_display_links  = ('id', 'name')
+    readonly_fields = ('name',)
 
     inlines = [
-            DatabaseOutputInline,
+            DatabaseSetTemplateOutputInline,
             ]
 
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
 admin.site.register(DatabaseSetTemplateModel, DatabaseSetTemplate)
 
 
 #------------------------------------------------
 
 
+class DatabaseSetOutputInline(admin.TabularInline):
+
+    model           = DatabaseSetOutputModel
+    extra           = 0
+    ordering        = ('hash',)
+    readonly_fields = ('hash', 'template')
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
 class DatabaseSet(admin.ModelAdmin):
 
     list_display        = ('id', 'protocol', 'name', 'template')
     search_fields       = ['name',
                            'template__name',
                            'protocol__database__name',
-                           'protocol__database__short_description',
-                           'protocol__database__description',
                            'protocol__name']
     list_display_links  = ('id', 'name')
+    readonly_fields = ('name', 'template', 'protocol')
+
+    inlines = [
+            DatabaseSetOutputInline,
+            ]
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
 
 admin.site.register(DatabaseSetModel, DatabaseSet)
diff --git a/beat/web/backend/permissions.py b/beat/web/databases/apps.py
similarity index 86%
rename from beat/web/backend/permissions.py
rename to beat/web/databases/apps.py
index c98a973b295a80cffe736c94144e804a992c069c..fd94461ab2849a1702de37129f7232f0690b97ad 100644
--- a/beat/web/backend/permissions.py
+++ b/beat/web/databases/apps.py
@@ -25,9 +25,13 @@
 #                                                                             #
 ###############################################################################
 
-from rest_framework import permissions
-from django.conf import settings
+from ..common.apps import CommonAppConfig
+from django.utils.translation import ugettext_lazy as _
 
-class IsScheduler(permissions.BasePermission):
-    def has_permission(self, request, view):
-        return request.user.username == settings.SCHEDULER_ACCOUNT
+class DatabasesConfig(CommonAppConfig):
+    name = 'beat.web.databases'
+    verbose_name = _('Databases')
+
+    def ready(self):
+        super(DatabasesConfig, self).ready()
+        from . import signals
diff --git a/beat/web/databases/migrations/0002_scheduler_addons.py b/beat/web/databases/migrations/0002_scheduler_addons.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c748aa7240cefa5c46f9e5a91d241d898d05db7
--- /dev/null
+++ b/beat/web/databases/migrations/0002_scheduler_addons.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+from ...common.models import get_declaration
+
+from ..models import validate_database
+
+import logging
+logger = logging.getLogger(__name__)
+
+
+def refresh_databases(apps, schema_editor):
+    '''Refreshes each database so datasets/outputs are recreated'''
+
+    Database = apps.get_model("databases", "Database")
+    DatabaseSetOutput = apps.get_model("databases", "DatabaseSetOutput")
+
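+    # apps.get_model() returns frozen historical models without their custom
+    # methods, so the helpers needed below are re-attached by hand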
+    Database.declaration = property(get_declaration)
+    Database.fullname = lambda self: '%s/%d' % (self.name, self.version)
+
+    if Database.objects.count(): print('')
+
+    for db in Database.objects.order_by('id'):
+        print("Refreshing protocols for database `%s'..." % db.fullname())
+        core = validate_database(db.declaration)
+        core.name = db.fullname()
+        for proto in db.protocols.all():
+            for set in proto.sets.all():
+                for output in set.template.outputs.all():
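+                    # hash_output() raises KeyError when the output is absent
+                    # from the current declaration; such outputs are skipped
+                    # with a warning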
+                    try:
+                        DatabaseSetOutput(template=output, set=set,
+                            hash=core.hash_output(proto.name, set.name,
+                              output.name)).save()
+                    except KeyError:
+                        logger.warn('Database output %s/%d.%s.%s.%s does ' \
+                            'not exist' % (db.name, db.version, proto.name,
+                              set.name, output.name))
+                        continue
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('dataformats', '0001_initial'),
+        ('databases', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.RenameModel('DatabaseOutput', 'DatabaseSetTemplateOutput'),
+        migrations.AlterUniqueTogether(
+            name='databasesettemplateoutput',
+            unique_together=set([('template', 'name')]),
+        ),
+        migrations.CreateModel(
+            name='DatabaseSetOutput',
+            fields=[
+                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+                ('hash', models.CharField(unique=True, max_length=64)),
+                ('set', models.ForeignKey(related_name='outputs', to='databases.DatabaseSet')),
+                ('template', models.ForeignKey(related_name='instances', to='databases.DatabaseSetTemplateOutput')),
+            ],
+        ),
+        migrations.RunPython(refresh_databases),
+    ]
diff --git a/beat/web/databases/models.py b/beat/web/databases/models.py
index 87166515863bbda195f907488d7dc3bcf3896213..1351621e1bd87ed7b2fcf9ba9271d3c145ff8def 100755
--- a/beat/web/databases/models.py
+++ b/beat/web/databases/models.py
@@ -31,7 +31,6 @@ import simplejson
 
 from django.db import models
 from django.conf import settings
-from django.dispatch import receiver
 from django.core.urlresolvers import reverse
 
 import beat.core.database
@@ -263,7 +262,6 @@ class Database(Versionable):
             result.extend(database_protocol.all_needed_dataformats())
         return list(set(result))
 
-
     #_____ Properties __________
 
     description = property(get_description, set_description)
@@ -275,173 +273,6 @@ class Database(Versionable):
 #----------------------------------------------------------
 
 
-@receiver(models.signals.pre_delete, sender=Database)
-def delete_protocols(sender, **kwargs):
-    instance = kwargs['instance']
-    instance.protocols.all().delete()
-
-
-#----------------------------------------------------------
-
-
-# These two auto-delete files from filesystem when they are unneeded:
-@receiver(models.signals.post_delete, sender=Database)
-def auto_delete_file_on_delete(sender, instance, **kwargs):
-    """Deletes file from filesystem when ``Database`` object is deleted.
-    """
-    if instance.declaration_file:
-        instance.declaration_file.delete(save=False)
-
-    if instance.source_code_file:
-        instance.source_code_file.delete(save=False)
-
-    if instance.description_file:
-        instance.description_file.delete(save=False)
-
-
-@receiver(models.signals.pre_save, sender=Database)
-def auto_delete_file_on_change(sender, instance, **kwargs):
-    """Deletes file from filesystem when ``Database`` object is changed."""
-
-    if not instance.pk:
-        return False
-
-    try:
-        old_file = Database.objects.get(pk=instance.pk).declaration_file
-    except Database.DoesNotExist:
-        return False
-
-    if old_file != instance.declaration_file:
-        old_file.delete(save=False)
-
-    try:
-        old_code = Database.objects.get(pk=instance.pk).source_code_file
-    except Database.DoesNotExist:
-        return False
-
-    if old_code != instance.source_code_file:
-        old_code.delete(save=False)
-
-    try:
-        old_descr = Database.objects.get(pk=instance.pk).description_file
-    except Database.DoesNotExist:
-        return False
-
-    if old_descr != instance.description_file:
-        old_descr.delete(save=False)
-
-
-#----------------------------------------------------------
-
-
-@receiver(models.signals.post_save, sender=Database)
-def refresh_protocols(sender, instance, **kwargs):
-    """Refreshes changed protocols"""
-
-    try:
-        json_declaration = instance.declaration
-
-        protocols = DatabaseProtocol.objects.filter(
-                database__name=instance.name,
-                database__version=instance.version,
-            )
-
-        existing = set((k.name, k.set_template_basename()) for k in protocols)
-        new_objects = set((k['name'], k['template']) for k in json_declaration['protocols'])
-
-        for protocol_name, template in existing - new_objects:
-            # notice: no need to worry, this will clean-up all the rest
-            protocols.get(name__iexact=protocol_name).delete()
-
-        json_protocols = dict([(k['name'], k) for k in json_declaration['protocols']])
-
-        for protocol_name, template in new_objects - existing:
-            protocol = DatabaseProtocol(name=protocol_name, database=instance)
-            protocol.save()
-
-            json_protocol = json_protocols[protocol_name]
-
-            # creates all the template sets, outputs, etc for the first time
-            for set_attr in json_protocol['sets']:
-
-                tset_name = json_protocol['template'] + '__' + set_attr['template']
-
-                dataset_template = DatabaseSetTemplate.objects.filter(name=tset_name)
-                if not dataset_template: #create
-                    dataset_template = DatabaseSetTemplate(name=tset_name)
-                    dataset_template.save()
-                else:
-                    dataset_template = dataset_template[0]
-
-                # Create the databaset
-                dataset_set = DatabaseSet.objects.filter(
-                    name = set_attr['name'],
-                    template = dataset_template,
-                    protocol = protocol,
-                    )
-
-                if not dataset_set: #create
-                    dataset_set = DatabaseSet(
-                        name = set_attr['name'],
-                        template = dataset_template,
-                        protocol = protocol,
-                        )
-                    dataset_set.save()
-
-                    # Create the database set output
-                    for output_name, format_name in set_attr['outputs'].items():
-                        if len(format_name.split('/')) != 3:
-                            raise SyntaxError(
-                                    "Dataformat should be named following the style " \
-                                    "`<user>/<format>/<version>', the " \
-                                    "value `%s' is not valid" % (
-                                        format_name,
-                                        )
-                                    )
-                        (author, name, version) = format_name.split('/')
-                        dataformats = DataFormat.objects.filter(
-                            author__username=author,
-                            name=name,
-                            version=version,
-                            )
-
-                        # TODO: Remove this when validation works (see comments)
-                        if len(dataformats) != 1:
-                            raise SyntaxError(
-                                "Could not find dataformat named `%s' to set" \
-                                "output `%s' of template `%s' for protocol" \
-                                "`%s' of database `%s'", (
-                                  format_name,
-                                  output_name,
-                                  dataset_template.name,
-                                  protocol_name,
-                                  instance.name,
-                                  )
-                                )
-                            return
-
-                        database_output = DatabaseOutput.objects.filter(
-                            name=output_name,
-                            template=dataset_template,
-                            dataformat=dataformats[0],
-                            )
-
-                        if not database_output: # create
-                            database_output = DatabaseOutput(
-                                name=output_name,
-                                template=dataset_template,
-                                dataformat=dataformats[0],
-                                )
-                            database_output.save()
-
-    except Exception:
-        instance.delete() #do we need this or is it auto-rolled back?
-        raise
-
-
-#----------------------------------------------------------
-
-
 class DatabaseProtocolManager(models.Manager):
 
     def get_by_natural_key(self, database_name, database_version, name):
@@ -454,10 +285,11 @@ class DatabaseProtocolManager(models.Manager):
 
 class DatabaseProtocol(models.Model):
 
-    objects     = DatabaseProtocolManager()
+    objects = DatabaseProtocolManager()
 
-    database    = models.ForeignKey(Database, related_name='protocols')
-    name        = models.CharField(max_length=200, blank=True)
+    database = models.ForeignKey(Database, related_name='protocols',
+        on_delete=models.CASCADE)
+    name = models.CharField(max_length=200, blank=True)
 
     class Meta:
         unique_together = ('database', 'name')
@@ -496,16 +328,6 @@ class DatabaseProtocol(models.Model):
 #----------------------------------------------------------
 
 
-@receiver(models.signals.pre_delete, sender=DatabaseProtocol)
-def delete_sets(sender, **kwargs):
-
-    instance = kwargs['instance']
-    instance.sets.all().delete()
-
-
-#----------------------------------------------------------
-
-
 class DatabaseSetTemplateManager(models.Manager):
 
     def get_by_natural_key(self, name):
@@ -528,15 +350,6 @@ class DatabaseSetTemplate(models.Model):
 #----------------------------------------------------------
 
 
-@receiver(models.signals.pre_delete, sender=DatabaseSetTemplate)
-def delete_outputs(sender, **kwargs):
-
-    instance = kwargs['instance']
-    instance.outputs.all().delete()
-
-
-#----------------------------------------------------------
-
 class DatabaseSetManager(models.Manager):
 
     def get_by_natural_key(self, database_name, database_version, protocol_name, name, template_name):
@@ -553,9 +366,11 @@ class DatabaseSet(models.Model):
 
     objects     = DatabaseSetManager()
 
-    protocol    = models.ForeignKey(DatabaseProtocol, related_name='sets')
+    protocol    = models.ForeignKey(DatabaseProtocol, related_name='sets',
+        on_delete=models.CASCADE)
     name        = models.CharField(max_length=200, blank=True)
-    template    = models.ForeignKey(DatabaseSetTemplate, related_name='sets')
+    template    = models.ForeignKey(DatabaseSetTemplate, related_name='sets',
+        on_delete=models.CASCADE)
 
     class Meta:
         unique_together = ('protocol', 'name', 'template')
@@ -587,29 +402,46 @@ class DatabaseSet(models.Model):
 #----------------------------------------------------------
 
 
-@receiver(models.signals.post_delete, sender=DatabaseSet)
-def delete_empty_template_sets(sender, **kwargs):
+class DatabaseSetTemplateOutput(models.Model):
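+    '''Output declared by a database set template'''
+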
+    template        = models.ForeignKey(DatabaseSetTemplate,
+        related_name='outputs', on_delete=models.CASCADE)
+    name            = models.CharField(max_length=200)
+    dataformat      = models.ForeignKey(DataFormat,
+        related_name='database_outputs', on_delete=models.CASCADE)
+
+    class Meta:
+        unique_together = ('template', 'name')
+
+    def __str__(self):
+        return self.fullname()
 
-    instance = kwargs['instance']
-    try:
-        if not instance.template.sets.all(): instance.template.delete()
-    except:
-        pass
+    def fullname(self):
+        return self.template.name + '.' + self.name
 
 
 #----------------------------------------------------------
 
 
-class DatabaseOutput(models.Model):
-    template        = models.ForeignKey(DatabaseSetTemplate, related_name='outputs')
-    name            = models.CharField(max_length=200)
-    dataformat      = models.ForeignKey(DataFormat, related_name='database_outputs')
-
-    class Meta:
-        unique_together = ('template', 'name', 'dataformat')
+class DatabaseSetOutput(models.Model):
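+    '''Concrete output of a database set, identified by a unique hash'''
+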
+    template = models.ForeignKey(DatabaseSetTemplateOutput,
+        related_name='instances', on_delete=models.CASCADE)
+    set = models.ForeignKey(DatabaseSet, related_name='outputs',
+        on_delete=models.CASCADE)
+    hash = models.CharField(max_length=64, unique=True)
 
     def __str__(self):
         return self.fullname()
 
     def fullname(self):
-        return self.template.name + '.' + self.name
+        return '%s.%s.%s.%s' % (
+            self.set.protocol.database.fullname(),
+            self.set.protocol.name,
+            self.set.name,
+            self.template.name,
+            )
+
+    def all_referenced_dataformats(self):
+        return self.template.all_referenced_dataformats()
+
+    def all_needed_dataformats(self):
+        return self.template.all_needed_dataformats()
diff --git a/beat/web/databases/signals.py b/beat/web/databases/signals.py
new file mode 100644
index 0000000000000000000000000000000000000000..474c719ba27387a6b57fd02ec0f8e73e1a585dc3
--- /dev/null
+++ b/beat/web/databases/signals.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from django.db import models
+from django.dispatch import receiver
+
+from ..dataformats.models import DataFormat
+
+from .models import Database, DatabaseProtocol, DatabaseSet
+from .models import DatabaseSetTemplate, DatabaseSetTemplateOutput
+from .models import DatabaseSetOutput
+
+from .models import validate_database
+
+
+@receiver(models.signals.post_delete, sender=Database)
+def auto_delete_file_on_delete(sender, instance, **kwargs):
+    """Deletes file from filesystem when ``Database`` object is deleted.
+    """
+    if instance.declaration_file:
+        instance.declaration_file.delete(save=False)
+
+    if instance.source_code_file:
+        instance.source_code_file.delete(save=False)
+
+    if instance.description_file:
+        instance.description_file.delete(save=False)
+
+
+@receiver(models.signals.pre_save, sender=Database)
+def auto_delete_file_on_change(sender, instance, **kwargs):
+    """Deletes file from filesystem when ``Database`` object is changed."""
+
+    if not instance.pk:
+        return False
+
+    try:
+        old_file = Database.objects.get(pk=instance.pk).declaration_file
+    except Database.DoesNotExist:
+        return False
+
+    if old_file != instance.declaration_file:
+        old_file.delete(save=False)
+
+    try:
+        old_code = Database.objects.get(pk=instance.pk).source_code_file
+    except Database.DoesNotExist:
+        return False
+
+    if old_code != instance.source_code_file:
+        old_code.delete(save=False)
+
+    try:
+        old_descr = Database.objects.get(pk=instance.pk).description_file
+    except Database.DoesNotExist:
+        return False
+
+    if old_descr != instance.description_file:
+        old_descr.delete(save=False)
+
+
+@receiver(models.signals.post_save, sender=Database)
+def refresh_protocols(sender, instance, **kwargs):
+    """Refreshes changed protocols"""
+
+    try:
+
+        core = validate_database(instance.declaration)
+        core.name = instance.fullname()
+
+        protocols = DatabaseProtocol.objects.filter(
+                database__name=instance.name,
+                database__version=instance.version,
+            )
+
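+        # synchronise stored protocols with the validated declaration:
+        # dropped protocols are deleted, new ones are created below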
+        existing = set((k.name, k.set_template_basename()) for k in protocols)
+        new_objects = set([(k,v['template']) for k,v in core.protocols.items()])
+
+        for protocol_name, template in existing - new_objects:
+            # notice: no need to worry, this will clean-up all the rest
+            protocols.get(name__iexact=protocol_name).delete()
+
+        json_protocols = dict(core.protocols.items())
+
+        for protocol_name, template in new_objects - existing:
+            protocol = DatabaseProtocol(name=protocol_name, database=instance)
+            protocol.save()
+
+            json_protocol = json_protocols[protocol_name]
+
+            # creates all the template sets, outputs, etc for the first time
+            for set_attr in json_protocol['sets']:
+
+                tset_name = json_protocol['template'] + '__' + set_attr['template']
+
+                dataset_template = DatabaseSetTemplate.objects.filter(name=tset_name)
+                if not dataset_template: #create
+                    dataset_template = DatabaseSetTemplate(name=tset_name)
+                    dataset_template.save()
+                else:
+                    dataset_template = dataset_template[0]
+
+                # Create the database set
+                dataset = DatabaseSet.objects.filter(
+                    name = set_attr['name'],
+                    template = dataset_template,
+                    protocol = protocol,
+                    )
+
+                if not dataset: #create
+                    dataset = DatabaseSet(
+                        name = set_attr['name'],
+                        template = dataset_template,
+                        protocol = protocol,
+                        )
+                    dataset.save()
+
+                # Create the database set template output
+                for output_name, format_name in set_attr['outputs'].items():
+                    if len(format_name.split('/')) != 3:
+                        raise SyntaxError(
+                            "Dataformat should be named following the " \
+                            "style `<username>/<format>/<version>', the " \
+                            "value `%s' is not valid" % (format_name,)
+                            )
+                    (author, name, version) = format_name.split('/')
+                    dataformats = DataFormat.objects.filter(
+                        author__username=author,
+                        name=name,
+                        version=version,
+                        )
+
+                    # TODO: Remove this when validation works (see comments)
+                    if len(dataformats) != 1:
+                        raise SyntaxError(
+                            "Could not find dataformat named `%s' to set" \
+                            "output `%s' of template `%s' for protocol" \
+                            "`%s' of database `%s'", (
+                              format_name,
+                              output_name,
+                              dataset_template.name,
+                              protocol_name,
+                              instance.name,
+                              )
+                            )
+                        return
+
+                    database_template_output = \
+                        DatabaseSetTemplateOutput.objects.filter(
+                            name=output_name,
+                            template=dataset_template,
+                            dataformat=dataformats[0],
+                            )
+
+                    if not database_template_output: # create
+                        database_template_output = \
+                            DatabaseSetTemplateOutput(
+                                name=output_name,
+                                template=dataset_template,
+                                dataformat=dataformats[0],
+                                )
+                        database_template_output.save()
+
+                    else:
+                        database_template_output = \
+                            database_template_output[0]
+
+                    # Create the database set output
+                    hash = core.hash_output(protocol.name,
+                        dataset.name, output_name)
+                    dataset_output = \
+                        DatabaseSetOutput.objects.filter(hash=hash)
+
+                    if not dataset_output: # create
+                        dataset_output = DatabaseSetOutput(
+                            template=database_template_output,
+                            set=dataset,
+                            hash=hash,
+                            )
+                        dataset_output.save()
+
+    except Exception:
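+        # a database whose declaration cannot be processed is unusable:
+        # drop the partially-created entry and propagate the error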
+        instance.delete()
+        raise
+
+
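+# garbage-collects set templates that are no longer used by any set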
+@receiver(models.signals.post_delete, sender=DatabaseSet)
+def delete_empty_template_sets(sender, **kwargs):
+
+    instance = kwargs['instance']
+    try:
+        if not instance.template.sets.all(): instance.template.delete()
+    except:
+        pass
diff --git a/beat/web/databases/templates/databases/view.html b/beat/web/databases/templates/databases/view.html
index 005007b431e86e6809155adcca699eb9dec7fc81..688b77cb07775e18e67f336f77a46df9a7cfb4d2 100644
--- a/beat/web/databases/templates/databases/view.html
+++ b/beat/web/databases/templates/databases/view.html
@@ -2,21 +2,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -80,7 +80,7 @@
 
     <ul id="object-tabs" class="nav nav-tabs" role="tablist">
       <li role="presentation" class="active"><a {% if not database.description %}title="No documentation available" {% endif %} href="#doc" role="tab" data-toggle="tab" aria-controls="doc">Documentation{% if not database.description %} <i class="fa fa-warning"></i>{% endif %}</a></li>
-      <li role="presentation"><a href="#protocols" role="tab" data-toggle="tab" aria-controls="protocols">Protocols <span class="badge">{{ database.protocols.count }}</span></a></li>
+      <li role="presentation"><a href="#protos" role="tab" data-toggle="tab" aria-controls="protos">Protocols <span class="badge">{{ database.protocols.count }}</span></a></li>
       <li role="presentation"><a href="#sharing" role="tab" data-toggle="tab" aria-controls="sharing">Sharing</a></li>
     </ul>
 
@@ -92,11 +92,11 @@
         {% endif %}
       </div>
 
-      <div role="tabpanel" class="tab-pane" id="protocols">
+      <div role="tabpanel" class="tab-pane" id="protos">
         <div class="panel-group" id="protocol-accordion" role="tablist" aria-multiselectable="true">
 
           {% for protocol in database.protocols.all %}
-          {% with protocol.set_template_basename as name %}
+          {% with protocol.name as name %}
           <div class="panel panel-default">
             <div class="panel-heading" role="tab" id="heading_{{ name }}">
               <h4 class="panel-title">
diff --git a/beat/web/dataformats/apps.py b/beat/web/dataformats/apps.py
index 4460ab9ac3ed6169ccc8e90f12faacccee277ae7..6a14ef8a690cd91fcdc3a2fc36dcc454a1a04b23 100644
--- a/beat/web/dataformats/apps.py
+++ b/beat/web/dataformats/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class DataFormatsConfig(CommonAppConfig):
 
@@ -35,9 +34,7 @@ class DataFormatsConfig(CommonAppConfig):
     verbose_name = _('Data Formats')
 
     def ready(self):
-    	super(DataFormatsConfig, self).ready()
-
-        from .signals import (on_team_delete,
-            auto_delete_file_on_delete, auto_delete_file_on_change)
-
+        super(DataFormatsConfig, self).ready()
+        from .signals import on_team_delete, auto_delete_file_on_delete, auto_delete_file_on_change
+        from actstream import registry
         registry.register(self.get_model('DataFormat'))
diff --git a/beat/web/dataformats/tests/core.py b/beat/web/dataformats/tests/core.py
index 53aaf270ca1baa9794e9343e0b1eaa9a8373f57a..50e7b99fb4fea976e074f5186e862a2f6976bcd4 100644
--- a/beat/web/dataformats/tests/core.py
+++ b/beat/web/dataformats/tests/core.py
@@ -35,8 +35,8 @@ from django.contrib.auth.models import User
 
 from ..models import DataFormat
 
-from beat.web.team.models import Team
-from beat.web.common.testutils import BaseTestCase
+from ...team.models import Team
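+# importing tearDownModule makes the test runner invoke it when this
+# module's tests finish, wiping the temporary prefix and cache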
+from ...common.testutils import BaseTestCase, tearDownModule
 
 class DataFormatsAccessibilityFunctionsBase(BaseTestCase):
 
diff --git a/beat/web/dataformats/tests/tests.py b/beat/web/dataformats/tests/tests.py
index 226d450d1853d2819de5048395f3aa323114e2c0..ab4a68e5da729861d1f65fb767cc111ef4e412e1 100644
--- a/beat/web/dataformats/tests/tests.py
+++ b/beat/web/dataformats/tests/tests.py
@@ -33,6 +33,8 @@ from django.test import TestCase
 from django.conf import settings
 from django.contrib.auth.models import User
 
+from ...common.testutils import tearDownModule
+
 from beat.core.dataformat import Storage, DataFormat as CoreDataFormat
 
 from ..models import DataFormat
diff --git a/beat/web/dataformats/tests/tests_api.py b/beat/web/dataformats/tests/tests_api.py
index bb2e929d11cfe5b20564124d07aee24069dd7eab..7f35c370547d1cac8a6442c2d2870335f0045fe9 100644
--- a/beat/web/dataformats/tests/tests_api.py
+++ b/beat/web/dataformats/tests/tests_api.py
@@ -32,6 +32,7 @@ from django.contrib.auth.models import User
 from django.conf import settings
 from django.core.urlresolvers import reverse
 
+from ...common.testutils import tearDownModule
 from ..models import DataFormat
 
 from core import DataFormatsAPIBase, DataFormatSharingAPIBase
diff --git a/beat/web/dataformats/tests/tests_user.py b/beat/web/dataformats/tests/tests_user.py
index a667ced99344d120a277bf5993153eb871329b1d..c784dc6edd78eed147c90f540f35e8d3d3adffa2 100644
--- a/beat/web/dataformats/tests/tests_user.py
+++ b/beat/web/dataformats/tests/tests_user.py
@@ -28,6 +28,7 @@
 
 from core import DataFormatsAccessibilityFunctionsBase
 
+from ...common.testutils import tearDownModule
 from ..models import DataFormat
 
 class NotSharedDataFormat_CheckAccessibilityFunction(DataFormatsAccessibilityFunctionsBase):
diff --git a/beat/web/experiments/admin.py b/beat/web/experiments/admin.py
index 42d9d0b90a1c7a099604f4bf5be67af191d221ea..33027eb385e6c4a54a3e1f1d473a25613884b43a 100644
--- a/beat/web/experiments/admin.py
+++ b/beat/web/experiments/admin.py
@@ -32,17 +32,19 @@ from django.contrib import admin
 from django.core.files.base import ContentFile
 from django.utils import six
 from django.utils.html import format_html
+from django.utils.safestring import mark_safe
 from django.core.urlresolvers import reverse
-from django.db.models import Max
+from django.db.models import Max, Count
 
 from .models import Experiment as ExperimentModel
 from .models import Block as BlockModel
 from .models import Result as ResultModel
 from .models import CachedFile as CachedFileModel
+from .models import BlockInput as BlockInputModel
 from .models import validate_experiment
 
 from ..ui.forms import CodeMirrorJSONFileField, CodeMirrorRSTFileField, \
-        NameField
+        NameField, CodeMirrorJSONCharField
 
 from ..common.texts import Messages
 
@@ -113,6 +115,27 @@ class ExperimentModelForm(forms.ModelForm):
             self.data['declaration_file'] = ContentFile(self.data['declaration_file'], name='unsaved')
 
 
+class BlockInline(admin.TabularInline):
+
+    model = BlockModel
+    extra = 0
+
+    readonly_fields = ['link', 'algorithm', 'analyzer', 'status']
+    ordering = ['id']
+    fields = readonly_fields
+
+    def link(self, obj):
+        url = reverse('admin:experiments_block_change', args=(obj.pk,))
+        return mark_safe('<a href="%s">%s</a>' % (url, obj.name))
+    link.short_description = 'name'
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
 #----------------------------------------------------------
 
 
@@ -121,6 +144,11 @@ def reset_experiment(modeladmin, request, queryset):
 reset_experiment.short_description = 'Reset selected experiments'
 
 
+def cancel_experiment(modeladmin, request, queryset):
+    for q in queryset: q.cancel()
+cancel_experiment.short_description = 'Cancel selected experiments'
+
+
 def rehash_experiment(modeladmin, request, queryset):
     for q in queryset: q.save()
 rehash_experiment.short_description = 'Rehash selected experiments'
@@ -154,6 +182,7 @@ class Experiment(admin.ModelAdmin):
     actions = [
         rehash_experiment,
         reset_experiment,
+        cancel_experiment,
         ]
 
     form = ExperimentModelForm
@@ -163,6 +192,10 @@ class Experiment(admin.ModelAdmin):
         'shared_with_team'
     ]
 
+    inlines = [
+            BlockInline,
+            ]
+
     fieldsets = (
         (None,
           dict(
@@ -205,9 +238,140 @@ admin.site.register(ExperimentModel, Experiment)
 #----------------------------------------------------------
 
 
+class BlockInputInline(admin.TabularInline):
+
+    model           = BlockInputModel
+    verbose_name = 'Input'
+    verbose_name_plural = 'Inputs'
+    extra           = 0
+    ordering        = ['database', 'cache']
+    readonly_fields = ['input', 'channel']
+    fields = readonly_fields
+
+    def input(self, obj):
+        if obj.database:
+            url = reverse('admin:databases_databaseset_change',
+                args=(obj.database.set.pk,))
+            text = '%s (%s)' % (obj.database, obj.database.hash)
+            what = 'Dataset Output'
+        else:
+            url = reverse('admin:experiments_cachedfile_change',
+                args=(obj.cache.pk,))
+            text = obj.cache.hash
+            what = 'Cached File'
+        return mark_safe('%s: <a href="%s">%s</a>' % (what, url, text))
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
+class CachedFileInline(admin.TabularInline):
+
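+    # inline over the m2m 'through' table linking blocks to cached files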
+    model = CachedFileModel.blocks.through
+    verbose_name = 'Output'
+    verbose_name_plural = 'Outputs'
+    extra = 0
+
+    readonly_fields = ['output']
+    fields = readonly_fields
+
+    def output(self, obj):
+        url = reverse('admin:experiments_cachedfile_change', args=(obj.cachedfile.pk,))
+        text = obj.cachedfile.hash
+        what = 'Cached File'
+        return mark_safe('%s: <a href="%s">%s</a>' % (what, url, text))
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
+class BlockDependentsInline(admin.TabularInline):
+
+    model = BlockModel.dependencies.through
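+    # same 'through' table as BlockDependenciesInline below; fk_name selects
+    # the direction (here: blocks depending on the edited one)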
+    verbose_name = 'Dependent'
+    verbose_name_plural = 'Dependents'
+    fk_name = 'to_block'
+    extra = 0
+
+    readonly_fields = ['name', 'algorithm', 'analyzer', 'status']
+    ordering = ['id']
+    fields = readonly_fields
+
+    def name(self, obj):
+        url = reverse('admin:experiments_block_change', args=(obj.from_block.pk,))
+        return mark_safe('<a href="%s">%s</a>' % (url, obj.from_block.name))
+
+    def algorithm(self, obj):
+        return obj.from_block.algorithm
+
+    def analyzer(self, obj):
+        return obj.from_block.analyzer
+    analyzer.boolean = True
+
+    def status(self, obj):
+        return obj.from_block.get_status_display()
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
+class BlockDependenciesInline(admin.TabularInline):
+
+    model = BlockModel.dependencies.through
+    verbose_name = 'Dependency'
+    verbose_name_plural = 'Dependencies'
+    fk_name = 'from_block'
+    extra = 0
+
+    readonly_fields = ['name', 'algorithm', 'analyzer', 'status']
+    ordering = ['id']
+    fields = readonly_fields
+
+    def name(self, obj):
+        url = reverse('admin:experiments_block_change', args=(obj.to_block.pk,))
+        return mark_safe('<a href="%s">%s</a>' % (url, obj.to_block.name))
+
+    def algorithm(self, obj):
+        return obj.to_block.algorithm
+
+    def analyzer(self, obj):
+        return obj.to_block.analyzer
+    analyzer.boolean = True
+
+    def status(self, obj):
+        return obj.to_block.get_status_display()
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+
+class BlockModelForm(forms.ModelForm):
+
+    command = CodeMirrorJSONCharField(
+        help_text=Messages['json'],
+        readonly=True,
+        )
+
+    class Meta:
+        model = BlockModel
+        exclude = []
+
+
 class Block(admin.ModelAdmin):
 
-    list_display        = ('id', 'experiment', 'name', 'algorithm', 'analyzer', 'status', 'environment')
+    list_display        = ('id', 'author', 'toolchain', 'xp', 'name', 'algorithm', 'analyzer', 'status', 'ins', 'outs', 'environment', 'q')
     search_fields       = ['name',
                            'experiment__author__username',
                            'experiment__toolchain__author__username',
@@ -220,6 +384,84 @@ class Block(admin.ModelAdmin):
                            ]
     list_display_links  = ('id', 'name')
 
+    inlines = [
+            BlockDependenciesInline,
+            BlockInputInline,
+            CachedFileInline,
+            BlockDependentsInline,
+            ]
+
+    exclude = ['dependencies']
+
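+    # annotate the output count so the 'outs' column can be sorted by the
+    # database (see outs.admin_order_field)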
+    def get_queryset(self, request):
+        qs = super(Block, self).get_queryset(request)
+        return qs.annotate(Count('outputs'))
+
+    def author(self, obj):
+        return obj.experiment.author
+
+    def toolchain(self, obj):
+        return obj.experiment.toolchain
+
+    def xp(self, obj):
+        return obj.experiment.name
+    xp.short_description = 'experiment'
+
+    def ins(self, obj):
+        return obj.inputs.count()
+
+    def outs(self, obj):
+        return obj.outputs__count
+    outs.admin_order_field = 'outputs__count'
+
+    def q(self, obj):
+        if obj.queue: return obj.queue.name
+        return None
+    q.short_description = 'queue'
+
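+    # all concrete fields except 'command' (rendered by BlockModelForm) are
+    # presented read-only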
+    def get_readonly_fields(self, request, obj=None):
+        return list(self.readonly_fields) + \
+            [field.name for field in obj._meta.fields if field.name != 'command']
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
+    form = BlockModelForm
+
+    fieldsets = (
+        (None,
+          dict(
+            fields=('id', 'name', 'experiment'),
+            ),
+          ),
+        ('Status and dates',
+          dict(
+            fields=('creation_date', 'start_date', 'end_date', 'status'),
+            ),
+          ),
+        ('Code',
+          dict(
+            classes=('collapse',),
+            fields=('algorithm', 'analyzer',),
+            ),
+          ),
+        ('Backend',
+          dict(
+            classes=('collapse',),
+            fields=('environment', 'queue', 'required_slots', 'channel'),
+            ),
+          ),
+        ('Command',
+          dict(
+            classes=('collapse',),
+            fields=('command',),
+            ),
+          ),
+        )
+
 admin.site.register(BlockModel, Block)
 
 
@@ -228,19 +470,25 @@ admin.site.register(BlockModel, Block)
 
 class Result(admin.ModelAdmin):
 
-    list_display        = ('id', 'block', 'name', 'type', 'primary', 'data_value')
+    list_display        = ('id', 'cache', 'name', 'type', 'primary', 'data_value')
 
     search_fields       = [
             'name',
-            'type',
-            'block__name',
-            'block__experiment__name',
-            'block__experiment__author__username',
-            'block__experiment__toolchain__name',
+            'cache__hash',
             ]
 
     list_display_links  = ('id', 'name')
 
+    def get_readonly_fields(self, request, obj=None):
+        return list(self.readonly_fields) + \
+               [field.name for field in obj._meta.fields]
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
 admin.site.register(ResultModel, Result)
 
 
@@ -334,4 +582,16 @@ class CachedFile(admin.ModelAdmin):
           ),
         )
 
+    readonly_fields = ['blocks']
+
+    def get_readonly_fields(self, request, obj=None):
+        return list(self.readonly_fields) + \
+               [field.name for field in obj._meta.fields]
+
+    def has_delete_permission(self, request, obj=None):
+        return False
+
+    def has_add_permission(self, request):
+        return False
+
 admin.site.register(CachedFileModel, CachedFile)
diff --git a/beat/web/experiments/api.py b/beat/web/experiments/api.py
index 4660163ef33e54c6eb1fcaa1fc4cffa102c5940d..39a322cc1ee39078290f963ec82a8fc51f7dc65b 100644
--- a/beat/web/experiments/api.py
+++ b/beat/web/experiments/api.py
@@ -27,7 +27,8 @@
 
 import re
 import uuid
-import simplejson as json
+
+import simplejson
 
 from django.conf import settings
 from django.shortcuts import get_object_or_404
@@ -63,9 +64,6 @@ from ..algorithms.models import Algorithm
 
 from ..toolchains.models import Toolchain
 
-from ..utils.api import send_email_to_administrators
-from ..utils import scheduler
-
 
 #----------------------------------------------------------
 
@@ -214,7 +212,7 @@ class ListCreateExperimentsView(ListCreateContributionView):
         else:
             declaration_string = data['declaration']
             try:
-                declaration = json.loads(declaration_string)
+                declaration = simplejson.loads(declaration_string)
             except:
                 return BadRequestResponse('Invalid declaration data')
 
@@ -388,11 +386,11 @@ class RetrieveUpdateDestroyExperimentView(RetrieveUpdateDestroyContributionView)
 
             if isinstance(data['declaration'], dict):
                 declaration = data['declaration']
-                declaration_string = json.dumps(declaration, indent=4)
+                declaration_string = simplejson.dumps(declaration, indent=4)
             else:
                 declaration_string = data['declaration']
                 try:
-                    declaration = json.loads(declaration_string)
+                    declaration = simplejson.loads(declaration_string)
                 except:
                     raise serializers.ValidationError({'declaration' :'Invalid declaration data'})
 
@@ -403,7 +401,7 @@ class RetrieveUpdateDestroyExperimentView(RetrieveUpdateDestroyContributionView)
                 short_description = declaration['description']
             elif short_description is not None:
                 declaration['description'] = short_description
-                declaration_string = json.dumps(declaration, indent=4)
+                declaration_string = simplejson.dumps(declaration, indent=4)
         else:
             declaration = None
 
@@ -540,104 +538,16 @@ class StartExperimentView(APIView):
 
         # Retrieve the experiment
         experiment = get_object_or_404(Experiment.objects.for_user(request.user, True),
-                                       author__username=author_name,
-                                       toolchain__author__username=toolchain_author_name,
-                                       toolchain__name=toolchain_name,
-                                       toolchain__version=version,
-                                       name=name
-                                    )
+            author__username=author_name,
+            toolchain__author__username=toolchain_author_name,
+            toolchain__name=toolchain_name,
+            toolchain__version=version,
+            name=name
+            )
 
         self.check_object_permissions(request, experiment)
 
-        # cache for needed algorithms
-        algorithms = {}
-
-        # Extract the experiment declaration for all blocks
-        core_experiment = experiment.core()
-
-        if not core_experiment.valid:
-            return Response("Experiment `%s' is invalid:\n  * %s" % \
-                    '\n  * '.join(core_experiment.errors), status=404)
-
-        for block_name, block_details in core_experiment.setup().items():
-
-            config = block_details['configuration'] #easier handle
-
-            if config['algorithm'] not in algorithms:
-
-                # Retrieve the DB algorithm entry
-                core_algorithm = core_experiment.algorithms[config['algorithm']]
-
-                try:
-                    algorithm = Algorithm.objects.for_user(request.user, True).get(
-                            author__username=core_algorithm.storage.username,
-                            name=core_algorithm.storage.name,
-                            version=int(core_algorithm.storage.version),
-                            )
-                except Algorithm.DoesNotExist:
-                    experiment.delete()
-                    return Response("Algorithm `%s' not found in " \
-                            "database" % config['algorithm'],
-                            status=404)
-
-                # cache it
-                algorithms[config['algorithm']] = algorithm
-
-            else:
-                algorithm = algorithms[config['algorithm']]
-
-            # Create the block DB entry, if it is not already there
-            try:
-                block = Block.objects.get(name=block_name, experiment=experiment)
-            except Block.DoesNotExist:
-                block = Block(name=block_name, experiment=experiment)
-                block.status    = Block.NOT_CACHED
-                block.analyzer  = not config.has_key('outputs')
-                block.algorithm = algorithm
-
-                # connects used environment on creation
-                _env = config.get('environment',
-                        core_experiment.data['globals']['environment'])
-                block.environment = Environment.objects.get(name=_env['name'],
-                        version=_env['version'])
-
-                block.save()
-
-                # checks if we have an existing cache and attach
-                for out in config.get('outputs', {}):
-                    cache = CachedFile.objects.filter(hash=config['outputs'][out]['hash'])
-                    if cache: cache.get().blocks.add(block)
-
-
-        experiment.status = Experiment.SCHEDULED
-        experiment.save()
-
-        status_and_data = scheduler.postMessage('/run-experiment',
-                params={
-                    'experiment': experiment.fullname(),
-                    'hash': experiment.hash,
-                    })
-        if status_and_data is None:
-            experiment.reset()
-
-            send_email_to_administrators("The scheduler can't be contacted",
-                    "The user '%s' tried to schedule the experiment '%s'" % \
-                            (request.user.username, experiment.fullname()))
-
-            return Response("ERROR: Could not connect to the scheduler, " \
-                            "an administrator has been notified.\n\n" \
-                            "This experiment has been queued, you'll be " \
-                            "able to start it  later.\n\nWe apologize " \
-                            "for the inconvenience.",
-                            status=500)
-
-        status, data = status_and_data
-        if status != 200:
-            experiment.reset()
-            error_message = 'ERROR: The scheduler did not accept the ' \
-                    'experiment (saved for later execution).\nREASON: %s' % \
-                    data
-            return Response(error_message, status=404)
+        experiment.schedule()
 
         # Send the result
         result = { 'name': experiment.fullname(),
@@ -656,6 +566,7 @@ class CancelExperimentView(APIView):
     """
     Cancel a running experiment
     """
+
     permission_classes = [permissions.IsAuthenticated]
 
     def post(self, request, author_name, toolchain_author_name, toolchain_name, version, name):
@@ -665,48 +576,14 @@ class CancelExperimentView(APIView):
 
         # Retrieve the experiment
         experiment = get_object_or_404(Experiment,
-                                       author__username=author_name,
-                                       toolchain__author__username=toolchain_author_name,
-                                       toolchain__name=toolchain_name,
-                                       toolchain__version=version,
-                                       name=name
-                                    )
-
-        if (experiment.status == Experiment.RUNNING) or (experiment.status == Experiment.SCHEDULED):
-            previous_status = experiment.status
-
-            experiment.status = Experiment.CANCELING
-            experiment.save()
-
-            status_and_data = scheduler.postMessage('/cancel-experiment',
-                                                   params={
-                                                       'experiment': experiment.fullname(),
-                                                   })
-            if status_and_data is None:
-                experiment.status = previous_status
-                experiment.save()
-                return Response('ERROR: Could not connect to the scheduler', status=500)
-            else:
-                status, data = status_and_data
-                if status == 500:
-                    experiment.status = previous_status
-                    experiment.save()
-                    send_email_to_administrators('Scheduler internal error', data)
-                    error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                                    'An administrator has been notified about this problem.'
-                    return Response(error_message, status=status)
-                elif status != 200:
-                    experiment.status = previous_status
-                    experiment.save()
-                    error_message = 'ERROR: The scheduler did not accept the request.\n' \
-                                    '    REASON: %s' % data
-                    return Response(error_message, status=status)
-
-        elif experiment.status == Experiment.DONE:
-            return Response("The experiment is already done", status=409)
+            author__username=author_name,
+            toolchain__author__username=toolchain_author_name,
+            toolchain__name=toolchain_name,
+            toolchain__version=version,
+            name=name
+            )
 
-        else:
-            return BadRequestResponse("The experiment isn't running")
+        experiment.cancel()
 
         return Response(status=200)
 
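Both view rewrites above converge on the same pattern: the HTTP round-trip to the scheduler is gone and the view delegates to the model layer. A minimal sketch of that pattern, assuming only what the diff shows (Experiment.schedule() and Experiment.cancel()); the RuntimeError handling mirrors the errors raised by Block._schedule() further below and is otherwise an assumption:

    # Sketch only, not part of this changeset.  Illustrates the view-level
    # pattern after the refactor: no scheduler HTTP calls, just model methods.
    from django.shortcuts import get_object_or_404
    from rest_framework import permissions
    from rest_framework.response import Response
    from rest_framework.views import APIView

    from beat.web.experiments.models import Experiment

    class StartExperimentSketch(APIView):

        permission_classes = [permissions.IsAuthenticated]

        def post(self, request, author_name, name):
            # lookup simplified: the real views also filter by toolchain
            experiment = get_object_or_404(Experiment,
                author__username=author_name, name=name)
            try:
                experiment.schedule()  # no-op unless status is PENDING
            except RuntimeError as e:  # e.g. queue/environment was deleted
                return Response(str(e), status=400)
            return Response(status=200)
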
diff --git a/beat/web/experiments/apps.py b/beat/web/experiments/apps.py
index 68593dc35eb35080bf22341d34d4bb1c5019b6a1..6392aa6ee0362a37b0b5a503548e552ac0fe0e93 100644
--- a/beat/web/experiments/apps.py
+++ b/beat/web/experiments/apps.py
@@ -27,15 +27,13 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class ExperimentsConfig(CommonAppConfig):
     name = 'beat.web.experiments'
     verbose_name = _('Experiments')
 
     def ready(self):
-    	super(ExperimentsConfig, self).ready()
-
+        super(ExperimentsConfig, self).ready()
         from .signals import on_team_delete
-
+        from actstream import registry
         registry.register(self.get_model('Experiment'))
diff --git a/beat/web/experiments/management/__init__.py b/beat/web/experiments/management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/beat/web/experiments/management/commands/__init__.py b/beat/web/experiments/management/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/beat/web/utils/management/commands/qconf.py b/beat/web/experiments/management/commands/cleanup_orphaned_caches.py
similarity index 74%
rename from beat/web/utils/management/commands/qconf.py
rename to beat/web/experiments/management/commands/cleanup_orphaned_caches.py
index e96ae95be1b1a6628786999e5be00f77cb10aaf4..b52977cf85def5b08bd721906c0960ac7bddc24b 100644
--- a/beat/web/utils/management/commands/qconf.py
+++ b/beat/web/experiments/management/commands/cleanup_orphaned_caches.py
@@ -29,15 +29,21 @@
 import logging
 logger = logging.getLogger(__name__)
 
-from django.core.management.base import BaseCommand, CommandError
+from django.core.management.base import BaseCommand
 
-from ....utils import scheduler
-from ....backend.models import Queue
+from ... import utils
 
 
 class Command(BaseCommand):
 
-    help = 'Re-configures the scheduler'
+    help = 'Lists orphaned CachedFiles and, with --delete, removes them'
+
+
+    def add_arguments(self, parser):
+
+        parser.add_argument('--delete', action='store_true', dest='delete',
+                default=False, help='Really deletes the CachedFiles - ' \
+                        'otherwise only displays what would be deleted')
 
     def handle(self, *ignored, **arguments):
 
@@ -48,20 +54,10 @@ class Command(BaseCommand):
             if arguments['verbosity'] == 1: logger.setLevel(logging.INFO)
             elif arguments['verbosity'] >= 2: logger.setLevel(logging.DEBUG)
 
-        configuration = {}
-        for queue in Queue.objects.all():
-            configuration[queue.name] = queue.as_json()
-
-        # Send the configuration to the Scheduler
-        status_and_data = scheduler.putMessage('/queue-configuration',
-                data=configuration)
-
-        if status_and_data is None:
-            logger.error('Could *not* connect to the scheduler')
+        if arguments['delete']:
+            utils.cleanup_orphaned_cachedfiles()
 
-        status, data = status_and_data
-        if status != 200:
-            logger.error('Scheduler answered with *error* status %d:', status)
-            logger.error(data + '\n')
         else:
-            logger.info('Scheduler reconfiguration succesfully launched')
+            orphans = utils.list_orphaned_cachedfiles()
+            for c in orphans: print(c)
+            print('%d CachedFiles are unreachable' % len(orphans))
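The utils.list_orphaned_cachedfiles() and utils.cleanup_orphaned_cachedfiles() helpers this command calls live in beat.web.experiments.utils and are not shown in this diff. A minimal sketch of what such helpers could look like, assuming "orphaned" means a CachedFile no Block references through its blocks relation:

    # Sketch only - the real helpers are not part of this hunk.
    from beat.web.experiments.models import CachedFile

    def list_orphaned_cachedfiles():
        # CachedFiles never attached to (or since detached from) any Block
        return list(CachedFile.objects.filter(blocks__isnull=True))

    def cleanup_orphaned_cachedfiles():
        for cached_file in list_orphaned_cachedfiles():
            cached_file.delete()

Typical invocation would be `./bin/django cleanup_orphaned_caches' to preview and `./bin/django cleanup_orphaned_caches --delete' to actually remove the entries.
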
diff --git a/beat/web/experiments/migrations/0002_scheduler_addons.py b/beat/web/experiments/migrations/0002_scheduler_addons.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1f462385b995661ad68608a03f82773c712bfb2
--- /dev/null
+++ b/beat/web/experiments/migrations/0002_scheduler_addons.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+def move_result_to_cache(apps, schema_editor):
+    '''Moves the result association from the block to the related cache file'''
+
+    Result = apps.get_model("experiments", "Result")
+
+    total = Result.objects.count()
+    if total: print('')
+    for i, r in enumerate(Result.objects.order_by('-id')):
+        print("Resetting result (%d) %d/%d..." % (r.id, i+1, total))
+        r.cache = r.block.hashes.first()
+        r.save()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('experiments', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='result',
+            name='cache',
+            field=models.ForeignKey(related_name='results',
+              to='experiments.CachedFile', null=True),
+        ),
+        migrations.RunPython(move_result_to_cache),
+    ]
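As written, this data migration has no reverse step, so `migrate' cannot roll back past it. If reversibility were wanted, RunPython accepts a reverse function, and Django's RunPython.noop suffices when the forward pass is safe to keep (a sketch, not part of this changeset):

    # Sketch: a reversible variant of the operations list above.  RunPython.noop
    # is standard Django; move_result_to_cache is the function defined above.
    from django.db import migrations

    operations = [
        migrations.RunPython(move_result_to_cache,
            reverse_code=migrations.RunPython.noop),
    ]
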
diff --git a/beat/web/experiments/migrations/0003_scheduler_addons_2.py b/beat/web/experiments/migrations/0003_scheduler_addons_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d72f2965b2e05e7e19b4621e2cfd9ecd967248d
--- /dev/null
+++ b/beat/web/experiments/migrations/0003_scheduler_addons_2.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations
+
+
+def dedup_results(apps, schema_editor):
+    '''Deletes duplicated results (older ones)'''
+
+    Result = apps.get_model("experiments", "Result")
+
+    for i, r in enumerate(Result.objects.order_by('-id')):
+        older = Result.objects.filter(name=r.name, id__lt=r.id,
+            cache=r.block.hashes.first())
+        if older:
+            print("Cache %s already contains Result `%s' - keeping " \
+                "newest (out of %d)..." % (r.block.hashes.first().hash, r.name,
+                  older.count()+1))
+            older.delete()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('experiments', '0002_scheduler_addons'),
+        ('search', '0002_scheduler_addons'),
+    ]
+
+    operations = [
+        migrations.RunPython(dedup_results),
+        ]
diff --git a/beat/web/experiments/migrations/0004_scheduler_addons_3.py b/beat/web/experiments/migrations/0004_scheduler_addons_3.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b41d1496e91d176d9745225dbf4d7f4187dd12c
--- /dev/null
+++ b/beat/web/experiments/migrations/0004_scheduler_addons_3.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('backend', '0002_scheduler_addons'),
+        ('databases', '0002_scheduler_addons'),
+        ('experiments', '0003_scheduler_addons_2'),
+    ]
+
+    operations = [
+        migrations.AlterUniqueTogether(
+            name='result',
+            unique_together=set([('cache', 'name')]),
+        ),
+        migrations.RemoveField(
+            model_name='result',
+            name='block',
+        ),
+        migrations.CreateModel(
+            name='BlockInput',
+            fields=[
+                ('id', models.AutoField(verbose_name='ID', serialize=False,
+                  auto_created=True, primary_key=True)),
+                ('channel', models.CharField(default=b'',
+                  help_text=b'Synchronization channel within the toolchain',
+                  max_length=200, blank=True)),
+                ('block', models.ForeignKey(related_name='inputs',
+                  to='experiments.Block', null=True)),
+                ('cache', models.ForeignKey(related_name='inputs',
+                  to='experiments.CachedFile', null=True)),
+                ('database', models.ForeignKey(related_name='blocks',
+                  to='databases.DatabaseSetOutput', null=True)),
+            ],
+        ),
+        migrations.AddField(
+            model_name='block',
+            name='channel',
+            field=models.CharField(default=b'',
+            help_text=b'Synchronization channel within the toolchain',
+            max_length=200, blank=True),
+        ),
+        migrations.AddField(
+            model_name='block',
+            name='command',
+            field=models.TextField(null=True, blank=True),
+        ),
+        migrations.AddField(
+            model_name='block',
+            name='dependencies',
+            field=models.ManyToManyField(related_name='dependents',
+              to='experiments.Block', blank=True),
+        ),
+        migrations.AlterField(
+            model_name='block',
+            name='environment',
+            field=models.ForeignKey(related_name='blocks',
+              on_delete=models.deletion.SET_NULL, to='backend.Environment',
+              null=True),
+        ),
+        migrations.AddField(
+            model_name='block',
+            name='queue',
+            field=models.ForeignKey(related_name='blocks',
+              on_delete=models.deletion.SET_NULL, to='backend.Queue',
+              null=True),
+        ),
+        migrations.AddField(
+            model_name='block',
+            name='required_slots',
+            field=models.PositiveIntegerField(default=1),
+        ),
+        migrations.AlterField(
+            model_name='block',
+            name='status',
+            field=models.CharField(default=b'N', max_length=1,
+              choices=[
+                (b'N', b'Not cached'),
+                (b'P', b'Processing'),
+                (b'C', b'Cached'),
+                (b'F', b'Failed'),
+                (b'S', b'Skipped'),
+                (b'L', b'Cancelled'),
+                ]
+              ),
+        ),
+        migrations.AlterUniqueTogether(
+            name='block',
+            unique_together=set([('experiment', 'name')]),
+        ),
+        migrations.AlterField(
+            model_name='cachedfile',
+            name='blocks',
+            field=models.ManyToManyField(related_name='outputs',
+              to='experiments.Block', blank=True),
+        ),
+    ]
diff --git a/beat/web/experiments/migrations/0005_scheduler_addons_4.py b/beat/web/experiments/migrations/0005_scheduler_addons_4.py
new file mode 100644
index 0000000000000000000000000000000000000000..5479d104fae5e0831d71f1b93c1f52a65b099b95
--- /dev/null
+++ b/beat/web/experiments/migrations/0005_scheduler_addons_4.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations, utils
+from django.conf import settings
+
+import simplejson
+import beat.core.experiment
+from ...common import storage
+
+
+def reset_blocks(apps, schema_editor):
+    '''Resets block dependencies and queue relationship'''
+
+    Experiment = apps.get_model("experiments", "Experiment")
+    Block = apps.get_model("experiments", "Block")
+    BlockInput = apps.get_model("experiments", "BlockInput")
+    CachedFile = apps.get_model("experiments", "CachedFile")
+    Queue = apps.get_model("backend", "Queue")
+    Environment = apps.get_model("backend", "Environment")
+    Algorithm = apps.get_model("algorithms", "Algorithm")
+    DatabaseSetOutput = apps.get_model("databases", "DatabaseSetOutput")
+    Result = apps.get_model("experiments", "Result")
+
+    total = Experiment.objects.count()
+    for i, e in enumerate(Experiment.objects.order_by('id')):
+
+        fullname = '%s/%s/%s/%d/%s' % (
+            e.author.username,
+            e.toolchain.author.username,
+            e.toolchain.name,
+            e.toolchain.version,
+            e.name,
+            )
+
+        print("Updating blocks for experiment %d/%d (%s, id=%d)..." % \
+            (i+1, total, fullname, e.id))
+
+        xp_decl = simplejson.loads(storage.get_file_content(e,
+          'declaration_file'))
+        tc_decl = simplejson.loads(storage.get_file_content(e.toolchain,
+          'declaration_file'))
+
+        xp = beat.core.experiment.Experiment(settings.PREFIX, (xp_decl,
+          tc_decl))
+
+        if xp.errors:
+            message = "The experiment `%s' isn't valid (skipping " \
+                "block update), due to the following errors:\n  * %s"
+            print(message % (fullname, '\n  * '.join(xp.errors)))
+            continue
+
+        # Loads the experiment execution description, creating the Block's,
+        # BlockInput's and BlockOutput's as required.
+        for block_name, description in xp.setup().items():
+
+            # Checks that the Queue/Environment exists
+            job_description = description['configuration']
+
+            env = Environment.objects.filter(
+                name=job_description['environment']['name'],
+                version=job_description['environment']['version'],
+                )
+
+            if not env:
+                print("Cannot find environment `%s (%s)' - not setting" % \
+                    (job_description['environment']['name'],
+                    job_description['environment']['version']))
+                env = None
+            else:
+                env = env[0]
+
+            # Search for queue that contains a specific environment
+            # notice we don't require environment to exist in relation to
+            # the queue as it may have been removed already.
+            queue = Queue.objects.filter(name=job_description['queue'])
+            if not queue:
+                print("Cannot find queue `%s'" % job_description['queue'])
+                queue = None
+            else:
+                queue = queue[0]
+
+            parts = job_description['algorithm'].split('/')
+            algorithm = Algorithm.objects.get(
+                author__username=parts[0],
+                name=parts[1],
+                version=parts[2],
+                )
+
+            # Ties the block in
+            slots = job_description.get('nb_slots')
+
+            try:
+                b, _ = Block.objects.get_or_create(experiment=e,
+                    name=block_name, algorithm=algorithm)
+            except utils.IntegrityError as exc:
+                print("Block `%s' for experiment `%s' already exists - " \
+                    "modifying entry for migration purposes. This " \
+                    "issue is due a misconnection on the toolchain level " \
+                    "(known case: tpereira/full_isv/2)" % \
+                    (block_name, fullname))
+                b = Block.objects.get(experiment=e, name=block_name)
+
+            b.command=simplejson.dumps(job_description, indent=4)
+            b.status='N' if (e.status == 'P') else b.status
+            b.environment=env
+            b.queue=queue
+            b.algorithm = algorithm
+            b.analyzer = (algorithm.result_dataformat is not None)
+            b.required_slots=job_description['nb_slots']
+            b.channel=job_description['channel']
+            b.save()
+
+            # from this point: requires block to have an assigned id
+            b.dependencies.add(*[e.blocks.get(name=k) \
+                for k in description['dependencies']])
+
+            # reset inputs and outputs - creates if necessary only
+            for v in job_description['inputs'].values():
+                if 'database' in v: #database input
+                    db = DatabaseSetOutput.objects.get(hash=v['hash'])
+                    BlockInput.objects.get_or_create(block=b,
+                        channel=v['channel'], database=db)
+                else:
+                    cache = CachedFile.objects.get(hash=v['hash'])
+                    BlockInput.objects.get_or_create(block=b,
+                        channel=v['channel'], cache=cache)
+
+            current = list(b.outputs.all())
+            b.outputs.clear() #dissociates all current outputs
+            outputs = job_description.get('outputs',
+                {'': job_description.get('result')})
+            for v in outputs.values():
+                cache, cr = CachedFile.objects.get_or_create(hash=v['hash'])
+                if cr:
+                    if len(current) == len(outputs): #copy
+                        cache.linear_execution_time = \
+                            current[0].linear_execution_time
+                        cache.speed_up_real = current[0].speed_up_real
+                        cache.speed_up_maximal = current[0].speed_up_maximal
+                        cache.queuing_time = current[0].queuing_time
+                        cache.stdout = current[0].stdout
+                        cache.stderr = current[0].stderr
+                        cache.error_report = current[0].error_report
+                        cache.cpu_time = current[0].cpu_time
+                        cache.max_memory = current[0].max_memory
+                        cache.data_read_size = current[0].data_read_size
+                        cache.data_read_nb_blocks = \
+                            current[0].data_read_nb_blocks
+                        cache.data_read_time = current[0].data_read_time
+                        cache.data_written_size = current[0].data_written_size
+                        cache.data_written_nb_blocks = \
+                            current[0].data_written_nb_blocks
+                        cache.data_written_time = current[0].data_written_time
+                        if current[0].results.count():
+                            for r in current[0].results.all():
+                                r.cache = cache
+                                r.save()
+                        print("CachedFile data `%s' MOVED from `%s'" % \
+                            (cache.hash, current[0].hash))
+                    else:
+                        print("CachedFile (hash=%s) CREATED for block `%s' " \
+                            "of experiment `%s' which is in state `%s'" % \
+                            (cache.hash, block_name, fullname,
+                              b.get_status_display()))
+                cache.blocks.add(b)
+
+        #asserts all blocks (except analysis blocks) have dependents
+        for b in e.blocks.all():
+            assert (b.analyzer and b.dependents.count() == 0) or \
+                b.dependents.count() > 0
+
+        #asserts all analysis blocks have only one output
+        for b in e.blocks.filter(analyzer=True):
+            assert b.outputs.count() == 1
+
+        #removes results without caches
+        for r in Result.objects.filter(cache=None):
+            print("Removing result %d (no associated cache)" % r.id)
+            r.delete()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('experiments', '0004_scheduler_addons_3'),
+    ]
+
+    operations = [
+        migrations.RunPython(reset_blocks),
+    ]
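Note that all of these migrations fetch model classes through apps.get_model() instead of importing them directly: the historical registry hands back models matching the schema at that point in the migration graph, whereas a direct import binds to the current models.py, whose fields (e.g. Result.block, removed in 0004) may not match. In short:

    # Sketch: historical vs. direct model access inside a data migration.
    def forwards(apps, schema_editor):
        # correct: the schema as of this migration
        Result = apps.get_model("experiments", "Result")
        # wrong in general: current code, possibly a different schema
        # from beat.web.experiments.models import Result
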
diff --git a/beat/web/experiments/models.py b/beat/web/experiments/models.py
index e3366d373cd6d94e5d0887c6c4cd527c0660307b..67af2255bfce850fe4562d33b4997a5a86ee0f47 100644
--- a/beat/web/experiments/models.py
+++ b/beat/web/experiments/models.py
@@ -26,6 +26,7 @@
 ###############################################################################
 
 from django.db import models
+from django.db import transaction
 from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
 from django.conf import settings
@@ -35,13 +36,15 @@ from django.template.loader import render_to_string
 from django.contrib.sites.models import Site
 
 import beat.core.hash
+import beat.core.data
+import beat.core.algorithm
 import beat.core.experiment
 
 from beat.core.utils import NumpyJSONEncoder
 
 from ..algorithms.models import Algorithm
-from ..databases.models import DatabaseSet
 from ..toolchains.models import Toolchain
+from ..utils.api import send_email_to_administrators
 
 from ..common.models import Shareable
 from ..common.models import ContributionManager
@@ -55,12 +58,14 @@ from ..common.exceptions import ShareError
 from ..common.texts import Messages
 from ..common.storage import OverwriteStorage
 from ..backend.models import Queue, Environment
+from ..databases.models import DatabaseSet, DatabaseSetOutput
 from ..import __version__
 
 
 from datetime import datetime
 
 import os
+import glob
 import simplejson
 
 import logging
@@ -217,8 +222,10 @@ class Experiment(Shareable):
 
     #_____ Fields __________
 
-    author            = models.ForeignKey(User, related_name='experiments')
-    toolchain         = models.ForeignKey(Toolchain, related_name='experiments')
+    author            = models.ForeignKey(User, related_name='experiments',
+        on_delete=models.CASCADE)
+    toolchain         = models.ForeignKey(Toolchain,
+        related_name='experiments', on_delete=models.CASCADE)
     name              = models.CharField(max_length=200)
     short_description = models.CharField(max_length=100, default='', blank=True, help_text=Messages['short_description'])
     status            = models.CharField(max_length=1, choices=STATUS, default=PENDING)
@@ -244,8 +251,8 @@ class Experiment(Shareable):
 
 
     # read-only parameters that are updated at every save(), if required
-    hash                  = models.CharField(max_length=64)
-    referenced_datasets   = models.ManyToManyField(DatabaseSet, related_name='experiments', blank=True)
+    hash = models.CharField(max_length=64)
+    referenced_datasets = models.ManyToManyField(DatabaseSet, related_name='experiments', blank=True)
     referenced_algorithms = models.ManyToManyField(Algorithm, related_name='experiments', blank=True)
 
     objects = ExperimentManager()
@@ -400,6 +407,10 @@ class Experiment(Shareable):
 
         is_adding = self._state.adding
 
+        if not is_adding and self._loaded_status != self.status:
+            if self.status in [Experiment.DONE, Experiment.FAILED]:
+                self.email()
+
         # Invoke the base implementation
         super(Experiment, self).save(*args, **kwargs)
 
@@ -409,6 +420,9 @@ class Experiment(Shareable):
             storage.rename_file(self, 'description_file', self.description_filename())
 
         if content_modified:
+            # Creates experiment blocks and setup dependencies
+            self.update_blocks()
+
             # Link the experiment to the datasets
             self.referenced_datasets.clear()
             for dataset_declaration in xp.datasets.values():
@@ -451,31 +465,32 @@ class Experiment(Shareable):
 
                 self.referenced_algorithms.add(algorithm_db)
 
-        if not is_adding and self._loaded_status != self.status:
-            if self.status in [Experiment.DONE, Experiment.FAILED]:
-                user_email_list = [self.author.email] if self.author.accountsettings.experiment_mail_notifications_enabled else []
-                user_email_list.extend([user.email for user in self.shared_with.all() if user.accountsettings.experiment_mail_notifications_enabled])
-                all_team_members = [user for team in self.shared_with_team.all() for user in team.members.all()]
-                user_email_list.extend([user.email for user in all_team_members if user.email not in user_email_list and user.accountsettings.experiment_mail_notifications_enabled])
 
-                if user_email_list:
-                    if self.status == Experiment.DONE:
-                        subject = "Experiment %s finished successfully" % \
-                            self.fullname()
-                        template_path = 'experiments/successful_experiment_email.txt'
+    def email(self):
+        '''E-mails owners and shared parties about this experiment's status'''
+
+        user_email_list = [self.author.email] if self.author.accountsettings.experiment_mail_notifications_enabled else []
+        user_email_list.extend([user.email for user in self.shared_with.all() if user.accountsettings.experiment_mail_notifications_enabled])
+        all_team_members = [user for team in self.shared_with_team.all() for user in team.members.all()]
+        user_email_list.extend([user.email for user in all_team_members if user.email not in user_email_list and user.accountsettings.experiment_mail_notifications_enabled])
 
-                    elif self.status == Experiment.FAILED:
-                        subject = "Experiment %s failed" % self.fullname()
-                        template_path = 'experiments/failed_experiment_email.txt'
+        if user_email_list:
+            if self.status == Experiment.DONE:
+                subject = "Experiment %s finished successfully" % \
+                    self.fullname()
+                template_path = 'experiments/successful_experiment_email.txt'
 
-                    try:
-                        send_mail(subject, render_to_string(template_path, {'experiment': self, 'beat_version': __version__, 'site': Site.objects.get_current()}), settings.DEFAULT_FROM_EMAIL, user_email_list)
-                    except Exception:
-                        import traceback
-                        logger.warn("Could not send e-mail to `%s' about " \
-                            "`%s'. Exception caught: %s", user_email_list,
-                            self, traceback.format_exc())
+            elif self.status == Experiment.FAILED:
+                subject = "Experiment %s failed" % self.fullname()
+                template_path = 'experiments/failed_experiment_email.txt'
 
+            try:
+                send_mail(subject, render_to_string(template_path, {'experiment': self, 'beat_version': __version__, 'site': Site.objects.get_current()}), settings.DEFAULT_FROM_EMAIL, user_email_list)
+            except Exception:
+                import traceback
+                logger.warn("Could not send e-mail to `%s' about " \
+                    "`%s'. Exception caught: %s", user_email_list,
+                    self, traceback.format_exc())
 
 
     def share(self, users=None, teams=None, algorithms_infos={}):
@@ -485,6 +500,94 @@ class Experiment(Shareable):
         super(Experiment, self).share(users=users, teams=teams)
 
 
+    def update_blocks(self):
+        """Updates internal block representation of an experiment"""
+
+        corexp = self.core()
+
+        # Loads the experiment execution description, creating the Block's,
+        # BlockInput's and BlockOutput's as required.
+        for block_name, description in corexp.setup().items():
+
+            # Checks that the Queue/Environment exists
+            job_description = description['configuration']
+
+            env = Environment.objects.filter(
+                name=job_description['environment']['name'],
+                version=job_description['environment']['version'],
+                )
+
+            if not env:
+                logger.warn("Cannot find environment `%s (%s)' - not setting",
+                    job_description['environment']['name'],
+                    job_description['environment']['version'])
+                env = None
+            else:
+                env = env[0]
+
+            # Search for queue that contains a specific environment
+            if env:
+                queue = Queue.objects.filter(name=job_description['queue'],
+                    environments__in=[env])
+            else:
+                queue = Queue.objects.filter(name=job_description['queue'])
+            if not queue:
+                env_name = env.fullname() if env else 'NULL'
+                logger.warn("Cannot find queue `%s' which contains " \
+                    "environment `%s' - not setting",
+                    job_description['queue'], env_name)
+                queue = None
+            else:
+                queue = queue[0]
+
+            parts = job_description['algorithm'].split('/')
+            algorithm = Algorithm.objects.get(
+                author__username=parts[0],
+                name=parts[1],
+                version=parts[2],
+                )
+
+            # Ties the block in
+            slots = job_description.get('nb_slots')
+            b = Block.objects.filter(experiment=self, name=block_name).first()
+            if b is None:
+                b = Block(experiment=self, name=block_name, algorithm=algorithm)
+            else:
+                b.algorithm = algorithm
+            b.command=simplejson.dumps(job_description, indent=4)
+            b.status=Block.NOT_CACHED
+            b.analyzer=algorithm.analysis()
+            b.environment=env
+            b.queue=queue
+            b.required_slots=job_description['nb_slots']
+            b.channel=job_description['channel']
+            b.save()
+
+            # from this point: requires block to have an assigned id
+            b.dependencies.clear()
+            b.dependencies.add(*[self.blocks.get(name=k) \
+                for k in description['dependencies']])
+
+            # reset inputs and outputs - creates if necessary only
+            b.inputs.clear()
+            for v in job_description['inputs'].values():
+                if 'database' in v: #database input
+                    db = DatabaseSetOutput.objects.get(hash=v['hash'])
+                    BlockInput.objects.get_or_create(block=b,
+                        channel=v['channel'], database=db)
+                else:
+                    cache = CachedFile.objects.get(hash=v['hash'])
+                    BlockInput.objects.get_or_create(block=b,
+                        channel=v['channel'], cache=cache)
+
+            b.outputs.clear()
+            outputs = job_description.get('outputs',
+                {'': job_description.get('result')})
+            for v in outputs.values():
+                cache, cr = CachedFile.objects.get_or_create(hash=v['hash'])
+                cache.blocks.add(b)
+
+
     #_____ Methods __________
 
     def is_busy(self):
@@ -505,6 +608,13 @@ class Experiment(Shareable):
     def core(self):
         return validate_experiment(self.declaration, self.toolchain.declaration)[0]
 
+    def job_splits(self, status=None):
+        from ..backend.models import JobSplit
+        retval = JobSplit.objects.filter(job__block__in=self.blocks.all())
+        if status is not None:
+            retval = retval.filter(status=status)
+        return retval
+
     def get_absolute_url(self):
         return reverse(
                 'experiments:view',
@@ -541,6 +651,9 @@ class Experiment(Shareable):
                     ),
                 )
 
+    def get_admin_change_url(self):
+        return reverse('admin:experiments_experiment_change', args=(self.id,))
+
     def completion(self):
         if self.start_date is None:
             return 0
@@ -570,13 +683,17 @@ class Experiment(Shareable):
     def reset(self):
         """Resets an experiment so it can be run again"""
 
-        for block in self.blocks.all():
-            block.results.all().delete()
-            block.delete()
+        if not self.is_done(): return #can only reset experiments which are done
+
+        self.blocks.update(
+            status=Block.NOT_CACHED,
+            start_date=None,
+            end_date=None,
+            )
 
         self.start_date = None
-        self.end_date   = None
-        self.status     = self.PENDING
+        self.end_date = None
+        self.status = self.PENDING
 
         # reset sharing state
         self.sharing    = Shareable.PRIVATE
@@ -584,8 +701,7 @@ class Experiment(Shareable):
         self.shared_with_team.clear()
 
         # remove associated attestations
-        if self.has_attestation():
-            self.attestation.all().delete()
+        if self.has_attestation(): self.attestation.all().delete()
 
         self.save()
 
@@ -625,6 +741,121 @@ class Experiment(Shareable):
         return storage.get_file_content(self, 'declaration_file')
 
 
+    def _update_state(self):
+        '''Update self state based on associated block states
+
+        This method is called by the underlying block. It is not part of the
+        Experiment's public API and must not be called by any other user code.
+        '''
+
+        self_ = Experiment.objects.select_for_update().get(pk=self.pk)
+
+        if self_.is_done(): return
+
+        if self.start_date is None:
+            d = self.blocks.filter(start_date__isnull=False).\
+                order_by('start_date')
+            if d:
+                self.start_date = d.first().start_date
+            else:
+                self.start_date = datetime.now()
+
+        block_statuses = self.blocks.values_list('status', flat=True)
+
+        # Process main state and state from job results
+        if Block.FAILED in block_statuses or Block.CANCELLED in block_statuses:
+            if Block.PROCESSING in block_statuses:
+                self.status = Experiment.CANCELING
+            else:
+                self.status = Experiment.FAILED
+
+        elif (Block.PROCESSING in block_statuses) or \
+            ((Block.NOT_CACHED in block_statuses or \
+            Block.SKIPPED in block_statuses) and \
+            Block.CACHED in block_statuses):
+            self.status = Experiment.RUNNING
+
+        elif Block.NOT_CACHED not in block_statuses:
+            self.status = Experiment.DONE
+
+        else:
+            self.status = Experiment.SCHEDULED
+
+        # Set end date if experiment is done
+        if self.is_done() and self.end_date is None:
+            d = self.blocks.filter(end_date__isnull=False).\
+                order_by('-end_date')
+            if d:
+                self.end_date = d.first().end_date
+            else:
+                self.end_date = datetime.now()
+
+
+        self.save()
+
+
+    @transaction.atomic
+    def schedule(self):
+        '''Schedules this experiment for execution at the backend
+
+        Because the experiment is fully built on ``save()`` (including block
+        interdependence and cache requirements), to "schedule" means solely
+        creating :py:class:`..backend.models.Job`'s to address all
+        algorithm-equipped blocks in the experiment. A ``Job`` is the
+        reflection of the experiment's block for the backend and makes the
+        schedule aware of execution units that must be processed. Each ``Job``
+        is then split on the scheduler process, for as many times as required
+        by the :py:class:`Block`'s ``required_slots`` entry, effectively
+        creating one :py:class:`..backend.models.JobSplit` per split.
+        '''
+
+        self_ = Experiment.objects.select_for_update().get(pk=self.pk)
+
+        if self_.status != Experiment.PENDING: return
+
+        for b in self.blocks.all(): b._schedule()
+
+        # notice that the previous call may decide all is done already
+        # so, we must respect that before setting the SCHEDULED status
+        self.refresh_from_db()
+        if not self.is_done():
+            self.status = Experiment.SCHEDULED
+            self.save()
+
+
+    @transaction.atomic
+    def cancel(self):
+        '''Cancels the execution of this experiment on the backend.
+
+        .. caution::
+
+           After each block is scheduled, it is possible that some or all
+           splits for a given block are under execution. We must
+           select-for-update all Blocks and associated Jobs, so as to avoid
+           concurrent resetting from a separate scheduling process.
+
+        '''
+
+        self_ = Experiment.objects.get(pk=self.pk)
+
+        if self_.status not in (Experiment.SCHEDULED, Experiment.RUNNING):
+            return
+
+        with transaction.atomic():
+            for b in self.blocks.all(): b._cancel()
+
+
+    def fork(self, username=None, name=None):
+        '''Forks this experiment under a new username or name'''
+
+        author = username or self.author
+        name = name or self.name
+        xp, _, __ = Experiment.objects.create_experiment(author,
+            self.toolchain, name, self.get_declaration(),
+            self.short_description, self.description)
+        return xp
+
+
 #----------------------------------------------------------
 
 
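The branching in Experiment._update_state() above reads as a pure function from the collection of block status letters to an experiment status. Extracted as a standalone sketch (the returned names stand for the corresponding Experiment.* constants):

    # Sketch of the status-derivation rule in Experiment._update_state().
    # Letters are the Block status codes: N(ot cached), P(rocessing),
    # C(ached), F(ailed), S(kipped), L (cancelled).
    def derive_experiment_status(block_statuses):
        if 'F' in block_statuses or 'L' in block_statuses:
            return 'CANCELING' if 'P' in block_statuses else 'FAILED'
        if ('P' in block_statuses or
                (('N' in block_statuses or 'S' in block_statuses) and
                 'C' in block_statuses)):
            return 'RUNNING'
        if 'N' not in block_statuses:
            return 'DONE'
        return 'SCHEDULED'

For example, derive_experiment_status(['C', 'N']) yields 'RUNNING', while ['C'] alone yields 'DONE'.
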
@@ -649,26 +880,53 @@ class Block(models.Model):
     PROCESSING = 'P'
     CACHED     = 'C'
     FAILED     = 'F'
+    SKIPPED    = 'S'
+    CANCELLED  = 'L'
 
     STATUS = (
         (NOT_CACHED, 'Not cached'),
         (PROCESSING, 'Processing'),
         (CACHED,     'Cached'),
         (FAILED,     'Failed'),
+        (SKIPPED,    'Skipped'),
+        (CANCELLED,  'Cancelled'),
     )
 
-    experiment              = models.ForeignKey(Experiment, related_name='blocks')
-    name                    = models.CharField(max_length=200)
-    status                  = models.CharField(max_length=1, choices=STATUS, default=NOT_CACHED)
-    analyzer                = models.BooleanField(default=False)
-    algorithm               = models.ForeignKey(Algorithm, related_name='blocks')
-    creation_date           = models.DateTimeField(null=True, blank=True, auto_now_add=True)
-    start_date              = models.DateTimeField(null=True, blank=True)
-    end_date                = models.DateTimeField(null=True, blank=True)
-    environment             = models.ForeignKey(Environment, related_name='blocks', null=True)
+    experiment = models.ForeignKey(Experiment, related_name='blocks',
+        on_delete=models.CASCADE)
+    name = models.CharField(max_length=200)
+    command = models.TextField(null=True, blank=True)
+    status = models.CharField(max_length=1, choices=STATUS, default=NOT_CACHED)
+    analyzer = models.BooleanField(default=False)
+    algorithm = models.ForeignKey(Algorithm, related_name='blocks',
+        on_delete=models.CASCADE)
+    creation_date = models.DateTimeField(null=True, blank=True,
+        auto_now_add=True)
+    start_date = models.DateTimeField(null=True, blank=True)
+    end_date = models.DateTimeField(null=True, blank=True)
+    environment = models.ForeignKey(Environment, related_name='blocks',
+        null=True, on_delete=models.SET_NULL)
+    queue = models.ForeignKey(Queue, related_name='blocks', null=True,
+        on_delete=models.SET_NULL)
+
+    required_slots = models.PositiveIntegerField(default=1)
+    channel = models.CharField(max_length=200, default='', blank=True,
+        help_text="Synchronization channel within the toolchain")
+
+    # relationship to blocks to which this block depends on
+    dependencies = models.ManyToManyField('self',
+                                          related_name='dependents',
+                                          blank=True,
+                                          symmetrical=False,
+                                         )
 
     objects = BlockManager()
 
+
+    class Meta:
+        unique_together = ('experiment', 'name')
+
+
     def __str__(self):
         return self.experiment.fullname() + ', ' + self.name + ' (%s)' % self.get_status_display()
 
@@ -684,13 +942,11 @@ class Block(models.Model):
 
     # Accessors for statistics
 
-    def __return_first__(self, field):
-        if not self.hashes.count(): return ''
-        return getattr(self.hashes.first(), field)
+    def __return_first__(self, field, default=None):
+        return getattr(self.outputs.first(), field, default)
 
     def first_cache(self):
-        if not self.hashes.count(): return None
-        return self.hashes.first()
+        return self.outputs.first()
 
     def error_report(self):
         return self.__return_first__('error_report')
@@ -702,40 +958,248 @@ class Block(models.Model):
         return self.__return_first__('stderr')
 
     def speed_up_real(self):
-        return self.__return_first__('speed_up_real') or 0.
+        return self.__return_first__('speed_up_real')
 
     def speed_up_maximal(self):
-        return self.__return_first__('speed_up_maximal') or 0.
+        return self.__return_first__('speed_up_maximal')
 
     def linear_execution_time(self):
-        return self.__return_first__('linear_execution_time') or 0.
+        return self.__return_first__('linear_execution_time')
 
     def queuing_time(self):
-        return self.__return_first__('queuing_time') or 0.
+        return self.__return_first__('queuing_time')
 
     def cpu_time(self):
-        return self.__return_first__('cpu_time') or 0.
+        return self.__return_first__('cpu_time')
 
     def max_memory(self):
-        return self.__return_first__('max_memory') or 0
+        return self.__return_first__('max_memory')
 
     def data_read_size(self):
-        return self.__return_first__('data_read_size') or 0
+        return self.__return_first__('data_read_size')
 
     def data_read_nb_blocks(self):
-        return self.__return_first__('data_read_nb_blocks') or 0
+        return self.__return_first__('data_read_nb_blocks')
 
     def data_read_time(self):
-        return self.__return_first__('data_read_time') or 0.
+        return self.__return_first__('data_read_time')
 
     def data_written_size(self):
-        return self.__return_first__('data_written_size') or 0
+        return self.__return_first__('data_written_size')
 
     def data_written_nb_blocks(self):
-        return self.__return_first__('data_written_nb_blocks') or 0
+        return self.__return_first__('data_written_nb_blocks')
 
     def data_written_time(self):
-        return self.__return_first__('data_written_time') or 0.
+        return self.__return_first__('data_written_time')
+
+    # Accessor for results
+    results = property(lambda self: self.__return_first__('results'))
+
+
+    def _schedule(self):
+        '''Schedules this block for execution at the backend
+
+        To "schedule" means solely creating a :py:class:`..backend.models.Job`
+        pointing to this object. This method **should only be called by the
+        owning experiment**. It is not part of the Block's public API.
+        '''
+
+        # lock self - avoids concurrent update from scheduler/worker subsystem
+        self_ = Block.objects.select_for_update().get(pk=self.pk)
+
+        # checks we have not, meanwhile, been cancelled
+        if self_.done(): return
+
+        # checks queue and environment
+        if self.queue is None:
+            raise RuntimeError("Block `%s' does not have a queue assigned " \
+                "- this normally indicates the originally selected " \
+                "queue was deleted since the experiment was first " \
+                "configured. Re-configure this experiment and select a new " \
+                "default or block-specific queue" % self.name)
+
+        if self.environment is None:
+            raise RuntimeError("Block `%s' does not have an environment " \
+                "assigned - this normally indicates the originally selected " \
+                "environment was deleted since the experiment was first " \
+                "configured. Re-configure this experiment and select a new " \
+                "default or block-specific environment" % self.name)
+
+        from ..backend.models import Job
+
+        # search for other jobs with similar outputs that have no children yet
+        # do this carefully, as other experiments may be scheduled at the same
+        # time, invalidating our "parent" choice
+        parent = Job.objects.filter(block__outputs__in=self.outputs.all(),
+            child=None).first()
+        if parent is not None: #(candidate only) try to lock it
+            while True:
+                parent = Job.objects.select_for_update().get(pk=parent.pk)
+                if parent.child_ is not None: #was taken meanwhile, retry
+                    parent = parent.child
+                    continue
+                Job(block=self, parent=parent).save()
+                break
+        else:
+            Job(block=self).save()
+
+        # checks if the job is immediately runnable - if so, tries to
+        # make it runnable (check caches and other)
+        if self.is_runnable(): self.job._make_runnable()
+
+
+    def done(self):
+        '''Says whether the block has finished or not'''
+
+        return self.status not in (Block.NOT_CACHED, Block.PROCESSING)
+
+
+    def _cancel(self):
+        '''Cancels the execution of this block on the backend.
+
+        This method should only be called from the experiment equivalent. It is
+        not part of the Block's public API.
+        '''
+
+        # lock self - avoids concurrent update from scheduler/worker subsystem
+        self_ = Block.objects.select_for_update().get(pk=self.pk)
+
+        if self_.done(): return
+
+        if hasattr(self, 'job'):
+            self.job._cancel()
+        else:
+            self.status = Block.CANCELLED
+            self.save()
+            self.experiment._update_state()
+
+
+    def is_runnable(self):
+        '''Checks if a block is runnable presently'''
+
+        return all([k.status in (Block.CACHED, Block.SKIPPED) \
+                for k in self.dependencies.all()]) and \
+                (hasattr(self, 'job') and self.job.parent is None)
+
+
+    def _cascade_updates(self):
+        '''Cascade updates to blocks once I'm done.
+        '''
+
+        for b in self.dependents.all():
+            if any([k.status in (Block.FAILED, Block.CANCELLED) \
+                    for k in b.dependencies.all()]):
+                b._cancel()
+            if b.is_runnable(): b.job._make_runnable()
+
+        # Update eventual running siblings in case of a failure
+        if self.status == Block.FAILED:
+            for b in Block.objects.filter(experiment=self.experiment,
+                status=Block.PROCESSING):
+                b._cancel()
+
+
+    def _update_state(self, timings=None):
+        '''Updates self state as a result of backend running
+
+
+        Parameters:
+
+          timings (dict, Optional): A dictionary containing key-value pairs
+            corresponding to:
+
+              * queuing time (in seconds)
+              * sequential execution time (in seconds)
+              * real speed-up obtained
+              * maximum speed-up obtainable
+
+
+        This method is supposed to be called only by the underlying job
+        instance. It is not part of the Block's public API.
+
+        '''
+
+        # lock self - avoids concurrent update from scheduler/worker subsystem
+        self_ = Block.objects.select_for_update().get(pk=self.pk)
+
+        if self_.done(): return
+
+        if self.start_date is None:
+            self.start_date = self.job.start_date
+
+        if self.job.result:
+
+            statistics = self.job.result.stats
+
+            info = dict(
+                cpu_time = statistics.cpu['user'] + statistics.cpu['system'],
+                max_memory = statistics.memory['rss'],
+                data_read_size = statistics.data['volume']['read'],
+                data_read_nb_blocks = statistics.data['blocks']['read'],
+                data_read_time = statistics.data['time']['read'],
+                data_written_size = statistics.data['volume']['write'],
+                data_written_nb_blocks = statistics.data['blocks']['write'],
+                data_written_time = statistics.data['time']['write'],
+                stdout = self.job.result.stdout,
+                stderr = self.job.result.stderr,
+                error_report = self.job.result.usrerr,
+                )
+
+            if timings:
+                info.update(dict(
+                  queuing_time = timings['queuing'],
+                  linear_execution_time = timings['linear_execution'],
+                  speed_up_real = timings['speed_up_real'],
+                  speed_up_maximal = timings['speed_up_maximal'],
+                  ))
+
+            self.outputs.update(**info)
+
+            if self.job.result.syserr: #mail admins
+                send_email_to_administrators('System error captured',
+                    self.job.result.syserr)
+
+        if self.job.status == Block.SKIPPED:
+            self.status = Block.CACHED
+        else:
+            self.status = self.job.status
+
+        if self.job.done():
+            self.end_date = self.job.end_date
+
+        # Loads Results from cache
+        if self.job.result and self.analyzer and self.status == Block.CACHED:
+            cache = self.first_cache()
+            data_source = beat.core.data.CachedDataSource()
+            data_source.setup(os.path.join(settings.CACHE_ROOT,
+                    beat.core.hash.toPath(cache.hash)), settings.PREFIX)
+            output_data = data_source.next()[0]
+            if output_data is not None:
+                algorithm = beat.core.algorithm.Algorithm(settings.PREFIX,
+                        self.algorithm.fullname())
+                for field, value in output_data.as_dict().items():
+                    res, _ = Result.objects.get_or_create(name=field,
+                        cache=cache)
+                    res.primary = algorithm.results[field]['display']
+                    res.type = algorithm.results[field]["type"]
+
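+                    # simple scalar types are stored verbatim; anything else
+                    # is serialized to JSON with the numpy-aware encoder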
+                    if res.type in ['int32', 'float32', 'bool', 'string']:
+                        res.data_value = str(value)
+                    else:
+                        res.data_value = simplejson.dumps(value, indent=4,
+                            cls=NumpyJSONEncoder)
+
+                    res.save()
+
+            data_source.close()
+
+        # dispose of the job (and its result) only once it has been fully
+        # consumed above
+        if self.job.done():
+            r = self.job.result
+            self.job.delete()
+            if r: r.delete()
+
+        self.save()
+        self._cascade_updates()
+        self.experiment._update_state()
 
 
 #----------------------------------------------------------
@@ -749,89 +1213,176 @@ class CachedFileManager(models.Manager):
 
 class CachedFile(models.Model):
 
-    blocks = models.ManyToManyField(Block, related_name='hashes', blank=True)
+    blocks = models.ManyToManyField(Block, related_name='outputs', blank=True)
     hash  = models.CharField(max_length=64, unique=True)
 
     # the total amount of time this block took to run considering the
     # wall-clock time.
-    linear_execution_time   = models.FloatField(default=0.)
+    linear_execution_time = models.FloatField(default=0.)
 
     # the real speed-up obtained by running this block using X slots
-    speed_up_real           = models.FloatField(default=0.)
+    speed_up_real = models.FloatField(default=0.)
 
     # the maximum obtainable speed-up that could be achieved if all slots
     # were running in parallel. Essentially linear_execution_time /
     # maximum_slot_time
-    speed_up_maximal        = models.FloatField(default=0.)
+    speed_up_maximal = models.FloatField(default=0.)
 
     # the time this block waited to be executed
-    queuing_time            = models.FloatField(default=0.)
+    queuing_time = models.FloatField(default=0.)
 
-    stdout                  = models.TextField(null=True, blank=True)
-    stderr                  = models.TextField(null=True, blank=True)
-    error_report            = models.TextField(null=True, blank=True)
+    stdout = models.TextField(null=True, blank=True)
+    stderr = models.TextField(null=True, blank=True)
+    error_report = models.TextField(null=True, blank=True)
 
     # other statistics of interest
-    cpu_time                = models.FloatField(default=0.)
-    max_memory              = models.BigIntegerField(default=0)
-    data_read_size          = models.BigIntegerField(default=0)
-    data_read_nb_blocks     = models.IntegerField(default=0)
-    data_read_time          = models.FloatField(default=0.)
-    data_written_size       = models.BigIntegerField(default=0)
-    data_written_nb_blocks  = models.IntegerField(default=0)
-    data_written_time       = models.FloatField(default=0.)
+    cpu_time = models.FloatField(default=0.)
+    max_memory = models.BigIntegerField(default=0)
+    data_read_size = models.BigIntegerField(default=0)
+    data_read_nb_blocks = models.IntegerField(default=0)
+    data_read_time = models.FloatField(default=0.)
+    data_written_size = models.BigIntegerField(default=0)
+    data_written_nb_blocks = models.IntegerField(default=0)
+    data_written_time = models.FloatField(default=0.)
+
+    objects = CachedFileManager()
+
 
     def __str__(self):
-        return self.hash
+        return 'CachedFile(%s, %d blocks)' % (self.hash, self.blocks.count())
+
 
     def natural_key(self):
         return self.hash
 
 
+    def path(self):
+        '''Returns the path prefix of the cached file, relative to the cache root'''
+
+        return beat.core.hash.toPath(self.hash, suffix='')
+
+
+    def absolute_path(self, cache=settings.CACHE_ROOT):
+        '''Returns the absolute path prefix to the cached file on disk'''
+
+        return os.path.join(cache, self.path())
+
+
+    def files(self, cache=settings.CACHE_ROOT):
+        '''Returns the list of files on disk that belong to this cache'''
+
+        return glob.glob(self.absolute_path(cache) + '*')
+
+
+    def exists(self, cache=settings.CACHE_ROOT):
+        '''Checks if any file belonging to this cache exists on disk'''
+
+        return bool(self.files(cache))
+
+
+    def index_checksums(self, cache=settings.CACHE_ROOT):
+        '''Checks that the index files of this cache match their recorded checksums'''
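+        # illustrative use (`some_hash' is a placeholder):
+        #
+        #   f = CachedFile.objects.get(hash=some_hash)
+        #   ok = f.exists() and f.index_checksums()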
+
+        abs_path = self.absolute_path(cache)
+        index = sorted(glob.glob(abs_path + '*.index'))
+        chksum = sorted(glob.glob(abs_path + '*.index.checksum'))
+
+        if len(index) != len(chksum):
+            logger.warn("Number of index files (%d) is different from " \
+                "checksums (%d) for cache `%s'", len(index), len(chksum),
+                abs_path)
+            return False
+
+        for i, c in zip(index, chksum):
+            with open(c, 'rt') as f: recorded = f.read().strip()
+            actual = beat.core.hash.hashFileContents(i)
+            if actual != recorded:
+                logger.warn("Checksum for index of cache `%s' does not " \
+                    "match for file `%s' (%s != %s)", abs_path, i,
+                    actual, recorded)
+                return False
+
+        return True
+
+
+#----------------------------------------------------------
+
+
+class BlockInputManager(models.Manager):
+
+    def get_by_natural_key(self, hash):
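+        # a BlockInput is addressed by the hash of its source: try block
+        # outputs (cache) first, then fall back to database set outputs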
+        candidate = self.filter(cache__hash=hash)
+        if candidate:
+            return candidate[0]
+        else:
+            return self.get(database__hash=hash)
+
+
+class BlockInput(models.Model):
+
+    block = models.ForeignKey(Block, related_name='inputs', null=True,
+        on_delete=models.CASCADE)
+
+    # if the input comes from another block, then this one is set
+    cache = models.ForeignKey(CachedFile, related_name='inputs', null=True,
+        on_delete=models.CASCADE)
+
+    # if the input comes from a dataset, then this one is set
+    database = models.ForeignKey(DatabaseSetOutput, related_name='blocks',
+        null=True, on_delete=models.CASCADE)
+
+    channel = models.CharField(max_length=200, default='', blank=True,
+        help_text="Synchronization channel within the toolchain")
+
+    objects = BlockInputManager()
+
+    def natural_key(self):
+        # the key is the hash of whichever source is set (cache or database)
+        if self.cache is not None:
+            return self.cache.hash,
+        return self.database.hash,
+
+
 #----------------------------------------------------------
 
 
 class ResultManager(models.Manager):
 
-    def get_by_natural_key(self, name, block_name, experiment_author,
-        toolchain_author, toolchain_name,
-        toolchain_version, experiment_name):
+    def get_by_natural_key(self, name, hash):
         return self.get(
             name=name,
-            block__name=block_name,
-            block__experiment__author__username=experiment_author,
-            block__experiment__toolchain__author__username=toolchain_author,
-            block__experiment__toolchain__name=toolchain_name,
-            block__experiment__toolchain__version=toolchain_version,
-            block__experiment__name=experiment_name,
+            cache__hash=hash,
             )
 
+
 class Result(models.Model):
 
     SIMPLE_TYPE_NAMES  = ('int32', 'float32', 'bool', 'string')
 
-    block        = models.ForeignKey(Block, related_name='results')
-    name         = models.CharField(max_length=200)
-    type         = models.CharField(max_length=200)
-    primary      = models.BooleanField(default=False)
-    data_value   = models.TextField(null=True, blank=True)
+    cache = models.ForeignKey(CachedFile, related_name='results', null=True,
+        on_delete=models.CASCADE)
+    name = models.CharField(max_length=200)
+    type = models.CharField(max_length=200)
+    primary = models.BooleanField(default=False)
+    data_value = models.TextField(null=True, blank=True)
 
     objects = ResultManager()
 
+
+    #_____ Meta parameters __________
+
+    class Meta:
+        unique_together = ('cache', 'name')
+
+
     def __str__(self):
-        return '%s - %s' % (self.block, self.name)
+        return '%s - %s' % (self.cache, self.name)
+
 
     def natural_key(self):
         return (
             self.name,
-            self.block.name,
-            self.block.experiment.author.username,
-            self.block.experiment.toolchain.author.username,
-            self.block.experiment.toolchain.name,
-            self.block.experiment.toolchain.version,
-            self.block.experiment.name,
+            self.cache.hash,
             )
 
+
     def value(self):
         if self.data_value in ['+inf', '-inf', 'NaN']:
             return self.data_value
diff --git a/beat/web/experiments/permissions.py b/beat/web/experiments/permissions.py
index 40133b879cca88f6fdc67ef17db5802dad07917d..bbc04aa6549ac0f773b411991108512f115b89af 100644
--- a/beat/web/experiments/permissions.py
+++ b/beat/web/experiments/permissions.py
@@ -37,6 +37,6 @@ class IsDatabaseAccessible(permissions.BasePermission):
 
     def has_object_permission(self, request, view, obj):
         accessible_databases = Database.objects.for_user(request.user, True)
-        experiment_databases = Database.objects.filter(protocols__sets=obj.referenced_datasets.all()).distinct()
+        experiment_databases = Database.objects.filter(protocols__sets__in=obj.referenced_datasets.all()).distinct()
 
         return all(experiment_db in accessible_databases for experiment_db in experiment_databases)
diff --git a/beat/web/experiments/serializers.py b/beat/web/experiments/serializers.py
index cd0bbaf3f40d92b7e28e380acf0c565dbaf67cf2..bab80c88bb4e8650d70ef0d01fee8e93d6fd8c07 100644
--- a/beat/web/experiments/serializers.py
+++ b/beat/web/experiments/serializers.py
@@ -31,7 +31,7 @@ from ..common.serializers import ShareableSerializer
 from ..common.fields import JSONSerializerField
 from ..ui.templatetags.markup import restructuredtext
 
-from .models import Experiment, Block, CachedFile
+from .models import Experiment, Block
 
 from datetime import datetime
 
@@ -252,7 +252,7 @@ class ExperimentResultsSerializer(ShareableSerializer):
         return results
 
     def get_errors(self, obj):
-        serializer = BlockErrorSerializer(obj.blocks.filter(hashes__error_report__isnull=False), many=True)
+        serializer = BlockErrorSerializer(obj.blocks.filter(outputs__error_report__isnull=False), many=True)
         return serializer.data
 
     def get_html_description(self, obj):
diff --git a/beat/web/experiments/templates/experiments/setup.html b/beat/web/experiments/templates/experiments/setup.html
index abf8d10575064039d5606fa4db85b52cdbee3d25..61083eb1002a1d330938f9e7fb6b4ef3d9e27c9c 100644
--- a/beat/web/experiments/templates/experiments/setup.html
+++ b/beat/web/experiments/templates/experiments/setup.html
@@ -2,21 +2,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -64,7 +64,7 @@
 {% block content %}
 
 <div class="row">
-  <div class="col-sm-9 vertical-center">
+  <div class="col-sm-9 vertical-center" onmouseover="expand_breadcrumb(this, 9, 3);" onmouseout="reset_breadcrumb(this, 9, 3);">
     {% ifequal action 'pending' %}
     {% experiment_breadcrumb experiment %}
     {% else %}{# new or forked from #}
diff --git a/beat/web/experiments/templates/experiments/view.html b/beat/web/experiments/templates/experiments/view.html
index 6f5903938e6711843633dcbbabe982ffc00454eb..3b2f8b9e976eec740fd8894c1c9f8771151a7902 100644
--- a/beat/web/experiments/templates/experiments/view.html
+++ b/beat/web/experiments/templates/experiments/view.html
@@ -2,21 +2,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -193,11 +193,12 @@
             {% for block in blocks %}
             {% with xpcore.blocks|getkey:block.name as core_block %}
             {% with block.get_status_display as block_status %}
+            {% with block.error_report as error_report %}
             <div id="{{ block.name }}" data-beat-block-name="{{ block.name }}" data-beat-status="{{ block.get_status_display|lower }}" class="panel panel-default may-update{% if block.analyzer %} analyzer{% endif %}">
 
               <div class="panel-heading" role="tab" id="heading-{{ block.name }}">
                 <h4 class="panel-title">
-                  <a{% if block_status != 'Failed' %} class="collapsed"{% endif %} role="button" data-toggle="collapse" data-parent="#{{ block.name }}" href="#collapse-{{ block.name }}" aria-expanded="{% if block_status == 'Failed' %}true{% else %}false{% endif %}" aria-controls="collapse-{{ block.name }}">
+                  <a{% if not error_report and block.done and block_status != 'Cancelled' %} class="collapsed"{% endif %} role="button" data-toggle="collapse" data-parent="#{{ block.name }}" href="#collapse-{{ block.name }}" aria-expanded="{% if error_report and block.done and block_status != 'Cancelled' %}true{% else %}false{% endif %}" aria-controls="collapse-{{ block.name }}">
                     <i data-toggle="tooltip" data-placement="bottom" title="{{ block_status }}"
                       {% if block_status == 'Not cached' %}
                       class="icon-scheduled fa fa-play"
@@ -205,17 +206,19 @@
                       class="icon-done fa fa-check"
                       {% elif block_status == 'Failed' %}
                       class="icon-failed fa fa-bug"
-                      {% elif status == 'Running' %}
+                      {% elif block_status == 'Processing' %}
                       class="icon-running fa fa-spin fa-refresh"
+                      {% elif block_status == 'Cancelled' %}
+                      class="icon-failed fa fa-power-off"
                       {% else %}
-                      class="icon-pending fa fa-question"
+                      class="icon-pending fa fa-asterisk"
                       {% endif %}
                       ></i>
                     {{ block.name }} ({{ block.algorithm.fullname }}){% if block_status in 'Cached Failed' %} @ {{ block.linear_execution_time|floatformat:-2 }}s{% endif %}
                     </a>
                 </h4>
               </div>{# panel heading #}
-              <div id="collapse-{{ block.name }}" class="panel-collapse collapse{% if block_status == 'Failed' %} in{% endif %}" role="tabpanel" aria-labelledby="heading-{{ block.name }}">
+              <div id="collapse-{{ block.name }}" class="panel-collapse collapse{% if error_report and block.done and block_status != 'Cancelled' %} in{% endif %}" role="tabpanel" aria-labelledby="heading-{{ block.name }}">
                 <div class="panel-body">
                   <ul>
                     <li>Algorithm: <a href="{{ block.algorithm.get_absolute_url }}">{{ block.algorithm.fullname }}</a></li>
@@ -233,7 +236,7 @@
                     </ul>
                     -->
                     {% endcomment %}
-                    {% if core_block.nb_slots  and core_block.nb_slots > 1 %}
+                    {% if core_block.nb_slots and core_block.nb_slots > 1 %}
                     <li>Slots: {{ core_block.nb_slots }} (speed-up achieved: {{ block.speed_up_real|floatformat:-1 }}x)</li>
                     <li>Maximum speed-up achievable: {{ block.speed_up_maximal|floatformat:-1 }}x</li>
                     {% else %}
@@ -252,9 +255,10 @@
                       </ul>
                     </li>
                     {% endif %}
-                    {% if block.error_report %}
+                    {% if block.done and block_status != 'Cancelled' %}
+                    {% if error_report %}
                     <li>Captured Errors (on user code):
-                      <pre class="console-output">{{ block.error_report }}</pre>
+                      <pre class="console-output">{{ error_report }}</pre>
                     </li>
                     {% endif %}
                     {% if block.stdout %}
@@ -267,12 +271,14 @@
                       <pre class="console-output">{{ block.stderr }}</pre>
                     </li>
                     {% endif %}
+                    {% endif %}{# block.done #}
                   </ul>
                 </div>{# panel body #}
               </div>{# collapse #}
             </div>{# panel #}
             {% endwith %}
             {% endwith %}
+            {% endwith %}
             {% endfor %}
 
           </div>{# panel group #}
diff --git a/beat/web/experiments/tests.py b/beat/web/experiments/tests.py
index de67b02555b79d4d1083787b683affcb6202c50f..23107adf2e7a445df9bdc3b162445cf90324822a 100644
--- a/beat/web/experiments/tests.py
+++ b/beat/web/experiments/tests.py
@@ -34,19 +34,6 @@ from django.conf import settings
 from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
 
-# Override the Scheduler API
-from ..utils import scheduler
-
-def mockPutMessage(url, params=None, data=None):
-    return (200, None)
-
-def mockPostMessage(url, params=None, data=None):
-    return (200, None)
-
-scheduler.putMessage  = mockPutMessage
-scheduler.postMessage = mockPostMessage
-
-
 from .models import Experiment
 from .models import CachedFile
 from .models import Block
@@ -63,7 +50,7 @@ from ..backend.models import Queue
 from ..attestations.models import Attestation
 from ..databases.models import Database
 
-from ..common.testutils import BaseTestCase
+from ..common.testutils import BaseTestCase, tearDownModule
 
 HASHES = {
         'addition1': 'ff59a471cec5c17b45d1dfa5aff3ed897ee2d7ed87de205365b372be1c726c87',
@@ -207,7 +194,7 @@ class ExperimentTestBase(BaseTestCase):
         environment = Environment(name='env1', version='1.0')
         environment.save()
 
-        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, nb_cores_per_slot=1, max_slots_per_user=10)
+        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
 
         queue.environments.add(environment)
@@ -548,7 +535,7 @@ class ExperimentCreationAPI(ExperimentTestBase):
         self.assertTrue(experiment.start_date is None)
         self.assertTrue(experiment.end_date is None)
         self.assertEqual(experiment.status, Experiment.PENDING)
-        self.assertEqual(experiment.blocks.count(), 0)
+        self.assertEqual(experiment.blocks.count(), 3)
 
 
 #----------------------------------------------------------
@@ -816,24 +803,24 @@ class ExperimentStartingAPI(ExperimentTestBase):
         self.assertFalse(block.analyzer)
         self.assertEqual(0, block.results.count())
 
-        hashes = block.hashes.all()
-        self.assertEqual(0, hashes.count())
+        hashes = block.outputs.all()
+        self.assertEqual(1, hashes.count())
 
         block = experiment.blocks.get(name='addition2')
         self.assertEqual(Block.NOT_CACHED, block.status)
         self.assertFalse(block.analyzer)
         self.assertEqual(0, block.results.count())
 
-        hashes = block.hashes.all()
-        self.assertEqual(0, hashes.count())
+        hashes = block.outputs.all()
+        self.assertEqual(1, hashes.count())
 
         block = experiment.blocks.get(name='analysis')
         self.assertEqual(Block.NOT_CACHED, block.status)
         self.assertTrue(block.analyzer)
         self.assertEqual(0, block.results.count())
 
-        hashes = block.hashes.all()
-        self.assertEqual(0, hashes.count())
+        hashes = block.outputs.all()
+        self.assertEqual(1, hashes.count())
 
 
     def test_start_team_shared_experiment(self):
@@ -1102,11 +1089,9 @@ class ResultsAPI(ExperimentTestBase):
         self.experiment.status = Experiment.FAILED
         self.experiment.save()
 
-        cached_file = CachedFile()
-        cached_file.hash = 'deadbeef123456'
+        cached_file = block.first_cache()
         cached_file.error_report = 'ERROR REPORT'
         cached_file.save()
-        cached_file.blocks.add(block)
 
         self.client.login(username='johndoe', password='1234')
 
@@ -1190,7 +1175,7 @@ class ResultsAPI(ExperimentTestBase):
         analysis_block.save()
 
         db_result             = Result()
-        db_result.block       = analysis_block
+        db_result.cache       = analysis_block.first_cache()
         db_result.name        = 'out_float'
         db_result.type        = 'float32'
         db_result.primary     = True
@@ -1198,7 +1183,7 @@ class ResultsAPI(ExperimentTestBase):
         db_result.save()
 
         db_result             = Result()
-        db_result.block       = analysis_block
+        db_result.cache       = analysis_block.first_cache()
         db_result.name        = 'out_text'
         db_result.type        = 'string'
         db_result.primary     = True
@@ -1235,7 +1220,7 @@ class ResultsAPI(ExperimentTestBase):
         analysis_block.save()
 
         db_result             = Result()
-        db_result.block       = analysis_block
+        db_result.cache       = analysis_block.first_cache()
         db_result.name        = 'out_float'
         db_result.type        = 'float32'
         db_result.primary     = True
@@ -1243,7 +1228,7 @@ class ResultsAPI(ExperimentTestBase):
         db_result.save()
 
         db_result             = Result()
-        db_result.block       = analysis_block
+        db_result.cache       = analysis_block.first_cache()
         db_result.name        = 'out_text'
         db_result.type        = 'string'
         db_result.primary     = True
diff --git a/beat/web/experiments/utils.py b/beat/web/experiments/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b25bd01fa924bdc285a0d969a5d6e2779d8aed3
--- /dev/null
+++ b/beat/web/experiments/utils.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+'''Utilities for experiment management'''
+
+
+from django.db.models import Count
+
+from .models import CachedFile
+
+import logging
+logger = logging.getLogger(__name__)
+
+
+def list_orphaned_cachedfiles():
+    '''Lists orphaned cache objects whose files are also absent from disk'''
+
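+    # annotate every CachedFile with its block count, keep the unreferenced
+    # ones, then retain only entries whose files are already gone from disk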
+    q = CachedFile.objects.annotate(Count('blocks')).filter(blocks__count__lt=1)
+    return [c for c in q if not c.exists()]
+
+
+def cleanup_orphaned_cachedfiles():
+    '''Removes orphaned cache objects whose files are also absent from disk'''
+
+    for c in list_orphaned_cachedfiles():
+        logger.info("Removing orphaned CachedFile object `%s'..." % c.hash)
+        c.delete()
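+
+
+# Sketch of a maintenance pass (assumes a configured Django environment):
+#
+#   from beat.web.experiments.utils import cleanup_orphaned_cachedfiles
+#   cleanup_orphaned_cachedfiles()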
diff --git a/beat/web/libraries/apps.py b/beat/web/libraries/apps.py
index 51e0c735b7a926fbd76064842c1f79526934d426..790fe2b73ccede3198edd0f7b3e8c623065a05b7 100644
--- a/beat/web/libraries/apps.py
+++ b/beat/web/libraries/apps.py
@@ -27,15 +27,13 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class LibrariesConfig(CommonAppConfig):
     name = 'beat.web.libraries'
     verbose_name = _('Libraries')
 
     def ready(self):
-    	super(LibrariesConfig, self).ready()
-
+        super(LibrariesConfig, self).ready()
         from .signals import auto_delete_file_on_delete, auto_delete_file_on_change
-
+        from actstream import registry
         registry.register(self.get_model('Library'))
diff --git a/beat/web/libraries/tests/core.py b/beat/web/libraries/tests/core.py
index df17669db317eb88e6cd3a20720ebb7d4d5d8d13..31567388cf2706b7c6b6d628e8d57788274d897c 100644
--- a/beat/web/libraries/tests/core.py
+++ b/beat/web/libraries/tests/core.py
@@ -34,10 +34,10 @@ import simplejson as json
 from django.contrib.auth.models import User
 from django.conf import settings
 
-from beat.web.dataformats.models import DataFormat
+from ...dataformats.models import DataFormat
 
-from beat.web.common.testutils import BaseTestCase
-from beat.web.team.models import Team
+from ...common.testutils import BaseTestCase, tearDownModule
+from ...team.models import Team
 
 from ..models import Library
 
diff --git a/beat/web/libraries/tests/tests_api.py b/beat/web/libraries/tests/tests_api.py
index 7ce42f90bc6a17cca80e415bc0146e4d1aa2c068..f673ddb0e3aec5a5f30afd0db5239ce2067a5409 100644
--- a/beat/web/libraries/tests/tests_api.py
+++ b/beat/web/libraries/tests/tests_api.py
@@ -34,6 +34,8 @@ from django.core.urlresolvers import reverse
 
 import beat.core.library
 
+from ...common.testutils import tearDownModule
+
 from ..models import Library
 
 from .core import LibrariesAPIBase
diff --git a/beat/web/libraries/tests/tests_team.py b/beat/web/libraries/tests/tests_team.py
index 8e4f25e7b5f5178c741d3f75ebba08f42d6f40f2..ac3d89a0bdd8d0f13de1f5b13b58478fccfd9d46 100644
--- a/beat/web/libraries/tests/tests_team.py
+++ b/beat/web/libraries/tests/tests_team.py
@@ -27,7 +27,8 @@
 
 from django.contrib.auth.models import User
 
-from beat.web.team.models import Team
+from ...common.testutils import tearDownModule
+from ...team.models import Team
 
 from ..models import Library
 
diff --git a/beat/web/libraries/tests/tests_user.py b/beat/web/libraries/tests/tests_user.py
index ccd48e3600d798153cadd1006a413d6ac31b9ec7..22fdc09e158884ce442bc69537ebf3704dbb0eeb 100644
--- a/beat/web/libraries/tests/tests_user.py
+++ b/beat/web/libraries/tests/tests_user.py
@@ -26,6 +26,8 @@
 ###############################################################################
 
 
+from ...common.testutils import tearDownModule
+
 from ..models import Library
 
 from .core import LibrariesAccessibilityFunctionsBase
diff --git a/beat/web/navigation/templates/navigation/contact.html b/beat/web/navigation/templates/navigation/contact.html
index 2bc727d1bf03190c5152a244386a26508d11cd1e..8d0c4523150ffd86735abcba39533b806c646186 100644
--- a/beat/web/navigation/templates/navigation/contact.html
+++ b/beat/web/navigation/templates/navigation/contact.html
@@ -2,21 +2,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -31,7 +31,7 @@
 
     <ul class="list-group">
 
-      <li class="list-group-item"><i class="fa fa-envelope"></i> <a href="mailto:support@beat-eu.org">support@beat-eu.org</a></li>
+      <li class="list-group-item"><i class="fa fa-envelope"></i> <a href="mailto:beat.support@idiap.ch">beat.support@idiap.ch</a></li>
 
       <li class="list-group-item"><i class="fa fa-globe"></i> <a href="https://www.beat-eu.org">https://www.beat-eu.org</a></li>
 
diff --git a/beat/web/plotters/apps.py b/beat/web/plotters/apps.py
index ae9fb5853b15780242dc294fa314cc32c1dd308d..a4f7a101dfcdd331438dbec9ddb559172c3f4e22 100644
--- a/beat/web/plotters/apps.py
+++ b/beat/web/plotters/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class PlottersConfig(CommonAppConfig):
     name = 'beat.web.plotters'
@@ -35,4 +34,5 @@ class PlottersConfig(CommonAppConfig):
 
     def ready(self):
         super(PlottersConfig, self).ready()
+        from actstream import registry
         registry.register(self.get_model('Plotter'))
diff --git a/beat/web/plotters/views.py b/beat/web/plotters/views.py
index b41c4aafcc61e8b3b5f66f425914e1d1a59bdd21..eab4599a71d4d4db259b5b04ba95b48a5a41d748 100644
--- a/beat/web/plotters/views.py
+++ b/beat/web/plotters/views.py
@@ -42,7 +42,7 @@ from django.shortcuts import get_object_or_404
 from django.template import RequestContext, Context
 from django.contrib.auth.models import User
 
-from ..experiments.models import Experiment, Result
+from ..experiments.models import Experiment, Block, Result
 from ..dataformats.models import DataFormat
 from ..reports.models import Report
 
@@ -208,23 +208,22 @@ def plot(request):
     for k, v in experiments.items():
         if v['analyzer'].find('/') >= 0:
             (analyzer_author, analyzer_name, analyzer_version) = v['analyzer'].split('/')
-
-            result = get_object_or_404(
-                    Result,
-                    name=v['output'],
-                    block__algorithm__author__username=analyzer_author,
-                    block__algorithm__name=analyzer_name,
-                    block__algorithm__version=int(analyzer_version),
-                    block__analyzer=True,
-                    block__experiment=v['obj'],
-            )
+            block = get_object_or_404(Block,
+                experiment=v['obj'],
+                algorithm__author__username=analyzer_author,
+                algorithm__name=analyzer_name,
+                algorithm__version=analyzer_version,
+                )
         else:
-            result = get_object_or_404(
-                    Result,
-                    name=v['output'],
-                    block__name=v['analyzer'],
-                    block__analyzer=True,
-                    block__experiment=v['obj'],
+            block = get_object_or_404(Block,
+                experiment=v['obj'],
+                name=v['analyzer'],
+                )
+
+        result = get_object_or_404(Result,
+            cache=block.outputs.get(),
+            name=v['output'],
             )
 
         # now decide if the chart is compatible
diff --git a/beat/web/reports/templates/reports/partials/reportSingleTable.html b/beat/web/reports/templates/reports/partials/reportSingleTable.html
index 74114570ad0645605c4692c96f5c0e4d06ba1986..5d2e92a631a9812484a9ad50f040a1856ba27694 100644
--- a/beat/web/reports/templates/reports/partials/reportSingleTable.html
+++ b/beat/web/reports/templates/reports/partials/reportSingleTable.html
@@ -1,21 +1,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
diff --git a/beat/web/reports/tests.py b/beat/web/reports/tests.py
index d9cbe6e6af909a4e3808811b03d529a551510df3..e9edac72e6db5d5b86d213886616f537be9769c2 100644
--- a/beat/web/reports/tests.py
+++ b/beat/web/reports/tests.py
@@ -47,6 +47,7 @@ from ..plotters.models import Plotter
 from ..plotters.models import PlotterParameter
 
 from ..common.models import Shareable
+from ..common.testutils import tearDownModule
 from ..backend.models import Environment
 from ..backend.models import Queue
 
@@ -57,19 +58,6 @@ import os
 import shutil
 
 
-# Override the Scheduler API
-from ..utils import scheduler
-
-def mockPutMessage(url, params=None, data=None):
-    return (200, None)
-
-def mockPostMessage(url, params=None, data=None):
-    return (200, None)
-
-scheduler.putMessage  = mockPutMessage
-scheduler.postMessage = mockPostMessage
-
-
 #----------------------------------------------------------
 
 
@@ -511,7 +499,7 @@ class ReportTestCase(APITestCase):
         environment = Environment(name='env1', version='1.0')
         environment.save()
 
-        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, nb_cores_per_slot=1, max_slots_per_user=10)
+        queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
 
         queue.environments.add(environment)
diff --git a/beat/web/scripts/localhost.py b/beat/web/scripts/localhost.py
index 0bc72549fcf8cc86bc3b3ed3caca4e4451313ff9..09623703162f42b77a7944c498ed1fbc7e11bf71 100644
--- a/beat/web/scripts/localhost.py
+++ b/beat/web/scripts/localhost.py
@@ -65,7 +65,7 @@ Options:
                                      [default: %(nb_cores)s]
   -q <path>, --prefix=<path>         The prefix path of the directory
                                      containing the toolchains, algorithms,
-                                     etc. [default: web_dynamic_data]
+                                     etc. [default: prefix]
   -p <seconds>, --period=<seconds>   The number of seconds between successive
                                      attempts to schedule or to process worker
                                      duties. [default: 5]
@@ -196,17 +196,11 @@ SCHEDULER_BIN = os.path.join(bindir, 'scheduler.py')
 WORKER_BIN = os.path.join(bindir, 'worker.py')
 
 
-def get_scheduler_token(django_settings):
+def get_scheduler_token(account):
     """Retrieves the scheduler token from the Django database"""
 
-    os.environ.setdefault('DJANGO_SETTINGS_MODULE', django_settings)
-    from django.conf import settings
-
-    from django import setup
-    setup()
-
     from rest_framework.authtoken.models import Token
-    return Token.objects.get(user__username=settings.SCHEDULER_ACCOUNT).key
+    return Token.objects.get(user__username=account).key
 
 
 def main(user_input=None):
@@ -227,6 +221,12 @@ def main(user_input=None):
 
     try:
 
+        os.environ.setdefault('DJANGO_SETTINGS_MODULE', arguments['--settings'])
+        from django.conf import settings
+
+        from django import setup
+        setup()
+
         # Web server, via django
         web_arguments = [
           DJANGO_BIN,
@@ -247,7 +247,7 @@ def main(user_input=None):
           '--port=10001', #worker -> scheduler port
           '--sport=10000', #web -> scheduler port
           '--web-api=http://127.0.0.1:8000/api/v1', #web API URL
-          '--secret-key=%s' % get_scheduler_token(arguments['--settings']),
+          '--secret-key=%s' % get_scheduler_token(settings.SCHEDULER_ACCOUNT),
           ]
         if arguments['--verbose']:
             scheduler_arguments.append('-' + arguments['--verbose']*'v')
@@ -268,11 +268,12 @@ def main(user_input=None):
                 arguments['--scheduler-log'], qconf_exclude, delay=5)
 
         # Worker, also based on Twisted
+        from ..backend.models import Worker
         worker_arguments = [
           WORKER_BIN,
           '--prefix=%s' % arguments['--prefix'],
           '--port=10001', #scheduler port
-          '--name=node1', #node name on the scheduler (look at queue config)
+          '--name=%s' % Worker.objects.order_by('id').first().name,
           '--period=%s' % arguments['--period'], #seconds
           '--cores=%s' % arguments['--cores'],
           '--keep-erred-configuration',
diff --git a/beat/web/scripts/process.py b/beat/web/scripts/process.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c009f9522716502bdf57e36209816a745547eec
--- /dev/null
+++ b/beat/web/scripts/process.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+"""\
+Processes one split.
+
+Usage:
+  %(prog)s [--settings=<file>] [--cpulimit=<file>] [-v ...] <execute> <split>
+  %(prog)s (-h | --help)
+  %(prog)s (-V | --version)
+
+Arguments:
+
+
+  <execute>  The path to the base execution program for running the user code
+  <split>    The primary key of the split to be processed by this subprocess
+
+
+Options:
+  -h, --help                    Show this help message
+  -V, --version                 Show program's version number
+  -v, --verbose                 Increases the output verbosity level
+  -S <file>, --settings=<file>  The module name of the Django settings file
+                                [default: beat.web.settings.settings]
+  -C <file>, --cpulimit=<file>  The path to the cpulimit program to use. If
+                                not set, CPU limiting is not enforced.
+
+
+Examples:
+
+  To start the job split processing do the following:
+
+    $ %(prog)s <path-to-execute> <split-id>
+
+  You can optionally pass the ``-v`` flag to run this program with the logging
+  level set to ``INFO`` or ``-vv`` to set it to ``DEBUG``. By default, the
+  logging level is set to ``WARNING`` if no ``-v`` flag is passed.
+
+  You can optionally also set the path to the ``cpulimit`` program to use. If
+  it is not set, then CPU limiting will not be enforced.
+
+"""
+
+
+import os
+import sys
+import signal
+import docopt
+import logging
+
+
+def main(user_input=None):
+
+    arguments = docopt.docopt(
+      __doc__ % dict(
+        prog=os.path.basename(sys.argv[0]),
+        ),
+      )
+
+    # Initializes the Django framework
+    os.environ.setdefault('DJANGO_SETTINGS_MODULE', arguments['--settings'])
+    from django.conf import settings
+    from django import setup
+    setup()
+
+    logger = logging.getLogger('beat.web')
+    if arguments['--verbose'] == 1: logger.setLevel(logging.INFO)
+    elif arguments['--verbose'] >= 2: logger.setLevel(logging.DEBUG)
+
+    from ..backend.models import JobSplit, Result
+
+    try:
+        split = JobSplit.objects.get(pk=int(arguments['<split>']))
+    except JobSplit.DoesNotExist:
+        logger.info("Job split `%s' does not exist. Likely cancelled, " \
+            "so, ignoring.", arguments['<split>'])
+        sys.exit(0)
+
+    def stop():
+        import psutil
+        for child in psutil.Process().children(recursive=True):
+            if 'cpulimit' in child.name(): continue #only user processes
+            logger.info("Killing user process %d...", child.pid)
+            child.kill()
+
+        message = "Force-stopped user processes for split `%s' for block " \
+                "`%s' of experiment `%s'" % \
+                (split, split.job.block.name,
+                    split.job.block.experiment.fullname())
+        logger.info(message)
+
+    # installs SIGTERM handler
+    def handler(signum, frame):
+        #ignore further signals
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+        logger.info("Signal %d caught, terminating...", signum)
+        stop()
+
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+
+    split.process(
+        execute=arguments['<execute>'],
+        cpulimit=arguments['--cpulimit'],
+        )
diff --git a/beat/web/scripts/scheduler.py b/beat/web/scripts/scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8a346f6b80e6f1aee65f4d08a725e1b7fe6bcf5
--- /dev/null
+++ b/beat/web/scripts/scheduler.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+"""\
+Starts the scheduling process.
+
+Usage:
+  %(prog)s [-v ... | --verbose ...] [--settings=<file>] [--period=<seconds>]
+  %(prog)s (-h | --help)
+  %(prog)s (-V | --version)
+
+
+Options:
+  -h, --help                        Show this help message
+  -V, --version                     Show program's version number
+  -v, --verbose                     Increases the output verbosity level
+  -S <file>, --settings=<file>      The module name of the Django settings
+                                    file [default: beat.web.settings.settings]
+  -p <seconds>, --period=<seconds>  The time, in seconds, in which this
+                                    scheduler will try to allocate job splits
+                                    to existing workers. If not set, uses the
+                                    value of the `SCHEDULING_INTERVAL`
+                                    variable in the Django settings file.
+
+
+Examples:
+
+  To start the scheduling process do the following:
+
+    $ %(prog)s
+
+  You can pass the ``-v`` flag to start the scheduler with the logging level
+  set to ``INFO`` or ``-vv`` to set it to ``DEBUG``. By default, the logging
+  level is set to ``WARNING`` if no ``-v`` flag is passed.
+
+"""
+
+import os
+import sys
+import time
+import signal
+import docopt
+import logging
+
+stop = False
+
+def main(user_input=None):
+
+    arguments = docopt.docopt(
+      __doc__ % dict(
+        prog=os.path.basename(sys.argv[0]),
+        ),
+      )
+
+    os.environ.setdefault('DJANGO_SETTINGS_MODULE', arguments['--settings'])
+    from django.conf import settings
+    from django import setup
+    setup()
+
+    logger = logging.getLogger('beat.web')
+    if arguments['--verbose'] == 1: logger.setLevel(logging.INFO)
+    elif arguments['--verbose'] >= 2: logger.setLevel(logging.DEBUG)
+
+    # installs SIGTERM handler
+    def handler(signum, frame):
+        #ignore further signals
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+        logger.info("Signal %d caught, terminating...", signum)
+        global stop
+        stop = True
+
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+
+    from ..backend import schedule
+
+    timing = int(arguments['--period']) \
+        if arguments['--period'] else settings.SCHEDULING_INTERVAL
+    logger.info("Scheduling every %d seconds", timing)
+
+    global stop
+    while not stop:
+
+        start = time.time()
+        logger.debug("Starting scheduler cycle...")
+        schedule.schedule()
+        duration = time.time() - start
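+        # sleep only for the remainder of the period, keeping scheduling
+        # cycles at a steady cadence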
+        if duration < timing:
+            time.sleep(timing - duration)
+
+    logger.info("Gracefully exiting the scheduler")
diff --git a/beat/web/scripts/worker.py b/beat/web/scripts/worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4a257ce3a27493011249caa961c7cdfd5f8f047
--- /dev/null
+++ b/beat/web/scripts/worker.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+"""\
+Starts the worker process.
+
+Usage:
+  %(prog)s [-v ... | --verbose ...] [--settings=<file>] [--period=<seconds>]
+           [--cpulimit=<file>] [--environments=<path>] [--name=<name>]
+  %(prog)s (-h | --help)
+  %(prog)s (-V | --version)
+
+
+Options:
+  -h, --help                        Show this help message
+  -V, --version                     Show program's version number
+  -v, --verbose                     Increases the output verbosity level
+  -S <file>, --settings=<file>      The module name of the Django settings
+                                    file [default: beat.web.settings.settings]
+  -c <file>, --cpulimit=<file>      The path to the cpulimit program to use.
+                                    If not set, standard locations are
+                                    searched. If not found, CPU limiting is
+                                    not enforced.
+  -e <path>, --environments=<path>  The path to the installation root of
+                                    available environments.
+  -n <name>, --name=<name>          The unique name of this worker on the
+                                    database. This is typically the assigned
+                                    hostname of the node, but not necessarily
+                                    [default: %(hostname)s]
+  -p <seconds>, --period=<seconds>  The time, in seconds, in which this worker
+                                    will probe the database for jobs to run or
+                                    cancel. If not set, uses the value of the
+                                    `WORKER_INTERVAL` variable in the Django
+                                    settings file.
+
+
+Examples:
+
+  To start the worker do the following:
+
+    $ %(prog)s
+
+  You can pass the ``-v`` flag to start the worker with the logging level set
+  to ``INFO`` or ``-vv`` to set it to ``DEBUG``. By default, the logging level
+  is set to ``WARNING`` if no ``-v`` flag is passed.
+
+"""
+
+import os
+import sys
+import time
+import socket
+import signal
+import docopt
+import logging
+
+stop = False
+
+def main(user_input=None):
+
+    arguments = docopt.docopt(
+      __doc__ % dict(
+        prog=os.path.basename(sys.argv[0]),
+        hostname=socket.gethostname(),
+        ),
+      )
+
+    os.environ.setdefault('DJANGO_SETTINGS_MODULE', arguments['--settings'])
+    from django.conf import settings
+    from django import setup
+    setup()
+
+    logger = logging.getLogger('beat.web')
+    if arguments['--verbose'] == 1: logger.setLevel(logging.INFO)
+    elif arguments['--verbose'] >= 2: logger.setLevel(logging.DEBUG)
+
+    # installs SIGTERM handler
+    def handler(signum, frame):
+        #ignore further signals
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+        logger.info("Signal %d caught, terminating...", signum)
+        global stop
+        stop = True
+
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+
+    from ..backend import utils
+    from ..backend.models import Worker
+
+    try:
+        worker = Worker.objects.get(name=arguments['--name'])
+    except Worker.DoesNotExist:
+        logger.error("Cannot find worker `%s' in database, aborting",
+            arguments['--name'])
+        sys.exit(1)
+    else:
+        logger.info("Found worker `%s' in database, proceeding...",
+            arguments['--name'])
+
+    # figure out paths to programs I need to use
+    from beat.core.async import resolve_cpulimit_path
+    cpulimit = resolve_cpulimit_path(arguments['--cpulimit'])
+    logger.debug("(path) cpulimit: `%s'", cpulimit)
+    process = utils.resolve_process_path()
+    logger.debug("(path) process: `%s'", process)
+
+    from django.utils import six
+    paths = arguments['--environments']
+    if isinstance(paths, six.string_types):
+        paths = paths.split(os.pathsep)
+    environments = utils.find_environments(paths)
+    logger.debug("Environments: %s", ", ".join(environments))
+
+    # check environments
+    missing, unused = worker.check_environments(environments)
+    if unused:
+        logger.info("The following environments where found on your " \
+            "setup, but will not be used with the current queue " \
+            "configuration: %s" % ", ".join(unused))
+    if missing:
+        raise RuntimeError("The following environments are currently " \
+            "missing from your setup: %s" % ", ".join(missing))
+    else:
+        logger.info("All required software environments were found")
+
+    timing = int(arguments['--period']) \
+        if arguments['--period'] else settings.WORKER_INTERVAL
+    logger.info("Working at `%s' every %d seconds", arguments['--name'], timing)
+
+    global stop
+    with worker:
+
+        while not stop:
+
+            start = time.time()
+            logger.debug("Starting work cycle...")
+            worker.work(environments, cpulimit, process)
+            duration = time.time() - start
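+            # sleep only for the remainder of the period; if the cycle took
+            # longer than ``timing``, start the next one immediately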
+            if duration < timing:
+                time.sleep(timing - duration)
+
+    logger.info("Gracefully exiting worker `%s'" % arguments['--name'])
diff --git a/beat/web/search/apps.py b/beat/web/search/apps.py
index 7ca7c2afa8063dc440f77b25ceafaa6aa22d445d..da7e5f4f5192466ebb523e43de42fa01a689b775 100644
--- a/beat/web/search/apps.py
+++ b/beat/web/search/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class SearchConfig(CommonAppConfig):
     name = 'beat.web.search'
@@ -35,4 +34,5 @@ class SearchConfig(CommonAppConfig):
 
     def ready(self):
         super(SearchConfig, self).ready()
+        from actstream import registry
         registry.register(self.get_model('Search'))
diff --git a/beat/web/search/migrations/0002_scheduler_addons.py b/beat/web/search/migrations/0002_scheduler_addons.py
new file mode 100644
index 0000000000000000000000000000000000000000..f269a5f4a1b220cadd5f6abe5862d667daba5abc
--- /dev/null
+++ b/beat/web/search/migrations/0002_scheduler_addons.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+###############################################################################
+#                                                                             #
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
+# Contact: beat.support@idiap.ch                                              #
+#                                                                             #
+# This file is part of the beat.web module of the BEAT platform.              #
+#                                                                             #
+# Commercial License Usage                                                    #
+# Licensees holding valid commercial BEAT licenses may use this file in       #
+# accordance with the terms contained in a written agreement between you      #
+# and Idiap. For further information contact tto@idiap.ch                     #
+#                                                                             #
+# Alternatively, this file may be used under the terms of the GNU Affero      #
+# Public License version 3 as published by the Free Software and appearing    #
+# in the file LICENSE.AGPL included in the packaging of this file.            #
+# The BEAT platform is distributed in the hope that it will be useful, but    #
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
+# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
+#                                                                             #
+# You should have received a copy of the GNU Affero Public License along      #
+# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
+#                                                                             #
+###############################################################################
+
+
+from __future__ import unicode_literals
+
+from django.db import migrations
+
+
+def reset_ranks(apps, schema_editor):
+    '''Reset ranks before older results can be deleted'''
+
+    Result = apps.get_model("experiments", "Result")
+    Rank = apps.get_model("search", "Rank")
+
+    total = Result.objects.count()
+    if total: print('')
+    for i, r in enumerate(Result.objects.order_by('-id')):
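+        # older results with the same name, pointing to the same cache, are
+        # duplicates superseded by the newer result ``r``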
+        older = Result.objects.filter(name=r.name, id__lt=r.id,
+            cache=r.block.hashes.first())
+        for old in older:
+            # check if any leaderboard ranks require updates
+            for rank in Rank.objects.filter(result__in=(old,)):
+                print("Rank %d for search `%s/%s' uses old Result `%d' - " \
+                    "resetting to newer Result `%d'..." % \
+                    (rank.id, rank.leaderboard.search.author.username,
+                      rank.leaderboard.search.name, old.id, r.id))
+                rank.result.remove(old)
+                rank.result.add(r)
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('search', '0001_initial'),
+        ('experiments', '0002_scheduler_addons'),
+    ]
+
+    operations = [
+        migrations.RunPython(reset_ranks),
+        ]
diff --git a/beat/web/search/models.py b/beat/web/search/models.py
index db4809593ce422395db96f4935bceea9005543f1..54c95ed7669dc00e64a404419bb58161818bd9bf 100644
--- a/beat/web/search/models.py
+++ b/beat/web/search/models.py
@@ -179,8 +179,7 @@ class Leaderboard(models.Model):
             header = []
             for block in blocks:
 
-                analyzer_output = Result.objects.filter(block=block,
-                    type__in=Result.SIMPLE_TYPE_NAMES).order_by('name')
+                analyzer_output = Result.objects.filter(cache__in=block.outputs.all(), type__in=Result.SIMPLE_TYPE_NAMES).order_by('name')
 
                 if not header: #first row, set order
                     header = [k.name for k in analyzer_output]
diff --git a/beat/web/settings/settings.py b/beat/web/settings/settings.py
index 2f53e5f11fa03376d9a63ac5886ef233a9ef62c8..2583579d640a60d0cce4edaf46a984796d090594 100644
--- a/beat/web/settings/settings.py
+++ b/beat/web/settings/settings.py
@@ -78,6 +78,10 @@ LOGGING = {
             },
         },
     'handlers': {
+        'discard': {
+            'level': 'DEBUG',
+            'class': 'logging.NullHandler',
+            },
         'console': {
             'level': 'DEBUG',
             'class': 'logging.StreamHandler',
@@ -92,6 +96,29 @@ LOGGING = {
         'beat.web': {
             'handlers': ['console', 'mail_admins'],
             },
+        'beat.core': {
+            'handlers': ['console'],
+            },
+        'beat.web.attestations.management.commands': {
+            'handlers': ['console'],
+            'propagate': False, #don't e-mail those!
+            },
+        'beat.web.search.management.commands': {
+            'handlers': ['console'],
+            'propagate': False, #don't e-mail those!
+            },
+        'beat.web.experiments.management.commands': {
+            'handlers': ['console'],
+            'propagate': False, #don't e-mail those!
+            },
+        'beat.web.reports.management.commands': {
+            'handlers': ['console'],
+            'propagate': False, #don't e-mail those!
+            },
+        'beat.web.backend.management.commands': {
+            'handlers': ['console'],
+            'propagate': False, #don't e-mail those!
+            },
         'beat.web.utils.management.commands': {
             'handlers': ['console'],
             'propagate': False, #don't e-mail those!
@@ -99,6 +126,27 @@ LOGGING = {
     }
 }
 
+##############################################################################
+#
+# Special code to remove Django 1.10 deprecation warnings caused by
+# Django-JSONField while that is not resolved:
+# https://bitbucket.org/schinckel/django-jsonfield/issues/46/django-190-complains-about-subfieldbase
+# Current django-jsonfield available: 0.9.19 (28.04.2016)
+#
+##############################################################################
+import logging
+
+class SuppressDeprecated(logging.Filter):
+    def filter(self, record):
+        WARNINGS_TO_SUPPRESS = [
+            'RemovedInDjango110Warning',
+        ]
+        # Return false to suppress message.
+        return not any([warn in record.getMessage() for warn in WARNINGS_TO_SUPPRESS])
+
+warn_logger = logging.getLogger('py.warnings')
+warn_logger.addFilter(SuppressDeprecated())
+
 ##############################################################################
 #
 # DATABASE
@@ -109,6 +157,9 @@ DATABASES = {
     'default': {
       'ENGINE': 'django.db.backends.sqlite3',
       'NAME': 'django.sql3',
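+      # 'timeout' sets how long SQLite waits on a locked database before
+      # raising "database is locked" errors (see also
+      # MAXIMUM_SPLIT_SAVE_RETRIES below)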
+      'OPTIONS': {
+        'timeout': 10, #seconds
+        },
       },
 }
 
@@ -133,7 +184,7 @@ STATIC_ROOT         = 'static'
 #
 ##############################################################################
 
-PREFIX           = os.path.join(os.getcwd(), 'web_dynamic_data')
+PREFIX           = os.path.join(os.getcwd(), 'prefix')
 ALGORITHMS_ROOT  = os.path.join(PREFIX, 'algorithms')
 PLOTTERS_ROOT    = os.path.join(PREFIX, 'plotters')
 LIBRARIES_ROOT   = os.path.join(PREFIX, 'libraries')
@@ -155,13 +206,12 @@ LOGIN_REDIRECT_URL       = '/'
 LOGIN_URL                = '/login/'
 SYSTEM_ACCOUNT           = 'system'
 PLOT_ACCOUNT             = 'plot'
-SCHEDULER_ACCOUNT        = 'scheduler'
 PREREGISTRATION_ONLY     = False
 TERMS_OF_SERVICE_VERSION = 1
 LEGAL_DISCLAIMER_VERSION = 1
 
-ACCOUNTS_TO_EXCLUDE_FROM_SEARCH = [SCHEDULER_ACCOUNT, 'AnonymousUser']
-ACCOUNTS_TO_EXCLUDE_FROM_TEAMS = [SYSTEM_ACCOUNT, PLOT_ACCOUNT, SCHEDULER_ACCOUNT, 'AnonymousUser']
+ACCOUNTS_TO_EXCLUDE_FROM_SEARCH = ['AnonymousUser']
+ACCOUNTS_TO_EXCLUDE_FROM_TEAMS = [SYSTEM_ACCOUNT, PLOT_ACCOUNT, 'AnonymousUser']
 
 
 ###########################################################################
@@ -188,8 +238,59 @@ EMAIL_USE_TLS        = False
 #
 ##############################################################################
 
-SCHEDULER_ADDRESS   = 'http://127.0.0.1'
-SCHEDULER_PORT      = 10000
+# The scheduling interval controls the number of seconds between
+# scheduling attempts (calls to :py:func:`beat.web.backend.schedule.schedule`)
+SCHEDULING_INTERVAL = 5 #seconds
+
+# The worker interval controls the number of seconds between successive
+# checks a particular worker makes on the database for jobs it should run or
+# cancel
+WORKER_INTERVAL = 5 #seconds
+
+# If set, a testing panel will appear on the scheduler page, allowing
+# administrators to launch scheduling activities manually.
+SCHEDULING_PANEL = True
+
+# The maximum index split errors control the maximum number of times we can
+# incur in an index split error condition without cancelling the block
+# execution altogether. This number, multiplied by the scheduling interval,
+# must be larger than 60 seconds, as this is the default NFS caching interval.
+# If you run on a reliable networked filesystem (i.e., not NFS) or on the local
+# node, you may set this value to 0, which will cause the scheduling activity
+# to consider even a single splitting error as enough reason to cancel the
+# block execution (and, by consequence, the experiment).
+MAXIMUM_SPLIT_ERRORS = 0 #attempts to split without errors
+
+# The maximum number of IOErrors (due to cache loading) which are acceptable
+# for a particular split. If the number of cache errors exceeds this value,
+# the split is considered failed. This variable serves the same purpose as
+# ``MAXIMUM_SPLIT_ERRORS`` above and covers failures that NFS caching does
+# not explain. If you're running on a reliable filesystem, you can leave it
+# at zero.
+MAXIMUM_IO_ERRORS = 0 #attempts to load cache files without errors
+
+# The maximum number of retries for saving job splits (mostly at ``start()``
+# and ``end()``) from remote processes. If you're using a SQLite database
+# backend, this number should be higher than 1 (recommended value is 3 to 5).
+# If you're using another database, this value can be ignored. If set to one
+# or more and "database is locked" errors occur, then job split saving at
+# ``start()`` or ``end()`` will be retried at 1-second intervals.
+MAXIMUM_SPLIT_SAVE_RETRIES = 5
+
+# The default user error is a message string that is set upon the failure of
+# execution of a particular block if the root cause of the problem is NOT a
+# user error, but the system's. In this case, the system administrators
+# receive an e-mail indicating the exception caught, and the user's block for
+# the given experiment is marked with this informative message.
+DEFAULT_USER_ERROR = "A system error occurred and we could not run your " \
+    "algorithm.\nThe administrators have been informed.\nYou may try to run " \
+    "the experiment again at another moment."
+
+# If set, then detach child processes (I/O daemons) from the worker process
+# group. In this case, signals sent to the worker process will not be forwarded
+# to the processing children. This is desirable in a production environment, to
+# avoid user processes being terminated when the worker is updated.
+WORKER_DETACH_CHILDREN = False
 
 
 ##############################################################################
@@ -276,9 +377,6 @@ RESTRUCTUREDTEXT_FILTER_SETTINGS = {
     'raw_enabled': 0,
     }
 
-# Use nose to run tests
-TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
-
 # Where CodeMirror is installed and some other settings
 CODEMIRROR_PATH = 'codemirror' #w.r.t to STATIC_URL
 CODEMIRROR_THEME = 'default'
diff --git a/beat/web/settings/test.py b/beat/web/settings/test.py
index e7137a2fed710ea53efa5fdb3bb883c1146d588a..9bb7d4dc121920241d630c0f8c4430553968aa88 100644
--- a/beat/web/settings/test.py
+++ b/beat/web/settings/test.py
@@ -36,17 +36,20 @@ ALLOWED_HOSTS = [
     'testserver',
 ]
 
-# To always use a (in-memory) sqlite3 database for the tests
-DATABASES = {
-    'default': {
-        'ENGINE': 'django.db.backends.sqlite3',
-        'NAME': ':memory:',
-    },
-}
+DATABASES['default']['NAME'] = 'test.sql3'
+DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
 
+import sys
+if 'beat.cmdline' in sys.argv:
+    # make it in-memory for cmdline app tests
+    DATABASES['default']['NAME'] = ':memory:'
 
-PREFIX           = os.path.join(os.getcwd(), 'web_dynamic_data_tests')
+LOGGING['handlers']['console']['level'] = 'DEBUG'
+LOGGING['loggers']['beat.core']['handlers'] = ['discard']
+
+PREFIX = os.environ.get('BEAT_TEST_PREFIX', os.path.realpath('./test_prefix'))
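+# e.g., ``export BEAT_TEST_PREFIX=/tmp/beat_tests`` (any writable location)
+# relocates all data written by the test suite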
 ALGORITHMS_ROOT  = os.path.join(PREFIX, 'algorithms')
+PLOTTERS_ROOT    = os.path.join(PREFIX, 'plotters')
 LIBRARIES_ROOT   = os.path.join(PREFIX, 'libraries')
 DATABASES_ROOT   = os.path.join(PREFIX, 'databases')
 DATAFORMATS_ROOT = os.path.join(PREFIX, 'dataformats')
@@ -54,13 +57,7 @@ TOOLCHAINS_ROOT  = os.path.join(PREFIX, 'toolchains')
 EXPERIMENTS_ROOT = os.path.join(PREFIX, 'experiments')
 CACHE_ROOT       = os.path.join(PREFIX, 'cache')
 
-
 # To speed-up tests, don't put this in production
-PASSWORD_HASHERS = (
-  'django.contrib.auth.hashers.SHA1PasswordHasher',
-  'django.contrib.auth.hashers.PBKDF2PasswordHasher',
-  'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
-  'django.contrib.auth.hashers.BCryptPasswordHasher',
+PASSWORD_HASHERS = [
   'django.contrib.auth.hashers.MD5PasswordHasher',
-  'django.contrib.auth.hashers.CryptPasswordHasher',
-)
+]
diff --git a/beat/web/team/apps.py b/beat/web/team/apps.py
index 21b471c8d39831218aa6c1797ba66b54ecf4b648..dcd09cd58ec9f6e4b48467821a583713031423a5 100644
--- a/beat/web/team/apps.py
+++ b/beat/web/team/apps.py
@@ -27,15 +27,13 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 class TeamConfig(CommonAppConfig):
     name = 'beat.web.team'
     verbose_name = _('Team')
 
     def ready(self):
-    	super(TeamConfig, self).ready()
-
+        super(TeamConfig, self).ready()
         from .signals.handlers import on_added_to_team
-
+        from actstream import registry
         registry.register(self.get_model('Team'))
diff --git a/beat/web/team/tests.py b/beat/web/team/tests.py
index fc3351e6061e62ee2957e68f8bef0b6a4779a98a..3839dcfe72a34109e0822ba6213ef01f40e64728 100644
--- a/beat/web/team/tests.py
+++ b/beat/web/team/tests.py
@@ -34,6 +34,7 @@ from django.contrib.auth.models import User
 
 import simplejson as json
 
+from ..common.testutils import tearDownModule
 from .models import Team
 from .serializers import SimpleTeamSerializer
 
@@ -220,10 +221,10 @@ class TeamAddMemberTestCase(TeamTestCase):
             'short_description': u'',
             'is_owner': True,
             'accessibility': 'public',
-            'members': [
+            'members': set([
                 self.johndoe.username,
                 self.jackdoe.username,
-            ]
+            ])
         }
 
         self.client.login(username=self.johndoe.username, password=self.password)
@@ -232,7 +233,9 @@ class TeamAddMemberTestCase(TeamTestCase):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(self.url, format='json')
-        self.assertEqual(json.loads(response.content), expected_answer)
+        r = json.loads(response.content)
+        r['members'] = set(r['members']) # member ordering is not guaranteed
+        self.assertEqual(r, expected_answer)
 
     def test_logged_in_user_not_owner(self):
         self.client.login(username=self.jackdoe, password=self.password)
diff --git a/beat/web/toolchains/apps.py b/beat/web/toolchains/apps.py
index dce6dbbcdecfa081383216ea747a26f814c3e2b9..623cb76300880209de6296c5899f4198facd10b7 100644
--- a/beat/web/toolchains/apps.py
+++ b/beat/web/toolchains/apps.py
@@ -27,7 +27,6 @@
 
 from ..common.apps import CommonAppConfig
 from django.utils.translation import ugettext_lazy as _
-from actstream import registry
 
 
 class ToolchainsConfig(CommonAppConfig):
@@ -37,8 +36,7 @@ class ToolchainsConfig(CommonAppConfig):
 
     def ready(self):
         super(ToolchainsConfig, self).ready()
-
         from .signals import auto_delete_file_on_delete, auto_delete_file_on_change
-
+        from actstream import registry
         registry.register(self.get_model('Toolchain'))
 
diff --git a/beat/web/toolchains/tests.py b/beat/web/toolchains/tests.py
index 74bc3eda6e67c3a7797a01ccc435f43e984fdad8..b78bd7a36772b362a706c21c96c20f3c7a3ec643 100644
--- a/beat/web/toolchains/tests.py
+++ b/beat/web/toolchains/tests.py
@@ -40,7 +40,7 @@ import beat.core.toolchain
 from .models import Toolchain
 
 from ..common.models import Shareable
-from ..common.testutils import BaseTestCase
+from ..common.testutils import BaseTestCase, tearDownModule
 
 
 
diff --git a/beat/web/ui/static/ui/css/style.css b/beat/web/ui/static/ui/css/style.css
index 29ce2345f3fd4b458f0982eb01cd1d6a12ee31cb..f490e370a77b2a87818f24a239e8e3a93ad488be 100644
--- a/beat/web/ui/static/ui/css/style.css
+++ b/beat/web/ui/static/ui/css/style.css
@@ -528,3 +528,14 @@ select.form-control.input-sm + .chosen-container.chosen-container-single .chosen
 .chosen-container .chosen-results li.highlighted .help {
   color: #3e3e3e;
 }
+
+
+/*********** Backend: scheduler page style ************/
+tr.job-split {
+  color: #ababab;
+  font-style: italic;
+}
+
+tr.job-split td.job-split-empty {
+  background-color: #dedede;
+}
diff --git a/beat/web/ui/templates/ui/activity_stream.html b/beat/web/ui/templates/ui/activity_stream.html
index ccae1ce4b6e4541597879cf0107a84d85e4e95e0..579d1d2701830a52f7d892aeeddca76ab38818d0 100644
--- a/beat/web/ui/templates/ui/activity_stream.html
+++ b/beat/web/ui/templates/ui/activity_stream.html
@@ -91,7 +91,7 @@
 
   {% if leaderboards and not request.GET.all %}
   <div class="col-sm-5">
-    <h3>Latest Leaderboard Changes</h3>
+    <h3>Latest Leaderboard Updates</h3>
     <p class="help">From subscribed leaderboards only. Click <a href="{% url "search:list" author.username %}">here</a> to see more</p>
     <hr/>
 
diff --git a/beat/web/ui/templates/ui/bar.html b/beat/web/ui/templates/ui/bar.html
index d274e66c1788f0b5deec88e94c89f0cda396cc37..b011c0541c7c8b59d0ef6934fbbfd1a025c5bc0d 100644
--- a/beat/web/ui/templates/ui/bar.html
+++ b/beat/web/ui/templates/ui/bar.html
@@ -1,21 +1,21 @@
 {% comment %}
  * Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
  * Contact: beat.support@idiap.ch
- * 
+ *
  * This file is part of the beat.web module of the BEAT platform.
- * 
+ *
  * Commercial License Usage
  * Licensees holding valid commercial BEAT licenses may use this file in
  * accordance with the terms contained in a written agreement between you
  * and Idiap. For further information contact tto@idiap.ch
- * 
+ *
  * Alternatively, this file may be used under the terms of the GNU Affero
  * Public License version 3 as published by the Free Software and appearing
  * in the file LICENSE.AGPL included in the packaging of this file.
  * The BEAT platform is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  * or FITNESS FOR A PARTICULAR PURPOSE.
- * 
+ *
  * You should have received a copy of the GNU Affero Public License along
  * with the BEAT platform. If not, see http://www.gnu.org/licenses/.
 {% endcomment %}
@@ -119,7 +119,7 @@
         {% else %}
         <li><p class="navbar-btn"><a class="btn btn-sm btn-primary" title="Register now for free!" href="{% url 'registration' %}"><i class="fa fa-plus fa-fw fa-lg"></i> Sign-up</a></p></li>
         {% endif %}
-        <li><p class="navbar-btn"><a class="btn btn-sm btn-success" title="Sign-in" href="{% url 'login' %}"><i class="fa fa-sign-in fa-fw fa-lg"></i> Sign-in</a></p></li>
+        <li><p class="navbar-btn"><a class="btn btn-sm btn-success" title="Sign-in" href="{% url 'login' %}?next={{ request.path }}"><i class="fa fa-sign-in fa-fw fa-lg"></i> Sign-in</a></p></li>
 
         {% else %}
         <li class="visible-xs"><a title="Your homepage" href="{% url 'activity-stream' request.user.username %}"><i class="fa fa-home fa-fw"></i> Your homepage</a></li>
diff --git a/beat/web/ui/templatetags/ui_tags.py b/beat/web/ui/templatetags/ui_tags.py
index 95ad1eb73eddd5b2c57a17096922ea65dcaef9b5..1f953f2e12812b9f9361c13a343cadf2be46ce52 100644
--- a/beat/web/ui/templatetags/ui_tags.py
+++ b/beat/web/ui/templatetags/ui_tags.py
@@ -27,6 +27,7 @@
 
 from django import template
 from django.conf import settings
+from django.utils.html import format_html
 from ...common.texts import Messages as Texts
 from ... import __version__
 from collections import OrderedDict
@@ -40,7 +41,6 @@ register = template.Library()
 
 @register.inclusion_tag('ui/bar.html', takes_context=True)
 def navbar(context):
-    request = context['request']
     return {
         'request': context['request'],
         'public_urls': [
@@ -245,7 +245,7 @@ def code_editor_scripts(modes):
 
     js = ['<script src="%s%s?v%s" type="text/javascript" charset="utf-8"></script>' % (settings.STATIC_URL, k, __version__) for k in js]
 
-    return '\n'.join(js)
+    return format_html('\n'.join(js))
 
 
 #--------------------------------------------------
@@ -262,8 +262,7 @@ def code_editor_css():
     if settings.CODEMIRROR_THEME != 'default':
         css.append('%s/theme/%s.css' % (settings.CODEMIRROR_PATH, settings.CODEMIRROR_THEME))
 
-    return '\n'.join(map(lambda x: '<link rel="stylesheet" href="%s%s?v%s" type="text/css" media="screen" />' % \
-                            (settings.STATIC_URL, x, __version__), css))
+    return format_html('\n'.join(map(
+        lambda x: '<link rel="stylesheet" href="%s%s?v%s" type="text/css" '
+            'media="screen" />' % (settings.STATIC_URL, x, __version__),
+        css)))
 
 
 #--------------------------------------------------
diff --git a/beat/web/ui/urls.py b/beat/web/ui/urls.py
index 73ac2f08404b671577aadf3181d485bb0418293f..75dcf8a9eb76845c119d4c78beeb67020ee13468 100644
--- a/beat/web/ui/urls.py
+++ b/beat/web/ui/urls.py
@@ -28,6 +28,7 @@
 from django.conf.urls import url
 from django.conf import settings
 from django.views.generic.base import TemplateView
+from django.contrib.auth.views import logout
 
 from . import views
 
@@ -53,7 +54,7 @@ urlpatterns = [
     ),
 
     url(r'^logout/$',
-        'django.contrib.auth.views.logout',
+        logout,
         {'next_page': 'index'},
         name='logout',
         ),
diff --git a/beat/web/ui/views.py b/beat/web/ui/views.py
index 9d9d8af8d35590737b5323bb55617a0f5971a14b..07bbef7f5e0a16b299afbfeadc100b3ae19d8f34 100644
--- a/beat/web/ui/views.py
+++ b/beat/web/ui/views.py
@@ -62,7 +62,11 @@ def login(request):
 
     response = django_login(request)
     if request.user.is_authenticated():
-        return HttpResponseRedirect(reverse('activity-stream', args=[request.user.username]))
+        path = request.GET.get('next', '/')
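+        # avoid redirecting back to '/' or to the login page itself; send
+        # those users to their own activity stream instead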
+        if path in ('/', reverse('login')):
+            return HttpResponseRedirect(reverse('activity-stream', args=[request.user.username]))
+        else:
+            return HttpResponseRedirect(path)
 
     return response
 
diff --git a/beat/web/utils/management/commands/install.py b/beat/web/utils/management/commands/install.py
index 3043d287dbc8b42478c36239edba6a6b11afd88c..8e8cba72e46b321ab8f4df1a22a98c00fe325585 100644
--- a/beat/web/utils/management/commands/install.py
+++ b/beat/web/utils/management/commands/install.py
@@ -130,11 +130,11 @@ def add_group(name):
     return group
 
 
-def setup_environment(queue_config_filename):
+def setup_environment(queue_config_filename, verbosity):
 
-    from .qsetup import setup_environment as _method
-
-    return _method(queue_config_filename)
+    from django.core.management import call_command
+    call_command('qsetup', verbosity=verbosity, reset=True,
+        config=queue_config_filename)
 
 
 def create_sites():
@@ -153,6 +153,16 @@ def create_sites():
     _setup_site(3, 'Production System', 'www.beat-eu.org')
 
 
+def create_users(username, passwd):
+
+    # Sets up initial users, if not already there.
+    system_user = add_user(settings.SYSTEM_ACCOUNT, None, '1')
+    plot_user = add_user(settings.PLOT_ACCOUNT, None, '2')
+    user = add_user(username, passwd, '3')
+
+    return system_user, plot_user, user
+
+
 def list_objects(prefix, project, category, fnfilter):
     """Lists all objects matching a certain filter"""
 
@@ -813,6 +823,104 @@ def link_contribution_versions(klass):
                 pass #ignores
 
 
+def install_contributions(source_prefix, project, template_data,
+    db_root_file=None):
+    '''Installs all contributions for a given project
+
+
+    Parameters:
+
+      source_prefix (str): The path to the base directory containing the
+        projects where objects must be installed from.
+
+      project (str): The project within the ``source_prefix`` where to install
+        objects from.
+
+      template_data (dict): A dictionary containing standard template data for
+        completing template objects installed on the project.
+
+      db_root_file (str, Optional): Optional path to a JSON describing the
+        database root for databases to be inserted. Database names not present
+        at the project directory will be ignored.
+
+    '''
+
+    # Dataformat adding requires a special trick as there are dependencies
+    # between different dataformats. Our recipe: we try to upload all of them
+    # one after the other. If one fails, we retry on the next loop, until all
+    # formats have been uploaded.
+    dataformat_filenames_next = list_objects(source_prefix, project,
+        'dataformats', '*.json')
+    dataformat_filenames_cur = []
+
+    while True:
+        if not dataformat_filenames_next: break
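+        # if the last pass made no progress, the remaining formats have
+        # unsatisfiable dependencies: give up trying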
+        if len(dataformat_filenames_cur) == len(dataformat_filenames_next):
+            break
+        dataformat_filenames_cur = dataformat_filenames_next
+        dataformat_filenames_next = []
+        for k in dataformat_filenames_cur:
+            if not upload_dispatcher(source_prefix, project, 'dataformats', k,
+                template_data):
+                dataformat_filenames_next.append(k)
+    from ....dataformats.models import DataFormat
+    link_contribution_versions(DataFormat)
+
+    # Reads database root file, if provided
+    db_root = {}
+    if db_root_file: db_root.update(load_database_folders(db_root_file))
+
+    for k in list_objects(source_prefix, project, 'databases', '*.json'):
+        if k in db_root: template_data['root_folder'] = db_root[k]
+        upload_dispatcher(source_prefix, project, 'databases', k,
+                template_data)
+    link_database_versions()
+
+    for k in list_objects(source_prefix, project, 'toolchains',
+            '*.json'):
+        upload_dispatcher(source_prefix, project, 'toolchains', k,
+                template_data)
+    from ....toolchains.models import Toolchain
+    link_contribution_versions(Toolchain)
+
+    # Libraries adding requires a special trick as there are
+    # dependencies between different libraries and algorithms. Our
+    # recipe: we use the same technique as for dataformats.
+    library_filenames_next = list_objects(source_prefix, project,
+            'libraries', '*.json')
+    library_filenames_cur = []
+
+    while True:
+        if not library_filenames_next: break
+        if len(library_filenames_cur) == len(library_filenames_next):
+            break
+        library_filenames_cur = library_filenames_next
+        library_filenames_next = []
+        for k in library_filenames_cur:
+            if not upload_dispatcher(source_prefix, project,
+                    'libraries', k, template_data):
+                library_filenames_next.append(k)
+    from ....libraries.models import Library
+    link_contribution_versions(Library)
+
+    for k in list_objects(source_prefix, project, 'algorithms',
+            '*.json'):
+        upload_dispatcher(source_prefix, project, 'algorithms', k,
+                template_data)
+    from ....algorithms.models import Algorithm
+    link_contribution_versions(Algorithm)
+
+    for k in list_objects(source_prefix, project, 'plotters', '*.json'):
+        upload_dispatcher(source_prefix, project, 'plotters', k,
+                template_data)
+    from ....plotters.models import Plotter
+    link_contribution_versions(Plotter)
+
+    for k in list_objects(source_prefix, project, 'experiments',
+            '*.json'):
+        upload_dispatcher(source_prefix, project, 'experiments', k,
+                template_data)
+
 
 class Command(BaseCommand):
 
@@ -920,23 +1028,24 @@ class Command(BaseCommand):
 
         # Sync database
         from django.core.management import call_command
-        call_command('migrate', interactive=False, verbose=1)
+        call_command('migrate', interactive=False, verbosity=1)
 
         # Setup sites: 1.Development; 2.Staging; 3.Production
         create_sites()
 
-        # Sets up initial users, if not already there.
-        system_user = add_user(settings.SYSTEM_ACCOUNT, None, '1')
-        plot_user = add_user(settings.PLOT_ACCOUNT, None, '2')
-        scheduler_user = add_user(settings.SCHEDULER_ACCOUNT, None, '3')
-        user = add_user(arguments['username'], arguments['password'], '4')
+        system_user, plot_user, user = create_users(arguments['username'],
+            arguments['password'])
+
 
         # Sets up initial groups
         add_group('Default')
 
         # Sets up the queue and environments
-        queue, environment = \
-                setup_environment(arguments['queue_configuration'])
+        setup_environment(arguments['queue_configuration'],
+            arguments['verbosity'])
+        from ....backend.models import Environment, Queue
+        environment = Environment.objects.first()
+        queue = Queue.objects.first()
 
         # Iterates over projects to install
         for project in ['system'] + arguments['project']:
@@ -944,7 +1053,6 @@ class Command(BaseCommand):
             template_data = dict(
                 system_user = system_user,
                 plot_user = plot_user,
-                scheduler_user = scheduler_user,
                 user = user,
                 private = arguments['private'],
                 queue = queue.name,
@@ -954,89 +1062,5 @@ class Command(BaseCommand):
 
             logger.info("Adding objects for project `%s'...", project)
 
-            # Dataformat adding requires a special trick as there are
-            # dependencies between different dataformats. Our recipe: we try
-            # to upload all of them one after the other. If one fails, we
-            # retry on the next loop, until all formats have been uploaded.
-            dataformat_filenames_next = list_objects(self.prefix, project,
-                    'dataformats', '*.json')
-            dataformat_filenames_cur = []
-
-            while True:
-                if not dataformat_filenames_next: break
-                if len(dataformat_filenames_cur) == \
-                        len(dataformat_filenames_next): break
-                dataformat_filenames_cur = dataformat_filenames_next
-                dataformat_filenames_next = []
-                for k in dataformat_filenames_cur:
-                    if not upload_dispatcher(self.prefix, project,
-                            'dataformats', k, template_data):
-                        dataformat_filenames_next.append(k)
-            from ....dataformats.models import DataFormat
-            link_contribution_versions(DataFormat)
-
-            # Reads database root file, if provided
-            db_root = {}
-            if arguments['database_root_file']:
-                db_root.update(load_database_folders(
-                    arguments['database_root_file']))
-
-            for k in list_objects(self.prefix, project, 'databases', '*.json'):
-                if k in db_root: template_data['root_folder'] = db_root[k]
-                upload_dispatcher(self.prefix, project, 'databases', k,
-                        template_data)
-            link_database_versions()
-
-            for k in list_objects(self.prefix, project, 'toolchains',
-                    '*.json'):
-                upload_dispatcher(self.prefix, project, 'toolchains', k,
-                        template_data)
-            from ....toolchains.models import Toolchain
-            link_contribution_versions(Toolchain)
-
-            # Libraries adding requires a special trick as there are
-            # dependencies between different libraries and algorithms. Our
-            # recipe: we use the same technique as for dataformats.
-            library_filenames_next = list_objects(self.prefix, project,
-                    'libraries', '*.json')
-            library_filenames_cur = []
-
-            while True:
-                if not library_filenames_next: break
-                if len(library_filenames_cur) == len(library_filenames_next):
-                    break
-                library_filenames_cur = library_filenames_next
-                library_filenames_next = []
-                for k in library_filenames_cur:
-                    if not upload_dispatcher(self.prefix, project,
-                            'libraries', k, template_data):
-                        library_filenames_next.append(k)
-            from ....libraries.models import Library
-            link_contribution_versions(Library)
-
-            for k in list_objects(self.prefix, project, 'algorithms',
-                    '*.json'):
-                upload_dispatcher(self.prefix, project, 'algorithms', k,
-                        template_data)
-            from ....algorithms.models import Algorithm
-            link_contribution_versions(Algorithm)
-
-            for k in list_objects(self.prefix, project, 'plotters', '*.json'):
-                upload_dispatcher(self.prefix, project, 'plotters', k,
-                        template_data)
-            from ....plotters.models import Plotter
-            link_contribution_versions(Plotter)
-
-            for k in list_objects(self.prefix, project, 'experiments',
-                    '*.json'):
-                upload_dispatcher(self.prefix, project, 'experiments', k,
-                        template_data)
-
-            '''
-            for k in list_objects(self.prefix, project, 'plotter_parameters',
-                    '*.json'):
-                upload_dispatcher(self.prefix, project, 'plotter_parameters',
-                        k, template_data)
-            from ....plotters.models import PlotterParameter
-            link_contribution_versions(PlotterParameter)
-            '''
+            install_contributions(self.prefix, project, template_data,
+                arguments['database_root_file'])
diff --git a/beat/web/utils/management/commands/qsetup.py b/beat/web/utils/management/commands/qsetup.py
deleted file mode 100644
index 911485cf5670c331f37148451c93b6c0c462842d..0000000000000000000000000000000000000000
--- a/beat/web/utils/management/commands/qsetup.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-###############################################################################
-#                                                                             #
-# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
-# Contact: beat.support@idiap.ch                                              #
-#                                                                             #
-# This file is part of the beat.web module of the BEAT platform.              #
-#                                                                             #
-# Commercial License Usage                                                    #
-# Licensees holding valid commercial BEAT licenses may use this file in       #
-# accordance with the terms contained in a written agreement between you      #
-# and Idiap. For further information contact tto@idiap.ch                     #
-#                                                                             #
-# Alternatively, this file may be used under the terms of the GNU Affero      #
-# Public License version 3 as published by the Free Software and appearing    #
-# in the file LICENSE.AGPL included in the packaging of this file.            #
-# The BEAT platform is distributed in the hope that it will be useful, but    #
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
-# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
-#                                                                             #
-# You should have received a copy of the GNU Affero Public License along      #
-# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
-#                                                                             #
-###############################################################################
-
-
-import logging
-logger = logging.getLogger(__name__)
-
-import multiprocessing
-
-import simplejson
-
-from django.conf import settings
-from django.core.management.base import BaseCommand, CommandError
-from django.contrib.auth.models import User, Group
-from rest_framework.authtoken.models import Token
-from guardian.shortcuts import assign_perm
-
-from ....backend.models import Environment, Queue, Worker, QueueWorkerSlot
-from ....backend.models import Shareable
-
-
-DEFAULT_QUEUE_CONFIGURATION = {
-        "default": {
-            "memory-in-megabytes": 4096,
-            "time-limit-in-minutes": 180, #3 hours
-            "nb-cores-per-slot": 1,
-            "max-slots-per-user": multiprocessing.cpu_count(),
-            "environments": [
-                {
-                    "name": "environment",
-                    "version": "1",
-                    }
-                ],
-            "slots": {
-                "node1": multiprocessing.cpu_count(),
-                }
-            }
-        }
-
-
-def setup_environment(queue_config_filename):
-
-    global logger
-
-    if queue_config_filename is None:
-        qconfig = DEFAULT_QUEUE_CONFIGURATION
-    else:
-        with open(queue_config_filename, 'rt') as f:
-            qconfig = simplejson.loads(f.read())
-
-    default_group = Group.objects.get(name='Default')
-
-    for queue_name, queue_attrs in qconfig.items():
-
-        queue = Queue.objects.filter(name=queue_name)
-
-        if not queue:  # create it
-            queue = Queue(
-                name=queue_name,
-                memory_limit=queue_attrs['memory-in-megabytes'],
-                time_limit=queue_attrs['time-limit-in-minutes'],
-                nb_cores_per_slot=queue_attrs['nb-cores-per-slot'],
-                max_slots_per_user=queue_attrs['max-slots-per-user'],
-            )
-            queue.save()
-            logger.info("Created queue `%s'", queue)
-
-        else:
-            assert queue.count() == 1
-            queue = queue[0]
-            logger.info("Re-using existing queue `%s'", queue)
-
-        # Sets up the permissions on the queue
-        assign_perm('can_access', default_group, queue)
-
-        for env_attrs in queue_attrs['environments']:
-
-            env = Environment.objects.filter(name=env_attrs['name'],
-                                             version=env_attrs['version'])
-
-            if not env:  # create it
-                env = Environment(
-                    name=env_attrs['name'],
-                    version=env_attrs['version'],
-                    short_description=env_attrs.get('short_description', ''),
-                    description=env_attrs.get('description', ''),
-                    sharing=Shareable.PUBLIC,
-                )
-                env.save()
-                logger.info("Created environment `%s'", env)
-            else:
-                assert env.count() == 1
-                env = env[0]  # there must be only one anyways
-                env.active = True # activate it
-                env.save()
-                logger.info("Re-using existing environment `%s'", env)
-
-            queue.environments.add(env)
-            queue.save()
-
-        for worker_name, slots in queue_attrs['slots'].items():
-
-            worker = Worker.objects.filter(name=worker_name)
-
-            if not worker:  # create it
-                worker = Worker(name=worker_name, nb_cores=slots)
-                worker.save()
-                logger.info("Created worker `%s'", worker)
-            else:
-                assert worker.count() == 1
-                worker = worker[0]  # there must be only one anyways
-                logger.info("Re-using existing worker `%s'", worker)
-
-            # associate worker - queue - number of slots
-            worker_link = QueueWorkerSlot.objects.filter(
-                queue=queue,
-                worker=worker,
-                nb_slots=slots,
-            )
-
-            if not worker_link:  # create it
-                worker_link = QueueWorkerSlot(
-                    queue=queue,
-                    worker=worker,
-                    nb_slots=slots,
-                )
-                worker_link.save()
-                logger.info("Created queue-worker slot `%s'", worker_link)
-            else:
-                assert worker_link.count() == 1
-
-    # returns one queue and one environment to use as default
-    return Queue.objects.all()[0], Environment.objects.all()[0]
-
-
-class Command(BaseCommand):
-
-    help = 'Sets and resets queue configurations'
-
-
-    def add_arguments(self, parser):
-
-        parser.add_argument('--reset', action='store_true', dest='reset',
-                default=False, help='Delete all environment/worker/queues ' \
-                        'before setting the given configuration')
-
-        parser.add_argument('qconfig', type=str, nargs='?',
-                help='Optional custom queue configuration to use. If not ' \
-                        'passed, uses an internal default with a single ' \
-                        'queue/worker/%d slots' % multiprocessing.cpu_count())
-
-
-    def handle(self, *ignored, **arguments):
-
-        # Setup this command's logging level
-        global logger
-        arguments['verbosity'] = int(arguments['verbosity'])
-        if arguments['verbosity'] >= 1:
-            if arguments['verbosity'] == 1: logger.setLevel(logging.INFO)
-            elif arguments['verbosity'] >= 2: logger.setLevel(logging.DEBUG)
-
-        if arguments['reset']:
-
-            # make all old environments inactive
-            q = Environment.objects.all()
-            updates = q.update(active=False)
-            for obj in q: obj.save()
-            logger.info("De-activated %d environments", updates)
-
-            # cleans-up queues and workers before installing new config
-            q = Queue.objects.all()
-            q.delete()
-            logger.info("Erased %d queues", q.count())
-
-            q = Worker.objects.all()
-            q.delete()
-            logger.info("Erased %d workers", q.count())
-
-            assert QueueWorkerSlot.objects.count() == 0, \
-                    "There is still %d queue-worker relations undeleted" % \
-                    QueueWorkerSlot.objects.count()
-
-            # sets the tokens for the scheduler user to '3' (default localhost)
-            scheduler = User.objects.filter(username=settings.SCHEDULER_ACCOUNT)
-            usetoken = '3'
-            if scheduler:
-                scheduler = scheduler[0]
-                try:
-                    scheduler.auth_token.delete()
-                except Exception:
-                    pass
-                token = Token(user=scheduler)
-                token.key = usetoken
-                token.save()
-                logger.info("Reset `%s' token to `%s'", scheduler.username,
-                        usetoken)
-            else:
-                raise CommandError("Could not find an account named `%s' on this database" % settings.SCHEDULER_ACCOUNT)
-
-        setup_environment(arguments['qconfig'])
diff --git a/beat/web/utils/scheduler.py b/beat/web/utils/scheduler.py
deleted file mode 100644
index cdff2df66a811ef938155b13b382610367d79cb5..0000000000000000000000000000000000000000
--- a/beat/web/utils/scheduler.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-###############################################################################
-#                                                                             #
-# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/           #
-# Contact: beat.support@idiap.ch                                              #
-#                                                                             #
-# This file is part of the beat.web module of the BEAT platform.              #
-#                                                                             #
-# Commercial License Usage                                                    #
-# Licensees holding valid commercial BEAT licenses may use this file in       #
-# accordance with the terms contained in a written agreement between you      #
-# and Idiap. For further information contact tto@idiap.ch                     #
-#                                                                             #
-# Alternatively, this file may be used under the terms of the GNU Affero      #
-# Public License version 3 as published by the Free Software and appearing    #
-# in the file LICENSE.AGPL included in the packaging of this file.            #
-# The BEAT platform is distributed in the hope that it will be useful, but    #
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY  #
-# or FITNESS FOR A PARTICULAR PURPOSE.                                        #
-#                                                                             #
-# You should have received a copy of the GNU Affero Public License along      #
-# with the BEAT platform. If not, see http://www.gnu.org/licenses/.           #
-#                                                                             #
-###############################################################################
-
-from django.conf import settings
-import httplib
-import logging
-import traceback
-import urlparse
-import urllib
-import simplejson as json
-
-
-logger = logging.getLogger(__name__)
-
-
-#----------------------------------------------------------
-
-
-def _connect(url, port):
-    parsed_url = urlparse.urlparse(url)
-
-    host_and_port = parsed_url.netloc.split(':')
-
-    if port is None:
-        if len(host_and_port) == 2:
-            host, port = host_and_port
-        else:
-            host = host_and_port
-            if parsed_url.scheme == 'https':
-                port = 443
-            else:
-                port = 80
-    else:
-        host = host_and_port[0]
-
-    if parsed_url.scheme == 'https':
-        connection = httplib.HTTPSConnection(host, port)
-    else:
-        connection = httplib.HTTPConnection(host, port)
-
-    try:
-        connection.connect()
-    except:
-        logger.error('Failed to establish a connection with the scheduler API, reason: %s' % traceback.format_exc())
-        return None
-
-    return connection
-
-
-#----------------------------------------------------------
-
-
-def _combine_url_parameters(url, params):
-    """Combines the given URL with the parameter dictionary"""
-
-    parsed_url = urlparse.urlparse(url)
-
-    qs = []
-
-    if parsed_url.query:
-        qs.extend([parsed_url.query, '&'])
-
-    qs.append(urllib.urlencode(params, doseq=True))
-
-    return urlparse.urlunparse((
-        parsed_url[0],
-        parsed_url[1],
-        parsed_url[2],
-        parsed_url[3],
-        ''.join(qs),
-        parsed_url[5]
-    ))
-
-
-#----------------------------------------------------------
-
-
-def _sendMessage(url, params=None, data=None, msg_type='PUT'):
-    # Establish the connection
-    connection = _connect(settings.SCHEDULER_ADDRESS, settings.SCHEDULER_PORT)
-    if connection is None:
-        return None
-
-    # Send the request
-    headers = {}
-
-    if data is not None:
-        data = json.dumps(data)
-        headers['Content-Type'] = 'application/json'
-    else:
-        headers['Content-Length'] = 0
-
-    try:
-        if params:
-            full_url = _combine_url_parameters(url, params)
-        else:
-            full_url = url
-
-        connection.request(msg_type, full_url, data, headers)
-        response = connection.getresponse()
-        data = response.read()
-    except:
-        logger.error('Failed to use the Web API, reason: %s' % traceback.format_exc())
-        return None
-    finally:
-        connection.close()
-
-    return (response.status, data)
-
-
-#----------------------------------------------------------
-
-
-def putMessage(url, params=None, data=None):
-    return _sendMessage(url, params=params, data=data, msg_type='PUT')
-
-
-#----------------------------------------------------------
-
-
-def postMessage(url, params=None, data=None):
-    return _sendMessage(url, params=params, data=data, msg_type='POST')
-
-
-#----------------------------------------------------------
-
-
-def getMessage(url, params=None, data=None):
-    return _sendMessage(url, params=params, data=data, msg_type='GET')
diff --git a/buildout.cfg b/buildout.cfg
index baf669f3713516896e5cf48d3f96634322e1973d..d1f05a47e27e2c8f6dc04b9d2d205fbdc4d3e275 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -6,8 +6,6 @@ eggs = beat.core
        beat.cmdline
        beat.backend.python
        beat.web
-       beat.scheduler
-       mistune
        ipdb
        coverage
        pip
@@ -16,95 +14,80 @@ develop = .
 versions = versions
 
 [versions]
-django = >=1.8,<1.9
+django = >=1.8
 django-rest-swagger = >=0.3.2
 django-guardian = >=1.3
-django-nose = >=1.4.1
 djangorestframework = >=3.2
+django-activity-stream = >= 0.6.0
 
 [sysegg]
 recipe = syseggrecipe
 ;force-sysegg = true
 eggs = alabaster
        babel
-       cffi
-       characteristic
+       backports.shutil-get-terminal-size
        colorlog
        coverage
-       cryptography
        cycler
        decorator
        django
        django-activity-stream
-       django_guardian
-       django-jsonfield
-       django_nose
+       django-guardian
        djangorestframework
        django-rest-swagger
+       django-jsonfield
        docopt
        docutils
-       enum34
        funcsigs
        functools32
+       gevent
        graphviz
-       idna
-       ipaddress
+       greenlet
        ipdb
        ipython
-       jinja2
+       ipython-genutils
+       imagesize
+       Jinja2
        jsonschema
-       markupsafe
+       MarkupSafe
        matplotlib
-       mistune
-       mysqlclient
        mock
-       nose
        numpy
        oset
-       path.py
+       pathlib2
        pbr
        pexpect
-       Pillow
        pickleshare
        pip
-       pockets
        psutil
-       psycopg2
        ptyprocess
-       pyasn1
-       pyasn1_modules
-       pycparser
-       pygments
-       pyopenssl
+       psycopg2
+       Pygments
        pyparsing
-       python_dateutil
+       python-archive
+       python-dateutil
        pytz
        PyYAML
        pyzmq
-       service_identity
-       simplejson
        simplegeneric
+       simplejson
        six
        snowballstemmer
-       sphinx
+       Sphinx
        sphinxcontrib-ansi
-       sphinxcontrib_mscgen
        sphinxcontrib-httpdomain
+       sphinxcontrib-mscgen
        sphinxcontrib-programoutput
-       sphinx_numfig
        sphinx-rtd-theme
-       sqlalchemy
-       subprocess32
        termcolor
        traitlets
-       twisted
-       zope.interface
+       uwsgi
+       wsgiref
 
 [sources]
 beat.core = git git@gitlab.idiap.ch:beat/beat.core
 beat.cmdline = git git@gitlab.idiap.ch:beat/beat.cmdline
 beat.backend.python = git git@gitlab.idiap.ch:beat/beat.backend.python
-beat.scheduler = git git@gitlab.idiap.ch:beat/beat.scheduler
 beat.examples = git git@gitlab.idiap.ch:beat/beat.examples egg=false
 cpulimit = git https://github.com/opsengine/cpulimit rev=v0.2 egg=false
 
diff --git a/doc/admin/applications.rst b/doc/admin/applications.rst
index 4ac8f49a17f1741842162a6a50d73427b7f66403..2e7dc8ee4cf36cf3d98a78f4b842f43679964d3e 100644
--- a/doc/admin/applications.rst
+++ b/doc/admin/applications.rst
@@ -23,20 +23,26 @@
 
 .. _hardwarerequirements-applications:
 
-Organization in Applications
-============================
+Software Organization
+=====================
 
-The BEAT platform is organized into a set of applications (or packages) as
-roughly indicated in :num:`Figure #administratorguide-architecture-software`.
-This is highly beneficial for the construction of a  distributed platform,
-since specific processing nodes typically require a different software stacks.
-Splitting the software project into different applications allows, hence,
-administrators to only install the required functionalities as well as the
-corresponding limited number of dependencies on each server/node. This
-increases security and provides a simplified operational framework to
-administrators.
+BEAT is currently packaged as 5 separate projects. We describe them in this
+section.
 
-BEAT is currently packaged in 5 separate projects. We describe them next.
+
+beat.backend.python
+-------------------
+
+``beat.backend.python`` defines the minimal set of components that are required
+by **user code** to be able to run and communicate with the BEAT platform
+backend. These are:
+
+* **Data formats**: the specification of data which is transmitted between
+  blocks of a toolchain;
+* **Libraries**: routines (source-code or binaries) that can be incorporated
+  into other libraries or user code on algorithms;
+* **Algorithms**: the program (source-code or binaries) that defines the user
+  algorithm to be run within the blocks of a toolchain.
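+
+As an illustration only (the class name and method signature below are a
+simplified sketch, not the normative API reference), a user algorithm is
+essentially a Python class whose ``process`` method consumes data from its
+inputs and writes results to its outputs:
+
+.. code-block:: python
+
+   class Algorithm(object):
+       """Skeleton of a user algorithm (sketch)"""
+
+       def process(self, inputs, outputs):
+           # read the current data units from ``inputs``, compute, then
+           # write the results on ``outputs``
+           return True  # signals successful processing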
 
 
 beat.core
@@ -44,137 +50,66 @@ beat.core
 
 ``beat.core`` defines a set of core components useful for the whole platform:
 the building blocks used by all other packages in the BEAT software suite (c.f.
-the BEAT public API). These are:
+the BEAT public API). ``beat.core`` extends the models defined by
+``beat.backend.python``, completing the full experiment description:
 
-* **Toolchain**: the definition of the data flow in an experiment, as a set of interconnected blocks;
-* **Data formats**: the specification of data which is transmitted between blocks of a toolchain;
-* **Libraries**: routines (source-code or binaries) that can be incorporated into other libraries or user code on algorithms;
-* **Algorithms**: the program (source-code or binaries) that define the user algorithm to be run within the blocks of a toolchain;
-* **Databases** and **Datasets**: means to read raw-data from a disk and feed into a toolchain, respecting a certain usage protocol;
-* **Experiment**: the reunion of algorithms, datasets, a toolchain and parameters that allow the platform to schedule and run the prescribed recipe to produce displayable results.
+* **Toolchain**: the definition of the data flow in an experiment, as a set of
+  interconnected blocks;
+* **Databases** and **Datasets**: means to read raw data from disk and feed
+  it into a toolchain, respecting a certain usage protocol;
+* **Experiment**: the combination of algorithms, datasets, a toolchain and
+  parameters that allows the platform to schedule and run the prescribed
+  recipe to produce displayable results.
 
-All other BEAT packages depend directly or indirectly on ``beat.core``.
+All other BEAT packages depend directly on ``beat.core``.
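+
+To make these descriptions concrete, the following sketch shows how an
+experiment's data flow could be laid out as interconnected blocks. The field
+names below are illustrative assumptions for this guide, not the
+authoritative ``beat.core`` toolchain schema:
+
+.. code-block:: python
+
+   # Illustrative only: the key names are assumptions, not the authoritative
+   # beat.core toolchain schema.
+   toolchain = {
+       "blocks": [
+           {"name": "extractor", "inputs": ["image"], "outputs": ["features"]},
+           {"name": "analyzer", "inputs": ["features"], "outputs": ["score"]},
+       ],
+       "connections": [
+           # data flows from the extractor's output to the analyzer's input
+           {"from": "extractor.features", "to": "analyzer.features"},
+       ],
+   }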
 
 
 beat.web
 --------
 
-``beat.web`` defines the web component of the platform. In particular, it
-provides a Representational State Transfer (ReSTful) API for some of the
-services that are provided by the platform, together with a web-based user
-interface in which users can register and interact with experiments and
-generated results. The web server is written using in Python using a
-well-established library for such a purpose called `Django`_, while the
-powerful Javascript `JQuery`_ library is used for the client-side user
-interface. A database backend is deployed to store all the information about
-the users, toolchains, data formats and all other core objects. `Django`_
-supports several database backends, both serverless such as `SQLite`_ and
-dedicated running servers such as `MySQL`_ or `PostgreSQL`_.
-
-``beat.web`` also ensures privacy and confidentiality, allowing users to
-access resources, prepare new experiments and control the amount of
-information that will be shared with other users of the platform.
-
-It communicates with a job scheduler to execute and to manage user
-experiments.
+``beat.web`` defines the application layer containing an implementation of the
+web, scheduler and worker components of the platform. The web application, in
+particular, provides a Representational State Transfer (ReSTful) API for some
+of the services that are provided by the platform, together with a web-based
+user interface in which users can register and interact with experiments and
+generated results. The web server is written in Python using `Django`_, a
+well-established library for this purpose, while powerful Javascript libraries
+are used for the dynamic behaviour of the client-side user interface. A
+database backend is deployed to store all the information about the users,
+experiments, their state and all other core objects. `Django`_ supports
+several database backends, both serverless such as `SQLite`_ and dedicated
+running servers such as `PostgreSQL`_.
+
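+To illustrate the ReSTful style of the API, the sketch below queries the
+platform with a plain HTTP client. The endpoint path and the token header are
+hypothetical, invented for this illustration; the real routes are defined by
+``beat.web`` itself:
+
+.. code-block:: python
+
+   # Hedged sketch: the URL, endpoint and auth header are hypothetical; they
+   # only illustrate how a ReSTful API of this kind is typically consumed.
+   import requests
+
+   response = requests.get(
+       "https://beat.example.com/api/v1/experiments/",
+       headers={"Authorization": "Token <your-api-token>"},
+   )
+   response.raise_for_status()
+   for experiment in response.json():
+       print(experiment["name"])
+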
+``beat.web`` also ensures privacy and confidentiality, allowing users to access
+resources, prepare new experiments and control the amount of information that
+will be shared with other users of the platform.
+
+The web application communicates with the scheduler, via the shared database,
+to execute and to manage user experiments.
+
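+The shared-database pattern can be sketched with the standard library alone;
+the table and column names below are invented for the illustration:
+
+.. code-block:: python
+
+   # Schematic sketch of the shared-database pattern: one side enqueues
+   # work, the other polls the same database and claims it.
+   import sqlite3
+
+   db = sqlite3.connect(":memory:")
+   db.execute("CREATE TABLE jobs (id INTEGER PRIMARY KEY, status TEXT)")
+
+   # "web" side: submitting an experiment inserts a queued job
+   db.execute("INSERT INTO jobs (status) VALUES ('queued')")
+
+   # "scheduler" side: poll for queued jobs and claim them
+   queued = db.execute("SELECT id FROM jobs WHERE status = 'queued'").fetchall()
+   for (job_id,) in queued:
+       db.execute("UPDATE jobs SET status = 'assigned' WHERE id = ?", (job_id,))
+
+   print(db.execute("SELECT id, status FROM jobs").fetchall())
+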
+
+beat.examples
+-------------
+
+This package contains installable examples for all BEAT platform components and
+can be used to bootstrap a new platform for development and testing purposes.
+This package cannot be used by itself. It is automatically imported and used
+by ``beat.web``.
+
+
+beat.cmdline
+------------
+
+This package contains a command-line client that uses the BEAT web ReSTful API
+to copy and synchronize local versions of objects installed in a given
+platform. It can be used to copy full experiments locally and to **run**
+experiments using the local machine for debugging purposes. This package
+provides its own documentation set with instructions and examples showing how
+to use it.
+
+
+.. Place your references here
 
 .. _django: http://www.djangoproject.com
-.. _jquery: http://jquery.com
 .. _sqlite: http://www.sqlite.org
-.. _mysql: http://www.mysql.com
 .. _postgresql: http://www.postgresql.org
-
-
-beat.scheduler
---------------
-
-``beat.scheduler`` defines the scheduling infrastructure for the platform.  The
-goal of the scheduler is to split the experiments defined by the toolchains
-into small jobs (one or a few algorithms processing some data) and to dispatch
-them on the available processing nodes (c.f. the BEAT public API).
-
-Furthermore, ``beat.scheduler`` implements both the **scheduler**, the central
-application in charge of distributing the execution of experiments on several
-processing nodes, as well as a **worker** application that is in charge of
-executing a given job in independent processing nodes. This setup is depicted
-in :num:`Figure #administratorguide-running-an-experiment`. The process is
-initiated by the user through the BEAT web server, either using the graphical
-user interface or via one of its ReSTful APIs. If the experiment has not yet
-been run (i.e., it is not cached), the scheduler receives a signal indicating
-it should run it. The scheduler application then breaks-down the experiment
-into individual executables that are scheduled to run, in order, following a
-precise scheduling order. Until all jobs of an experiment are processed or an
-error condition is met, the scheduler continues to schedule job processing in
-order to conclude the experiment. Once the experiment is concluded, the fact is
-signaled back to the web service so it can read the data from disk and display
-results to the user.
-
-.. _administratorguide-running-an-experiment:
-.. figure:: img/running-an-experiment.*
-   :width: 60%
-
-   Actions that take place on the backend until an experiment is run for the
-   user and the results are displayed.
-
-
-The scheduler waits for instructions for ``beat.web`` using a network socket
-and update later the content of a Database with the appropriate results. Note
-that this is an asynchronous process: since the execution of the instructions
-can take some time, the website does not wait for the results to be available.
-
-In particular, `Twisted`_ is employed to handle communications between the
-scheduler and the workers, as well as between the scheduler and the web
-component.
-
-.. _twisted: http://twistedmatrix.com
-
-
-Executable Environment
-----------------------
-
-To achieve reproducibility of scientific experiments and improve flexibility,
-each scheduled job, defined by user parameters, inputs and outputs and a
-particular algorithm represented by a single-file program, is executed as a
-separate operating system (OS) process on the **worker** machine, using a
-particular software environment. A software environment is defined by a set of
-libraries, available for the user algorithm, wrapped in an executable program
-for a particular OS that is called with the settings defined by the user and
-passed to the **worker** via the **scheduler**.
-
-The user is the authoritative party that decides what environment to use for
-each algorithm in an experiment. This provides flexibility so that a single
-experiment can have each of its processing blocks executed using a different
-combination environments and OSes, matching implementation requirements as
-needed. For example, in a given BEAT platform instance equipped for such a
-purpose, feature extraction in a given experiment may be executed using an
-executable binary implementation while analysis algorithms may use the Python
-backend.
-
-:num:`Figure #administratorguide-executable-environment` depicts this design,
-as well as it gives some insight on how processes are executed inside the
-platform backend. When the **worker** receives a job running request, it
-bootstraps three processes which will ultimately lead to new data being written
-to disk for that particular user algorithm. The first process is a resource
-controller. It ensures no more than the allowed CPU and memory limits are used
-by the user process. This is both a safety and a fair-usage feature, avoiding
-user processes to occupy the entirety of the processing farm for an
-undetermined amount of time. It makes the scheduling work more deterministic.
-The second process is an I/O controller. Its task is solely to read and write
-disk data, providing a both secure and simple I/O interface to the user
-algorithm. This strategy administrators prohibit direct disk access to user
-processes while simplifying data production and consumption inside the software
-environment, therefore making it easier to write such wrappers. Finally, the
-process hosting the user (algorithm) code is started. The user code is
-sand-boxed inside the software environment and can access any of its libraries
-while also being able to indirectly communicate with the I/O controller.
-
-Any number of software environments can co-exist in the same machine or in
-different machines, allowing the platform **scheduler** to manage a hybrid set
-of resources transparently.
-
-.. _administratorguide-executable-environment:
-.. figure:: img/executable-environment.*
-   :width: 60%
-
-   The part of ``beat.scheduler`` that executes user algorithms through the
-   **worker** in details.
diff --git a/doc/admin/architecture.rst b/doc/admin/architecture.rst
index 3a0c74f933238cb1571c71291cc38f230176c36f..e089ad50d617e7fa5fe3b4d2b395162dc84c1f9e 100644
--- a/doc/admin/architecture.rst
+++ b/doc/admin/architecture.rst
@@ -32,43 +32,45 @@ necessary.
 
 .. _administratorguide-architecture-software:
 .. figure:: img/platform-overview.*
-   :width: 60%
+   :width: 40%
+   :align: center
 
    Software architecture of the platform
 
 
-:num:`Figure #administratorguide-architecture-software` represents the
-software architecture of the platform, where the interaction between the
-following modules are shown:
+:numref:`administratorguide-architecture-software` represents the software
+architecture of the platform, where the interactions between the following
+modules are shown:
 
 * The **Web Server** allows the users to interact, online, with the platform.
 * The **Experiments** describe fully parametrized scientific workflows as a
   set of organized transformations, from the use of raw data from databases
   such as images, to the generation of results such as ROC plots. Each
   experiment can, hence, be decomposed into a set of execution jobs.
-* The **Scheduler**, distributes jobs on the **Worker Nodes**, also called
+* The **Scheduler** assigns jobs to the **Worker Nodes**, also called
   processing nodes.
-* The **Cache**, stores all the data written by the experiments
-* The **Object Repository**, contains all the objects required to define
-  scientific experiments, such as algorithm implementations and experiment
-  parameters.
+* The **Experiment, State and Cache Database** contains all the objects
+  required to define scientific experiments, such as algorithm implementations
+  and experiment parameters, as well as the current load state of the
+  processing farm together with intermediary data produced by the experiments
+  (cache). The backend communicates with the frontend via this database.
 
 .. _administratorguide-architecture-hardware:
 .. figure:: img/hardware-architecture.*
    :width: 50%
+   :align: center
 
    Hardware architecture of the platform
 
-:num:`Figure #administratorguide-architecture-hardware` represents the
-matching hardware architecture of the platform. Each component in this figure
-could be deployed on a different computer, as long as it can establish a
-connection with all the other components it needs to interact with. In this
-case, this would make possible to distribute the load on several machines.
-However, it should be equally possible to accommodate all software components
-into a single (multi-core) computer for tests or demonstrations. To benefit
-from commodity computing, plain Intel-compatible PC's either in desktop or
-rack-mountable format are recommended for deployment. Network connectivity,
-if necessary, is assumed IP (Internet Protocol) based.
+:numref:`administratorguide-architecture-hardware` represents the matching
+hardware architecture of the platform. Each component in this figure could be
+deployed on a different computer, as long as it can establish connections to
+the central database server and storage. This makes it possible to distribute
+the load over several machines. It should be equally possible to accommodate
+all software components into a single (multi-core) computer for tests or
+demonstrations. To benefit from commodity computing, plain Intel-compatible
+PCs, either in desktop or rack-mountable format, are recommended for
+deployment.
 
 
 Wedding List
diff --git a/doc/admin/backend.rst b/doc/admin/backend.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0d7eeb018a90b00def86fb86ebd30a686d5a2c69
--- /dev/null
+++ b/doc/admin/backend.rst
@@ -0,0 +1,69 @@
+.. vim: set fileencoding=utf-8 :
+
+.. Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/          ..
+.. Contact: beat.support@idiap.ch                                             ..
+..                                                                            ..
+.. This file is part of the beat.web module of the BEAT platform.             ..
+..                                                                            ..
+.. Commercial License Usage                                                   ..
+.. Licensees holding valid commercial BEAT licenses may use this file in      ..
+.. accordance with the terms contained in a written agreement between you     ..
+.. and Idiap. For further information contact tto@idiap.ch                    ..
+..                                                                            ..
+.. Alternatively, this file may be used under the terms of the GNU Affero     ..
+.. Public License version 3 as published by the Free Software and appearing   ..
+.. in the file LICENSE.AGPL included in the packaging of this file.           ..
+.. The BEAT platform is distributed in the hope that it will be useful, but   ..
+.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ..
+.. or FITNESS FOR A PARTICULAR PURPOSE.                                       ..
+..                                                                            ..
+.. You should have received a copy of the GNU Affero Public License along     ..
+.. with the BEAT platform. If not, see http://www.gnu.org/licenses/.          ..
+
+
+.. _administratorguide-backend_maintenance:
+
+Backend Maintenance
+===================
+
+This guide contains information about administrative tasks related to the
+backend. Activities include cache clean-up actions and backend (workers,
+environments, queues) reconfiguration.
+
+
+Configuration
+-------------
+
+The administrative command ``qsetup`` can be used to configure or re-configure
+the backend:
+
+.. command-output:: ./bin/django qsetup --help
+   :cwd: ../..
+
+
+This command will check current queue usage w.r.t. submitted experiments and
+will modify the queue/worker/environment configuration on the fly. Running
+schedulers and workers will not be affected.
+
+By default, all newly inserted workers are marked inactive. The active flag is
+expected to be turned on by the worker process itself once it starts.
+
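+Besides the command line, the same command can also be invoked
+programmatically through Django's standard management interface; a minimal
+sketch, assuming the Django settings are already configured:
+
+.. code-block:: python
+
+   # Minimal sketch: runs qsetup through Django's management interface
+   # (assumes DJANGO_SETTINGS_MODULE points at a valid settings module).
+   from django.core.management import call_command
+
+   call_command("qsetup")  # same effect as ``./bin/django qsetup``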
+
+Cache Maintenance
+-----------------
+
+If your cache becomes very large, you may have to delete old files. The
+administrative command ``cleanup_cache`` can help you with that task:
+
+.. command-output:: ./bin/django cleanup_cache --help
+   :cwd: ../..
+
+
+This command will first check running experiments and database files and make
+sure that no files currently used by them are removed. You may then specify
+the age, in minutes from the current time, after which cache files should be
+considered outdated and removed.
+
+Notice that, by default, the command does not remove any files; it only lists
+the files it would remove. In order to actually remove the files, you must
+also specify the option ``--delete``.
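+
+As with ``qsetup``, the command can also be driven programmatically; a hedged
+sketch (the ``--delete`` flag is documented above, while any age-threshold
+option name should be taken from ``cleanup_cache --help``):
+
+.. code-block:: python
+
+   # Hedged sketch: drives cleanup_cache through Django's management
+   # interface; only the documented --delete flag is used here.
+   from django.core.management import call_command
+
+   # dry run: only lists the files that would be removed
+   call_command("cleanup_cache")
+
+   # actually delete outdated cache files
+   call_command("cleanup_cache", delete=True)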
diff --git a/doc/admin/conf.py b/doc/admin/conf.py
index ee02b36fa00e3d7e8ab6e69e511bfb965e6145c6..a8e41fb0948366ab3085a07235d7b6cab0806a1a 100644
--- a/doc/admin/conf.py
+++ b/doc/admin/conf.py
@@ -33,30 +33,36 @@ import pkg_resources
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+needs_sphinx = '1.3'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
     'sphinx.ext.todo',
     'sphinx.ext.coverage',
-    'sphinx.ext.pngmath',
     'sphinx.ext.ifconfig',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
     'sphinx.ext.doctest',
     'sphinx.ext.intersphinx',
-    'sphinx_numfig',
-]
+    'sphinx.ext.napoleon',
+    'sphinx.ext.viewcode',
+    'sphinxcontrib.ansi',
+    'sphinxcontrib.programoutput',
+    ]
 
-# The viewcode extension appeared only on Sphinx >= 1.0.0
 import sphinx
-if sphinx.__version__ >= "1.0":
-    extensions.append('sphinx.ext.viewcode')
+if sphinx.__version__ >= "1.4.1":
+    extensions.append('sphinx.ext.imgmath')
+else:
+    extensions.append('sphinx.ext.pngmath')
 
 # Always includes todos
 todo_include_todos = True
 
+# Create numbers on figures with captions
+numfig = True
+
 # If we are on OSX, the 'dvipng' path may be different
 dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
 if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
diff --git a/doc/admin/deployment_guidelines.rst b/doc/admin/deployment_guidelines.rst
index 59577a3721133ee49a1215409cceaf703cc00f82..841e8f8eb9872e1c827abefedb52b18761246cc1 100644
--- a/doc/admin/deployment_guidelines.rst
+++ b/doc/admin/deployment_guidelines.rst
@@ -59,9 +59,9 @@ Distributed processing nodes
    Distributed processing nodes
 
 
-In this scenario, depicted in :num:`Figure
-#administratorguide-usecaseanalysis-distributed-nodes`, every processing node
-is running on a dedicated machine.
+In this scenario, depicted in
+:numref:`administratorguide-usecaseanalysis-distributed-nodes`, every
+processing node is running on a dedicated machine.
 
 One computer acts as the web front-end, handling all the incoming requests from
 the users, scheduling the jobs on the processing nodes and sending the results
@@ -82,8 +82,8 @@ Load-balanced distributed architecture
 
    Load-balanced distributed architecture
 
-In this scenario, depicted in :num:`Figure
-#administratorguide-usecaseanalysis-load-balancing`, every module (web
+In this scenario, depicted in
+:numref:`administratorguide-usecaseanalysis-load-balancing`, every module (web
 server, database server, scheduler, processing node) is running on a dedicated
 machine.
 
diff --git a/doc/admin/idiap_platform.rst b/doc/admin/idiap_platform.rst
index 4e2cc11de3e6ce1ad7dffeba9ffb9fc1bcbafa61..75e0326f56e3a77ff7ca41e79090b2e9bf082dc5 100644
--- a/doc/admin/idiap_platform.rst
+++ b/doc/admin/idiap_platform.rst
@@ -91,10 +91,10 @@ Summary
 
    Physical hardware of the platform deployed at Idiap
 
-The resulting hardware infrastructure is summarized in :num:`Figure
-#administratorguide-idiapplatform-hardware-physical`. Communication
-between each machine and the storage is through a 10Gbits/s switch
-HP Procurve E8212zl.
+The resulting hardware infrastructure is summarized in
+:numref:`administratorguide-idiapplatform-hardware-physical`. Communication
+between each machine and the storage is through a 10Gbits/s switch HP Procurve
+E8212zl.
 
 
 .. _administratorguide-idiap_platform-virtualization:
diff --git a/doc/admin/img/executable-environment.dia b/doc/admin/img/executable-environment.dia
deleted file mode 100644
index 9d5a65619461f698df6fd76eb6539b819e7be7fa..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/executable-environment.dia and /dev/null differ
diff --git a/doc/admin/img/executable-environment.pdf b/doc/admin/img/executable-environment.pdf
deleted file mode 100644
index f8b7f65d7b8e97cc27830dfbe0bd32b59fef8162..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/executable-environment.pdf and /dev/null differ
diff --git a/doc/admin/img/executable-environment.png b/doc/admin/img/executable-environment.png
deleted file mode 100644
index 44f1cfe00c24ffe314489ddd2c57a8c82d5dcfa1..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/executable-environment.png and /dev/null differ
diff --git a/doc/admin/img/hardware-architecture.pdf b/doc/admin/img/hardware-architecture.pdf
index 3000508e91eb2ced71e167c1d370d98dfe0c4eb8..9a2350a75192f6f46b140ef5188cf13bf44d0fdc 100644
Binary files a/doc/admin/img/hardware-architecture.pdf and b/doc/admin/img/hardware-architecture.pdf differ
diff --git a/doc/admin/img/hardware-architecture.png b/doc/admin/img/hardware-architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..7eb48f5611995c9a74d6e92640ef3d40f66ecd0f
Binary files /dev/null and b/doc/admin/img/hardware-architecture.png differ
diff --git a/doc/admin/img/hardware-architecture.svg b/doc/admin/img/hardware-architecture.svg
index 8de83a6e1edc818cc307d7965a3dcdd3d2ec06eb..bf158a5b94bea6c891b3b6bebaed0fdf2a8d5210 100644
--- a/doc/admin/img/hardware-architecture.svg
+++ b/doc/admin/img/hardware-architecture.svg
@@ -11,15 +11,66 @@
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    id="svg2"
    version="1.1"
-   inkscape:version="0.48.5 r10040"
-   width="494.82703"
-   height="624.0625"
+   inkscape:version="0.91 r13725"
+   width="410.06097"
+   height="559.0014"
    xml:space="preserve"
-   sodipodi:docname="hardware-architecture.svg"><metadata
+   sodipodi:docname="hardware-architecture.svg"
+   inkscape:export-filename="/Users/andre/work/beat/beat.web/doc/admin/img/hardware-architecture.png"
+   inkscape:export-xdpi="233.76707"
+   inkscape:export-ydpi="233.76707"><metadata
      id="metadata8"><rdf:RDF><cc:Work
          rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
      id="defs6"><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         id="path4690"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         id="path4687"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker12645"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path12647"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker12533"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         id="path12535"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         inkscape:connector-curvature="0" /></marker><marker
        inkscape:stockid="Arrow1Lend"
        orient="auto"
        refY="0"
@@ -27,7 +78,7 @@
        id="Arrow1Lend"
        style="overflow:visible"><path
          id="path4653"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          inkscape:connector-curvature="0" /></marker><marker
@@ -44,7 +95,7 @@
          inkscape:connector-curvature="0" /></marker><clipPath
        clipPathUnits="userSpaceOnUse"
        id="clipPath18"><path
-         d="m 0,0 841.8,0 0,595 L 0,595 0,0 z"
+         d="m 0,0 841.8,0 0,595 L 0,595 0,0 Z"
          id="path20"
          inkscape:connector-curvature="0" /></clipPath><marker
        inkscape:stockid="Arrow1Lend"
@@ -54,10 +105,279 @@
        id="Arrow1Lend-3"
        style="overflow:visible"><path
          id="path4653-1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" /></marker></defs><sodipodi:namedview
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker4951"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lend"><path
+         inkscape:connector-curvature="0"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4666" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="Arrow1Lstart"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lstart"
+       inkscape:collect="always"><path
+         inkscape:connector-curvature="0"
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4663" /></marker><marker
+       style="overflow:visible"
+       id="Arrow1Lend-2"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lend"><path
+         inkscape:connector-curvature="0"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4653-5" /></marker><marker
+       style="overflow:visible"
+       id="Arrow2Send-3"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow2Send"><path
+         inkscape:connector-curvature="0"
+         transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         id="path4683-4" /></marker><clipPath
+       id="clipPath18-4"
+       clipPathUnits="userSpaceOnUse"><path
+         inkscape:connector-curvature="0"
+         id="path20-6"
+         d="m 0,0 841.8,0 0,595 L 0,595 0,0 Z" /></clipPath><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="Arrow1Lstart-8"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lstart"><path
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4663-3"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker4951-8"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lend"
+       inkscape:collect="always"><path
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4666-8"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="Arrow1Lstart-8-8"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lstart"><path
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4663-3-0"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker4951-8-9"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lend"><path
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4666-8-5"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="Arrow1Lstart-4"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lstart"><path
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4663-36"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker4951-9"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow1Lend"><path
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         id="path4666-3"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-2"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-6"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-6"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-2"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-9"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-8"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-7"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9-7"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-9-9"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-8-9"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-7-4"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9-7-5"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-9-9-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-8-9-6"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-7-4-1"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9-7-9"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-9-9-6"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-8-9-5"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-7-4-0"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mstart-9-7-6"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4687-9-9-5"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(0.6,0.6)" /></marker><marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Mend-8-9-0"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4690-7-4-17"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="scale(-0.6,-0.6)" /></marker></defs><sodipodi:namedview
      pagecolor="#ffffff"
      bordercolor="#666666"
      borderopacity="1"
@@ -66,16 +386,16 @@
      guidetolerance="10"
      inkscape:pageopacity="0"
      inkscape:pageshadow="2"
-     inkscape:window-width="1920"
-     inkscape:window-height="1033"
+     inkscape:window-width="1421"
+     inkscape:window-height="918"
      id="namedview4"
      showgrid="false"
      inkscape:zoom="0.97953702"
-     inkscape:cx="-195.82046"
-     inkscape:cy="365.22841"
-     inkscape:window-x="0"
-     inkscape:window-y="0"
-     inkscape:window-maximized="1"
+     inkscape:cx="89.419573"
+     inkscape:cy="226.30194"
+     inkscape:window-x="21"
+     inkscape:window-y="1"
+     inkscape:window-maximized="0"
      inkscape:current-layer="g10"
      fit-margin-top="0"
      fit-margin-left="0"
@@ -85,470 +405,534 @@
      id="g10"
      inkscape:groupmode="layer"
      inkscape:label="platform-overview"
-     transform="matrix(1.25,0,0,-1.25,-211.78554,693.53125)"><g
-       id="g12" /><path
-       d="m 339.43053,394.86547 c 33.95439,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.0081 -25.91535,-24.78356 -59.86974,-24.78356 -33.95439,0 -59.76396,10.77546 -59.76396,24.78356 0,14.0081 25.80957,24.78356 59.76396,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134"
-       inkscape:connector-curvature="0" /><path
-       d="m 339.43133,395.59889 c 33.44351,0 58.96893,-11.09434 58.96893,-25.51698 0,-14.42264 -25.52542,-25.51698 -58.96893,-25.51698 -33.44352,0 -58.86475,11.09434 -58.86475,25.51698 0,14.42264 25.42123,25.51698 58.86475,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136"
-       inkscape:connector-curvature="0" /><text
+     transform="matrix(1.25,0,0,-1.25,-243.95404,685.72536)"><g
+       id="g12" /><text
        transform="scale(-1,-1)"
        id="text126"
        x="-207.84631"
        y="-312.31671"
        style="font-size:11.19999981px;text-align:center;text-anchor:middle"><tspan
-         style="font-size:11.19999981px;font-variant:normal;font-weight:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
+         style="font-variant:normal;font-weight:normal;font-size:11.19999981px;font-family:Arial;-inkscape-font-specification:ArialMT;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
          sodipodi:role="line"
          id="tspan128"
          x="-207.84631"
          y="-312.31671" /></text>
-<path
-       d="m 281.87725,349.38191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140"
-       inkscape:connector-curvature="0" /><text
+<text
        transform="scale(1,-1)"
        id="text190"
        x="122"
        y="-421"><tspan
-         style="font-size:18px;font-variant:normal;font-weight:bold;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:Arial-BoldMT"
+         style="font-variant:normal;font-weight:bold;font-size:18px;font-family:Arial;-inkscape-font-specification:Arial-BoldMT;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
          sodipodi:role="line"
          id="tspan192"
          x="122"
          y="-421" /></text>
 <path
-       d="m 421.23342,363.18191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path222"
-       inkscape:connector-curvature="0" /><path
-       d="m 421.23342,363.18191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path228"
-       inkscape:connector-curvature="0" /><path
-       d="m 421.23342,363.18191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path234"
-       inkscape:connector-curvature="0" /><path
-       d="m 344.63342,458.58191 0.2,-46.4 5.9,0 -11.6,-20.1 -11.7,20 5.9,0 -0.2,46.5 11.5,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path290"
-       inkscape:connector-curvature="0" /><path
-       d="m 344.63342,458.58191 0.2,-46.4 5.9,0 -11.6,-20.1 -11.7,20 5.9,0 -0.2,46.5 11.5,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path292"
-       inkscape:connector-curvature="0" /><path
-       d="m 350.53342,458.58191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path294"
-       inkscape:connector-curvature="0" /><path
-       d="m 327.43342,392.08191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296"
-       inkscape:connector-curvature="0" /><path
-       d="m 266.73342,498.98191 c -3.3,11.1 12.3,21.7 27.9,21.7 4.9,0 10,-1 14.2,-2.7 4,5.1 11.4,8.2 19.7,8.2 5.5,-0.2 11.4,-1.7 15.6,-4.4 3,4.4 9.3,7.1 16.2,7.1 5.7,0 11,-1.9 14.3,-4.9 3.8,3 9.6,4.9 15.6,4.9 9.7,0 18,-4.8 19.7,-11.3 9.5,-1.9 16.3,-7.8 16.3,-14.7 0,-2.1 -0.5,-4 -1.9,-6 4,-3.3 6.4,-7.4 6.4,-11.6 0,-9.6 -10.8,-17.6 -24.6,-19 0,-9.1 -10.7,-16.3 -24,-16.3 -4.6,0 -8.9,0.9 -12.8,2.5 -3.5,-8 -14.5,-13.7 -27,-13.7 -9.2,0 -18,3.4 -23.2,8.8 -4.9,-2.1 -2.3,-3.3 -16.4,-3.3 -11.6,0 -22.3,4.2 -27.9,11.1 -13.3,0.1 -20.1,5.5 -20.1,12.4 0,3.1 1.7,6 4.9,8.4 -5.8,2 -8.9,5.9 -8.9,10.5 0,6.4 7,11.6 16,12.3 l 0,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path298"
-       inkscape:connector-curvature="0" /><path
-       d="m 266.73342,498.98191 c -3.3,11.1 12.3,21.7 27.9,21.7 4.9,0 10,-1 14.2,-2.7 4,5.1 11.4,8.2 19.7,8.2 5.5,-0.2 11.4,-1.7 15.6,-4.4 3,4.4 9.3,7.1 16.2,7.1 5.7,0 11,-1.9 14.3,-4.9 3.8,3 9.6,4.9 15.6,4.9 9.7,0 18,-4.8 19.7,-11.3 9.5,-1.9 16.3,-7.8 16.3,-14.7 0,-2.1 -0.5,-4 -1.9,-6 4,-3.3 6.4,-7.4 6.4,-11.6 0,-9.6 -10.8,-17.6 -24.6,-19 0,-9.1 -10.7,-16.3 -24,-16.3 -4.6,0 -8.9,0.9 -12.8,2.5 -3.5,-8 -14.5,-13.7 -27,-13.7 -9.2,0 -18,3.4 -23.2,8.8 -4.9,-2.1 -2.3,-3.3 -16.4,-3.3 -11.6,0 -22.3,4.2 -27.9,11.1 -13.3,0.1 -20.1,5.5 -20.1,12.4 0,3.1 1.7,6 4.9,8.4 -5.8,2 -8.9,5.9 -8.9,10.5 0,6.4 7,11.6 16,12.3 l 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path300"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path302"
-       inkscape:connector-curvature="0" /><path
-       d="m 430.73342,438.78191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path304"
-       inkscape:connector-curvature="0" /><path
-       d="m 266.73342,498.98191 c 0.2,-1 1,-2.2 1.4,-3.1"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path306"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path308"
-       inkscape:connector-curvature="0" /><path
-       d="m 430.73342,438.78191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path310"
-       inkscape:connector-curvature="0" /><path
-       d="m 308.83342,517.98191 c 1.9,-0.8 4.2,-1.8 5.8,-2.9"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path312"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path314"
-       inkscape:connector-curvature="0" /><path
-       d="m 430.73342,438.78191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path316"
-       inkscape:connector-curvature="0" /><path
-       d="m 344.13342,521.78191 c -0.7,-0.9 -1.1,-1.9 -1.5,-2.9"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path318"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path320"
-       inkscape:connector-curvature="0" /><path
-       d="m 430.73342,438.78191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path322"
-       inkscape:connector-curvature="0" /><path
-       d="m 374.63342,523.98191 c -1.2,-1 -1.9,-2.3 -2.7,-3.5"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path324"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path326"
-       inkscape:connector-curvature="0" /><path
-       d="m 430.73342,438.78191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path328"
-       inkscape:connector-curvature="0" /><path
-       d="m 409.93342,517.58191 c 0.2,-0.8 1,-2.4 0.7,-2.8"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path330"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path332"
-       inkscape:connector-curvature="0" /><path
-       d="m 424.33342,496.88191 c -1.4,-2.1 -3.4,-4 -6,-5.5"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path336"
-       inkscape:connector-curvature="0" /><path
-       d="m 406.23342,466.28191 c 0.7,3.4 -3.2,11.7 -13.8,14.8"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path342"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path344"
-       inkscape:connector-curvature="0" /><path
-       d="m 369.33342,452.48191 c 0.7,1.4 1,2.7 1.1,4"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path348"
-       inkscape:connector-curvature="0" /><path
-       d="m 250.73342,528.88191 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path350"
-       inkscape:connector-curvature="0" /><text
-       transform="scale(1,-1)"
-       id="text372"
-       x="329.56039"
-       y="-490.19919"
-       style="text-align:center;line-height:125%;text-anchor:middle"
-       sodipodi:linespacing="125%"><tspan
-         style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:Arial"
-         sodipodi:role="line"
-         id="tspan374"
-         x="329.56039"
-         y="-490.19919">You</tspan><tspan
-         style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:Arial"
-         sodipodi:role="line"
-         x="329.56039"
-         y="-470.19919"
-         id="tspan5489">(via a web client)</tspan></text>
-<g
-       id="g3844"
-       transform="translate(275.64043,-102.12247)"><path
-         inkscape:connector-curvature="0"
-         id="path110"
-         style="fill:#ffffcc;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 238.74383,336.7 c 0,5.2 -15.9,10.4 -31.8,10.4 -15.8,0 -31.7,-5.2 -31.7,-10.4 l 0,-45.4 c 0,-5.2 15.9,-10.5 31.7,-10.5 15.9,0 31.8,5.3 31.8,10.5 l 0,45.4 z" /><path
-         inkscape:connector-curvature="0"
-         id="path112"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,336.7 c 0,5.2 -15.9,10.4 -31.8,10.4 -15.8,0 -31.7,-5.2 -31.7,-10.4 l 0,-45.4 c 0,-5.2 15.9,-10.5 31.7,-10.5 15.9,0 31.8,5.3 31.8,10.5 l 0,45.4 z" /><path
-         inkscape:connector-curvature="0"
-         id="path116"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 175.24383,280.8 0,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path118"
-         style="fill:#ffffcc;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 238.74383,336.7 c 0,-5.3 -15.9,-10.5 -31.8,-10.5 -15.8,0 -31.7,5.2 -31.7,10.5 l 63.5,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path120"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,336.7 c 0,-5.3 -15.9,-10.5 -31.8,-10.5 -15.8,0 -31.7,5.2 -31.7,10.5" /><path
-         inkscape:connector-curvature="0"
-         id="path122"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,347.1 0,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path124"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 175.24383,280.8 0,0 z" /><text
-         transform="scale(1,-1)"
-         sodipodi:linespacing="125%"
-         id="text3785"
-         y="-304.76944"
-         x="207.01537"
-         style="font-size:9.60000038px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-         xml:space="preserve"
-         inkscape:transform-center-x="3.0626714"
-         inkscape:transform-center-y="-5.6953127"><tspan
-           sodipodi:role="line"
-           id="tspan3261"
-           x="207.01537"
-           y="-304.76944">Storage</tspan></text>
-</g><text
-       xml:space="preserve"
-       style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="339.39401"
-       y="-366.16394"
-       id="text3791-3"
-       sodipodi:linespacing="125%"
-       transform="scale(1,-1)"><tspan
-         sodipodi:role="line"
-         x="339.39401"
-         y="-366.16394"
-         id="tspan3823"
-         style="font-size:12.80000019px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold">Web Server</tspan></text>
-<path
-       d="m 403.51853,305.77116 c 33.95439,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.0081 -25.91535,-24.78356 -59.86974,-24.78356 -33.95439,0 -59.76396,10.77546 -59.76396,24.78356 0,14.0081 25.80957,24.78356 59.76396,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 403.51933,306.50458 c 33.44351,0 58.96893,-11.09434 58.96893,-25.51698 0,-14.42264 -25.52542,-25.51698 -58.96893,-25.51698 -33.44352,0 -58.86475,11.09434 -58.86475,25.51698 0,14.42264 25.42123,25.51698 58.86475,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 372.10005,243.545 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1"
-       inkscape:connector-curvature="0" /><path
        d="m 517.1732,276.53774 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path222-1"
        inkscape:connector-curvature="0" /><path
        d="m 517.1732,276.53774 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path228-1"
        inkscape:connector-curvature="0" /><path
        d="m 517.1732,276.53774 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path234-3"
-       inkscape:connector-curvature="0" /><path
-       d="m 391.52142,302.9876 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6"
-       inkscape:connector-curvature="0" /><text
-       xml:space="preserve"
-       style="font-size:12.8px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="404.29874"
-       y="-284.01169"
-       id="text3791-3-1"
-       sodipodi:linespacing="125%"
-       transform="scale(1,-1)"><tspan
-         sodipodi:role="line"
-         id="tspan5848">Database</tspan><tspan
-         sodipodi:role="line"
-         id="tspan5850">Server</tspan></text>
-<path
-       d="m 324.95925,231.8587 c 33.95439,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.0081 -25.91535,-24.78356 -59.86974,-24.78356 -33.95439,0 -59.76396,10.77546 -59.76396,24.78356 0,14.0081 25.80957,24.78356 59.76396,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 324.96005,232.59212 c 33.44351,0 58.96892,-11.09434 58.96892,-25.51698 0,-14.42264 -25.52541,-25.51698 -58.96892,-25.51698 -33.44352,0 -58.86475,11.09434 -58.86475,25.51698 0,14.42264 25.42123,25.51698 58.86475,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 267.40597,186.37514 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-7"
-       inkscape:connector-curvature="0" /><path
-       d="m 312.96214,229.07514 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6-5"
-       inkscape:connector-curvature="0" /><text
-       xml:space="preserve"
-       style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="324.92273"
-       y="-201.9321"
-       id="text3791-3-1-2"
-       sodipodi:linespacing="125%"
-       transform="scale(1,-1)"><tspan
-         sodipodi:role="line"
-         x="324.92273"
-         y="-201.9321"
-         id="tspan3823-1-6"
-         style="font-size:12.80000019px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold">Scheduler</tspan></text>
-<flowRoot
+       inkscape:connector-curvature="0" /><flowRoot
        xml:space="preserve"
        id="flowRoot3187"
-       style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:Bitstream Vera Sans;font-style:normal;font-weight:normal;font-size:6px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+       style="font-style:normal;font-weight:normal;font-size:6px;line-height:125%;font-family:'Bitstream Vera Sans';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"><flowRegion
          id="flowRegion3189"><rect
            id="rect3191"
            width="170.99915"
            height="77.077232"
            x="-328.72672"
            y="336.68182" /></flowRegion><flowPara
-         id="flowPara3193"></flowPara></flowRoot><flowRoot
+         id="flowPara3193" /></flowRoot><flowRoot
        xml:space="preserve"
        id="flowRoot3231"
-       style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:Bitstream Vera Sans;font-style:normal;font-weight:normal;font-size:6px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+       style="font-style:normal;font-weight:normal;font-size:6px;line-height:125%;font-family:'Bitstream Vera Sans';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"><flowRegion
          id="flowRegion3233"><rect
            id="rect3235"
            width="120.46507"
            height="96.474152"
            x="14.802912"
            y="310.13867" /></flowRegion><flowPara
-         id="flowPara3237"></flowPara></flowRoot><path
-       d="m 373.32512,120.50363 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-5-3"
-       inkscape:connector-curvature="0" /><path
-       d="m 355.17761,136.17719 c 33.95438,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.008098 -25.91536,-24.783558 -59.86974,-24.783558 -33.9544,0 -59.76397,10.77546 -59.76397,24.783558 0,14.0081 25.80957,24.78356 59.76397,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2-2-4-0"
-       inkscape:connector-curvature="0" /><path
-       d="m 355.17841,136.91061 c 33.4435,0 58.96891,-11.09434 58.96891,-25.51698 0,-14.422638 -25.52541,-25.516978 -58.96891,-25.516978 -33.44353,0 -58.86476,11.09434 -58.86476,25.516978 0,14.42264 25.42123,25.51698 58.86476,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2-2-0-8"
-       inkscape:connector-curvature="0" /><path
-       d="m 297.62432,90.693632 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-7-2-0"
-       inkscape:connector-curvature="0" /><path
-       d="m 343.1805,133.39363 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6-5-5-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 350.68569,132.62815 c 33.95438,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.008098 -25.91536,-24.783568 -59.86974,-24.783568 -33.9544,0 -59.76397,10.77547 -59.76397,24.783568 0,14.0081 25.80957,24.78356 59.76397,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2-2-4-0-1"
-       inkscape:connector-curvature="0" /><path
-       d="m 350.68649,133.36157 c 33.4435,0 58.96891,-11.09434 58.96891,-25.51698 0,-14.422648 -25.52541,-25.516988 -58.96891,-25.516988 -33.44353,0 -58.86476,11.09434 -58.86476,25.516988 0,14.42264 25.42123,25.51698 58.86476,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2-2-0-8-7"
-       inkscape:connector-curvature="0" /><path
-       d="m 293.1324,87.144582 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-7-2-0-9"
-       inkscape:connector-curvature="0" /><path
-       d="m 338.68858,129.84459 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6-5-5-2-2"
-       inkscape:connector-curvature="0" /><path
-       d="m 344.56035,128.95294 c 33.95438,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.008088 -25.91536,-24.783558 -59.86974,-24.783558 -33.9544,0 -59.76397,10.77547 -59.76397,24.783558 0,14.0081 25.80957,24.78356 59.76397,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2-2-4-0-0"
+         id="flowPara3237" /></flowRoot><g
+       id="g12-2"
+       transform="translate(4.2216503,-6.2447096)" /><text
+       transform="scale(-1,-1)"
+       id="text126-6"
+       x="-212.06796"
+       y="-306.07199"
+       style="font-size:11.19999981px;text-align:center;text-anchor:middle"><tspan
+         style="font-variant:normal;font-weight:normal;font-size:11.19999981px;font-family:Arial;-inkscape-font-specification:ArialMT;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
+         sodipodi:role="line"
+         id="tspan128-2"
+         x="-212.06796"
+         y="-306.07199" /></text>
+<text
+       transform="scale(1,-1)"
+       id="text190-9"
+       x="126.22165"
+       y="-414.75528"><tspan
+         style="font-variant:normal;font-weight:bold;font-size:18px;font-family:Arial;-inkscape-font-specification:Arial-BoldMT;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
+         sodipodi:role="line"
+         id="tspan192-1"
+         x="126.22165"
+         y="-414.75528" /></text>
+<path
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path302"
        inkscape:connector-curvature="0" /><path
-       d="m 344.56115,129.68636 c 33.4435,0 58.96891,-11.09434 58.96891,-25.51698 0,-14.422638 -25.52541,-25.516978 -58.96891,-25.516978 -33.44353,0 -58.86476,11.09434 -58.86476,25.516978 0,14.42264 25.42123,25.51698 58.86476,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2-2-0-8-0"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path308"
        inkscape:connector-curvature="0" /><path
-       d="m 287.00706,83.469382 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-7-2-0-8"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path314"
        inkscape:connector-curvature="0" /><path
-       d="m 332.56324,126.16938 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6-5-5-2-8"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path320"
        inkscape:connector-curvature="0" /><path
-       d="m 338.43501,125.15157 c 33.95438,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.008088 -25.91536,-24.783548 -59.86974,-24.783548 -33.9544,0 -59.76397,10.77546 -59.76397,24.783548 0,14.0081 25.80957,24.78356 59.76397,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134-2-2-4"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path326"
        inkscape:connector-curvature="0" /><path
-       d="m 338.43581,125.88499 c 33.4435,0 58.96891,-11.09434 58.96891,-25.51698 0,-14.422628 -25.52541,-25.516968 -58.96891,-25.516968 -33.44353,0 -58.86476,11.09434 -58.86476,25.516968 0,14.42264 25.42123,25.51698 58.86476,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136-2-2-0"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path332"
        inkscape:connector-curvature="0" /><path
-       d="m 280.88172,79.668022 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140-1-7-2"
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path344"
        inkscape:connector-curvature="0" /><path
-       d="m 326.4379,122.36801 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296-6-5-5"
-       inkscape:connector-curvature="0" /><text
+       d="m 253.32165,547.95529 0,0 z"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+       id="path350"
+       inkscape:connector-curvature="0" /><g
+       id="g4453"
+       transform="matrix(0.8,0,0,-0.8,301.07261,538.52601)"><g
+         id="g4385"><g
+           id="g4387"><path
+             id="path4389"
+             d="m 49.541,38.655 c 1.617,0 3.158,0.338 4.559,0.946 0.105,-2.286 0.893,-4.4 2.157,-6.15 -0.89,-0.186 -1.808,-0.285 -2.748,-0.285 l -8.906,0 c -0.938,0 -1.856,0.098 -2.739,0.282 1.347,1.869 2.152,4.15 2.165,6.62 1.637,-0.9 3.515,-1.413 5.512,-1.413 z"
+             inkscape:connector-curvature="0" /><circle
+             id="circle4391"
+             r="10.496"
+             cy="21.954"
+             cx="49.054001" /><path
+             id="path4393"
+             d="m 65.539,50.36 c 5.342,0 9.67,-4.33 9.67,-9.67 0,-5.342 -4.328,-9.67 -9.67,-9.67 -5.292,0 -9.583,4.251 -9.663,9.524 3.049,1.912 5.187,5.146 5.577,8.9 1.242,0.582 2.623,0.916 4.086,0.916 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4395"
+             d="m 32.571,31.019 c -5.343,0 -9.671,4.329 -9.671,9.67 0,5.341 4.328,9.669 9.671,9.669 1.892,0 3.651,-0.553 5.143,-1.492 0.475,-3.091 2.132,-5.794 4.499,-7.634 0.01,-0.181 0.027,-0.36 0.027,-0.543 0,-5.341 -4.33,-9.67 -9.669,-9.67 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4397"
+             d="m 71.82,30.813 c 3.049,1.912 5.187,5.146 5.576,8.901 1.241,0.581 2.623,0.916 4.086,0.916 5.342,0 9.67,-4.329 9.67,-9.67 0,-5.341 -4.328,-9.67 -9.67,-9.67 -5.291,-10e-4 -9.582,4.251 -9.662,9.523 z"
+             inkscape:connector-curvature="0" /><circle
+             id="circle4399"
+             r="9.6709995"
+             cy="50.673"
+             cx="49.541" /><path
+             id="path4401"
+             d="m 69.643,51.019 -8.144,0 c -0.089,3.258 -1.479,6.192 -3.679,8.301 6.068,1.806 10.509,7.434 10.509,14.082 l 0,3.092 c 8.04,-0.297 12.674,-2.573 12.979,-2.729 l 0.646,-0.328 0.067,0 0,-10.036 C 82.023,56.573 76.469,51.019 69.643,51.019 Z"
+             inkscape:connector-curvature="0" /><path
+             id="path4403"
+             d="m 85.585,41.289 -8.142,0 c -0.088,3.258 -1.479,6.192 -3.678,8.301 6.068,1.806 10.508,7.433 10.508,14.081 l 0,3.092 c 8.039,-0.296 12.674,-2.572 12.979,-2.729 l 0.646,-0.327 0.069,0 0,-10.036 c 0,-6.827 -5.554,-12.382 -12.382,-12.382 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4405"
+             d="m 41.256,59.319 c -2.189,-2.099 -3.575,-5.017 -3.677,-8.254 -0.301,-0.022 -0.6,-0.047 -0.908,-0.047 l -8.203,0 c -6.828,0 -12.383,5.555 -12.383,12.383 l 0,10.037 0.025,0.155 0.691,0.218 c 5.227,1.633 9.893,2.383 13.944,2.621 l 0,-3.031 c 0.002,-6.647 4.441,-12.275 10.511,-14.082 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4407"
+             d="m 53.643,61.003 -8.206,0 c -6.828,0 -12.383,5.557 -12.383,12.382 l 0,10.037 0.026,0.157 0.69,0.216 c 6.516,2.035 12.177,2.715 16.835,2.715 9.101,0 14.375,-2.595 14.701,-2.76 l 0.646,-0.328 0.068,0 0,-10.037 C 66.023,66.558 60.469,61.003 53.643,61.003 Z"
+             inkscape:connector-curvature="0" /><path
+             id="path4409"
+             d="m 16.486,40.938 c 1.463,0 2.844,-0.335 4.086,-0.916 0.39,-3.755 2.527,-6.99 5.576,-8.902 -0.08,-5.271 -4.371,-9.523 -9.662,-9.523 -5.343,0 -9.671,4.329 -9.671,9.671 0,5.341 4.328,9.67 9.671,9.67 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4411"
+             d="M 24.202,49.899 C 22.004,47.79 20.613,44.855 20.525,41.596 l -8.143,0 C 5.554,41.597 0,47.152 0,53.979 l 0,10.037 0.069,0 0.646,0.327 c 0.306,0.154 4.939,2.433 12.979,2.728 l 0,-3.092 c 0,-6.647 4.439,-12.275 10.508,-14.08 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4413"
+             d="m 27.796,30.063 c 1.16,-0.47 2.93,-1.047 4.62,-1.047 1.967,0 3.891,0.506 5.607,1.469 0.382,-0.375 0.732,-0.783 1.05,-1.22 -1.63,-2.141 -2.52,-4.765 -2.52,-7.464 0,-1.818 0.406,-3.622 1.18,-5.261 -1.762,-1.592 -4.01,-2.461 -6.399,-2.461 -4.348,0 -8.133,2.943 -9.241,7.088 3.248,1.89 5.364,5.194 5.703,8.896 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4415"
+             d="m 59.117,28.718 c 0.336,0.534 0.729,1.037 1.175,1.505 1.588,-0.792 3.334,-1.208 5.092,-1.208 1.729,0 3.386,0.442 4.472,0.812 0.34,-4.013 2.767,-7.555 6.4,-9.35 -1.332,-3.805 -4.938,-6.402 -9.021,-6.402 -2.64,0 -5.14,1.084 -6.945,2.992 0.634,1.512 0.955,3.101 0.955,4.73 -10e-4,2.495 -0.735,4.873 -2.128,6.921 z"
+             inkscape:connector-curvature="0" /></g></g><g
+         id="g4417" /><g
+         id="g4419" /><g
+         id="g4421" /><g
+         id="g4423" /><g
+         id="g4425" /><g
+         id="g4427" /><g
+         id="g4429" /><g
+         id="g4431" /><g
+         id="g4433" /><g
+         id="g4435" /><g
+         id="g4437" /><g
+         id="g4439" /><g
+         id="g4441" /><g
+         id="g4443" /><g
+         id="g4445" /></g><text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="407.53061"
+       y="-496.88867"
+       id="text4490"
+       sodipodi:linespacing="125%"
+       transform="scale(1,-1)"><tspan
+         sodipodi:role="line"
+         id="tspan4492"
+         x="407.53061"
+         y="-496.88867">Users</tspan></text>
+<g
+       id="g4629"
+       transform="matrix(0.12031503,0,0,-0.12031503,338.73092,312.27732)"
+       style="fill:#b3b3b3"><g
+         id="g4589"
+         style="fill:#b3b3b3"><path
+           id="path4591"
+           d="M 397.7,24.6 C 356.3,8.7 301.5,0 243.5,0 185.5,0 130.7,8.7 89.3,24.6 43.8,42.1 18.8,66.9 18.8,94.5 l 0,298 c 0,27.6 25,52.4 70.5,69.9 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 45.5,-17.4 70.5,-42.3 70.5,-69.9 l 0,-298 c 0,-27.6 -25,-52.4 -70.5,-69.9 z m 43.5,367.9 c 0,15.2 -19.9,31.9 -53.2,44.7 C 349.6,451.9 298.3,460 243.5,460 188.7,460 137.4,451.9 99,437.2 65.7,424.4 45.8,407.7 45.8,392.5 l 0,-51.4 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,51.4 z m 0,-97.8 c 0,15.2 -19.9,31.9 -53.2,44.7 -38.4,14.7 -89.7,22.8 -144.5,22.8 C 188.7,362.2 137.4,354.1 99,339.4 65.7,326.6 45.8,309.9 45.8,294.7 l 0,-52.9 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,52.9 z m 0,-99.4 c 0,15.2 -19.9,31.9 -53.2,44.7 -38.4,14.7 -89.7,22.8 -144.5,22.8 C 188.7,262.8 137.4,254.7 99,240 65.7,227.2 45.8,210.5 45.8,195.3 l 0,-1.5 0,-52.9 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,54.4 z M 388,139.1 C 349.6,153.8 298.3,161.9 243.5,161.9 188.7,161.9 137.4,153.8 99,139.1 65.7,126.3 45.8,109.6 45.8,94.4 45.8,79.2 65.7,62.5 99,49.7 137.4,35 188.7,26.9 243.5,26.9 c 54.8,0 106.1,8.1 144.5,22.8 33.3,12.8 53.2,29.5 53.2,44.7 0,15.2 -19.9,32 -53.2,44.7 z"
+           inkscape:connector-curvature="0"
+           style="fill:#b3b3b3" /></g><g
+         id="g4593"
+         style="fill:#b3b3b3" /><g
+         id="g4595"
+         style="fill:#b3b3b3" /><g
+         id="g4597"
+         style="fill:#b3b3b3" /><g
+         id="g4599"
+         style="fill:#b3b3b3" /><g
+         id="g4601"
+         style="fill:#b3b3b3" /><g
+         id="g4603"
+         style="fill:#b3b3b3" /><g
+         id="g4605"
+         style="fill:#b3b3b3" /><g
+         id="g4607"
+         style="fill:#b3b3b3" /><g
+         id="g4609"
+         style="fill:#b3b3b3" /><g
+         id="g4611"
+         style="fill:#b3b3b3" /><g
+         id="g4613"
+         style="fill:#b3b3b3" /><g
+         id="g4615"
+         style="fill:#b3b3b3" /><g
+         id="g4617"
+         style="fill:#b3b3b3" /><g
+         id="g4619"
+         style="fill:#b3b3b3" /><g
+         id="g4621"
+         style="fill:#b3b3b3" /></g><text
        xml:space="preserve"
-       style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="338.3985"
-       y="-95.22496"
-       id="text3791-3-1-2-2"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#b3b3b3;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="454.13269"
+       y="-293.25815"
+       id="text4490-7"
        sodipodi:linespacing="125%"
        transform="scale(1,-1)"><tspan
          sodipodi:role="line"
-         x="338.3985"
-         y="-95.22496"
-         id="tspan3823-1-6-4"
-         style="font-size:12.80000019px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold">Worker Node</tspan></text>
+         x="454.13269"
+         y="-293.25815"
+         id="tspan5018">Experiments,</tspan><tspan
+         sodipodi:role="line"
+         x="454.13269"
+         y="-278.25815"
+         id="tspan5109">Running State &amp;</tspan><tspan
+         sodipodi:role="line"
+         x="454.13269"
+         y="-263.25815"
+         id="tspan5111">Data Cache</tspan></text>
+<path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-4);marker-end:url(#marker4951-9)"
+       d="m 340.23831,466.85248 0.0422,-36.77641"
+       id="path4657-2"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><g
+       id="g12265"
+       transform="translate(6.4,24)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245"
+           sodipodi:role="line">Web Server</tspan></text>
+</g><g
+       id="g12351"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-310.18982"
+         x="198.1579"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-8"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-3"
+         y="-286.45676"
+         x="255.79291"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-286.45676"
+           x="255.79291"
+           id="tspan12245-0"
+           sodipodi:role="line">Database</tspan><tspan
+           y="-266.45676"
+           x="255.79291"
+           sodipodi:role="line"
+           id="tspan12345">Server</tspan></text>
+</g><path
+       style="fill:#999999;fill-rule:evenodd;stroke:#818181;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3.2, 3.2;stroke-dashoffset:0;stroke-opacity:1"
+       d="m 196.78829,346.3246 307.90059,0"
+       id="path4275-5-3"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><text
+       id="text142"
+       x="457.41129"
+       y="-355.80618"
+       style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:12px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';fill:#666666"
+       transform="scale(1,-1)"><tspan
+         style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:14px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';writing-mode:lr-tb;fill:#666666;fill-opacity:1;fill-rule:nonzero;stroke:none"
+         sodipodi:role="line"
+         id="tspan144"
+         x="457.41129"
+         y="-355.80618">Public</tspan></text>
+<text
+       id="text142-2"
+       x="450.44547"
+       y="-328.57266"
+       style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:12px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';fill:#666666"
+       transform="scale(1,-1)"><tspan
+         style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:14px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';writing-mode:lr-tb;fill:#666666;fill-opacity:1;fill-rule:nonzero;stroke:none"
+         sodipodi:role="line"
+         id="tspan144-7"
+         x="450.44547"
+         y="-328.57266">Private</tspan></text>
+<g
+       id="g12265-3"
+       transform="translate(-80.037814,-208.26164)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9"
+           sodipodi:role="line">Scheduler</tspan></text>
+</g><g
+       id="g12265-3-0"
+       transform="translate(99.638908,-186.2104)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0"
+           sodipodi:role="line">Worker</tspan></text>
+</g><g
+       id="g12265-3-0-1"
+       transform="translate(105.95868,-194.9373)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6-2"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8-1"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0-8"
+           sodipodi:role="line">Worker</tspan></text>
+</g><g
+       id="g12265-3-0-1-6"
+       transform="translate(112.27846,-203.66421)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6-2-0"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8-1-3"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0-8-4"
+           sodipodi:role="line">Worker</tspan></text>
+</g><g
+       id="g12265-3-0-1-7"
+       transform="translate(118.59826,-212.39111)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6-2-6"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8-1-37"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0-8-3"
+           sodipodi:role="line">Worker</tspan></text>
+</g><g
+       id="g12265-3-0-1-9"
+       transform="translate(124.91803,-221.11802)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6-2-60"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8-1-9"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0-8-9"
+           sodipodi:role="line">Worker</tspan></text>
+</g><g
+       id="g12265-3-0-1-4"
+       transform="translate(131.23781,-229.84492)"><rect
+         ry="3.626792"
+         transform="scale(1,-1)"
+         y="-400.2276"
+         x="275.60104"
+         height="55.536438"
+         width="115.97315"
+         id="rect12241-5-6-2-4"
+         style="opacity:1;fill:#e6e6e6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text12243-9-8-1-30"
+         y="-366.49454"
+         x="333.38058"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         xml:space="preserve"><tspan
+           y="-366.49454"
+           x="333.38058"
+           id="tspan12245-9-0-8-2"
+           sodipodi:role="line">Worker</tspan></text>
+</g><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#marker4951-8)"
+       d="m 361.35584,156.03062 42.46904,-49.00274"
+       id="path12489"
+       inkscape:connector-curvature="0" /><text
+       xml:space="preserve"
+       style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="159.50851"
+       y="-352.36765"
+       id="text12521"
+       sodipodi:linespacing="125%"
+       transform="matrix(0.67424682,-0.73850608,-0.73850608,-0.67424682,0,0)"><tspan
+         sodipodi:role="line"
+         id="tspan12523"
+         x="159.50851"
+         y="-352.36765">Scalable</tspan></text>
 <path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Lend-3);display:inline"
-       d="M 337.28449,345.31278 327.21094,231.84282"
-       id="path3381"
-       inkscape:connector-type="polyline"
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart);marker-end:url(#Arrow2Mend)"
+       d="M 336.85446,363.47556 258.45008,316.10625"
+       id="path12525"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#path134-2-2"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 356.61197,346.27002 29.82337,-41.46007"
-       id="path4355"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc"
+       inkscape:export-filename="/Users/andre/work/beat/beat.web/doc/admin/img/path12525.png"
+       inkscape:export-xdpi="233.76707"
+       inkscape:export-ydpi="233.76707" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-2);marker-end:url(#Arrow2Mend-6)"
+       d="m 257.63337,248.31911 -0.81671,-49.00273"
+       id="path12525-5"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#path134-2"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 349.22389,229.85475 30.12186,28.34014"
-       id="path4543"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-9);marker-end:url(#Arrow2Mend-8)"
+       d="M 403.00817,142.96322 286.2183,247.50241"
+       id="path12525-3"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134-2-2"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#path134-2"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 328.13798,182.32331 7.22381,-57.2014"
-       id="path4731"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-9-7);marker-end:url(#Arrow2Mend-8-9)"
+       d="M 397.86666,155.76322 285.16035,247.23501"
+       id="path12525-3-8"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134-2-2"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#path134-2-2-4"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 384.68495,208.87431 66.19931,1.99594"
-       id="path4919"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-9-7-5);marker-end:url(#Arrow2Mend-8-9-6)"
+       d="M 391.46666,168.56322 283.66063,246.96761"
+       id="path12525-3-8-4"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134-2-2"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#g3844"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 366.88924,122.32903 84.4505,65.30037"
-       id="path5107"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-9-7-9);marker-end:url(#Arrow2Mend-8-9-5)"
+       d="M 385.06666,181.36322 284.61104,247.51693"
+       id="path12525-3-8-43"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134-2-2-4"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#g3844"
-       inkscape:connection-end-point="d4" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="M 396.88282,368.37583 501.83036,243.41884"
-       id="path5295"
-       inkscape:connector-type="polyline"
-       inkscape:connector-curvature="0" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-3)"
-       d="m 429.29663,258.48452 23.59293,-20.63787"
-       id="path5483"
-       inkscape:connector-type="polyline"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:2;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-9-7-6);marker-end:url(#Arrow2Mend-8-9-0)"
+       d="m 370.66666,202.16322 -86.57151,45.7359"
+       id="path12525-3-8-5"
        inkscape:connector-curvature="0"
-       inkscape:connection-start="#path134-2"
-       inkscape:connection-start-point="d4"
-       inkscape:connection-end="#g3844"
-       inkscape:connection-end-point="d4" /></g></svg>
\ No newline at end of file
+       sodipodi:nodetypes="cc" /></g></svg>
\ No newline at end of file
diff --git a/doc/admin/img/platform-overview.pdf b/doc/admin/img/platform-overview.pdf
index 9323fa24b7904a4af44d051c6697c73a11301701..214413e9f55bf65273f7e1f7695ac312bfca8dd7 100644
Binary files a/doc/admin/img/platform-overview.pdf and b/doc/admin/img/platform-overview.pdf differ
diff --git a/doc/admin/img/platform-overview.png b/doc/admin/img/platform-overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..9781cd02a18edefcfc96463f636170026866fe77
Binary files /dev/null and b/doc/admin/img/platform-overview.png differ
diff --git a/doc/admin/img/platform-overview.svg b/doc/admin/img/platform-overview.svg
index a186bb97da49a087feeb35ed4f7211fd2c991413..dd27d6ab537b8cf848a6f1abb50a3c65d4460cd3 100644
--- a/doc/admin/img/platform-overview.svg
+++ b/doc/admin/img/platform-overview.svg
@@ -11,11 +11,14 @@
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    id="svg2"
    version="1.1"
-   inkscape:version="0.48.5 r10040"
-   width="494.82703"
+   inkscape:version="0.91 r13725"
+   width="384.87573"
    height="624.0625"
    xml:space="preserve"
-   sodipodi:docname="platform-overview.pdf"><metadata
+   sodipodi:docname="platform-overview.svg"
+   inkscape:export-filename="/Users/andre/Projects/beat/beat.web/doc/admin/img/platform-overview.png"
+   inkscape:export-xdpi="233.76707"
+   inkscape:export-ydpi="233.76707"><metadata
      id="metadata8"><rdf:RDF><cc:Work
          rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
@@ -24,10 +27,34 @@
        orient="auto"
        refY="0"
        refX="0"
+       id="marker4951"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         id="path4666"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lstart"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         id="path4663"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,10,0)"
+         inkscape:connector-curvature="0" /></marker><marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
        id="Arrow1Lend"
        style="overflow:visible"><path
          id="path4653"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          inkscape:connector-curvature="0" /></marker><marker
@@ -44,9 +71,81 @@
          inkscape:connector-curvature="0" /></marker><clipPath
        clipPathUnits="userSpaceOnUse"
        id="clipPath18"><path
-         d="m 0,0 841.8,0 0,595 L 0,595 0,0 z"
+         d="m 0,0 841.8,0 0,595 L 0,595 0,0 Z"
          id="path20"
-         inkscape:connector-curvature="0" /></clipPath></defs><sodipodi:namedview
+         inkscape:connector-curvature="0" /></clipPath><marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lstart-8"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4663-3"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker4951-8"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4666-8"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lstart-8-8"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4663-3-0"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker4951-8-9"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4666-8-5"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lstart"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Lstart-4"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4663-36"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(0.8,0,0,0.8,10,0)" /></marker><marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker4951-9"
+       style="overflow:visible"
+       inkscape:isstock="true"><path
+         inkscape:connector-curvature="0"
+         id="path4666-3"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.8,0,0,-0.8,-10,0)" /></marker></defs><sodipodi:namedview
      pagecolor="#ffffff"
      bordercolor="#666666"
      borderopacity="1"
@@ -60,10 +159,10 @@
      id="namedview4"
      showgrid="false"
      inkscape:zoom="0.97953702"
-     inkscape:cx="51.965353"
-     inkscape:cy="293.88719"
-     inkscape:window-x="0"
-     inkscape:window-y="25"
+     inkscape:cx="53.159682"
+     inkscape:cy="319.40945"
+     inkscape:window-x="190"
+     inkscape:window-y="35"
      inkscape:window-maximized="0"
      inkscape:current-layer="g10"
      fit-margin-top="0"
@@ -73,524 +172,367 @@
      id="g10"
      inkscape:groupmode="layer"
      inkscape:label="platform-overview"
-     transform="matrix(1.25,0,0,-1.25,-211.78554,693.53125)"><path
-       d="M 173.28048,351 564.7,351"
-       style="fill:none;stroke:#000000;stroke-width:1.17524242;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:1.41262254, 1.41262254;stroke-dashoffset:0"
-       id="path88"
-       inkscape:connector-curvature="0" /><g
-       id="g12" /><path
-       d="M 171.64933,450 564.7,450"
-       style="fill:none;stroke:#000000;stroke-width:1.1776886;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:1.41556287, 1.41556287;stroke-dashoffset:0"
-       id="path22"
-       inkscape:connector-curvature="0" /><path
-       d="m 349.9,69.9 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z"
-       style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path24"
-       inkscape:connector-curvature="0" /><path
-       d="m 349.9,69.9 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path26"
-       inkscape:connector-curvature="0" /><path
-       d="m 343.4,63.4 -82.3,0 0,71.7 164.6,0 0,-71.7 -82.3,0 z"
-       style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path44"
-       inkscape:connector-curvature="0" /><path
-       d="m 343.4,63.4 -82.3,0 0,71.7 164.6,0 0,-71.7 -82.3,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path46"
-       inkscape:connector-curvature="0" /><path
-       d="m 342.1,376.8 0.5,-123.9 6.1,0.1 -11.6,-17 -11.8,16.9 6.1,0 -0.6,123.9 11.3,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path76"
-       inkscape:connector-curvature="0" /><path
-       d="m 342.1,376.8 0.5,-123.9 6.1,0.1 -11.6,-17 -11.8,16.9 6.1,0 -0.6,123.9 11.3,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path78"
-       inkscape:connector-curvature="0" /><path
-       d="m 337.79711,420.18356 c 33.95439,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.0081 -25.91535,-24.78356 -59.86974,-24.78356 -33.95439,0 -59.76396,10.77546 -59.76396,24.78356 0,14.0081 25.80957,24.78356 59.76396,24.78356 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path134"
-       inkscape:connector-curvature="0" /><path
-       d="m 337.79791,420.91698 c 33.44351,0 58.96893,-11.09434 58.96893,-25.51698 0,-14.42264 -25.52542,-25.51698 -58.96893,-25.51698 -33.44352,0 -58.86475,11.09434 -58.86475,25.51698 0,14.42264 25.42123,25.51698 58.86475,25.51698 z"
-       style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path136"
-       inkscape:connector-curvature="0" /><text
+     transform="matrix(1.25,0,0,-1.25,-261.1261,693.53125)"><g
+       id="g12" /><text
        transform="scale(1,-1)"
        id="text142"
-       x="174.47357"
-       y="-433.00244"><tspan
-         style="font-size:14px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
+       x="211.34021"
+       y="-439.40247"
+       style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';fill:#666666"><tspan
+         style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:14px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';writing-mode:lr-tb;fill:#666666;fill-opacity:1;fill-rule:nonzero;stroke:none"
          sodipodi:role="line"
          id="tspan144"
-         x="174.47357"
-         y="-433.00244">Frontend</tspan></text>
-<path
-       d="m 337.9,56.2 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z"
-       style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path154"
-       inkscape:connector-curvature="0" /><path
-       d="m 337.9,56.2 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path156"
-       inkscape:connector-curvature="0" /><text
-       transform="scale(1,-1)"
-       id="text158"
-       x="262.70026"
-       y="-111.7"
-       style="font-size:11.19999981px;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono"><tspan
-         style="font-size:9.60000038px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono"
-         x="262.70026"
-         y="-111.7"
-         sodipodi:role="line"
-         id="tspan160">Execution of Experiments</tspan></text>
-<path
-       d="m 301.5,86.6 16.6,0"
-       style="fill:none;stroke:#000000;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path162"
-       inkscape:connector-curvature="0" /><path
-       d="m 353.6,86.6 16.6,0"
-       style="fill:none;stroke:#000000;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path164"
-       inkscape:connector-curvature="0" /><path
-       d="m 284.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path166"
-       inkscape:connector-curvature="0" /><path
-       d="m 284.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path168"
-       inkscape:connector-curvature="0" /><path
-       d="m 336.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path170"
-       inkscape:connector-curvature="0" /><path
-       d="m 336.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path172"
-       inkscape:connector-curvature="0" /><path
-       d="m 388.7,77 -17.9,0 0,22.1 35.9,0 0,-22.1 -18,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path174"
-       inkscape:connector-curvature="0" /><path
-       d="m 388.7,77 -17.9,0 0,22.1 35.9,0 0,-22.1 -18,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path176"
-       inkscape:connector-curvature="0" /><text
+         x="211.34021"
+         y="-439.40247">Frontend</tspan></text>
+<text
        transform="scale(-1,-1)"
        id="text126"
        x="-207.84631"
        y="-312.31671"
        style="font-size:11.19999981px;text-align:center;text-anchor:middle"><tspan
-         style="font-size:11.19999981px;font-variant:normal;font-weight:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
+         style="font-variant:normal;font-weight:normal;font-size:11.19999981px;font-family:Arial;-inkscape-font-specification:ArialMT;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
          sodipodi:role="line"
          id="tspan128"
          x="-207.84631"
          y="-312.31671" /></text>
-<path
-       d="m 280.24383,374.7 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path140"
-       inkscape:connector-curvature="0" /><text
+<text
        transform="scale(1,-1)"
        id="text190"
        x="122"
        y="-421"><tspan
-         style="font-size:18px;font-variant:normal;font-weight:bold;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:Arial-BoldMT"
+         style="font-variant:normal;font-weight:bold;font-size:18px;font-family:Arial;-inkscape-font-specification:Arial-BoldMT;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
          sodipodi:role="line"
          id="tspan192"
          x="122"
          y="-421" /></text>
-<path
-       d="m 341.7,192.5 0.2,-49.1 6.2,0 -11.7,-17.2 -11.7,17.1 6.2,0 -0.3,49.1 11.1,0.1 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path198"
-       inkscape:connector-curvature="0" /><path
-       d="m 341.7,192.5 0.2,-49.1 6.2,0 -11.7,-17.2 -11.7,17.1 6.2,0 -0.3,49.1 11.1,0.1 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path200"
-       inkscape:connector-curvature="0" /><path
-       d="m 347.8,192.5 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path202"
-       inkscape:connector-curvature="0" /><path
-       d="m 324.8,126.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path204"
-       inkscape:connector-curvature="0" /><path
-       d="m 337,236.6 c 36,0 63.5,-10.1 63.5,-23.4 0,-13.3 -27.5,-23.5 -63.5,-23.5 -36,0 -63.5,10.2 -63.5,23.5 0,13.3 27.5,23.4 63.5,23.4 z"
-       style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path206"
-       inkscape:connector-curvature="0" /><path
-       d="m 337,236.6 c 36,0 63.5,-10.1 63.5,-23.4 0,-13.3 -27.5,-23.5 -63.5,-23.5 -36,0 -63.5,10.2 -63.5,23.5 0,13.3 27.5,23.4 63.5,23.4 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path208"
-       inkscape:connector-curvature="0" /><path
-       d="m 273.5,236.6 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path210"
-       inkscape:connector-curvature="0" /><text
-       transform="scale(1,-1)"
-       id="text214"
-       x="176.14017"
-       y="-334.0737"><tspan
-         style="font-size:14px;font-variant:normal;font-weight:normal;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
-         x="176.14017"
-         y="-334.0737"
-         sodipodi:role="line"
-         id="tspan216">Backend</tspan></text>
-<path
-       d="m 419.6,388.5 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path222"
-       inkscape:connector-curvature="0" /><path
-       d="m 419.6,388.5 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path228"
-       inkscape:connector-curvature="0" /><path
-       d="m 419.6,388.5 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path234"
-       inkscape:connector-curvature="0" /><path
-       d="m 516.2,78.4 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path272"
-       inkscape:connector-curvature="0" /><path
-       d="m 516.2,78.4 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path278"
-       inkscape:connector-curvature="0" /><g
-       id="g5460"
-       transform="translate(0,-9.8005484)"><path
+<g
+       id="g8566"
+       transform="translate(-7.86224,0)"><path
          inkscape:connector-curvature="0"
-         id="path146"
-         style="fill:#e6e6ff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 492.15123,197.42467 -70.4,0 0,145.63807 140.8,0 0,-145.63807 -70.4,0 z" /><path
+         id="path24"
+         style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 349.9,69.9 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path148"
-         style="fill:none;stroke:#808080;stroke-width:0.91820848;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 492.15123,197.70507 -70.4,0 0,145.21845 140.8,0 0,-145.21845 -70.4,0 z" /><text
-         y="-323.30124"
-         x="429.55124"
-         id="text150"
-         transform="scale(1,-1)"><tspan
-           y="-323.30124"
-           x="429.55124"
-           id="tspan152"
-           sodipodi:role="line"
-           style="font-size:14px;font-variant:normal;font-weight:bold;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:Arial-BoldMT">Cache</tspan></text>
-<path
+         id="path26"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 349.9,69.9 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path212"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 422.55123,219.10165 0,0 z" /><path
+         id="path44"
+         style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 343.4,63.4 -82.3,0 0,71.7 164.6,0 0,-71.7 -82.3,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path224"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,329.50165 0,0 z" /><path
+         id="path46"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 343.4,63.4 -82.3,0 0,71.7 164.6,0 0,-71.7 -82.3,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path230"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,329.50165 0,0 z" /><path
+         id="path154"
+         style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 337.9,56.2 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path236"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,329.50165 0,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path242"
-         style="fill:#ffffcc;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 441.55123,292.30165 6.7,0 0,7.4 6.7,0 0,7.3 83.2,0 0,-58.9 -6.7,0 0,-7.4 -6.7,0 0,-7.3 c -31.2,0.4 -31.9,-11.6 -61.6,-14.4 -10.6,1.7 -14.5,3.1 -21.6,5 l 0,68.3 z" /><path
+         id="path156"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 337.9,56.2 -82.3,0 0,71.8 164.6,0 0,-71.8 -82.3,0 z" /><text
+         style="font-size:11.19999981px;font-family:'Bitstream Vera Sans Mono';-inkscape-font-specification:'Bitstream Vera Sans Mono'"
+         y="-111.7"
+         x="262.70026"
+         id="text158"
+         transform="scale(1,-1)"><tspan
+           id="tspan160"
+           sodipodi:role="line"
+           y="-111.7"
+           x="262.70026"
+           style="font-variant:normal;font-weight:normal;font-size:12px;font-family:'Bitstream Vera Sans Mono';-inkscape-font-specification:'Bitstream Vera Sans Mono';writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;">Execution of Experiments</tspan></text>
+<path
          inkscape:connector-curvature="0"
-         id="path244"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,292.30165 6.7,0 0,7.4 6.7,0 0,7.3 83.2,0 0,-58.9 -6.7,0 0,-7.4 -6.7,0 0,-7.3 c -31.2,0.4 -31.9,-11.6 -61.6,-14.4 -10.6,1.7 -14.5,3.1 -21.6,5 l 0,68.3 z" /><path
+         id="path162"
+         style="fill:none;stroke:#000000;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 301.5,86.6 16.6,0" /><path
          inkscape:connector-curvature="0"
-         id="path246"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,307.00165 0,0 z" /><path
+         id="path164"
+         style="fill:none;stroke:#000000;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 353.6,86.6 16.6,0" /><path
          inkscape:connector-curvature="0"
-         id="path248"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,218.70165 0,0 z" /><path
+         id="path166"
+         style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 284.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path250"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 448.25123,292.30165 76.5,0 0,-51.6" /><path
+         id="path168"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 284.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path252"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,307.00165 0,0 z" /><path
+         id="path170"
+         style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 336.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path254"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,218.70165 0,0 z" /><path
+         id="path172"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 336.7,77 -18,0 0,22.1 35.9,0 0,-22.1 -17.9,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path256"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 454.95123,299.70165 76.5,0 0,-51.6" /><path
+         id="path174"
+         style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 388.7,77 -17.9,0 0,22.1 35.9,0 0,-22.1 -18,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path258"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,307.00165 0,0 z" /><path
+         id="path176"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 388.7,77 -17.9,0 0,22.1 35.9,0 0,-22.1 -18,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path260"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 538.25123,218.70165 0,0 z" /><text
-         style="font-size:11.19999981px;font-weight:bold;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-         y="-258.30164"
-         x="469.45123"
-         id="text262"
-         transform="scale(1,-1)"><tspan
-           y="-258.30164"
-           x="469.45123"
-           id="tspan264"
-           sodipodi:role="line"
-           style="font-size:11.19999981px;font-variant:normal;font-weight:bold;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold">Data</tspan></text>
+         id="path204"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 324.8,126.2 0,0 z" /></g><text
+       transform="scale(1,-1)"
+       id="text214"
+       x="211.34021"
+       y="-342.07373"
+       style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';fill:#666666"><tspan
+         style="font-style:oblique;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:14px;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Oblique';writing-mode:lr-tb;fill:#666666;fill-opacity:1;fill-rule:nonzero;stroke:none"
+         x="211.34021"
+         y="-342.07373"
+         sodipodi:role="line"
+         id="tspan216">Backend</tspan></text>
 <path
-         inkscape:connector-curvature="0"
-         id="path270"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,196.20165 0,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path276"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,196.20165 0,0 z" /><path
-         inkscape:connector-curvature="0"
-         id="path282"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 441.55123,196.20165 0,0 z" /></g><path
-       d="m 516.2,78.4 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path284"
-       inkscape:connector-curvature="0" /><path
-       d="m 343,483.9 0.2,-46.4 5.9,0 -11.6,-20.1 -11.7,20 5.9,0 -0.2,46.5 11.5,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path290"
-       inkscape:connector-curvature="0" /><path
-       d="m 343,483.9 0.2,-46.4 5.9,0 -11.6,-20.1 -11.7,20 5.9,0 -0.2,46.5 11.5,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path292"
-       inkscape:connector-curvature="0" /><path
-       d="m 348.9,483.9 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path294"
-       inkscape:connector-curvature="0" /><path
-       d="m 325.8,417.4 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path296"
-       inkscape:connector-curvature="0" /><path
-       d="m 265.1,524.3 c -3.3,11.1 12.3,21.7 27.9,21.7 4.9,0 10,-1 14.2,-2.7 4,5.1 11.4,8.2 19.7,8.2 5.5,-0.2 11.4,-1.7 15.6,-4.4 3,4.4 9.3,7.1 16.2,7.1 5.7,0 11,-1.9 14.3,-4.9 3.8,3 9.6,4.9 15.6,4.9 9.7,0 18,-4.8 19.7,-11.3 9.5,-1.9 16.3,-7.8 16.3,-14.7 0,-2.1 -0.5,-4 -1.9,-6 4,-3.3 6.4,-7.4 6.4,-11.6 0,-9.6 -10.8,-17.6 -24.6,-19 0,-9.1 -10.7,-16.3 -24,-16.3 -4.6,0 -8.9,0.9 -12.8,2.5 -3.5,-8 -14.5,-13.7 -27,-13.7 -9.2,0 -18,3.4 -23.2,8.8 -4.9,-2.1 -2.3,-3.3 -16.4,-3.3 -11.6,0 -22.3,4.2 -27.9,11.1 -13.3,0.1 -20.1,5.5 -20.1,12.4 0,3.1 1.7,6 4.9,8.4 -5.8,2 -8.9,5.9 -8.9,10.5 0,6.4 7,11.6 16,12.3 l 0,0 z"
-       style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
-       id="path298"
-       inkscape:connector-curvature="0" /><path
-       d="m 265.1,524.3 c -3.3,11.1 12.3,21.7 27.9,21.7 4.9,0 10,-1 14.2,-2.7 4,5.1 11.4,8.2 19.7,8.2 5.5,-0.2 11.4,-1.7 15.6,-4.4 3,4.4 9.3,7.1 16.2,7.1 5.7,0 11,-1.9 14.3,-4.9 3.8,3 9.6,4.9 15.6,4.9 9.7,0 18,-4.8 19.7,-11.3 9.5,-1.9 16.3,-7.8 16.3,-14.7 0,-2.1 -0.5,-4 -1.9,-6 4,-3.3 6.4,-7.4 6.4,-11.6 0,-9.6 -10.8,-17.6 -24.6,-19 0,-9.1 -10.7,-16.3 -24,-16.3 -4.6,0 -8.9,0.9 -12.8,2.5 -3.5,-8 -14.5,-13.7 -27,-13.7 -9.2,0 -18,3.4 -23.2,8.8 -4.9,-2.1 -2.3,-3.3 -16.4,-3.3 -11.6,0 -22.3,4.2 -27.9,11.1 -13.3,0.1 -20.1,5.5 -20.1,12.4 0,3.1 1.7,6 4.9,8.4 -5.8,2 -8.9,5.9 -8.9,10.5 0,6.4 7,11.6 16,12.3 l 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path300"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path302"
        inkscape:connector-curvature="0" /><path
-       d="m 429.1,464.1 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path304"
-       inkscape:connector-curvature="0" /><path
-       d="m 265.1,524.3 c 0.2,-1 1,-2.2 1.4,-3.1"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path306"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path308"
        inkscape:connector-curvature="0" /><path
-       d="m 429.1,464.1 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path310"
-       inkscape:connector-curvature="0" /><path
-       d="m 307.2,543.3 c 1.9,-0.8 4.2,-1.8 5.8,-2.9"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path312"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path314"
        inkscape:connector-curvature="0" /><path
-       d="m 429.1,464.1 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path316"
-       inkscape:connector-curvature="0" /><path
-       d="m 342.5,547.1 c -0.7,-0.9 -1.1,-1.9 -1.5,-2.9"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path318"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path320"
        inkscape:connector-curvature="0" /><path
-       d="m 429.1,464.1 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path322"
-       inkscape:connector-curvature="0" /><path
-       d="m 373,549.3 c -1.2,-1 -1.9,-2.3 -2.7,-3.5"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path324"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path326"
        inkscape:connector-curvature="0" /><path
-       d="m 429.1,464.1 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path328"
-       inkscape:connector-curvature="0" /><path
-       d="m 408.3,542.9 c 0.2,-0.8 1,-2.4 0.7,-2.8"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path330"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path332"
        inkscape:connector-curvature="0" /><path
-       d="m 422.7,522.2 c -1.4,-2.1 -3.4,-4 -6,-5.5"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path336"
-       inkscape:connector-curvature="0" /><path
-       d="m 404.6,491.6 c 0.7,3.4 -3.2,11.7 -13.8,14.8"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path342"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path344"
        inkscape:connector-curvature="0" /><path
-       d="m 367.7,477.8 c 0.7,1.4 1,2.7 1.1,4"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-       id="path348"
-       inkscape:connector-curvature="0" /><path
        d="m 249.1,554.2 0,0 z"
-       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
        id="path350"
-       inkscape:connector-curvature="0" /><text
-       transform="scale(1,-1)"
-       id="text372"
-       x="334.05231"
-       y="-518.78412"
-       style="text-align:center;text-anchor:middle"><tspan
-         style="font-size:18px;font-variant:normal;font-weight:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
-         sodipodi:role="line"
-         id="tspan374"
-         x="334.05231"
-         y="-518.78412">You</tspan><tspan
-         style="font-size:18px;font-variant:normal;font-weight:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Arial;-inkscape-font-specification:ArialMT"
-         sodipodi:role="line"
-         x="334.05231"
-         y="-496.28412"
-         id="tspan5489">(via internet)</tspan></text>
-<g
-       id="g3844"
-       transform="translate(22.051235,-44.952605)"><path
+       inkscape:connector-curvature="0" /><g
+       id="g8533"
+       transform="translate(-0.96224,-2.6310059)"><path
+         inkscape:connector-curvature="0"
+         id="path202"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 347.8,192.5 0,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path110"
-         style="fill:#ffffcc;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 238.74383,336.7 c 0,5.2 -15.9,10.4 -31.8,10.4 -15.8,0 -31.7,-5.2 -31.7,-10.4 l 0,-45.4 c 0,-5.2 15.9,-10.5 31.7,-10.5 15.9,0 31.8,5.3 31.8,10.5 l 0,45.4 z" /><path
+         id="path206"
+         style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 337,236.6 c 36,0 63.5,-10.1 63.5,-23.4 0,-13.3 -27.5,-23.5 -63.5,-23.5 -36,0 -63.5,10.2 -63.5,23.5 0,13.3 27.5,23.4 63.5,23.4 z" /><path
          inkscape:connector-curvature="0"
-         id="path112"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,336.7 c 0,5.2 -15.9,10.4 -31.8,10.4 -15.8,0 -31.7,-5.2 -31.7,-10.4 l 0,-45.4 c 0,-5.2 15.9,-10.5 31.7,-10.5 15.9,0 31.8,5.3 31.8,10.5 l 0,45.4 z" /><path
+         id="path208"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 337,236.6 c 36,0 63.5,-10.1 63.5,-23.4 0,-13.3 -27.5,-23.5 -63.5,-23.5 -36,0 -63.5,10.2 -63.5,23.5 0,13.3 27.5,23.4 63.5,23.4 z" /><path
          inkscape:connector-curvature="0"
-         id="path116"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 175.24383,280.8 0,0 z" /><path
+         id="path210"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 273.5,236.6 0,0 z" /><text
+         transform="scale(1,-1)"
+         sodipodi:linespacing="125%"
+         id="text3791"
+         y="-218.63278"
+         x="338.6962"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans Mono';-inkscape-font-specification:'Bitstream Vera Sans Mono Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;"
+         xml:space="preserve"><tspan
+           y="-218.63278"
+           x="338.6962"
+           id="tspan3793"
+           sodipodi:role="line">Scheduler +</tspan><tspan
+           id="tspan3795"
+           y="-202.93951"
+           x="338.6962"
+           sodipodi:role="line">Workers Nodes</tspan></text>
+</g><g
+       id="g4648"
+       transform="translate(-1.81224,12.250664)"><path
          inkscape:connector-curvature="0"
-         id="path118"
-         style="fill:#ffffcc;fill-opacity:1;fill-rule:evenodd;stroke:none"
-         d="m 238.74383,336.7 c 0,-5.3 -15.9,-10.5 -31.8,-10.5 -15.8,0 -31.7,5.2 -31.7,10.5 l 63.5,0 z" /><path
+         id="path134"
+         style="fill:#cfe7f5;fill-opacity:1;fill-rule:evenodd;stroke:none"
+         d="m 337.79711,420.18356 c 33.95439,0 59.86974,-10.77546 59.86974,-24.78356 0,-14.0081 -25.91535,-24.78356 -59.86974,-24.78356 -33.95439,0 -59.76396,10.77546 -59.76396,24.78356 0,14.0081 25.80957,24.78356 59.76396,24.78356 z" /><path
          inkscape:connector-curvature="0"
-         id="path120"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,336.7 c 0,-5.3 -15.9,-10.5 -31.8,-10.5 -15.8,0 -31.7,5.2 -31.7,10.5" /><path
+         id="path136"
+         style="fill:none;stroke:#808080;stroke-width:1.41658592;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 337.79791,420.91698 c 33.44351,0 58.96893,-11.09434 58.96893,-25.51698 0,-14.42264 -25.52542,-25.51698 -58.96893,-25.51698 -33.44352,0 -58.86475,11.09434 -58.86475,25.51698 0,14.42264 25.42123,25.51698 58.86475,25.51698 z" /><path
          inkscape:connector-curvature="0"
-         id="path122"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 238.74383,347.1 0,0 z" /><path
+         id="path140"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 280.24383,374.7 0,0 z" /><path
          inkscape:connector-curvature="0"
-         id="path124"
-         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-opacity:1;stroke-dasharray:none"
-         d="m 175.24383,280.8 0,0 z" /><text
+         id="path296"
+         style="fill:none;stroke:#808080;stroke-width:1.25;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+         d="m 325.8,417.4 0,0 z" /><text
          transform="scale(1,-1)"
          sodipodi:linespacing="125%"
-         id="text3785"
-         y="-312.11984"
-         x="206.19865"
-         style="font-size:9.60000038px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
+         id="text3791-3"
+         y="-399.64914"
+         x="338.16895"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans Mono';-inkscape-font-specification:'Bitstream Vera Sans Mono Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;"
          xml:space="preserve"><tspan
-           y="-312.11984"
-           x="206.19865"
-           id="tspan3787"
-           sodipodi:role="line">Object</tspan><tspan
-           id="tspan3789"
-           y="-300.11984"
-           x="206.19865"
-           sodipodi:role="line">Repository</tspan></text>
-</g><text
+           id="tspan3795-8"
+           y="-399.64914"
+           x="338.16895"
+           sodipodi:role="line">Web Server</tspan><tspan
+           id="tspan3823"
+           y="-384.31229"
+           x="338.16895"
+           sodipodi:role="line">+ RESTful API</tspan></text>
+</g><g
+       id="g4453"
+       transform="matrix(0.8,0,0,-0.8,296.85096,544.77072)"><g
+         id="g4385"><g
+           id="g4387"><path
+             id="path4389"
+             d="m 49.541,38.655 c 1.617,0 3.158,0.338 4.559,0.946 0.105,-2.286 0.893,-4.4 2.157,-6.15 -0.89,-0.186 -1.808,-0.285 -2.748,-0.285 l -8.906,0 c -0.938,0 -1.856,0.098 -2.739,0.282 1.347,1.869 2.152,4.15 2.165,6.62 1.637,-0.9 3.515,-1.413 5.512,-1.413 z"
+             inkscape:connector-curvature="0" /><circle
+             id="circle4391"
+             r="10.496"
+             cy="21.954"
+             cx="49.054001" /><path
+             id="path4393"
+             d="m 65.539,50.36 c 5.342,0 9.67,-4.33 9.67,-9.67 0,-5.342 -4.328,-9.67 -9.67,-9.67 -5.292,0 -9.583,4.251 -9.663,9.524 3.049,1.912 5.187,5.146 5.577,8.9 1.242,0.582 2.623,0.916 4.086,0.916 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4395"
+             d="m 32.571,31.019 c -5.343,0 -9.671,4.329 -9.671,9.67 0,5.341 4.328,9.669 9.671,9.669 1.892,0 3.651,-0.553 5.143,-1.492 0.475,-3.091 2.132,-5.794 4.499,-7.634 0.01,-0.181 0.027,-0.36 0.027,-0.543 0,-5.341 -4.33,-9.67 -9.669,-9.67 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4397"
+             d="m 71.82,30.813 c 3.049,1.912 5.187,5.146 5.576,8.901 1.241,0.581 2.623,0.916 4.086,0.916 5.342,0 9.67,-4.329 9.67,-9.67 0,-5.341 -4.328,-9.67 -9.67,-9.67 -5.291,-10e-4 -9.582,4.251 -9.662,9.523 z"
+             inkscape:connector-curvature="0" /><circle
+             id="circle4399"
+             r="9.6709995"
+             cy="50.673"
+             cx="49.541" /><path
+             id="path4401"
+             d="m 69.643,51.019 -8.144,0 c -0.089,3.258 -1.479,6.192 -3.679,8.301 6.068,1.806 10.509,7.434 10.509,14.082 l 0,3.092 c 8.04,-0.297 12.674,-2.573 12.979,-2.729 l 0.646,-0.328 0.067,0 0,-10.036 C 82.023,56.573 76.469,51.019 69.643,51.019 Z"
+             inkscape:connector-curvature="0" /><path
+             id="path4403"
+             d="m 85.585,41.289 -8.142,0 c -0.088,3.258 -1.479,6.192 -3.678,8.301 6.068,1.806 10.508,7.433 10.508,14.081 l 0,3.092 c 8.039,-0.296 12.674,-2.572 12.979,-2.729 l 0.646,-0.327 0.069,0 0,-10.036 c 0,-6.827 -5.554,-12.382 -12.382,-12.382 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4405"
+             d="m 41.256,59.319 c -2.189,-2.099 -3.575,-5.017 -3.677,-8.254 -0.301,-0.022 -0.6,-0.047 -0.908,-0.047 l -8.203,0 c -6.828,0 -12.383,5.555 -12.383,12.383 l 0,10.037 0.025,0.155 0.691,0.218 c 5.227,1.633 9.893,2.383 13.944,2.621 l 0,-3.031 c 0.002,-6.647 4.441,-12.275 10.511,-14.082 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4407"
+             d="m 53.643,61.003 -8.206,0 c -6.828,0 -12.383,5.557 -12.383,12.382 l 0,10.037 0.026,0.157 0.69,0.216 c 6.516,2.035 12.177,2.715 16.835,2.715 9.101,0 14.375,-2.595 14.701,-2.76 l 0.646,-0.328 0.068,0 0,-10.037 C 66.023,66.558 60.469,61.003 53.643,61.003 Z"
+             inkscape:connector-curvature="0" /><path
+             id="path4409"
+             d="m 16.486,40.938 c 1.463,0 2.844,-0.335 4.086,-0.916 0.39,-3.755 2.527,-6.99 5.576,-8.902 -0.08,-5.271 -4.371,-9.523 -9.662,-9.523 -5.343,0 -9.671,4.329 -9.671,9.671 0,5.341 4.328,9.67 9.671,9.67 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4411"
+             d="M 24.202,49.899 C 22.004,47.79 20.613,44.855 20.525,41.596 l -8.143,0 C 5.554,41.597 0,47.152 0,53.979 l 0,10.037 0.069,0 0.646,0.327 c 0.306,0.154 4.939,2.433 12.979,2.728 l 0,-3.092 c 0,-6.647 4.439,-12.275 10.508,-14.08 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4413"
+             d="m 27.796,30.063 c 1.16,-0.47 2.93,-1.047 4.62,-1.047 1.967,0 3.891,0.506 5.607,1.469 0.382,-0.375 0.732,-0.783 1.05,-1.22 -1.63,-2.141 -2.52,-4.765 -2.52,-7.464 0,-1.818 0.406,-3.622 1.18,-5.261 -1.762,-1.592 -4.01,-2.461 -6.399,-2.461 -4.348,0 -8.133,2.943 -9.241,7.088 3.248,1.89 5.364,5.194 5.703,8.896 z"
+             inkscape:connector-curvature="0" /><path
+             id="path4415"
+             d="m 59.117,28.718 c 0.336,0.534 0.729,1.037 1.175,1.505 1.588,-0.792 3.334,-1.208 5.092,-1.208 1.729,0 3.386,0.442 4.472,0.812 0.34,-4.013 2.767,-7.555 6.4,-9.35 -1.332,-3.805 -4.938,-6.402 -9.021,-6.402 -2.64,0 -5.14,1.084 -6.945,2.992 0.634,1.512 0.955,3.101 0.955,4.73 -10e-4,2.495 -0.735,4.873 -2.128,6.921 z"
+             inkscape:connector-curvature="0" /></g></g><g
+         id="g4417" /><g
+         id="g4419" /><g
+         id="g4421" /><g
+         id="g4423" /><g
+         id="g4425" /><g
+         id="g4427" /><g
+         id="g4429" /><g
+         id="g4431" /><g
+         id="g4433" /><g
+         id="g4435" /><g
+         id="g4437" /><g
+         id="g4439" /><g
+         id="g4441" /><g
+         id="g4443" /><g
+         id="g4445" /></g><text
        xml:space="preserve"
-       style="font-size:12.55461216px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="338.6962"
-       y="-218.63278"
-       id="text3791"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="403.30896"
+       y="-503.13339"
+       id="text4490"
        sodipodi:linespacing="125%"
        transform="scale(1,-1)"><tspan
          sodipodi:role="line"
-         id="tspan3793"
-         x="338.6962"
-         y="-218.63278">Scheduler +</tspan><tspan
-         sodipodi:role="line"
-         x="338.6962"
-         y="-202.93951"
-         id="tspan3795">Workers Nodes</tspan></text>
-<text
+         id="tspan4492"
+         x="403.30896"
+         y="-503.13339">Users</tspan></text>
+<path
+       style="fill:#b3b3b3;fill-rule:evenodd;stroke:#818181;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3.2, 3.2;stroke-dashoffset:0;stroke-opacity:1"
+       d="m 208.90089,358.94735 307.90058,0"
+       id="path4275-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><g
+       id="g4629"
+       transform="matrix(0.12031503,0,0,-0.12031503,306.74105,337.30641)"><g
+         id="g4589"><path
+           id="path4591"
+           d="M 397.7,24.6 C 356.3,8.7 301.5,0 243.5,0 185.5,0 130.7,8.7 89.3,24.6 43.8,42.1 18.8,66.9 18.8,94.5 l 0,298 c 0,27.6 25,52.4 70.5,69.9 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 45.5,-17.4 70.5,-42.3 70.5,-69.9 l 0,-298 c 0,-27.6 -25,-52.4 -70.5,-69.9 z m 43.5,367.9 c 0,15.2 -19.9,31.9 -53.2,44.7 C 349.6,451.9 298.3,460 243.5,460 188.7,460 137.4,451.9 99,437.2 65.7,424.4 45.8,407.7 45.8,392.5 l 0,-51.4 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,51.4 z m 0,-97.8 c 0,15.2 -19.9,31.9 -53.2,44.7 -38.4,14.7 -89.7,22.8 -144.5,22.8 C 188.7,362.2 137.4,354.1 99,339.4 65.7,326.6 45.8,309.9 45.8,294.7 l 0,-52.9 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,52.9 z m 0,-99.4 c 0,15.2 -19.9,31.9 -53.2,44.7 -38.4,14.7 -89.7,22.8 -144.5,22.8 C 188.7,262.8 137.4,254.7 99,240 65.7,227.2 45.8,210.5 45.8,195.3 l 0,-1.5 0,-52.9 c 11.2,8.8 25.8,16.7 43.5,23.4 41.4,15.9 96.2,24.6 154.2,24.6 58,0 112.8,-8.7 154.2,-24.6 17.7,-6.8 32.3,-14.7 43.5,-23.4 l 0,54.4 z M 388,139.1 C 349.6,153.8 298.3,161.9 243.5,161.9 188.7,161.9 137.4,153.8 99,139.1 65.7,126.3 45.8,109.6 45.8,94.4 45.8,79.2 65.7,62.5 99,49.7 137.4,35 188.7,26.9 243.5,26.9 c 54.8,0 106.1,8.1 144.5,22.8 33.3,12.8 53.2,29.5 53.2,44.7 0,15.2 -19.9,32 -53.2,44.7 z"
+           inkscape:connector-curvature="0" /></g><g
+         id="g4593" /><g
+         id="g4595" /><g
+         id="g4597" /><g
+         id="g4599" /><g
+         id="g4601" /><g
+         id="g4603" /><g
+         id="g4605" /><g
+         id="g4607" /><g
+         id="g4609" /><g
+         id="g4611" /><g
+         id="g4613" /><g
+         id="g4615" /><g
+         id="g4617" /><g
+         id="g4619" /><g
+         id="g4621" /></g><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#marker4951)"
+       d="m 336.01666,378.34352 0.0422,-36.77641"
+       id="path4657"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><text
        xml:space="preserve"
-       style="font-size:12.2694912px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="338.16895"
-       y="-399.64914"
-       id="text3791-3"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       x="421.32611"
+       y="-319.92068"
+       id="text4490-7"
        sodipodi:linespacing="125%"
        transform="scale(1,-1)"><tspan
          sodipodi:role="line"
-         x="338.16895"
-         y="-399.64914"
-         id="tspan3795-8">Web Server</tspan><tspan
+         x="421.32611"
+         y="-319.92068"
+         id="tspan5018">Experiments,</tspan><tspan
          sodipodi:role="line"
-         x="338.16895"
-         y="-384.31229"
-         id="tspan3823">+ RESTful API</tspan></text>
-<path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
-       d="m 279.96762,223.91492 -25.96674,14.99191"
-       id="path3874"
-       inkscape:connector-curvature="0" /><path
-       style="fill:none;stroke:#000000;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:0.8, 2.4;stroke-dashoffset:0;marker-end:url(#Arrow1Lend)"
-       d="m 297.93529,377.45684 -45.2346,-78.34862"
-       id="path3876"
-       inkscape:connector-curvature="0" /><text
-       xml:space="preserve"
-       style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans Mono;-inkscape-font-specification:Bitstream Vera Sans Mono Bold"
-       x="248.20567"
-       y="341.34204"
-       id="text5485"
-       sodipodi:linespacing="125%"
-       transform="matrix(-0.00559672,0.99998434,0.99998434,0.00559672,0,0)"><tspan
+         x="421.32611"
+         y="-304.92068"
+         id="tspan5109">Running State &amp;</tspan><tspan
          sodipodi:role="line"
-         id="tspan5487"
-         x="248.20567"
-         y="341.34204">commands &lt;-&gt; data</tspan></text>
+         x="421.32611"
+         y="-289.92068"
+         id="tspan5111">Data Cache</tspan></text>
 <path
-       style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1, 3;stroke-dashoffset:0;marker-end:url(#Arrow1Lend)"
-       d="m 337.91474,232.26157 96.69632,55.82764"
-       id="path5493"
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-8);marker-end:url(#marker4951-8)"
+       d="m 335.19994,275.24651 0.0422,-36.77641"
+       id="path4657-5"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-8-8);marker-end:url(#marker4951-8-9)"
+       d="m 336.01666,182.58595 0.0422,-36.77641"
+       id="path4657-5-6"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-4);marker-end:url(#marker4951-9)"
+       d="m 336.01666,473.09719 0.0422,-36.77641"
+       id="path4657-2"
        inkscape:connector-curvature="0"
-       transform="matrix(0.8,0,0,-0.8,110.8457,563.2661)" /><path
-       style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
-       d="m 362.41611,437.46055 24.50138,0"
-       id="path5495"
+       sodipodi:nodetypes="cc" /><path
+       style="fill:#999999;fill-rule:evenodd;stroke:#818181;stroke-width:0.80000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3.2, 3.2;stroke-dashoffset:0;stroke-opacity:1"
+       d="m 208.90088,455.392 307.90059,0"
+       id="path4275-5-3"
        inkscape:connector-curvature="0"
-       transform="matrix(0.8,0,0,-0.8,110.8457,563.2661)" /></g></svg>
\ No newline at end of file
+       sodipodi:nodetypes="cc" /></g></svg>
\ No newline at end of file
diff --git a/doc/admin/img/running-an-experiment.dia b/doc/admin/img/running-an-experiment.dia
deleted file mode 100644
index adfbbd80c5b7c939cd4f80d7758ee7a14f760ae3..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/running-an-experiment.dia and /dev/null differ
diff --git a/doc/admin/img/running-an-experiment.pdf b/doc/admin/img/running-an-experiment.pdf
deleted file mode 100644
index 7e36a13df14d3b7d4eb4871d69b5bccb58b24728..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/running-an-experiment.pdf and /dev/null differ
diff --git a/doc/admin/img/running-an-experiment.png b/doc/admin/img/running-an-experiment.png
deleted file mode 100644
index 580c146fbc5e28c77a954eea59d83666021b1934..0000000000000000000000000000000000000000
Binary files a/doc/admin/img/running-an-experiment.png and /dev/null differ
diff --git a/doc/admin/index.rst b/doc/admin/index.rst
index 38e3c860b135fa3a9d597b00075b9f29ad94a02d..37b4b043eb6ab78e78f4717cafa77f2a84f2665a 100644
--- a/doc/admin/index.rst
+++ b/doc/admin/index.rst
@@ -40,8 +40,9 @@ BEAT platform.
     :numbered:
 
     architecture
+    idiap_platform
     applications
     deployment_guidelines
     installation
-    idiap_platform
+    backend
     platform_extension
diff --git a/doc/admin/installation.rst b/doc/admin/installation.rst
index e9d4b8a0afd06684de47486284150dff86438dd7..7bc4b2ae22da0a6c6d4a0f2f54314e6b0caee274 100644
--- a/doc/admin/installation.rst
+++ b/doc/admin/installation.rst
@@ -23,25 +23,30 @@
 
 .. _administratorguide-installation:
 
-Installation
-============
+==============
+ Installation
+==============
 
 In this section, we provide basic instructions and fundamental ideas required
 to deploy the BEAT platform. Depending on the deployment strategy (single
-machine or distributed across several machines), the installation instruction
-will of course differ. Nevertheless, configuring and installing a platform
-instance is reasonably easy.
+machine or distributed across several machines), the installation instructions
+will of course differ. Nevertheless, configuring and installing a simple
+platform instance remains reasonably easy.
 
 
-.. _administratorguide-installation-installing:
-
 Installing beat.web
 -------------------
 
-The BEAT platform is written as a set of python packages. The package
-``beat.web`` depends both on ``beat.core`` and ``beat.scheduler``. To deploy a
-platform on a single machine, it is, hence, sufficient to install ``beat.web``
-to get the full BEAT software stack installed. The recipe is as follows::
+The BEAT platform is written as a set of python packages. This package
+(``beat.web``), in particular, constitutes the central deployment pillar of a
+BEAT platform instance. It builds upon the Django_ web framework as its base
+development library. If you are unfamiliar with this framework but wish to
+deploy or develop the BEAT platform, it is recommended that you familiarize
+yourself with it.
+
+To deploy a platform on a single machine, it is hence sufficient to install
+``beat.web`` to get the full BEAT software stack installed. The recipe is as
+follows::
 
   $ # after downloading and extracting the beat.web package
   $ python bootstrap-buildout.py
@@ -51,33 +56,140 @@ These two commands should download and install all non-installed dependencies
 and generate a fully operational test and development environment.
 
 
+.. note::
+
+   If the program cpulimit_ is available on your system, the BEAT platform may
+   use it to limit user process load on worker resources. In order to install a
+   copy of this program, follow these simple steps::
+
+     $ cd src/cpulimit
+     $ make
+     $ ./src/cpulimit -h #to test it
+     $ cd ../../bin #go back to the root of beat.web and then into the `bin' dir
+     $ ln -s ../src/cpulimit/src/cpulimit
+     $ cd .. #go back to the root of beat.web
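+
+   Once linked, you may sanity-check the tool by capping a CPU-bound process;
+   ``yes`` below is an arbitrary program, used purely for illustration::
+
+     $ ./bin/cpulimit -l 50 yes > /dev/null #cap at 50% of one core; Ctrl-C to stop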
+
+
+.. tip::
+
+  If you'd like to **speed up** the installation, it is strongly advised you
+  prepare a preset virtual environment (see the virtualenv_ package) with all
+  required dependencies, so that ``./bin/buildout`` does not download and
+  install all of them every time you clean up. This technique should allow you
+  to quickly clean up and restart your working environment, which is useful
+  during development.
+
+  In order to fetch currently needed dependencies, run::
+
+    $ ./bin/buildout #to setup once
+    $ ./bin/pip freeze > requirements.txt
+
+  Examine the file ``requirements.txt`` and remove packages you are either
+  developing locally (e.g., all that are under ``src``) or think you don't
+  need. The command ``pip freeze`` reports all installed packages, not only
+  those needed by your project. If the Python interpreter you used for
+  bootstrapping already had a good set of packages installed, you may see them
+  listed there.
+
+  Once you have a satisfying ``requirements.txt`` file, you may proceed to
+  recreate a virtualenv_ you'll use for your development. Just call::
+
+    $ virtualenv ~/work/beat-env #--system-site-packages
+
+  This creates the virtual environment. The new environment does not contain
+  system packages by default; you may override that by specifying
+  ``--system-site-packages`` as suggested above. Then, install the required
+  packages into your new virtual environment::
+
+    $ ~/work/beat-env/bin/pip install -r requirements.txt
+
+  After that step is done, your virtual environment is ready for deployment.
+  You may now start from scratch to develop ``beat.web`` taking as base the
+  Python interpreter on your virtualenv_::
+
+    $ cd beat.web
+    $ git clean -fdx #full clean-up
+    $ ~/work/beat-env/bin/python bootstrap-buildout.py
+    $ ./bin/buildout
+
+  You'll notice the buildout step now takes considerably less time, and you may
+  repeat this last step as often as needed. ``pip`` is a very flexible tool and
+  you may use it to manage the virtualenv_, installing and removing packages as
+  needed.
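+
+  For example (``ipdb`` below is an arbitrary package, named purely for
+  illustration)::
+
+    $ ~/work/beat-env/bin/pip install ipdb   #add a package to the environment
+    $ ~/work/beat-env/bin/pip uninstall ipdb #and remove it again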
+
+
+Documentation
+-------------
+
+The documentation project is divided into 3 parts. The user guide is the only
+one which is automatically built as part of the ``buildout`` procedure. The API
+and administrator guides need to be compiled manually if required.
+
+To build the API documentation, just do::
+
+  $ ./bin/sphinx-apidoc --separate -d 2 --output=doc/api/api beat beat/web/*/migrations beat/web/*/tests
+  $ ./bin/sphinx-build doc/api html/api
+
+
+To build the administrator guide, just do::
+
+  $ ./bin/sphinx-build doc/admin html/admin
+
+
+The above commands will build the stated guides, in HTML format, and dump the
+results into your local directory ``html``. You may then navigate to that
+directory and, with your preferred web browser, open the file ``index.html`` to
+browse the available documentation.
+
+The basic user guide, which includes information for users of the platform, is
+built automatically upon ``buildout``. If you wish to build it and place it
+alongside the other guides, you may do so like this::
+
+  $ ./bin/sphinx-build doc/user html/user
+
+
 Unit Testing
-............
+------------
 
-After installation, it is possible to run a suite of unit tests. To do so,
-use::
+After installation, it is possible to run a suite of unit tests to check the
+sanity of the installation. To do so, use::
 
-  $ ./bin/django test --settings=beat.web.settings.test -v2
+  $ ./bin/django test --settings=beat.web.settings.test -v 1
 
 You may pass filtering criteria to just launch tests for a particular set of
 ``beat.web`` applications. For example, to run tests only concerning
 ``beat.web.toolchains``, run::
 
-  $ ./bin/django test --settings=beat.web.settings.test -v2 \
-    beat.web.toolchains.tests
+  $ ./bin/django test --settings=beat.web.settings.test -v 1 beat.web.toolchains.tests
 
 To measure test coverage, use the ``coverage`` utility::
 
-  $ NOSE_WITH_COVERAGE=1 NOSE_COVER_PACKAGE=beat.web ./bin/django test \
-    --settings=beat.web.settings.test -v2
+  $ ./bin/coverage run --source='./beat/web' ./bin/django test --settings=beat.web.settings.test
+  $ ./bin/coverage report
+
+Or, to generate an HTML report::
+
+  $ ./bin/coverage html
+
+.. tip::
+
+   You may significantly speed up your testing by re-using the same test
+   database from run to run. In order to do this, just specify the flag
+   ``--keepdb`` when you run your tests::
+
+     $ ./bin/django test --settings=beat.web.settings.test -v 1 --keepdb
+
+   In this case, Django will create and keep a test database called
+   ``test.sql3`` on your current directory. You may delete it when you're done.
 
 
 .. _administratorguide-installation-instantiating:
 
-Instantiating and Starting a Web Server
----------------------------------------
+Instantiating and Starting a Development System
+-----------------------------------------------
 
-For a simple (development) web server, the default settings on
+For a simple (development) system, the default settings on
 ``beat/web/settings/settings.py`` should work out of the box. These settings:
 
   * Instantiate the web service on the local host under port 8000 (the address
@@ -93,9 +205,56 @@ If you need to tweak these settings, just edit the file
 ``beat/web/settings/settings.py``. You may also consult the `Django
 documentation`_ for detailed information on other settings.
 
-The test server is now ready to be started::
+Once the Django settings are tweaked to your liking, you can run a single
+command to fully populate the development webserver with test databases,
+toolchains, algorithms and experiments::
 
-  $ ./bin/django runserver -v3
+  $ ./bin/django install -v1
+
+.. note::
+
+   Concerning the databases installed by this command: it only explains to the
+   platform how to **access** their data. It does not download the raw data for
+   the databases, which you must procure yourself through the relevant web
+   sites (check out the database pages on the Idiap instance of the BEAT
+   platform for details).
+
+.. note::
+
+  If you need to specify your own paths to the directories containing the
+  databases, you can just create a simple JSON file as follows::
+
+    {
+      "atnt/1": "/remote/databases/atnt",
+      "banca/2": "/remote/databases/banca"
+    }
+
+  Then just use the previous command with the option ``--database-root-file``::
+
+    $ ./bin/django install -v1 --database-root-file=MYFILE.json
+
+  By default, paths to the root of all databases are set to match the Idiap
+  Research Institute filesystem organisation.
+
+.. note::
+
+  For every installed database, you'll need to generate its data indices,
+  which allow the platform to correctly parallelize algorithms. To do so, for
+  every combination of database and version you wish to support, run the
+  following command::
+
+    $ ./bin/beat -p prefix db index <name>/<version>
+
+  Replace the string ``<name>`` with the name of the database you wish to dump
+  the indices for, and ``<version>`` with its version. For example, to
+  dump the indices for the AT&T database, version 1, do the following::
+
+    $ ./bin/beat -p prefix db index atnt/1
+
+Once the contributions and users are in place, you're ready to start the test
+server::
+
+  $ ./bin/django runserver
 
 At this point, the platform can be accessed by typing the URL
 ``http://127.0.0.1:8000`` in a web browser on the machine the server is
@@ -108,82 +267,181 @@ running.
    assuming the database server is operational.
 
 
-.. _administratorguide-installation-localhost:
+.. _administratorguide-installation-allinone:
 
-All-in-one Platform using Localhost
------------------------------------
+All-in-one Platform
+===================
 
-To effectively use your new server and test all aspects of it, you'll also need
-a scheduler with at least one attached worker that can execute experiments. For
-most development purposes, a simple 3-node system, with all components running
-on the current (local) host is sufficient.
+The BEAT platform is composed of 3 application types that run in synchrony to
+create, store and process your experiments: the web server, the scheduler and
+one or more workers. The web server is what you use to create and launch
+experiments. The scheduler assigns experiment blocks (actually
+:py:class:`beat.web.backend.JobSplit`'s) to run on one of the available
+workers, respecting user quotas and worker limitations. The worker runs the
+user algorithms installed on each block once it is scheduled, notifying the
+web server when it's done.
 
-Here is a recipe to start a simple 3-node system in which the local worker uses
-the system-wide installed Python interpreter to execute the algorithms.
+The base software framework and models that allow the 3 applications to run
+cooperatively are described in a single place: the Django_ models and the
+central database of this package. Effectively, this means the package contains
+all information required to run the 3 types of applications. The applications
+"communicate" with each other through the shared Django_ database, reading and
+modifying objects as experiments are assigned and processed. Several deployment
+scenarios are therefore possible, and you should use the one best suited to
+your requirements.
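+
+Concretely, such a shared setup boils down to pointing every application at
+the same database in the Django settings file. The sketch below uses a
+hypothetical PostgreSQL server; names, host and credentials are placeholders,
+not prescriptions::
+
+  DATABASES = {
+      'default': {
+          'ENGINE': 'django.db.backends.postgresql_psycopg2',
+          'NAME': 'beat',
+          'USER': 'beat',
+          'PASSWORD': '...',
+          'HOST': 'db.example.com', #reachable from every node
+          'PORT': '5432',
+      }
+  }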
 
-.. _administratorguide-installation-localhost-cpulimit:
+In order to start the system, just run::
 
-External Utility: cpulimit
-..........................
+  $ ./bin/django runserver
 
-First, make sure the program ``cpulimit`` is available on your system. The BEAT
-platform uses this program to control slot usage on the scheduling/worker
-level (Resource Controller)::
+Once the Django development web server is up and running, open a browser and
+navigate to http://127.0.0.1:8000. Log in with an account that has
+administrative rights and click on the scheduler icon in the omni-bar at the
+top of any page. Use the available "Helper panel" to launch one-off or
+repetitive scheduling and/or worker activities. In this case, both the
+scheduling and worker activities run in the context of the web server process.
 
-  $ cpulimit -h
 
-If that is not the case, then you need to install it. Either install a package
-that is native to your system (e.g. on Debian or Ubuntu platforms) or compile
-the checked-out version available at ``src/cpulimit``::
+Discrete Platform using Localhost
+=================================
 
-  $ cd src/cpulimit;
-  $ make
-  $ ./src/cpulimit -h #to test it
-  $ cd ../../bin #go back to the root of beat.web and the into the `bin' dir
-  $ ln -s ../src/cpulimit/src/cpulimit
-  $ cd .. #go back to the root of beat.web
+It is also possible to run each of the applications as separate processes.
+Here is how to do it:
 
-Now start the localhost system::
+  1. Start the web service normally::
 
-  $ ./bin/localhost.py -v
-  ...
+        $ ./bin/django runserver
 
-You may inspect this programs help message for details on its usage and
-options.
+  2. Start a single scheduling node::
 
-Once the localhost system is started and the scheduler is properly configured,
-you may open a browser window to your `localhost, port 8000
-<http://127.0.0.1:8000>`_, to get started with your locally installed platform.
+        $ ./bin/scheduler -vv
 
+  3. Start a worker for your current node::
 
-Triggering a Scheduler Reconfiguration
-......................................
+        $ ./bin/worker -vv
 
-If you modify the queue configuration on the Django administrative panel,
-you'll need to notify the scheduler of those changes. You can trigger a
-scheduler (hot) re-configuration using the following command-line program::
+By default, the applications are configured to figure out paths and
+configuration options by themselves. You can override some defaults via the
+command line. Just check the output of each of those commands by running them
+with the ``--help`` flag.
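+
+For instance, to list the options understood by the scheduler and the worker::
+
+  $ ./bin/scheduler --help
+  $ ./bin/worker --help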
 
-  $ ./bin/django qconf
 
-.. note::
+Mixing and Matching
+===================
 
-   Optionally, you may also visit `your local scheduler page
-   <http://127.0.0.1:8000/backend/scheduler>`_, and hit the (green) button that
-   says "Send configuration to Scheduler". It has the same effect.
+You can mix and match any of the above techniques to run a 3-node system
+(all-in-one or discrete) and build a test system suited to your needs. For
+example, it is possible to launch the scheduling activities using the web
+server and the page reload trick while launching the worker process separately,
+as shown above.
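+
+For instance, a hybrid setup using only commands already shown in this guide::
+
+  $ ./bin/django runserver #web server; scheduling via the "Helper panel"
+  $ ./bin/worker -vv       #worker running as a separate process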
+
+
+Going Full Scale
+================
+
+In order to transform the development system into a full-scale platform, you
+will have to create your own maintenance scripts allowing you to automatically
+start/stop, update and secure the BEAT platform applications across your BEAT
+web nodes. It is beyond the scope of this documentation to go into detail
+concerning these. We provide only some tips which we consider important:
+
+  * Don't use the SQLite backend on a production system; it does not work well
+    with the concurrency you may generate. Prefer a PostgreSQL database.
+
+  * The "cache" directory (see the variable ``CACHE_ROOT`` in the Django_
+    settings file) is shared amongst all applications in the cluster. It is
+    advisable to use a proper networked filesystem with good synchronisation
+    primitives to avoid issues concerning the production and consumption of
+    data caches between workers living in different nodes.
+
+  * Don't rely on your memory: script all deployment instructions so that you
+    can do them routinely whenever newer versions come up or you have an issue.
+
+  * Security: You'll be running code uploaded by users on your computer. Make
+    sure you properly isolate each of the processes and the backend farm to
+    avoid unpleasant surprises. Some helpers:
+
+    * Disk access: two main directories are shared across the applications. The
+      cache directory stores intermediary block results. The prefix directory
+      stores user contributions on disk. You may tune the file system access on
+      a distributed BEAT platform to increase its security:
+
+      - The web server only needs read access to the cache directories. It
+        needs read and write access to the prefix directory in order to store
+        user contributions.
+
+      - The scheduler needs read/write access to the cache directory. It does
+        not use the prefix directory and does not read or process user
+        contributions. The scheduler also needs access to the Django database.
+
+      - The workers need read/write access to the cache directory and read
+        access to the prefix directory. The workers also need access to the
+        Django database.
+
+      - The processes launched by the worker need permissions similar to those
+        of their worker. The user executable, though, should have demoted
+        permissions to increase security: for example, it has no need to access
+        the Django database (or the settings file), the prefix or the cache,
+        since all of this is done via the parent process. The easiest way to
+        implement this is to make sure the worker process is run by an
+        unprivileged user and a group with the right access permissions,
+        allowing it to access the Django database (and the Django settings
+        file), the prefix and the cache. These permissions will be inherited by
+        the processes launched by the worker, which serve data to the processes
+        wrapping the user code. To demote the user process, just set the group
+        id of the environment executable to an unprivileged group (a sketch of
+        this demotion appears after this list). This way, the following
+        security chain is achieved (pseudo user/groups)::
+
+             worker        ->      process      -> environment exec(user code)
+          [nobody:beat]         [nobody:beat]           [nobody:nogroup]
+
+        The BEAT platform requires this process chain to belong to the same
+        user, so that signals can be delivered to stop or kill the
+        applications in the chain if necessary.
+
+        If you don't do anything, then the user code will be run in a process
+        with the same privileges as the worker application.
+
+    * E-mail privileges: e-mailing may be configured as part of the Django_
+      standard logging facilities or used to report experiment completion and
+      other platform activity. While, by default, all node types have access to
+      the Django configuration and can potentially send e-mails, it is wiser to
+      use a Django extension such as Post-office_ to centralize e-mail sending
+      on one node, avoiding potential spam (see the settings sketch at the end
+      of this section).
+
+    * User processes: user code is run in isolated processes launched by the
+      children of worker processes. Because the user code process does not
+      require disk access to either the prefix or the cache, it should run
+      without access to those resources in order to improve the platform
+      security. This may be achieved by running user processes in ``chroot``'ed
+      environments or making sure user code is launched with a user identity
+      which has far fewer access permissions than the worker process itself.
+      Have a look at the ``--help`` output of the ``worker`` application for
+      more information and examples.
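+
+As a minimal sketch of the demotion technique described above (assuming the
+environment executable is a binary that honours the set-group-ID bit; the path
+and group names below are placeholders, not prescriptions)::
+
+  $ chgrp nogroup /path/to/environment/<executable> #an unprivileged group
+  $ chmod g+s /path/to/environment/<executable>     #run with the file's group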
+
+You may contact our `support`_ in case you need advice concerning this topic.
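+
+For reference, a minimal sketch of the e-mail centralization mentioned above,
+as a Django settings fragment for the designated node (application and backend
+names as documented by the django-post_office package; adapt to your
+deployment)::
+
+  INSTALLED_APPS += ('post_office',)
+  EMAIL_BACKEND = 'post_office.EmailBackend'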
+
+Development Notes
+-----------------
 
 
 .. _administratorguide-installation-localhost-snapshot:
 
-Local Development Server
-........................
+Backup and Restore
+==================
+
+The BEAT platform can be backed up and restored easily. These commands allow
+for safe record keeping, but also let you copy over the state of a given
+deployment to a local development server, where more thorough tests can be
+performed while tracking a bug or improving performance.
 
 It is easy to quickly set up a local system for development, taking as base the
-current state of a production system.
+current state of a production system. Here are some instructions:
 
 
 1. Before starting, make sure you have gone, at least once, through the
-   administratorguide-installation-localhost_ instructions above. It explains
-   the very basic setup required for a complete development environment.
+   instructions above. They explain the very basic setup required for a
+   complete development environment.
 
 
 2. Dump and back-up your current **production** BEAT database::
@@ -236,20 +494,23 @@ current state of a production system.
      [development]$ ./bin/django qsetup --reset
 
 
-7. Apply migrations::
+7. Re-checkout the tip::
+
+   $ git checkout master #or any other branch
+
+
+8. Apply migrations::
 
    $ ./bin/django migrate
 
 
 At this point, you should have a complete development setup with all elements
 available on the production system installed locally. This system is fully
-capable of running experiments locally using your machine. Start a full system
-using ``localhost.py`` as explained on the
-administratorguide-installation-localhost_ section above.
+capable of running experiments locally using your machine.
 
 
 Testing Django Migrations
-.........................
+=========================
 
 Django migrations, introduced in version 1.7, is a useful feature for
 automatically migrating your database to new model schemas, if you get it
@@ -311,108 +572,34 @@ loop.
       Go back to a. and restart.
 
 
-.. _administratorguide-installation-scheduler:
-
-Starting the scheduler and the workers separately
--------------------------------------------------
-
-When using the localhost script described in
-administratorguide-installation-localhost_, three distinct processes (a web
-server, a scheduler and a worker) are started on the same machine. It is very
-convenient for testing purposes. However, we may want to distribute the load
-on several servers in a real scenario.
-
-Starting a BEAT web server has been described in
-administratorguide-installation-instantiating_. The configuration of a
-scheduler and or a worker is also very similar. Both components are
-implemented within the ``beat.scheduler`` package.
-
-
-Installing beat.scheduler
-.........................
-
-This package can be installed similarly to ``beat.web``, as follows::
-
-  $ # after downloading and extracting the beat.scheduler package
-  $ python bootstrap-buildout.py
-  $ ./bin/buildout
-
-These two commands should download and install all non-installed dependencies.
-
-
-Unit Testing
-............
-
-After installation, it is possible to run a suite of unit tests. To do so,
-use ``nose``::
-
-  $ ./bin/nosetests -sv
-
-You can limit the number of tests executed using nose attributes. For the
-current batch of tests, you may select:
-
-  * ``run``: Only execute tests in which experiments are run
-
-To measure the test coverage, do the following::
-
-  $ ./bin/nosetests -sv --with-coverage --cover-package=beat.scheduler
-
-To produce an HTML test coverage report, at the directory `./htmlcov`, do the
-following::
-
-  $ ./bin/nosetests -sv --with-coverage --cover-package=beat.scheduler \
-    --cover-html --cover-html-dir=htmlcov
-
-To profile the code, for example, do::
-
-  $ ./bin/python -mcProfile -s tottime ./bin/nosetests -sv | less
+Javascript Management with Node.js/Bower
+========================================
 
+We manage external javascript packages with the help of Bower_. If you'd like
+to include more packages that will be statically served with the Django web
+app, please consider including them in the appropriate section of
+``buildout.cfg``.
 
-Starting a Scheduler
-....................
 
-To start a scheduler process, execute the `./bin/scheduler.py` script. Here is
-an example set of parameters for local running::
+Issues
+------
 
-  $ ./bin/scheduler.py -vv --prefix=<your-beat-prefix> --period=10 \
-    --web-api=http://localhost:8000/api
-
-.. note::
-
-   1. If you don't set the ``--web-api`` flag, then supposed-to-be-sent
-      messages to the web server will be printed to the screen.
-
-   2. If you don't set the ``--prefix`` flag, then the scheduler will use the
-      local prefix path at ``./beat/scheduler/prefix``.
-
-   3. You'll want to configure the scheduler once it is started. To do so,
-      follow the instructions
-      administratorguide-installation-localhost-snapshot_, point 6.
-
-
-Starting a Worker
-.................
-
-The **worker** requires no particular configuration for being able to execute
-user algorithms on your local node. Here is an example set of parameters for
-local running::
-
-  $ ./bin/worker.py -vv --prefix=<your-beat-prefix> --name=node1 \
-     --period=10 --slots=1
-
-.. note::
-
-   If you don't set the ``--prefix`` flag, then the scheduler will use the
-   local prefix path at ``./beat/scheduler/prefix``.
-
-.. note::
+If you find problems concerning this package, please post a message to our
+`group mailing list`_. Currently open issues can be tracked at `our gitlab
+page`_.
 
-   The worker requires the tool `cpulimit`, as previously described in
-   administratorguide-installation-localhost-cpulimit_.
 
-Running in a production environment requires you organize your environments
-into folders rooted on the same directory and pass that root directory
-parameter using the command-line. Workers started this way may be able to
-execute more experiment variations.
+.. Place here references to all citations in lower case
 
 .. _django documentation: https://doc.djangoproject.com/en/
+.. _django: https://www.djangoproject.com/
+.. _cpulimit: https://github.com/opsengine/cpulimit/
+.. _pip: http://pypi.python.org/pypi/pip
+.. _easy_install: http://pypi.python.org/pypi/setuptools
+.. _zc.buildout: http://pypi.python.org/pypi/zc.buildout
+.. _virtualenv: http://pypi.python.org/pypi/virtualenv
+.. _group mailing list: https://groups.google.com/d/forum/beat-devel
+.. _our gitlab page: https://gitlab.idiap.ch/beat/beat.web/issues
+.. _bower: http://bower.io
+.. _support: https://www.beat-eu.org/platform/contact/
+.. _post-office: https://pypi.python.org/pypi/django-post_office
diff --git a/doc/admin/platform_extension.rst b/doc/admin/platform_extension.rst
index 285e5ddc7d1b9809e010cfb56d50e56548013bd3..afbf174a66f47418739226d2b844b24642329498 100644
--- a/doc/admin/platform_extension.rst
+++ b/doc/admin/platform_extension.rst
@@ -27,7 +27,7 @@ Extending the platform
 ======================
 
 A running platform can be extended in several ways, depending to the specific
-needs of this particular instance. We devise this section following a
+needs of the particular instance. We devise this section following a
 `Troubleshooting Guide` style. Counters installed on the BEAT platform software
 allow for diagnosing and understanding similar issues.
 
diff --git a/doc/api/conf.py b/doc/api/conf.py
index 80b081cef72efa8457c9281a852a204b5e2a6d32..ed592c1f99aae04bd7cd929d1cfcde4957ec8216 100644
--- a/doc/api/conf.py
+++ b/doc/api/conf.py
@@ -54,7 +54,6 @@ extensions = [
 
     'sphinx.ext.todo',
     'sphinx.ext.coverage',
-    'sphinx.ext.pngmath',
     'sphinx.ext.ifconfig',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
@@ -64,16 +63,24 @@ extensions = [
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
 
-    'sphinx_numfig',
     'sphinxcontrib.ansi',
     'sphinxcontrib.programoutput',
     'sphinxcontrib.httpdomain',
 
     ]
 
+import sphinx
+from distutils.version import LooseVersion
+# plain string comparison would misorder versions (e.g. "1.10" < "1.4.1"),
+# so compare parsed version numbers instead
+if LooseVersion(sphinx.__version__) >= LooseVersion("1.4.1"):
+    extensions.append('sphinx.ext.imgmath')
+else:
+    extensions.append('sphinx.ext.pngmath')
+
 # Always includes todos
 todo_include_todos = True
 
+# Create numbers on figures with captions
+numfig = True
+
 # Generates auto-summary automatically
 autosummary_generate = True
 
diff --git a/doc/user/algorithms/img/case-study-4.pdf b/doc/user/algorithms/img/case-study-4.pdf
index cdd649e6ed58802f041ad3411c12ab1291dab717..c5933798fb5939cc0a625173fd0e9688221997bf 100644
Binary files a/doc/user/algorithms/img/case-study-4.pdf and b/doc/user/algorithms/img/case-study-4.pdf differ
diff --git a/doc/user/algorithms/img/case-study-4.svg b/doc/user/algorithms/img/case-study-4.svg
index 89ac930fce3fbabbe9ea789fee0b8d438976292d..de45087c15e2a5349e76f80a6cfe7e52381dba87 100644
--- a/doc/user/algorithms/img/case-study-4.svg
+++ b/doc/user/algorithms/img/case-study-4.svg
@@ -9,12 +9,12 @@
    xmlns="http://www.w3.org/2000/svg"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="915.20312"
-   height="243.85526"
-   viewBox="0 0 915.20312 243.85525"
+   width="916.23114"
+   height="352.40994"
+   viewBox="0 0 916.23114 352.40994"
    id="svg2"
    version="1.1"
-   inkscape:version="0.48.5 r10040"
+   inkscape:version="0.91 r13725"
    sodipodi:docname="case-study-4.svg"
    inkscape:export-filename="/remote/idiap.svm/user.active/aanjos/work/beat/beat.web/doc/user/algorithms/img/case-study-2.png"
    inkscape:export-xdpi="39.93"
@@ -27,8 +27,8 @@
      inkscape:pageopacity="0.0"
      inkscape:pageshadow="2"
      inkscape:zoom="1.1507426"
-     inkscape:cx="417.32462"
-     inkscape:cy="126.84122"
+     inkscape:cx="426.85751"
+     inkscape:cy="143.10754"
      inkscape:document-units="px"
      inkscape:current-layer="layer1"
      showgrid="false"
@@ -38,9 +38,10 @@
      fit-margin-bottom="0"
      inkscape:window-width="1737"
      inkscape:window-height="678"
-     inkscape:window-x="92"
-     inkscape:window-y="67"
-     inkscape:window-maximized="0" />
+     inkscape:window-x="141"
+     inkscape:window-y="299"
+     inkscape:window-maximized="0"
+     showguides="false" />
   <defs
      id="defs4">
     <marker
@@ -53,7 +54,7 @@
        inkscape:isstock="true">
       <path
          id="path4221"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)"
          inkscape:connector-curvature="0" />
@@ -68,7 +69,7 @@
        inkscape:isstock="true">
       <path
          id="path4215"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.8,0,0,-0.8,-10,0)"
          inkscape:connector-curvature="0" />
@@ -84,8 +85,8 @@
       <path
          inkscape:connector-curvature="0"
          id="path4221-0"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#8800a7;fill-opacity:1;fill-rule:evenodd;stroke:#8800a7;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
     <marker
@@ -99,7 +100,7 @@
       <path
          inkscape:connector-curvature="0"
          id="path4221-0-3"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
@@ -114,7 +115,7 @@
       <path
          inkscape:connector-curvature="0"
          id="path4221-0-7"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
@@ -128,7 +129,7 @@
        inkscape:isstock="true">
       <path
          id="path3897"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#00a700;fill-opacity:1;fill-rule:evenodd;stroke:#00a700;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)"
          inkscape:connector-curvature="0" />
@@ -144,7 +145,7 @@
       <path
          inkscape:connector-curvature="0"
          id="path4119"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#00a700;fill-opacity:1;fill-rule:evenodd;stroke:#00a700;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
@@ -159,7 +160,7 @@
       <path
          inkscape:connector-curvature="0"
          id="path4344"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
          style="fill:#00a700;fill-opacity:1;fill-rule:evenodd;stroke:#00a700;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
@@ -174,8 +175,23 @@
       <path
          inkscape:connector-curvature="0"
          id="path3926"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
-         style="stroke-opacity:1;fill-rule:evenodd;fill-opacity:1;stroke:#8800a7;stroke-width:1pt;fill:#8800a7"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#00a700;fill-opacity:1;fill-rule:evenodd;stroke:#00a700;stroke-width:1pt;stroke-opacity:1"
+         transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend-5-73Jn"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow1Mend-5-73Jn-0"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path3926-0"
+         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+         style="fill:#8800a7;fill-opacity:1;fill-rule:evenodd;stroke:#8800a7;stroke-width:1pt;stroke-opacity:1"
          transform="matrix(-0.4,0,0,-0.4,-4,0)" />
     </marker>
   </defs>
@@ -195,11 +211,11 @@
      inkscape:label="Layer 1"
      inkscape:groupmode="layer"
      id="layer1"
-     transform="translate(62.458773,-67.280764)">
+     transform="translate(61.50674,-67.280762)">
     <flowRoot
        xml:space="preserve"
        id="flowRoot4181"
-       style="font-size:8px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"><flowRegion
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:8px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"><flowRegion
          id="flowRegion4183"><rect
            id="rect4185"
            width="76.135551"
@@ -207,10 +223,10 @@
            x="262.5499"
            y="216.83342" /></flowRegion><flowPara
          id="flowPara4187" /></flowRoot>    <rect
-       style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
        id="rect4136"
-       width="274.28571"
-       height="148.57143"
+       width="274.28568"
+       height="195.2718"
        x="257.8447"
        y="114.86711"
        ry="22.857143" />
@@ -224,13 +240,13 @@
          height="28.256489"
          width="49.448856"
          id="rect4138"
-         style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
       <text
          sodipodi:linespacing="125%"
          id="text4140"
          y="236.03981"
          x="187.99271"
-         style="font-size:17.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
          xml:space="preserve"><tspan
            y="236.03981"
            x="187.99271"
@@ -247,13 +263,13 @@
          height="28.256489"
          width="49.448856"
          id="rect4138-9"
-         style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
       <text
          sodipodi:linespacing="125%"
          id="text4140-9"
          y="232.19667"
          x="412.23044"
-         style="font-size:17.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
          xml:space="preserve"><tspan
            y="232.19667"
            x="412.23044"
@@ -262,7 +278,7 @@
     </g>
     <text
        xml:space="preserve"
-       style="font-size:25px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="393.91333"
        y="142.4402"
        id="text4189"
@@ -272,7 +288,7 @@
          x="393.91333"
          y="142.4402">Block</tspan></text>
     <path
-       style="fill:none;stroke:#00a700;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Mendw)"
+       style="fill:none;stroke:#00a700;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mendw)"
        d="m 533.37904,189.15282 315.61533,0"
        id="path4206"
        inkscape:connector-curvature="0"
@@ -280,7 +296,7 @@
     <flowRoot
        xml:space="preserve"
        id="flowRoot5058"
-       style="font-size:8px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:8px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        transform="translate(8.92149,113.86711)"><flowRegion
          id="flowRegion5060"><rect
            id="rect5062"
@@ -289,14 +305,14 @@
            x="7.6556292"
            y="-41.912968" /></flowRegion><flowPara
          id="flowPara5064" /></flowRoot>    <path
-       style="fill:none;stroke:#00a700;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Mend-5o)"
+       style="fill:none;stroke:#00a700;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mend-5o)"
        d="m -60.958773,157.15282 311.270313,0"
        id="path4206-5"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
     <text
        xml:space="preserve"
-       style="font-size:17.5px;font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:monospace;-inkscape-font-specification:monospace Italic"
+       style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:monospace;-inkscape-font-specification:'monospace Italic';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="88.325813"
        y="80.57666"
        id="text5066"
@@ -304,25 +320,25 @@
          sodipodi:role="line"
          id="tspan5068"
          x="88.325813"
-         y="80.57666">&lt;user/format/1&gt;</tspan></text>
+         y="80.57666">&lt;user/data/1&gt;</tspan></text>
     <path
-       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
        d="m 33.445712,180.60126 0,-8.47588 47.300853,0 0,8.47588"
        id="path5084"
        inkscape:connector-curvature="0" />
     <path
-       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
        d="m 99.212743,180.56436 0,-8.47588 47.300847,0 0,8.47588"
        id="path5084-6"
        inkscape:connector-curvature="0" />
     <path
-       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
        d="m 164.97977,180.56436 0,-8.47588 47.30085,0 0,8.47588"
        id="path5084-6-2"
        inkscape:connector-curvature="0" />
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="212.61592"
        y="193.59842"
        id="text5121"
@@ -333,7 +349,7 @@
          y="193.59842">0</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="165.31508"
        y="193.71951"
        id="text5125"
@@ -344,7 +360,7 @@
          y="193.71951">1</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="146.17599"
        y="193.71951"
        id="text5129"
@@ -355,7 +371,7 @@
          y="193.71951">1</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="99.148552"
        y="193.71951"
        id="text5133"
@@ -366,7 +382,7 @@
          y="193.71951">2</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="80.556313"
        y="193.71951"
        id="text5137"
@@ -377,7 +393,7 @@
          y="193.71951">2</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="32.708626"
        y="193.59842"
        id="text5141"
@@ -386,32 +402,28 @@
          id="tspan5143"
          x="32.708626"
          y="193.59842">3</tspan></text>
-    <g
-       id="g4144-8"
-       transform="translate(94.6736,-10.805757)">
-      <rect
-         ry="0"
-         y="215.26361"
-         x="163.25974"
-         height="28.256489"
-         width="49.448856"
-         id="rect4138-0"
-         style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
-      <text
-         sodipodi:linespacing="125%"
-         id="text4140-94"
-         y="236.03981"
-         x="187.99271"
-         style="font-size:17.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-         xml:space="preserve"><tspan
-           y="236.03981"
-           x="187.99271"
-           id="tspan4142-3"
-           sodipodi:role="line">in2</tspan></text>
-    </g>
+    <rect
+       style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect4138-0"
+       width="49.448856"
+       height="28.256489"
+       x="257.93335"
+       y="204.45786"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="282.66632"
+       y="225.23405"
+       id="text4140-94"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4142-3"
+         x="282.66632"
+         y="225.23405">in2</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:17.5px;font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:monospace;-inkscape-font-specification:monospace Italic"
+       style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:monospace;-inkscape-font-specification:'monospace Italic';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="88.586716"
        y="309.51248"
        id="text5066-00"
@@ -419,19 +431,19 @@
          sodipodi:role="line"
          id="tspan5068-4"
          x="88.586716"
-         y="309.51248">&lt;user/model/1&gt;</tspan></text>
+         y="309.51248">&lt;user/label/1&gt;</tspan></text>
     <rect
        rx="0"
-       style="fill:#808000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
+       style="fill:#808000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
        id="rect5021-1"
-       width="273.39331"
+       width="114.55061"
        height="41.559128"
-       x="-49.593361"
+       x="-32.695198"
        y="-280.11407"
        ry="0"
        transform="scale(1,-1)" />
     <path
-       style="stroke-linejoin:miter;marker-end:url(#Arrow1Mend-5-73Jn);stroke-opacity:1;stroke:#8800a7;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-width:3;fill:none"
+       style="fill:none;stroke:#00a700;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mend-5-73Jn)"
        d="m -60.95877,218.42004 311.27031,0"
        id="path4206-5-3"
        inkscape:connector-curvature="0"
@@ -445,7 +457,7 @@
          height="41.559132"
          width="47.027439"
          id="rect5021"
-         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
       <rect
          ry="0"
          y="97.918182"
@@ -453,7 +465,7 @@
          height="41.559132"
          width="47.027439"
          id="rect5021-6"
-         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
       <rect
          ry="0"
          y="97.918182"
@@ -461,7 +473,7 @@
          height="41.559132"
          width="47.027439"
          id="rect5021-6-0"
-         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
       <rect
          ry="0"
          y="97.471497"
@@ -469,16 +481,16 @@
          height="41.559132"
          width="47.027439"
          id="rect5021-9"
-         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
+         style="fill:#ff0000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
     </g>
     <path
-       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
        d="m -33.039874,180.15457 0,-8.47588 47.300854,0 0,8.47588"
        id="path5084-1"
        inkscape:connector-curvature="0" />
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="14.070717"
        y="193.27283"
        id="text5137-0"
@@ -489,7 +501,7 @@
          y="193.27283">3</tspan></text>
     <text
        xml:space="preserve"
-       style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
        x="-33.776962"
        y="193.15173"
        id="text5141-15"
@@ -498,188 +510,236 @@
          id="tspan5143-6"
          x="-33.776962"
          y="193.15173">4</tspan></text>
-    <g
-       id="g8745"
-       transform="translate(18,0)">
-      <text
-         sodipodi:linespacing="125%"
-         id="text5066-0"
-         y="112.57666"
-         x="665.1803"
-         style="font-size:17.5px;font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:monospace;-inkscape-font-specification:monospace Italic"
-         xml:space="preserve"><tspan
-           y="112.57666"
-           x="665.1803"
-           id="tspan5068-1"
-           sodipodi:role="line">&lt;user/other/1&gt;</tspan></text>
-      <g
-         id="g8715">
-        <g
-           id="g8699">
-          <rect
-             ry="0"
-             y="130.29988"
-             x="741.71167"
-             height="41.559132"
-             width="47.027439"
-             id="rect5021-6-0-8"
-             style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
-          <path
-             inkscape:connector-curvature="0"
-             id="path5084-6-2-3"
-             d="m 741.99571,212.94606 0,-8.47588 47.30085,0 0,8.47588"
-             style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
-          <text
-             sodipodi:linespacing="125%"
-             id="text5121-1"
-             y="225.98012"
-             x="789.63184"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="225.98012"
-               x="789.63184"
-               id="tspan5123-5"
-               sodipodi:role="line">0</tspan></text>
-          <text
-             sodipodi:linespacing="125%"
-             id="text5125-7"
-             y="226.10121"
-             x="742.33099"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="226.10121"
-               x="742.33099"
-               id="tspan5127-7"
-               sodipodi:role="line">1</tspan></text>
-        </g>
-        <g
-           id="g8691"
-           transform="translate(0.40252686,0)">
-          <rect
-             ry="0"
-             y="130.29988"
-             x="676.36536"
-             height="41.559132"
-             width="47.027439"
-             id="rect5021-6-1"
-             style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
-          <path
-             inkscape:connector-curvature="0"
-             id="path5084-6-6"
-             d="m 676.22868,212.94606 0,-8.47588 47.30085,0 0,8.47588"
-             style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
-          <text
-             sodipodi:linespacing="125%"
-             id="text5129-0"
-             y="226.10121"
-             x="723.19196"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="226.10121"
-               x="723.19196"
-               id="tspan5131-7"
-               sodipodi:role="line">1</tspan></text>
-          <text
-             sodipodi:linespacing="125%"
-             id="text5133-6"
-             y="226.10121"
-             x="676.16449"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="226.10121"
-               x="676.16449"
-               id="tspan5135-6"
-               sodipodi:role="line">2</tspan></text>
-        </g>
-        <g
-           id="g8683"
-           transform="translate(-0.12145996,0)">
-          <rect
-             ry="0"
-             y="130.29988"
-             x="611.0191"
-             height="41.559132"
-             width="47.027439"
-             id="rect5021-61"
-             style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
-          <path
-             inkscape:connector-curvature="0"
-             id="path5084-2"
-             d="m 610.46165,212.98296 0,-8.47588 47.30086,0 0,8.47588"
-             style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
-          <text
-             sodipodi:linespacing="125%"
-             id="text5137-9"
-             y="226.10121"
-             x="657.57227"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="226.10121"
-               x="657.57227"
-               id="tspan5139-8"
-               sodipodi:role="line">2</tspan></text>
-          <text
-             sodipodi:linespacing="125%"
-             id="text5141-1"
-             y="225.98012"
-             x="609.72455"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="225.98012"
-               x="609.72455"
-               id="tspan5143-5"
-               sodipodi:role="line">3</tspan></text>
-        </g>
-        <g
-           id="g8707"
-           transform="translate(0,1.261734)">
-          <rect
-             ry="0"
-             y="129.03815"
-             x="544.78333"
-             height="41.559132"
-             width="47.027439"
-             id="rect5021-61-8"
-             style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0" />
-          <path
-             inkscape:connector-curvature="0"
-             id="path5084-2-8"
-             d="m 544.22585,211.72123 0,-8.47588 47.30086,0 0,8.47588"
-             style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
-          <text
-             sodipodi:linespacing="125%"
-             id="text5137-9-6"
-             y="224.83948"
-             x="591.33649"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="224.83948"
-               x="591.33649"
-               id="tspan5139-8-3"
-               sodipodi:role="line">3</tspan></text>
-          <text
-             sodipodi:linespacing="125%"
-             id="text5141-1-2"
-             y="224.71838"
-             x="543.48877"
-             style="font-size:12.5px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
-             xml:space="preserve"><tspan
-               y="224.71838"
-               x="543.48877"
-               id="tspan5143-5-2"
-               sodipodi:role="line">4</tspan></text>
-        </g>
-      </g>
-    </g>
+    <text
+       xml:space="preserve"
+       style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:monospace;-inkscape-font-specification:'monospace Italic';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="683.1803"
+       y="112.57666"
+       id="text5066-0"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5068-1"
+         x="683.1803"
+         y="112.57666">&lt;user/output/1&gt;</tspan></text>
+    <rect
+       style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect5021-6-0-8"
+       width="47.027439"
+       height="41.559132"
+       x="759.71167"
+       y="130.29988"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="807.63184"
+       y="225.98012"
+       id="text5121-1"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5123-5"
+         x="807.63184"
+         y="225.98012">0</tspan></text>
+    <rect
+       style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect5021-6-1"
+       width="47.027439"
+       height="41.559132"
+       x="694.76788"
+       y="130.29988"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="694.56702"
+       y="226.10121"
+       id="text5133-6"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5135-6"
+         x="694.56702"
+         y="226.10121">2</tspan></text>
+    <rect
+       style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect5021-61"
+       width="47.027439"
+       height="41.559132"
+       x="628.89764"
+       y="130.29988"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="675.45081"
+       y="226.10121"
+       id="text5137-9"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5139-8"
+         x="675.45081"
+         y="226.10121">2</tspan></text>
+    <rect
+       style="fill:#0000ff;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect5021-61-8"
+       width="47.027439"
+       height="41.559132"
+       x="562.78333"
+       y="130.29988"
+       ry="0" />
     <path
-       sodipodi:type="arc"
+       id="path5084-2-8"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 562.22585,212.98296 c 0,0 -0.16294,-8.47588 0,-8.47588 m -0.24934,0 113.66455,0 0,8.47588"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="ccccc" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:12.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="561.48877"
+       y="225.98012"
+       id="text5141-1-2"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5143-5-2"
+         x="561.48877"
+         y="225.98012">4</tspan></text>
+    <circle
        style="fill:#00a700;fill-opacity:1;fill-rule:nonzero;stroke:#00a700;stroke-opacity:1"
        id="path3084"
-       sodipodi:cx="575.2807"
-       sodipodi:cy="66.578423"
-       sodipodi:rx="11.297053"
-       sodipodi:ry="11.297053"
-       d="m 586.57775,66.578423 c 0,6.23919 -5.05786,11.297053 -11.29705,11.297053 -6.23919,0 -11.29705,-5.057863 -11.29705,-11.297053 0,-6.239191 5.05786,-11.297054 11.29705,-11.297054 6.23919,0 11.29705,5.057863 11.29705,11.297054 z"
-       transform="matrix(0.88950579,0,0,0.88950579,0.40991991,75.333784)" />
+       transform="matrix(0.88950579,0,0,0.88950579,0.40991991,75.333784)"
+       cx="575.2807"
+       cy="66.578423"
+       r="11.297053" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 98.769463,197.13924 0,7.92225 113.620307,0 0,-7.92225"
+       id="path5084-6-2-9"
+       inkscape:connector-curvature="0" />
+    <path
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m -32.706413,196.98457 0,7.92225 113.62031,0 0,-7.92225"
+       id="path5084-6-2-9-2"
+       inkscape:connector-curvature="0" />
+    <rect
+       rx="0"
+       style="fill:#808000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect5021-1-3"
+       width="114.55061"
+       height="41.559128"
+       x="98.015137"
+       y="-280.11337"
+       ry="0"
+       transform="scale(1,-1)" />
+    <path
+       id="path5084-2-8-7"
+       style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       d="m 694.25062,212.85716 c 0,0 -0.16294,-8.47588 0,-8.47588 m -0.24934,0 113.66455,0 0,8.47588"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="ccccc" />
+    <rect
+       ry="0"
+       y="256.04727"
+       x="257.84659"
+       height="28.256489"
+       width="49.448856"
+       id="rect4138-0-4"
+       style="fill:#e3ff00;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:'Bitstream Vera Sans';-inkscape-font-specification:'Bitstream Vera Sans Bold';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="283.6882"
+       y="277.14557"
+       id="text4140-94-6"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan4142-3-0"
+         x="283.6882"
+         y="277.14557">in3</tspan></text>
+    <path
+       style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#8800a7;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Mend-5)"
+       d="m -61.50674,395.42941 284.16433,0 0,-126.00559 27.80813,0"
+       id="path6931"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cccc" />
+    <g
+       id="g7555"
+       transform="translate(0,4)">
+      <rect
+         ry="0"
+         y="340.33063"
+         x="185.33801"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="150.35455"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-6"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="115.37111"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-0"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="80.38765"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-2"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="45.404198"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-7"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="10.420746"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-9"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         ry="0"
+         y="340.33063"
+         x="-24.562706"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-3"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+      <rect
+         transform="scale(1,-1)"
+         ry="0"
+         y="-381.88977"
+         x="-59.546158"
+         height="41.559143"
+         width="26.171322"
+         id="rect5021-6-0-3-5"
+         style="fill:#ff7f2a;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:17.5px;line-height:125%;font-family:monospace;-inkscape-font-specification:'monospace Italic';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       x="88.040718"
+       y="418.06717"
+       id="text5066-00-6"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan5068-4-5"
+         x="88.040718"
+         y="418.06717">&lt;user/altdata/1&gt;</tspan></text>
   </g>
 </svg>
diff --git a/doc/user/conf.py b/doc/user/conf.py
index c76e8c9f96a8fdcd239e85e2746608df732bfd0c..aca795319099802cbbaf9bb45255b22cb3e0c368 100644
--- a/doc/user/conf.py
+++ b/doc/user/conf.py
@@ -46,7 +46,6 @@ needs_sphinx = '1.3'
 extensions = [
     'sphinx.ext.todo',
     'sphinx.ext.coverage',
-    'sphinx.ext.pngmath',
     'sphinx.ext.ifconfig',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
@@ -57,9 +56,20 @@ extensions = [
     'sphinx.ext.viewcode',
     ]
 
+import sphinx
+from distutils.version import LooseVersion
+# compare parsed versions: a plain string comparison misorders e.g. "1.10" < "1.4.1"
+if LooseVersion(sphinx.__version__) >= LooseVersion("1.4.1"):
+    extensions.append('sphinx.ext.imgmath')
+else:
+    extensions.append('sphinx.ext.pngmath')
+
 # Always includes todos
 todo_include_todos = True
 
+# Create numbers on figures with captions
+numfig = True
+
 # Generates auto-summary automatically
 autosummary_generate = True
 
diff --git a/setup.py b/setup.py
index 06c69b4e1b7b8e1bccffc2764adabb66e1a4cc6e..8872dc8620e932dc3ce69c37855ba2774a2ee810 100644
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@ from setuptools import setup, find_packages
 setup(
 
     name='beat.web',
-    version='1.0.5',
+    version='1.1.0b5',
     description='Biometrics Evaluation and Testing Platform (Web Modules)',
     url='https://gitlab.idiap.ch/beat/beat.web',
     license='AGPLv3',
@@ -55,22 +55,21 @@ setup(
         "django-activity-stream",
         "django-jsonfield",
         "django-guardian",
-        "django_nose",
         "djangorestframework",
         "django-rest-swagger",
         "docopt",
         "docutils",
         "Jinja2",
-        "mysqlclient",
         "psycopg2",
         "pytz",
+        "psutil",
         "setuptools",
         "simplejson",
         "sphinx",
-        "sphinx-numfig",
         "sphinxcontrib-programoutput",
         "sphinxcontrib-ansi",
         "sphinxcontrib-httpdomain",
+        "sphinx-rtd-theme",
         "matplotlib",
     ],
 
@@ -87,7 +86,9 @@ setup(
 
     entry_points={
         'console_scripts': [
-            'localhost.py = beat.web.scripts.localhost:main',
+            'process = beat.web.scripts.process:main',
+            'worker = beat.web.scripts.worker:main',
+            'scheduler = beat.web.scripts.scheduler:main',
         ],
     },
 
diff --git a/todo.rst b/todo.rst
index f66cb84710b7ecac83c02845a91df5e804a7ef4f..64cb9a7939b2e1a70bd71bd0dff8a69a14391fb0 100644
--- a/todo.rst
+++ b/todo.rst
@@ -24,20 +24,8 @@
 UI
 --
 
-* Change statistics:
-
-  -----  ----------  ------
-  files  new_design  master
-  -----  ----------  ------
-  css        3400      8498
-  js        32504     44831
-  html      10071      8857
-  -----  ----------  ------
-
 * Remove outdated icons from the distribution
 
-* Remove control marks for 80-columns on JSON files
-
 * Add docutils roles
   (http://docutils.sourceforge.net/docs/howto/rst-roles.html) for the following
   elements:
@@ -54,14 +42,6 @@ UI
   * :beat:environment
   * :beat:team
 
-* Make sure to squash migrations after this move is done
-
-* Check with Philip on the history panel size, viewer size
-
-* In the history panel, rows are out of control
-
-* Shall we get rid of "smart_select"?
-
 * Use datatable class for all object lists?
 
 
@@ -70,16 +50,3 @@ Admin
 
 * The help text of code editor bits looks displaced. Font-Awesome icons do not
   display.
-
-
-Experiments
------------
-
-* Make sure to remove any spurious logs from the beat.scheduler before
-  introducing stdout/stderr components to the experiment view
-
-
-UI
---
-
-* displayPlot: bootstrap form components + fix expanded view style