diff --git a/bob/bio/base/__init__.py b/bob/bio/base/__init__.py
index a81db5cd825633417b6f1a2e24a37380e4120bd0..6c6a27914e2d86c7807d265ef61571157f080dec 100644
--- a/bob/bio/base/__init__.py
+++ b/bob/bio/base/__init__.py
@@ -6,6 +6,7 @@ from . import algorithm
 from . import tools
 from . import grid # only one file, not complete directory
 
+from . import script
 from . import test
 
 
diff --git a/bob/bio/base/config/grid/__init__.py b/bob/bio/base/config/grid/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/base/config/grid/local.py b/bob/bio/base/config/grid/local.py
new file mode 100644
index 0000000000000000000000000000000000000000..beb2f917e6b28eff2c8121c6a9dd98f7137511a8
--- /dev/null
+++ b/bob/bio/base/config/grid/local.py
@@ -0,0 +1,20 @@
+import bob.bio.base
+
+# define a local queue running 4 parallel processes
+grid = bob.bio.base.GridParameters(
+  grid = 'local',
+  number_of_parallel_processes = 4
+)
+
+
+# define a queue that is highly parallelized
+grid_p8 = bob.bio.base.GridParameters(
+  grid = 'local',
+  number_of_parallel_processes = 8
+)
+
+# define a local queue that is even more parallelized (16 processes)
+grid_p16 = bob.bio.base.GridParameters(
+  grid = 'local',
+  number_of_parallel_processes = 16
+)
diff --git a/bob/bio/base/database/__init__.py b/bob/bio/base/database/__init__.py
index e40749ee493def073b3bad13699846e9596112b5..945cd33ebec6cc299460208f41a20b86ee0eeea6 100644
--- a/bob/bio/base/database/__init__.py
+++ b/bob/bio/base/database/__init__.py
@@ -1,3 +1,5 @@
+from .utils import File, FileSet
+
 from .Database import Database, DatabaseZT
 from .DatabaseBob import DatabaseBob, DatabaseBobZT
 from .DatabaseFileList import DatabaseFileList
diff --git a/bob/bio/base/script/__init__.py b/bob/bio/base/script/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f2d91fd15687caa1d73b8979652a4d7c607212ce 100644
--- a/bob/bio/base/script/__init__.py
+++ b/bob/bio/base/script/__init__.py
@@ -0,0 +1 @@
+from . import verify
diff --git a/bob/bio/base/script/verify.py b/bob/bio/base/script/verify.py
index 4df60e4ee3790cacba340e912d4ba6b4b7aeca56..748df9770544804bfd6b8d096cf5753a0e165b0a 100644
--- a/bob/bio/base/script/verify.py
+++ b/bob/bio/base/script/verify.py
@@ -86,12 +86,12 @@ def add_jobs(args, submitter = None):
     if args.grid is None:
       jobs_to_execute.append(('train-projector',))
     else:
-      job_ids['projector_training'] = submitter.submit(
+      job_ids['projector-training'] = submitter.submit(
               '--sub-task train-projector',
               name="train-p",
               dependencies = deps,
               **args.grid.training_queue)
-      deps.append(job_ids['projector_training'])
+      deps.append(job_ids['projector-training'])
 
   # feature projection
   if not args.skip_projection and args.algorithm.performs_projection:
@@ -110,12 +110,12 @@ def add_jobs(args, submitter = None):
     if args.grid is None:
       jobs_to_execute.append(('train-enroller',))
     else:
-      job_ids['enroller_training'] = submitter.submit(
+      job_ids['enroller-training'] = submitter.submit(
               '--sub-task train-enroller',
               name = "train-e",
               dependencies = deps,
               **args.grid.training_queue)
-      deps.append(job_ids['enroller_training'])
+      deps.append(job_ids['enroller-training'])
 
   # enroll models
   enroll_deps_n = {}
@@ -129,38 +129,38 @@ def add_jobs(args, submitter = None):
       if args.grid is None:
         jobs_to_execute.append(('enroll', group, 'N'))
       else:
-        job_ids['enroll_%s_N'%group] = submitter.submit(
+        job_ids['enroll-%s-N'%group] = submitter.submit(
                 '--sub-task enroll --group %s --model-type N'%group,
                 name = "enr-N-%s"%group,
                 number_of_parallel_jobs = args.grid.number_of_enrollment_jobs,
                 dependencies = deps,
                 **args.grid.enrollment_queue)
-        enroll_deps_n[group].append(job_ids['enroll_%s_N'%group])
+        enroll_deps_n[group].append(job_ids['enroll-%s-N'%group])
 
       if args.zt_norm:
         if args.grid is None:
           jobs_to_execute.append(('enroll', group, 'T'))
         else:
-          job_ids['enroll_%s_T'%group] = submitter.submit(
+          job_ids['enroll-%s-T'%group] = submitter.submit(
                   '--sub-task enroll --group %s --model-type T'%group,
                   name = "enr-T-%s"%group,
                   number_of_parallel_jobs = args.grid.number_of_enrollment_jobs,
                   dependencies = deps,
                   **args.grid.enrollment_queue)
-          enroll_deps_t[group].append(job_ids['enroll_%s_T'%group])
+          enroll_deps_t[group].append(job_ids['enroll-%s-T'%group])
 
     # compute A,B,C, and D scores
     if not args.skip_score_computation:
       if args.grid is None:
         jobs_to_execute.append(('compute-scores', group, None, 'A'))
       else:
-        job_ids['score_%s_A'%group] = submitter.submit(
+        job_ids['score-%s-A'%group] = submitter.submit(
                 '--sub-task compute-scores --group %s --score-type A'%group,
                 name = "score-A-%s"%group,
                 number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
                 dependencies = enroll_deps_n[group],
                 **args.grid.scoring_queue)
-        concat_deps[group] = [job_ids['score_%s_A'%group]]
+        concat_deps[group] = [job_ids['score-%s-A'%group]]
 
       if args.zt_norm:
         if args.grid is None:
@@ -169,21 +169,21 @@ def add_jobs(args, submitter = None):
           jobs_to_execute.append(('compute-scores', group, None, 'D'))
           jobs_to_execute.append(('compute-scores', group, None, 'Z'))
         else:
-          job_ids['score_%s_B'%group] = submitter.submit(
+          job_ids['score-%s-B'%group] = submitter.submit(
                   '--sub-task compute-scores --group %s --score-type B'%group,
                   name = "score-B-%s"%group,
                   number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
                   dependencies = enroll_deps_n[group],
                   **args.grid.scoring_queue)
 
-          job_ids['score_%s_C'%group] = submitter.submit(
+          job_ids['score-%s-C'%group] = submitter.submit(
                   '--sub-task compute-scores --group %s --score-type C'%group,
                   name = "score-C-%s"%group,
                   number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
                   dependencies = enroll_deps_t[group],
                   **args.grid.scoring_queue)
 
-          job_ids['score_%s_D'%group] = submitter.submit(
+          job_ids['score-%s-D'%group] = submitter.submit(
                   '--sub-task compute-scores --group %s --score-type D'%group,
                   name = "score-D-%s"%group,
                   number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
@@ -191,12 +191,12 @@ def add_jobs(args, submitter = None):
                   **args.grid.scoring_queue)
 
           # compute zt-norm
-          score_deps[group] = [job_ids['score_%s_A'%group], job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group]]
-          job_ids['score_%s_Z'%group] = submitter.submit(
+          score_deps[group] = [job_ids['score-%s-A'%group], job_ids['score-%s-B'%group], job_ids['score-%s-C'%group], job_ids['score-%s-D'%group]]
+          job_ids['score-%s-Z'%group] = submitter.submit(
                   '--sub-task compute-scores --group %s --score-type Z'%group,
                   name = "score-Z-%s"%group,
                   dependencies = score_deps[group])
-          concat_deps[group].extend([job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group], job_ids['score_%s_Z'%group]])
+          concat_deps[group].extend([job_ids['score-%s-B'%group], job_ids['score-%s-C'%group], job_ids['score-%s-D'%group], job_ids['score-%s-Z'%group]])
     else:
       concat_deps[group] = []
 
@@ -205,7 +205,7 @@ def add_jobs(args, submitter = None):
       if args.grid is None:
         jobs_to_execute.append(('concatenate', group))
       else:
-        job_ids['concat_%s'%group] = submitter.submit(
+        job_ids['concat-%s'%group] = submitter.submit(
                 '--sub-task concatenate --group %s'%group,
                 name = "concat-%s"%group,
                 dependencies = concat_deps[group])
@@ -215,7 +215,7 @@ def add_jobs(args, submitter = None):
     if args.grid is None:
       jobs_to_execute.append(('calibrate',))
     else:
-      calib_deps = [job_ids['concat_%s'%g] for g in args.groups if 'concat_%s'%g in job_ids]
+      calib_deps = [job_ids['concat-%s'%g] for g in args.groups if 'concat-%s'%g in job_ids]
       job_ids['calibrate'] = submitter.submit(
               '--sub-task calibrate',
               dependencies = calib_deps)
@@ -362,7 +362,8 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
   # as the main entry point, check whether the sub-task is specified
   if args.sub_task is not None:
     # execute the desired sub-task
-    execute(args)
+    if not execute(args):
+      raise ValueError("The specified --sub-task '%s' is not known to the system" % args.sub_task)
     return {}
   else:
     # add jobs
@@ -394,7 +395,8 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
         args.group = None if len(job) <= 1 else job[1]
         args.model_type = None if len(job) <= 2 else job[2]
         args.score_type = None if len(job) <= 3 else job[3]
-        execute(args)
+        if not execute(args):
+          raise ValueError("The current --sub-task '%s' is not known to the system" % args.sub_task)
 
       if args.timer:
         end_time = os.times()
diff --git a/bob/bio/base/test/dummy/fileset.py b/bob/bio/base/test/dummy/fileset.py
index e4de45f962daa943b72fde8f7ce8401de324caea..a3f7ab6b7d4d28181ca1e051642a4e9dcb8538b7 100644
--- a/bob/bio/base/test/dummy/fileset.py
+++ b/bob/bio/base/test/dummy/fileset.py
@@ -1,14 +1,14 @@
 import bob.db.atnt
 import os
 
-from bob.bio.base.database import DatabaseBob, DatabaseBobZT
+from bob.bio.base.database import DatabaseBob, DatabaseBobZT, FileSet
 from bob.bio.base.test.utils import atnt_database_directory
 
 class FileSetDatabase (DatabaseBobZT):
 
   def __init__(self):
     # call base class constructor with useful parameters
-    facereclib.databases.DatabaseBobZT.__init__(
+    DatabaseBobZT.__init__(
         self,
         database = bob.db.atnt.Database(
             original_directory = atnt_database_directory(),
@@ -29,7 +29,7 @@ class FileSetDatabase (DatabaseBobZT):
     file_sets = []
     for client_files in files:
       # generate file set for each client
-      file_set = facereclib.databases.FileSet(client_files[0].client_id, client_files[0].client_id, client_files[0].path)
+      file_set = FileSet(client_files[0].client_id, client_files[0].client_id, client_files[0].path)
       file_set.files = client_files
       file_sets.append(file_set)
     return file_sets
diff --git a/bob/bio/base/test/test_scripts.py b/bob/bio/base/test/test_scripts.py
index 582dc4abe12818867c8c5740544998fbf5dafad1..0666319cae83b18200ed5baa82d053f0e7590968 100644
--- a/bob/bio/base/test/test_scripts.py
+++ b/bob/bio/base/test/test_scripts.py
@@ -124,7 +124,7 @@ def test_verify_commandline():
 @utils.grid_available
 def test_verify_parallel():
   test_dir = tempfile.mkdtemp(prefix='frltest_')
-  test_database = os.path.join(test_dir, "database.sql3")
+  test_database = os.path.join(test_dir, "submitted.sql3")
 
   # define dummy parameters
   parameters = [
diff --git a/bob/bio/base/utils/resources.py b/bob/bio/base/utils/resources.py
index de064d99c8a769a46b264785b82dfa039025808a..a2a5d8dd1dcbf30b046a8d1521a8e3e07b1307ec 100644
--- a/bob/bio/base/utils/resources.py
+++ b/bob/bio/base/utils/resources.py
@@ -89,7 +89,7 @@ def load_resource(resource, keyword, imports = ['bob.bio.base'], preferred_distr
     else:
       # TODO: extract current package name and use this one, if possible
 
-      # Now: check if there are only two entry points, and one is from the facereclib, then use the other one
+      # Now: check if there are only two entry points, and one is from the bob.bio.base, then use the other one
       index = -1
       if preferred_distribution:
         for i,p in enumerate(entry_points):
@@ -141,11 +141,11 @@ def read_file_resource(resource, keyword):
   else:
     # TODO: extract current package name and use this one, if possible
 
-    # Now: check if there are only two entry points, and one is from the facereclib, then use the other one
+    # Now: check if there are only two entry points, and one is from the bob.bio.base, then use the other one
     index = -1
     if len(entry_points) == 2:
-      if entry_points[0].dist.project_name == 'facereclib': index = 1
-      elif entry_points[1].dist.project_name == 'facereclib': index = 0
+      if entry_points[0].dist.project_name == 'bob.bio.base': index = 1
+      elif entry_points[1].dist.project_name == 'bob.bio.base': index = 0
 
     if index != -1:
       logger.info("RESOURCES: Using the resource '%s' from '%s', and ignoring the one from '%s'" %(resource, entry_points[index].module_name, entry_points[1-index].module_name))
@@ -168,7 +168,7 @@ def list_resources(keyword, strip=['dummy']):
   entry_points = _get_entry_points(keyword, strip)
   last_dist = None
   retval = ""
-  for entry_point in entry_points:
+  for entry_point in sorted(entry_points):
     if last_dist != str(entry_point.dist):
       retval += "\n- %s: \n" % str(entry_point.dist)
       last_dist = str(entry_point.dist)
diff --git a/setup.py b/setup.py
index 213ac20a467cc23a63fde2fef137dd6d2c8f201b..fbe07a678b1c6bb135ded380f6f83cf6fc367071 100644
--- a/setup.py
+++ b/setup.py
@@ -125,6 +125,12 @@ setup(
         'pca+lda           = bob.bio.base.config.algorithm.lda:algorithm',
         'bic               = bob.bio.base.config.algorithm.bic:algorithm',
       ],
+
+      'bob.bio.grid': [
+        'local-p4          = bob.bio.base.config.grid.local:grid',
+        'local-p8          = bob.bio.base.config.grid.local:grid_p8',
+        'local-p16         = bob.bio.base.config.grid.local:grid_p16'
+      ],
    },
 
     # Classifiers are important if you plan to distribute this package through