diff --git a/.gitignore b/.gitignore
index 5a869159a737019f458e0072aa640cbe08bc6696..e91866d7f03a0488226f6c44d273af4cec6d19f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,8 @@ MANIFEST
 develop-eggs
 eggs
 bin
+sphinx
+.project
+.pydevproject
+.settings
+
diff --git a/gridtk/local.py b/gridtk/local.py
index bca1a80c9e9b8897b11dd31026d77a2fec0e1c57..eaac286cb9ecb8e10cb1661450fb084f32cc5e0c 100644
--- a/gridtk/local.py
+++ b/gridtk/local.py
@@ -21,7 +21,7 @@ from .models import add_job, Job
 
 class JobManagerLocal(JobManager):
   """Manages jobs run in parallel on the local machine."""
-  def __init__(self, database='submitted.sql3', sleep_time = 0.1):
+  def __init__(self, database='submitted.sql3', sleep_time = 0.1, wrapper_script = './bin/jman'):
     """Initializes this object with a state file and a method for qsub'bing.
 
     Keyword parameters:
@@ -31,7 +31,7 @@ class JobManagerLocal(JobManager):
       does not exist it is initialized. If it exists, it is loaded.
 
     """
-    JobManager.__init__(self, database)
+    JobManager.__init__(self, database, wrapper_script)
     self._sleep_time = sleep_time
 
 
@@ -47,6 +47,31 @@ class JobManagerLocal(JobManager):
     return job_id
 
 
+  def resubmit(self, job_ids = None, failed_only = False, running_jobs = False):
+    """Re-submit jobs automatically"""
+    self.lock()
+    # iterate over all jobs
+    jobs = self.get_jobs(job_ids)
+    for job in jobs:
+      # check if this job needs re-submission
+      if running_jobs or job.status == 'finished':
+        if not failed_only or job.result != 0:
+          job.status = 'waiting'
+          job.result = None
+          if job.array:
+            for array_job in job.array:
+              if running_jobs or array_job.status == 'finished':
+                if not failed_only or array_job.result != 0:
+                  array_job.status = 'waiting'
+                  array_job.result = None
+
+    self.session.commit()
+    self.unlock()
+
+
+#####################################################################
+###### Methods to run the jobs in parallel on the local machine #####
+
   def _run_parallel_job(self, job_id, array_id = None):
     """Executes the code for this job on the local machine."""
     environ = copy.deepcopy(os.environ)
@@ -56,10 +81,8 @@ class JobManagerLocal(JobManager):
     else:
       environ['SGE_TASK_ID'] = 'undefined'
 
-    # get the name of the file that was called originally
-    jman = os.path.realpath(sys.argv[0])
     # generate call to the wrapper script
-    command = [jman, '-l', 'run-job', self.database]
+    command = [self.wrapper_script, '-l', 'run-job', self._database]
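+    # note: '-l' selects the local job manager in the spawned process, and the hidden
+    # 'run-job' command reads JOB_ID and SGE_TASK_ID from the environment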
     # return the subprocess pipe to the process
     try:
       return subprocess.Popen(command, env=environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -97,8 +120,8 @@ class JobManagerLocal(JobManager):
     """Runs the jobs stored in this job manager on the local machine."""
     self.lock()
     query = self.session.query(Job).filter(Job.status != 'finished')
-    if job_ids:
-      query.filter(Job.id.in_(job_ids))
+    if job_ids is not None:
+      query = query.filter(Job.id.in_(job_ids))
 
     jobs = list(query)
 
@@ -114,7 +137,7 @@ class JobManagerLocal(JobManager):
     # collect the dependencies for the jobs
     dependencies = {}
     for job in jobs:
-      dependencies[job.id] = [dependent.id for dependent in job.dependent_jobs]
+      dependencies[job.id] = [waited.id for waited in job.get_jobs_we_wait_for()]
 
     self.unlock()
 
@@ -155,7 +178,7 @@ class JobManagerLocal(JobManager):
         # start new jobs
         for job_id in unfinished_jobs:
           # check if there are unsatisfied dependencies for this job
-          unsatisfied_dependencies = [dep for dep in dependencies[job_id] if dep in unfinished_jobs]
+          unsatisfied_dependencies = [dep for dep in dependencies[job_id]]
 
           if len(unsatisfied_dependencies) == 0:
             # all dependencies are met
@@ -191,6 +214,12 @@ class JobManagerLocal(JobManager):
                   # remove the job that could not be started
                   unfinished_jobs.remove(job_id)
 
+      if not running_jobs and unfinished_jobs:
+        # No job is running, but unfinished jobs remain, so there must be a
+        # dependency that cannot be fulfilled. This might happen when a single
+        # job should be executed, but it depends on a job that is not scheduled to run.
+        raise RuntimeError("Deadlock detected. There are dependencies in the database that cannot be fulfilled. Did you try to run a job that has unfulfilled dependencies?")
+
       # sleep for some time (default: 0.1 seconds)
       time.sleep(self._sleep_time)
 
diff --git a/gridtk/manager.py b/gridtk/manager.py
index efd1a4d6923110ed11bbed495c98c123dad878e0..a266cc84f066ecfff01ab1bc50b5a2675248e7a8 100644
--- a/gridtk/manager.py
+++ b/gridtk/manager.py
@@ -1,77 +1,70 @@
 
-import bob
 import os
 import subprocess
 from .models import Base, Job, ArrayJob
 from .tools import logger
 
+import sqlalchemy
 echo = False
 
 """This file defines a minimum Job Manager interface."""
 
 class JobManager:
 
-  def __init__(self, sql_database):
-    self.database = os.path.realpath(sql_database)
-    if not os.path.exists(self.database):
-      self.create()
+  def __init__(self, sql_database, wrapper_script = './bin/jman'):
+    self._database = os.path.realpath(sql_database)
+    self._engine = sqlalchemy.create_engine("sqlite:///"+self._database, echo=echo)
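+    # sessions opened via lock() will be bound to this engine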
+    if not os.path.exists(self._database):
+      self._create()
+
+    # store the command that this job manager was called with
+    self.wrapper_script = wrapper_script
 
-    # get the next free job id (simply as the largest ID in the database + 1)
-#    self.lock()
-#    self.next_job_id = max([job.id for job in self.session.query(Job)] + [0]) + 1
-#    self.unlock()
 
   def __del__(self):
-    # remove the database if it is empty
-    self.lock()
-    job_count = len(self.get_jobs())
-    self.unlock()
-    if not job_count:
-      os.remove(self.database)
+    # remove the database if it is empty
+    if os.path.isfile(self._database):
+      self.lock()
+      job_count = len(self.get_jobs())
+      self.unlock()
+      if not job_count:
+        os.remove(self._database)
 
 
   def lock(self):
-    self.session = bob.db.utils.SQLiteConnector(self.database).session(echo=echo)
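+    # open a fresh session for each transaction; unlock() closes it again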
+    Session = sqlalchemy.orm.sessionmaker()
+    self.session = Session(bind=self._engine)
+    return self.session
 
   def unlock(self):
     self.session.close()
     del self.session
 
 
-  def create(self):
+  def _create(self):
     """Creates a new and empty database."""
     from .tools import makedirs_safe
 
     # create directory for sql database
-    makedirs_safe(os.path.dirname(self.database))
+    makedirs_safe(os.path.dirname(self._database))
 
-    # create an engine
-    engine = bob.db.utils.create_engine_try_nolock('sqlite', self.database, echo=echo)
     # create all the tables
-    Base.metadata.create_all(engine)
-
-
-  def list(self):
-    """Lists the jobs currently added to the database."""
-    self.lock()
-    for job in self.get_jobs():
-      print job
-    self.unlock()
+    Base.metadata.create_all(self._engine)
 
 
-  def get_jobs(self, grid_ids = None):
+  def get_jobs(self, job_ids = None):
     q = self.session.query(Job)
-    if grid_ids:
-      q = q.filter(Job.grid_id.in_(grid_ids))
+    if job_ids:
+      q = q.filter(Job.id.in_(job_ids))
     return list(q)
 
 
-  def _job_and_array(self, grid_id, array_id=None):
+  def _job_and_array(self, id, array_id=None):
     # get the job (and the array job) with the given id(s)
-    job = self.get_jobs((grid_id,))
+    job = self.get_jobs((id,))
     assert (len(job) == 1)
     job = job[0]
-    job_id = job.id
+    job_id = job.unique
 
     if array_id is not None:
       array_job = list(self.session.query(ArrayJob).filter(ArrayJob.job_id == job_id).filter(ArrayJob.id == array_id))
@@ -99,7 +92,7 @@ class JobManager:
     # execute the command line of the job, and wait untils it has finished
     try:
       result = subprocess.call(command_line)
-    except Error:
+    except Exception:
       result = 69 # ASCII: 'E'
 
     # set a new status and the results of the job
@@ -123,15 +116,36 @@ class JobManager:
     self.unlock()
 
 
-  def report(self, grid_ids=None, array_ids=None, unfinished=False, output=True, error=True):
+  def list(self, job_ids, print_array_jobs = False, print_dependencies = False):
+    """Lists the jobs currently added to the database."""
+    self.lock()
+    for job in self.get_jobs(job_ids):
+      print job
+      if print_dependencies:
+        waiting_jobs = [j.id for j in job.get_jobs_waiting_for_us()]
+        waited_for_jobs = [j.id for j in job.get_jobs_we_wait_for()]
+        if len(waiting_jobs):
+          print "These jobs wait for <Job %d>:" % job.id, waiting_jobs
+        if len(waited_for_jobs):
+          print "These jobs need to run before <Job %d>:" % job.id, waited_for_jobs
+      if print_array_jobs and job.array:
+        for array_job in job.array:
+          print array_job
+
+    self.unlock()
+
+
+  def report(self, job_ids=None, array_ids=None, unfinished=False, output=True, error=True):
     """Iterates through the output and error files and write the results to command line."""
     def _write_contents(job):
       # Writes the contents of the output and error files to command line
       out_file, err_file = job.std_out_file(), job.std_err_file()
-      if output and out_file is not None and os.path.exists(out_file) and os.stat(out_file).st_size:
+      if output and out_file is not None and os.path.exists(out_file) and os.stat(out_file).st_size > 0:
+        print "Output file:", out_file
         print open(out_file).read().rstrip()
         print "-"*20
-      if error and err_file is not None and os.path.exists(err_file) and os.stat(err_file).st_size:
+      if error and err_file is not None and os.path.exists(err_file) and os.stat(err_file).st_size > 0:
+        print "Error file:", err_file
         print open(err_file).read().rstrip()
         print "-"*40
 
@@ -144,14 +158,14 @@ class JobManager:
     # check if an array job should be reported
     self.lock()
     if array_ids:
-      if len(grid_ids) != 1: logger.error("If array ids are specified exactly one job id must be given.")
-      array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.grid_id.in_(grid_ids)).filter(Job.id == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
+      if len(job_ids) != 1: logger.error("If array ids are specified, exactly one job id must be given.")
+      array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.id.in_(job_ids)).filter(Job.unique == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
       if array_jobs: print array_jobs[0].job
       _write_array_jobs(array_jobs)
 
     else:
       # iterate over all jobs
-      jobs = self.get_jobs(grid_ids)
+      jobs = self.get_jobs(job_ids)
       for job in jobs:
         if job.array:
           if (unfinished or job.status in ('finished', 'executing')):
@@ -166,7 +180,7 @@ class JobManager:
     self.unlock()
 
 
-  def delete(self, grid_ids, array_ids = None, delete_logs = True, delete_log_dir = False):
+  def delete(self, job_ids, array_ids = None, delete_logs = True, delete_log_dir = False):
     """Deletes the jobs with the given ids from the database."""
     def _delete_dir_if_empty(log_dir):
       if log_dir and delete_log_dir and os.path.isdir(log_dir) and not os.listdir(log_dir):
@@ -185,8 +199,8 @@ class JobManager:
 
     self.lock()
     if array_ids:
-      if len(grid_ids) != 1: logger.error("If array ids are specified exactly one job id must be given.")
-      array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.grid_id.in_(grid_ids)).filter(Job.id == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
+      if len(job_ids) != 1: logger.error("If array ids are specified, exactly one job id must be given.")
+      array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.id.in_(job_ids)).filter(Job.unique == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
       if array_jobs:
         job = array_jobs[0].job
         for array_job in array_jobs:
@@ -196,7 +210,7 @@ class JobManager:
 
     else:
       # iterate over all jobs
-      jobs = self.get_jobs(grid_ids)
+      jobs = self.get_jobs(job_ids)
       for job in jobs:
         # delete all array jobs
         if job.array:
@@ -208,5 +222,3 @@ class JobManager:
     self.session.commit()
 
     self.unlock()
-
-
diff --git a/gridtk/models.py b/gridtk/models.py
index 12e171a80495e34ef9e8b2db3be8657b9099de85..d5776b46372d030cd872b3224112483418d2e990 100644
--- a/gridtk/models.py
+++ b/gridtk/models.py
@@ -7,6 +7,7 @@ from sqlalchemy.ext.declarative import declarative_base
 import os
 
 from cPickle import dumps, loads
+from .tools import logger
 
 Base = declarative_base()
 
@@ -18,7 +19,7 @@ class ArrayJob(Base):
 
   unique = Column(Integer, primary_key = True)
   id = Column(Integer)
-  job_id = Column(Integer, ForeignKey('Job.id'))
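+  # the foreign key now references the Job's unique (primary) key instead of its grid id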
+  job_id = Column(Integer, ForeignKey('Job.unique'))
   status = Column(Enum(*Status))
   result = Column(Integer)
 
@@ -36,33 +37,44 @@ class ArrayJob(Base):
   def std_err_file(self):
     return self.job.std_err_file() + "." + str(self.id) if self.job.log_dir else None
 
+  def __str__(self):
+    n = "<ArrayJob %d> of <Job %d>" % (self.id, self.job.id)
+    if self.result is not None: r = "%s (%d)" % (self.status, self.result)
+    else: r = "%s" % self.status
+    return "%s : %s" % (n, r)
+
 
 class Job(Base):
   """This class defines one Job that was submitted to the Job Manager."""
   __tablename__ = 'Job'
 
-  id = Column(Integer, primary_key = True) # The ID of the job (not corresponding to the grid ID)
-  command_line = Column(String(255))       # The command line to execute, converted to one string
-  name = Column(String(20))                # A hand-chosen name for the task
-  arguments = Column(String(255))          # The kwargs arguments for the job submission (e.g. in the grid)
-  grid_id = Column(Integer, unique = True) # The ID of the job as given from the grid
-  log_dir = Column(String(255))            # The directory where the log files will be put to
+  unique = Column(Integer, primary_key = True) # The unique ID of the job (not corresponding to the grid ID)
+  command_line = Column(String(255))           # The command line to execute, converted to one string
+  name = Column(String(20))                    # A hand-chosen name for the task
+  arguments = Column(String(255))              # The kwargs arguments for the job submission (e.g. in the grid)
+  id = Column(Integer, unique = True)          # The ID of the job as given from the grid
+  log_dir = Column(String(255))                # The directory where the log files will be put to
+  array_string = Column(String(255))           # The array string (only needed for re-submission)
 
   status = Column(Enum(*Status))
   result = Column(Integer)
 
-  def __init__(self, command_line, name = None, log_dir = None, **kwargs):
-    """Constructor taking the job id from the grid."""
+  def __init__(self, command_line, name = None, log_dir = None, array_string = None, **kwargs):
+    """Constructs a Job object without an ID (needs to be set later)."""
     self.command_line = dumps(command_line)
     self.name = name
     self.status = Status[0]
     self.result = None
     self.log_dir = log_dir
+    self.array_string = dumps(array_string)
     self.arguments = dumps(kwargs)
 
   def get_command_line(self):
     return loads(str(self.command_line))
 
+  def get_array(self):
+    return loads(str(self.array_string))
+
   def set_arguments(self, **kwargs):
     previous = self.get_arguments()
     previous.update(kwargs)
@@ -71,16 +83,23 @@ class Job(Base):
   def get_arguments(self):
     return loads(str(self.arguments))
 
+  def get_jobs_we_wait_for(self):
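+    # resolve the JobDependence link table: entries where we are the waiting job point to the jobs we wait for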
+    return [j.waited_for_job for j in self.jobs_we_have_to_wait_for if j.waited_for_job is not None]
+
+  def get_jobs_waiting_for_us(self):
+    return [j.waiting_job for j in self.jobs_that_wait_for_us if j.waiting_job is not None]
+
+
   def std_out_file(self, array_id = None):
-    return os.path.join(self.log_dir, "o" + str(self.grid_id)) if self.log_dir else None
+    return os.path.join(self.log_dir, (self.name if self.name else 'job') + ".o" + str(self.id)) if self.log_dir else None
 
   def std_err_file(self, array_id = None):
-    return os.path.join(self.log_dir, "e" + str(self.grid_id)) if self.log_dir else None
+    return os.path.join(self.log_dir, (self.name if self.name else 'job') + ".e" + str(self.id)) if self.log_dir else None
 
 
   def __str__(self):
-    id = "%d" % self.grid_id
-    if self.array: j = "%s (%d-%d)" % (self.id, self.array[0].id, self.array[-1].id)
+    id = "%d" % self.id
+    if self.array: j = "%s (%d-%d)" % (id, self.array[0].id, self.array[-1].id)
     else: j = "%s" % id
     if self.name is not None: n = "<Job: %s - '%s'>" % (j, self.name)
     else: n = "<Job: %s>" % j
@@ -88,95 +107,50 @@ class Job(Base):
     else: r = "%s" % self.status
     return "%s : %s -- %s" % (n, r, " ".join(self.get_command_line()))
 
-  def execute(self, manager, index = None):
-    """Executes the code for this job on the local machine."""
-    import copy
-    environ = copy.deepcopy(os.environ)
-
-    manager.lock()
-    job = manager.get_jobs(self.id)
-    if 'JOB_ID' in environ:
-      # we execute a job in the grid
-      wait_for_job = True
-    else:
-      # we execute a job locally
-      environ['JOB_ID'] = str(self.id)
-    if index:
-      environ['SGE_TASK_ID'] = str(index.id)
-    self.status = "executing"
-
-    # return the subprocess pipe to the process
-    try:
-      import subprocess
-      return subprocess.Popen(self.get_command_line(), env=environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    except OSError:
-      self.status = "finished"
-      raise
-
 
 
 class JobDependence(Base):
   """This table defines a many-to-many relationship between Jobs."""
   __tablename__ = 'JobDependence'
   id = Column(Integer, primary_key=True)
-  dependent_job_id = Column('dependent_job_id', Integer, ForeignKey('Job.id'))
-  dependent_job = relationship('Job', backref = 'dependent_jobs', primaryjoin=(Job.id == dependent_job_id), order_by=id) # A list of Jobs that this one depends on
-  depending_job_id = Column('depending_job_id', Integer, ForeignKey('Job.id'))
-  depending_job = relationship('Job', backref = 'depending_jobs', primaryjoin=(Job.id == depending_job_id), order_by=id) # A list of Jobs that this one depends on
+  waiting_job_id = Column(Integer, ForeignKey('Job.unique')) # The ID of the waiting job
+  waited_for_job_id = Column(Integer, ForeignKey('Job.unique')) # The ID of the job to wait for
 
-  def __init__(self, depending_job, dependent_job):
-    self.dependent_job = dependent_job
-    self.depending_job = depending_job
+  # This is twisted: the backref of 'waiting_job' attaches the list 'jobs_we_have_to_wait_for'
+  # to the Job that waits, and each entry of that list resolves to one job that is waited for (and vice versa)
+  waiting_job = relationship('Job', backref = 'jobs_we_have_to_wait_for', primaryjoin=(Job.unique == waiting_job_id), order_by=id) # The job that waits
+  waited_for_job = relationship('Job', backref = 'jobs_that_wait_for_us', primaryjoin=(Job.unique == waited_for_job_id), order_by=id) # The job that is waited for
 
+  def __init__(self, waiting_job_id, waited_for_job_id):
+    self.waiting_job_id = waiting_job_id
+    self.waited_for_job_id = waited_for_job_id
 
-def add_grid_job(session, data, command_line, kwargs):
-  """Helper function to create a job from the results of the grid execution via qsub."""
-  # create job
-  job = Job(data=data, command_line=command_line, kwargs=kwargs)
 
-  session.add(job)
-  session.flush()
-  session.refresh(job)
 
-  # add dependent jobs
-  if 'deps' in kwargs:
-    dependencies = session.query(Job).filter(id.in_(kwargs['deps']))
-    assert(len(list(dependencies)) == len(kwargs['deps']))
-    for d in dependecies:
-      session.add(JobDependence(job, d))
-
-  # create array job if desired
-  if 'job-array tasks' in data:
-    import re
-    b = re.compile(r'^(?P<m>\d+)-(?P<n>\d+):(?P<s>\d+)$').match(data['job-array tasks']).groupdict()
-    (start, stop, step) =  (int(b['m']), int(b['n']), int(b['s']))
-    # add array jobs
-    for i in range(start, stop+1, step):
-      session.add(ArrayJob(i, job.id))
-
-  session.commit()
-  return job
-
-
-def add_job(session, command_line, name=None, dependencies=[], array=None, log_dir=None, **kwargs):
-  """Helper function to create a job that will run on the local machine."""
-  job = Job(command_line=command_line, name=name, log_dir=log_dir, kwargs=kwargs)
+def add_job(session, command_line, name = 'job', dependencies = [], array = None, log_dir = None, **kwargs):
+  """Helper function to create a job, add the dependencies and the array jobs."""
+  job = Job(command_line=command_line, name=name, log_dir=log_dir, array_string=array, kwargs=kwargs)
 
   session.add(job)
   session.flush()
   session.refresh(job)
 
-  # by default grid_id and id are identical, but the grid_id might be overwritten later on
-  job.grid_id = job.id
+  # by default id and unique id are identical, but the id might be overwritten later on
+  job.id = job.unique
 
   for d in dependencies:
-    session.add(JobDependence(job, d))
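+    # dependencies are given as (grid) job ids; translate them into the unique keys stored in the link table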
+    depending = list(session.query(Job).filter(Job.id == d))
+    if len(depending):
+      session.add(JobDependence(job.unique, depending[0].unique))
+    else:
+      logger.warn("Could not find dependent job with id %d in database" % d)
+
 
   if array:
     (start, stop, step) = array
     # add array jobs
     for i in range(start, stop+1, step):
-      session.add(ArrayJob(i, job.id))
+      session.add(ArrayJob(i, job.unique))
 
   session.commit()
 
diff --git a/gridtk/script/jman.py b/gridtk/script/jman.py
index 73ad0ec9ab32800ae5ecf594308632e5f4db2952..0036c09c84849bc7b5a71d2664748d9fd9318e43 100644
--- a/gridtk/script/jman.py
+++ b/gridtk/script/jman.py
@@ -26,7 +26,7 @@ from ..tools import make_shell, random_logdir, logger
 def setup(args):
   """Returns the JobManager and sets up the basic infrastructure"""
 
-  kwargs = {}
+  kwargs = {'wrapper_script' : args.wrapper_script}
   if args.db: kwargs['database'] = args.db
   if args.local:
     jm = local.JobManagerLocal(**kwargs)
@@ -34,10 +34,13 @@ def setup(args):
     jm = sge.JobManagerSGE(**kwargs)
 
   # set-up logging
+  import logging
   if args.debug:
-    import logging
     logger.addHandler(logging.StreamHandler())
     logger.setLevel(logging.DEBUG)
+  else:
+    logger.setLevel(logging.WARNING)
+
 
   return jm
 
@@ -103,65 +106,10 @@ def submit(args):
   job_id = jm.submit(args.job, **kwargs)
 
 
-def explain(args):
-  """Explain action"""
-
-  jm = setup(args)
-
-  if args.jobid:
-    jobs = [[int(n) for n in k.split('.', 1)] for k in args.jobid]
-    for v in jobs:
-      if len(v) == 1: v.append(None)
-  else:
-    jobs = [(k, None) for k in jm.keys()]
-
-  first_time = True
-  for k in jobs:
-    if not first_time: print 79*'-'
-    first_time = False
-    J = jm[k[0]]
-    print "Job", J
-    print "Command line:", J.command_line()
-    if args.verbose:
-      print "%s stdout (%s)" % (J.name(k[1]), J.stdout_filename(k[1]))
-      print J.stdout(k[1])
-    if args.verbose:
-      print "%s stderr (%s)" % (J.name(k[1]), J.stderr_filename(k[1]))
-    print J.stderr(k[1])
-
 def resubmit(args):
-
+  """Re-submits the jobs with the given ids."""
   jm = setup(args)
-  fromjm = JobManager(args.fromdb)
-  jobs = fromjm.keys()
-  if args.jobid: jobs = args.jobid
-  for k in jobs:
-    O = fromjm[k]
-
-    args.stdout, args.stderr = get_logdirs(args.stdout, args.stderr, args.logbase)
-
-    J = jm.resubmit(O, args.stdout, args.stderr, args.deps, args.failed_only)
-
-    if args.verbose:
-      if isinstance(J, (tuple, list)):
-        for k in J: print 'Re-submitted job', J
-      else:
-        print 'Re-submitted job', J
-    else:
-      if isinstance(J, (tuple, list)):
-        print 'Re-submitted %d jobs' % len(J)
-      else:
-        print 'Re-submitted job', J.name()
-
-    if args.cleanup:
-      if args.verbose:
-        O.rm_stdout(verbose='  ')
-        O.rm_stderr(verbose='  ')
-      else:
-        O.rm_stdout()
-        O.rm_stderr()
-      del fromjm[k]
-      print '  deleted job %s from database' % O.name()
+  jm.resubmit(args.job_ids, args.failed_only, args.running_jobs)
 
 
 def execute(args):
@@ -172,25 +120,38 @@ def execute(args):
   jm.run(parallel_jobs=args.parallel, job_ids=args.job_ids)
 
 
-def ls(args):
+def list(args):
   """Lists the jobs in the given database."""
   jm = setup(args)
-  jm.list()
+  jm.list(args.job_ids, args.print_array_jobs, args.print_dependencies)
 
 
 def report(args):
   """Reports the results of the finished (and unfinished) jobs."""
   jm = setup(args)
-  jm.report(grid_ids=args.job_ids, array_ids=args.array_ids, unfinished=args.unfinished_also, output=not args.errors_only, error=not args.output_only)
+  jm.report(job_ids=args.job_ids, array_ids=args.array_ids, unfinished=args.unfinished_also, output=not args.errors_only, error=not args.output_only)
+
+
+def stop(args):
+  """Stops (qdel's) the jobs with the given ids."""
+  if args.local:
+    raise ValueError("Stopping commands locally is not supported (please kill them yourself)")
+  jm = setup(args)
+  jm.stop_jobs(args.job_ids)
 
 
 def delete(args):
-  """Deletes the jobs from the job manager."""
+  """Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped."""
   jm = setup(args)
-  jm.delete(grid_ids=args.job_ids, array_ids=args.array_ids, delete_logs=not args.keep_logs, delete_log_dir=not args.keep_log_dir)
+  # first, stop the jobs if they are running in the grid
+  if not args.local:
+    stop(args)
+  # then, delete them from the database
+  jm.delete(job_ids=args.job_ids, array_ids=args.array_ids, delete_logs=not args.keep_logs, delete_log_dir=not args.keep_log_dir)
 
 
 def run_job(args):
+  """Starts the wrapper script to execute a job, interpreting the JOB_ID and SGE_TASK_ID keywords that are set by the grid or by us."""
   jm = setup(args)
   job_id = int(os.environ['JOB_ID'])
   array_id = int(os.environ['SGE_TASK_ID']) if os.environ['SGE_TASK_ID'] != 'undefined' else None
@@ -232,7 +193,7 @@ class AliasedSubParsersAction(argparse._SubParsersAction):
     return parser
 
 
-def main():
+def main(command_line_options = None):
 
   from ..config import __version__
 
@@ -253,50 +214,58 @@ def main():
         help = 'Uses the local job manager instead of the SGE one.')
   cmdparser = parser.add_subparsers(title='commands', help='commands accepted by %(prog)s')
 
-  # subcommand 'list'
-  lsparser = cmdparser.add_parser('list', aliases=['ls'],
-      help='lists jobs stored in the database')
-  lsparser.add_argument('db', metavar='DATABASE', help='replace the default database by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database', nargs='?')
-  lsparser.set_defaults(func=ls)
-
   # subcommand 'submit'
-  subparser = cmdparser.add_parser('submit', aliases=['sub'],
+  submit_parser = cmdparser.add_parser('submit', aliases=['sub'],
       help='submits self-contained jobs to the SGE queue and logs them in a private database')
-  subparser.add_argument('-d', '--db', '--database', metavar='DATABASE', help='replace the default database to be used by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database')
-  subparser.add_argument('-q', '--queue', metavar='QNAME',
+  submit_parser.add_argument('-d', '--db', '--database', metavar='DATABASE', help='replace the default database to be used by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database')
+  submit_parser.add_argument('-q', '--queue', metavar='QNAME',
       dest='qname', default='all.q', help='the name of the SGE queue to submit the job to (defaults to "%(default)s")')
-  #this is ON by default as it helps job management
-  #subparser.add_argument('-c', '--cwd', default=False, action='store_true',
-  #    dest='cwd', help='Makes SGE switch to the current working directory before executing the job')
-  subparser.add_argument('-m', '--memory', dest='memory', help='Sets both the h_vmem **and** the mem_free parameters when submitting the job to the specified value (e.g. 8G to set the memory requirements to 8 gigabytes)')
-  subparser.add_argument('-n', '--name', dest='name', help='Sets the jobname')
-  subparser.add_argument('-x', '--dependencies', type=int,
+  submit_parser.add_argument('-m', '--memory', dest='memory', help='Sets both the h_vmem **and** the mem_free parameters when submitting the job to the specified value (e.g. 8G to set the memory requirements to 8 gigabytes)')
+  submit_parser.add_argument('-n', '--name', dest='name', help='Sets the jobname')
+  submit_parser.add_argument('-x', '--dependencies', type=int,
      default=[], metavar='ID', nargs='*', help='set job dependencies by giving this option a list of job identifiers separated by spaces')
-  subparser.add_argument('-l', '--log-dir', metavar='DIR', help='Sets the log directory. By default, "logs" is selected. If the jobs are executed locally, by default the result is written to console.')
-  subparser.add_argument('-s', '--environment', '--env', metavar='KEY=VALUE',
+  submit_parser.add_argument('-l', '--log-dir', metavar='DIR', help='Sets the log directory. By default, "logs" is selected. If the jobs are executed locally, by default the result is written to console.')
+  submit_parser.add_argument('-s', '--environment', '--env', metavar='KEY=VALUE',
       dest='env', nargs='*', default=[],
       help='Passes specific environment variables to the job')
-  subparser.add_argument('-t', '--array', '--parametric', metavar='[start:]stop[-step]',
+  submit_parser.add_argument('-t', '--array', '--parametric', metavar='[start:]stop[-step]',
       dest='array', help='Creates a parametric (array) job. You must specify the stop value, but start (default=1) and step (default=1) can be specified as well.')
-  subparser.add_argument('-p', '--py', '--python', dest='python', default=False,
+  submit_parser.add_argument('-p', '--py', '--python', dest='python', default=False,
       action='store_true', help='Wrap execution of your command using the current python interpreter')
-  subparser.add_argument('-z', '--dry-run',
+  submit_parser.add_argument('-z', '--dry-run',
      action='store_true', help='Do not really submit anything, just print out what would be submitted in this case')
-  subparser.add_argument('-I', '--io-big', dest='io_big', default=False,
+  submit_parser.add_argument('-I', '--io-big', dest='io_big', default=False,
       action='store_true', help='Sets "io_big" on the submitted jobs so it limits the machines in which the job is submitted to those that can do high-throughput')
-  subparser.add_argument('job', metavar='command', nargs=argparse.REMAINDER)
-  subparser.set_defaults(func=submit)
+  submit_parser.add_argument('job', metavar='command', nargs=argparse.REMAINDER)
+  submit_parser.set_defaults(func=submit)
+
+  # re-submit parser
+  resubmit_parser = cmdparser.add_parser('resubmit', aliases=['re'],
+      help='Re-submits a list of jobs')
+  resubmit_parser.add_argument('-d', '--db', '--database', metavar='DATABASE', help='replace the default database to be used by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database')
+  resubmit_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='Re-submit only the jobs with the given ids (by default, all jobs are re-submitted)')
+  resubmit_parser.add_argument('-f', '--failed-only', action='store_true', help='Re-submit only jobs that have failed')
+  resubmit_parser.add_argument('-a', '--running-jobs', action='store_true', help='Re-submit even jobs that are running or waiting')
+  resubmit_parser.set_defaults(func=resubmit)
+
+  # stop parser
+  stop_parser = cmdparser.add_parser('stop', help='Stops the execution of jobs in the grid')
+  stop_parser.add_argument('-d', '--db', '--database', metavar='DATABASE', help='replace the default database to be used by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database')
+  stop_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='Stop only the jobs with the given ids (by default, all jobs are stopped)')
+  stop_parser.set_defaults(func=stop)
 
-  execute_parser = cmdparser.add_parser('execute', aliases=['exe', 'x'],
-      help='Executes the registered jobs on the local machine; only valid in combination with the \'--local\' option.')
-  execute_parser.add_argument('db', metavar='DATABASE', help='replace the default database to be executed by one provided by you', nargs='?')
-  execute_parser.add_argument('-p', '--parallel', type=int, default=1, help='Select the number of parallel jobs that you want to execute locally')
-  execute_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='Execute only the jobs with the given ids (by default, all unfinished jobs are executed)')
-  execute_parser.set_defaults(func=execute)
+  # subcommand 'list'
+  list_parser = cmdparser.add_parser('list', aliases=['ls'],
+      help='lists jobs stored in the database')
+  list_parser.add_argument('-d', '--db', metavar='DATABASE', help='replace the default database by one provided by you; this option is only required if you are running outside the directory where you originally submitted the jobs from or if you have altered manually the location of the JobManager database', nargs='?')
+  list_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='List only the jobs with the given ids (by default, all jobs are listed)')
+  list_parser.add_argument('-a', '--print-array-jobs', action='store_true', help='Also print the array jobs of the listed jobs.')
+  list_parser.add_argument('-x', '--print-dependencies', action='store_true', help='Print the dependencies of the jobs as well.')
+  list_parser.set_defaults(func=list)
 
   report_parser = cmdparser.add_parser('report', aliases=['ref', 'r'],
       help='Iterates through the result and error log files and prints out the logs')
-  report_parser.add_argument('db', metavar='DATABASE', help='replace the default database to be reported by one provided by you', nargs='?')
+  report_parser.add_argument('-d', '--db', metavar='DATABASE', help='replace the default database to be reported by one provided by you', nargs='?')
   report_parser.add_argument('-e', '--errors-only', action='store_true', help='Only report the error logs (by default, both logs are reported).')
   report_parser.add_argument('-o', '--output-only', action='store_true', help='Only report the output logs  (by default, both logs are reported).')
   report_parser.add_argument('-u', '--unfinished-also', action='store_true', help='Report also the unfinished jobs.')
@@ -307,22 +276,35 @@ def main():
   # subcommand 'delete'
   delete_parser = cmdparser.add_parser('delete', aliases=['del', 'rm', 'remove'],
       help='removes jobs from the database; if jobs are running or are still scheduled in SGE, the jobs are also removed from the SGE queue')
-  delete_parser.add_argument('db', metavar='DATABASE', help='replace the default database to be reported by one provided by you', nargs='?')
+  delete_parser.add_argument('-d', '--db', metavar='DATABASE', help='replace the default database to be reported by one provided by you', nargs='?')
   delete_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='Delete only the jobs with the given ids (by default, all jobs are deleted)')
   delete_parser.add_argument('-a', '--array-ids', metavar='ID', nargs='*', type=int, help='Delete only the jobs with the given array ids. If specified, a single job-id must be given as well.')
   delete_parser.add_argument('-r', '--keep-logs', action='store_true', help='If set, the log files will NOT be removed.')
   delete_parser.add_argument('-R', '--keep-log-dir', action='store_true', help='When removing the logs, keep the log directory.')
   delete_parser.set_defaults(func=delete)
 
+  # subcommand 'execute'
+  execute_parser = cmdparser.add_parser('execute', aliases=['exe', 'x'],
+      help='Executes the registered jobs on the local machine; only valid in combination with the \'--local\' option.')
+  execute_parser.add_argument('-d', '--db', metavar='DATABASE', help='replace the default database to be executed by one provided by you', nargs='?')
+  execute_parser.add_argument('-p', '--parallel', type=int, default=1, help='Select the number of parallel jobs that you want to execute locally')
+  execute_parser.add_argument('-j', '--job-ids', metavar='ID', nargs='*', type=int, help='Execute only the jobs with the given ids (by default, all unfinished jobs are executed)')
+  execute_parser.set_defaults(func=execute)
+
 
+  # subcommand 'run-job'; hidden from the command line help since it is only used internally via the wrapper script
   run_parser = cmdparser.add_parser('run-job', help=argparse.SUPPRESS)
   run_parser.add_argument('db', metavar='DATABASE', nargs='?', help=argparse.SUPPRESS)
-#  run_parser.add_argument('--job-id', required = True, type=int, help=argparse.SUPPRESS)
-#  run_parser.add_argument('--array-id', type=int, help=argparse.SUPPRESS)
   run_parser.set_defaults(func=run_job)
 
-  args = parser.parse_args()
+
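+  # when called programmatically (e.g., from the tests), the first element of
+  # command_line_options is expected to be the path of the wrapper script itself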
+  if command_line_options:
+    args = parser.parse_args(command_line_options[1:])
+    args.wrapper_script = command_line_options[0]
+  else:
+    args = parser.parse_args()
+    args.wrapper_script = sys.argv[0]
 
   args.func(args)
 
-  sys.exit(0)
+  return 0
diff --git a/gridtk/sge.py b/gridtk/sge.py
index 15403365544a69ba69ba160c5d82b4c5987c1c2e..41da815c4cc2fb21f121e5e20a6e5ee6b7d5bb9e 100644
--- a/gridtk/sge.py
+++ b/gridtk/sge.py
@@ -9,14 +9,14 @@
 from .manager import JobManager
 from .setshell import environ
 from .models import add_job
-from .tools import qsub, qstat, make_shell
+from .tools import qsub, qstat, qdel, make_shell
 
 import os, sys
 
 class JobManagerSGE(JobManager):
   """The JobManager will submit and control the status of submitted jobs"""
 
-  def __init__(self, database='submitted.sql3', context='grid'):
+  def __init__(self, database='submitted.sql3', context='grid', wrapper_script = './bin/jman'):
     """Initializes this object with a state file and a method for qsub'bing.
 
     Keyword parameters:
@@ -32,33 +32,36 @@ class JobManagerSGE(JobManager):
     """
 
     self.context = environ(context)
-    JobManager.__init__(self, database)
+    JobManager.__init__(self, database, wrapper_script)
 
 
-  def submit(self, command_line, name = None, array = None, dependencies = [], log_dir = "logs", **kwargs):
-    """Submits a job that will be executed in the grid."""
-    # add job to database
-    self.lock()
-    job = add_job(self.session, command_line, name, dependencies, array, log_dir=log_dir, context=self.context, **kwargs)
-
+  def _submit_to_grid(self, job, name, array, dependencies, log_dir, **kwargs):
     # ... what we will actually submit to the grid is a wrapper script that will call the desired command...
-    # get the name of the file that was called originally
-    jman = os.path.realpath(sys.argv[0])
+    # use the wrapper script that was given at construction time
+    jman = self.wrapper_script
     python = jman.replace('jman', 'python')
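+    # this assumes that a python interpreter lives next to the jman script (e.g., ./bin/python)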
     # generate call to the wrapper script
-    command = make_shell(python, [jman, 'run-job', self.database])
-    if array:
-      q_array = "%d-%d:%d" % array
+    command = make_shell(python, [jman, 'run-job', self._database])
+    q_array = "%d-%d:%d" % array if array else None
     grid_id = qsub(command, context=self.context, name=name, deps=dependencies, array=q_array, stdout=log_dir, stderr=log_dir, **kwargs)
 
-    # set the grid id of the job
-    job.grid_id = grid_id
-    self.session.commit()
-
     # get the result of qstat
     status = qstat(grid_id, context=self.context)
-    for k,v in status.iteritems():
-      print k, ":", v
+
+    # set the grid id of the job
+    job.id = int(status['job_number'])
+    assert job.id == grid_id
+    job.name = status['job_name']
+
+
+  def submit(self, command_line, name = None, array = None, dependencies = [], log_dir = "logs", **kwargs):
+    """Submits a job that will be executed in the grid."""
+    # add job to database
+    self.lock()
+    job = add_job(self.session, command_line, name, dependencies, array, log_dir=log_dir, context=self.context, **kwargs)
+
+    self._submit_to_grid(job, name, array, dependencies, log_dir, **kwargs)
+    self.session.commit()
 
     # return the job id
     job_id = job.id
@@ -67,97 +70,39 @@ class JobManagerSGE(JobManager):
     return job_id
 
 
-  def resubmit(self, job, stdout='', stderr='', dependencies=[],
-      failed_only=False):
+  def resubmit(self, job_ids = None, failed_only = False, running_jobs = False):
     """Re-submit jobs automatically"""
+    self.lock()
+    # iterate over all jobs
+    jobs = self.get_jobs(job_ids)
+    for job in jobs:
+      # check if this job needs re-submission
+      if running_jobs or job.status == 'finished':
+        if not failed_only or job.result != 0:
+          # resubmit
+          if job.array:
+            # get the array as before
+            array = job.get_array()
+          else:
+            array = None
+          job.status = 'waiting'
+          job.result = None
+          # re-submit job to the grid
+          self._submit_to_grid(job, job.name, array, [dep.id for dep in job.get_jobs_we_wait_for()], job.log_dir)
 
-    if dependencies: job.kwargs['deps'] = dependencies
-    if stdout: job.kwargs['stdout'] = stdout
-    if stderr: job.kwargs['stderr'] = stderr
-
-    if failed_only and job.is_array():
-      retval = []
-      for k in job.check_array():
-        job.kwargs['array'] = (k,k,1)
-        retval.append(self.submit(job.args[0], **job.kwargs))
-      return retval
-
-    else: #either failed_only is not set or submit the job as it was, entirely
-      return self.submit(job.args[0], **job.kwargs)
-
-  def keys(self):
-    return self.job.keys()
-
-  def has_key(self, key):
-    return self.job.has_key(key)
-
-  def __getitem__(self, key):
-    return self.job[key]
-
-  def __delitem__(self, key):
-    if not self.job.has_key(key): raise KeyError, key
-    qdel(key, context=self.context)
-    del self.job[key]
-
-  def __str__(self):
-    """Returns the status of each job still being tracked"""
-
-    return self.table(43)
-
-  def table(self, maxcmdline=0):
-    """Returns the status of each job still being tracked"""
-
-    # configuration
-    fields = ("job-id", "queue", "age", "job-name", "arguments")
-    lengths = (20, 7, 3, 20, 43)
-    marker = '='
-
-    # work
-    fmt = "%%%ds  %%%ds  %%%ds  %%%ds  %%-%ds" % lengths
-    delimiter = fmt % tuple([k*marker for k in lengths])
-    header = [fields[k].center(lengths[k]) for k in range(len(lengths))]
-    header = '  '.join(header)
-
-    return '\n'.join([header] + [delimiter] + \
-        [self[k].row(fmt, maxcmdline) for k in sorted(self.job.keys())])
-
-  def clear(self):
-    """Clear the whole job queue"""
-    for k in self.keys(): del self[k]
-
-  def describe(self, key):
-    """Returns a string explaining a certain job"""
-    return str(self[key])
+    self.session.commit()
+    self.unlock()
 
-  def stdout(self, key, instance=None):
-    """Gets the output of a certain job"""
-    return self[key].stdout(instance)
 
-  def stderr(self, key, instance=None):
-    """Gets the error output of a certain job"""
-    return self[key].stderr(instance)
+  def stop_jobs(self, job_ids):
+    """Stops the jobs in the grid."""
+    self.lock()
 
-  def refresh(self, ignore_warnings=False):
-    """Conducts a qstat over all jobs in the cache. If the job is not present
-    anymore check the logs directory for output and error files. If the size of
-    the error file is different than zero, warn the user.
+    jobs = self.get_jobs(job_ids)
+    for job in jobs:
+      qdel(job.id, context=self.context)
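+      # reset running jobs to 'waiting' so that they can be re-submitted later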
+      if job.status == 'executing':
+        job.status = 'waiting'
 
-    Returns two lists: jobs that work and jobs that require attention
-    (error file does not have size 0).
-    """
-    success = []
-    error = []
-    for k in sorted(self.job.keys()):
-      d = qstat(k, context=self.context)
-      if not d: #job has finished. check
-        status = self.job[k].check(ignore_warnings)
-        if status:
-          success.append(self.job[k])
-          del self.job[k]
-          logger.debug("Job %d completed successfully" % k)
-        else:
-          error.append(self.job[k])
-          del self.job[k]
-          logger.debug("Job %d probably did not complete successfully" % k)
-
-    return success, error
+    self.session.commit()
+    self.unlock()
diff --git a/gridtk/tests/__init__.py b/gridtk/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1e46f49abcbedeb3ff4ef77dc2b9e8315b79dd2
--- /dev/null
+++ b/gridtk/tests/__init__.py
@@ -0,0 +1,120 @@
+import unittest
+import nose
+
+import os
+import pkg_resources
+
+import gridtk
+
+from gridtk.models import Job
+
+class DatabaseTest(unittest.TestCase):
+  # This class defines tests for the gridtk
+
+  def setUp(self):
+    # Create a temporary directory that will contain all outputs
+    import tempfile
+    self.temp_dir = tempfile.mkdtemp(prefix='gridtk_test')
+    self.log_dir = os.path.join(self.temp_dir, 'logs')
+    self.db = os.path.join(self.temp_dir, 'database.sql3')
+
+
+  def tearDown(self):
+    # Clean up the mess that we created
+    import shutil
+    shutil.rmtree(self.temp_dir)
+
+  def test01_local(self):
+    # This test executes all commands of the local grid manager and asserts that everything is fine
+
+    # first, add some commands to the database
+    script_1 = pkg_resources.resource_filename('gridtk.tests', 'test_script.sh')
+    script_2 = pkg_resources.resource_filename('gridtk.tests', 'test_array.sh')
+    from gridtk.script import jman
+    # add a simple script that will write some information to std-out and std-err
+    jman.main(['./bin/jman', '--local', 'submit', '--db', self.db, '--log-dir', self.log_dir, '--name', 'test_1', script_1])
+    jman.main(['./bin/jman', '--local', 'submit', '--db', self.db, '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', script_2])
+
+    # check that the database was created successfully
+    assert os.path.exists(self.db)
+
+    # test that the list command works (should also work with the "default" grid manager)
+    jman.main(['./bin/jman', 'list', '--db', self.db, '--job-ids', '1'])
+    jman.main(['./bin/jman', 'list', '--db', self.db, '--job-ids', '2', '--print-array-jobs', '--print-dependencies'])
+
+    # get insight into the database
+    job_manager = gridtk.local.JobManagerLocal(self.db)
+    session = job_manager.lock()
+    jobs = list(session.query(Job))
+    assert len(jobs) == 2
+    assert jobs[0].id == 1
+    assert jobs[1].id == 2
+    assert len(jobs[1].array) == 4
+
+    # check that the job dependencies are correct
+    waiting = jobs[0].get_jobs_waiting_for_us()
+    assert len(waiting) == 1
+    assert waiting[0].id == 2
+    waited = jobs[1].get_jobs_we_wait_for()
+    assert len(waited) == 1
+    assert waited[0].id == 1
+
+    job_manager.unlock()
+
+    # try to run the job 2 first (should fail since it depends on job 1)
+    nose.tools.assert_raises(RuntimeError, jman.main, ['./bin/jman', '--local', 'execute', '--db', self.db, '--job-id', '2'])
+
+    # execute job 1
+    jman.main(['./bin/jman', '--local', 'execute', '--db', self.db, '--job-id', '1'])
+
+    # check that the output is actually there
+    out_file = os.path.join(self.log_dir, 'test_1.o1')
+    err_file = os.path.join(self.log_dir, 'test_1.e1')
+    assert os.path.isfile(out_file)
+    assert os.path.isfile(err_file)
+    assert open(out_file).read().rstrip() == 'This is a text message to std-out'
+    assert open(err_file).read().rstrip() == 'This is a text message to std-err'
+
+    # check the status and the result of job 1
+    session = job_manager.lock()
+    job = list(session.query(Job).filter(Job.id == 1))[0]
+    assert job.status == 'finished'
+    assert job.result == 255
+    job_manager.unlock()
+
+    # reset the job 1
+    jman.main(['./bin/jman', '--local', 'resubmit', '--db', self.db, '--job-id', '1'])
+    # assert that job 2 still can't run
+    nose.tools.assert_raises(RuntimeError, jman.main, ['./bin/jman', '--local', 'execute', '--db', self.db, '--job-id', '2'])
+
+    # delete job 1 from the database
+    jman.main(['./bin/jman', '--local', 'delete', '--db', self.db, '--job-id', '1'])
+    # check that the clean-up was successful
+    assert not os.path.exists(self.log_dir)
+
+    # now, execute job 2 with 2 parallel jobs (this might not work during the nightlies...)
+    jman.main(['./bin/jman', '--local', 'execute', '--db', self.db, '--job-id', '2', '--parallel', '2'])
+
+    # check that exactly four output and four error files have been created
+    files = os.listdir(self.log_dir)
+    assert len(files) == 8
+    for i in range(1,8,2):
+      assert 'test_2.o2.%d'%i in files
+      assert 'test_2.e2.%d'%i in files
+
+    # test the result of the experiments
+    session = job_manager.lock()
+    job = list(session.query(Job).filter(Job.id == 2))[0]
+    assert job.status == 'finished'
+    assert job.result == 1
+    for i in range(4):
+      assert job.array[i].id == 2*i+1
+      assert job.array[i].result == (0 if i else 1)
+      assert job.array[i].status == 'finished'
+    job_manager.unlock()
+
+    # clean-up
+    jman.main(['./bin/jman', '--local', 'delete', '--db', self.db])
+
+    # check that the db and the log files are gone
+    assert len(os.listdir(self.temp_dir)) == 0
diff --git a/gridtk/tests/test_array.sh b/gridtk/tests/test_array.sh
new file mode 100755
index 0000000000000000000000000000000000000000..08f9cef4acf569fb50f8d9e882c54745b808daa7
--- /dev/null
+++ b/gridtk/tests/test_array.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo "The job id is '$JOB_ID' and the task id is '$SGE_TASK_ID'"
+
+if [[ $SGE_TASK_ID == '1' ]]; then
+  exit 1
+else
+  exit 0
+fi
diff --git a/gridtk/tests/test_script.sh b/gridtk/tests/test_script.sh
new file mode 100755
index 0000000000000000000000000000000000000000..90adbeedcf875829ad9bd07c57f1f5db640dd1ee
--- /dev/null
+++ b/gridtk/tests/test_script.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# We simply write one line to stdout and one line to stderr
+echo "This is a text message to std-out"
+echo "This is a text message to std-err" >&2
+
+# We exit with -1 (should be 255 as the "result")
+exit -1
diff --git a/setup.py b/setup.py
index 8fdf1d0e886eef43453f90d2cc52e4b544bf13b2..b11b68bfea3c3ac8bd72469eebcab35fb90d1d67 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='gridtk',
-    version='0.3.7',
+    version='1.0.0.a0',
     description='SGE Grid Submission and Monitoring Tools for Idiap',
 
     url='https://github.com/idiap/gridtk',