# we here set the status to executing manually to avoid jobs to be run twice
# e.g., if the loop is executed while the asynchronous job did not start yet
array_job.status='executing'
job.status='executing'
iflen(running_tasks)==parallel_jobs:
break
else:
# execute job
ifjob_idnotinrunning_jobs:
process=self._run_parallel_job(job_id)
ifprocessisnotNone:
# remember that we currently run this job
running_jobs.append((process,job_id))
else:
# remove the job that could not be started
unfinished_jobs.remove(job_id)
# Nothing is running but unfinished jobs remain: this is a weird case
# that leads to a dead lock. It seems there is a dependency that cannot
# be fulfilled — this might happen when a single job should be executed,
# but it depends on another job that will never run here.
if not running_jobs and unfinished_jobs:
    raise RuntimeError("Dead lock detected. There are dependencies in the database that cannot be fulfilled. Did you try to run a job that has unfulfilled dependencies?")
# sleep for some time (default: 0.1 seconds)
time.sleep(self._sleep_time)
ifjob.status=='queued':
# start a new job
process=self._run_parallel_job(job.id)
running_tasks.append((process,job.id))
# we here set the status to executing manually to avoid jobs to be run twice
# e.g., if the loop is executed while the asynchronous job did not start yet
job.status='executing'
iflen(running_tasks)==parallel_jobs:
break
# Persist all status changes made above, then release the database lock
# before sleeping — keep this order (commit first, then unlock).
self.session.commit()
self.unlock()
# THIRD: sleep the desired amount of time before re-checking
time.sleep(sleep_time)
# This is the only way to stop: you have to interrupt the scheduler
exceptKeyboardInterrupt:
logger.info("Stopping task scheduler due to user interrupt.")
fortaskinrunning_tasks:
logger.warn("Killing job '%s' that was still running."%self._format_log(task[1],task[2]iflen(task)>2elseNone))