Commit 9f92f26e authored by André Anjos

Merge branch 'zmq_refactoring' into 'master'

ZMQ refactoring

Closes #518 and #517

See merge request !275
parents 7165564b 18cca41c
Pipeline #27969 passed
Showing with 1452 additions and 38 deletions
[flake8]
max-line-length = 80
select = B,C,E,F,W,T4,B9,B950
ignore = E501, W503
@@ -15,6 +15,7 @@ sphinx/
 .mr.developer.cfg
 .coverage
 *.sql3
+*.sqlite3
 .DS_Store
 beat/web/settings/settings.py
 src/
@@ -27,7 +27,7 @@ build_linux_36:
     - buildout
     - python -c "from beat.core.test.utils import pull_docker_test_images as f; f()"
     - export COVERAGE_FILE=.coverage.django
-    - ./bin/coverage run --source=${CI_PROJECT_NAME} ./bin/django test --settings=beat.web.settings.ci -v 2
+    - ./bin/coverage run --source=${CI_PROJECT_NAME} ./bin/django test --settings=beat.web.settings.ci -v 2 --noinput
     - export BEAT_CMDLINE_TEST_PLATFORM="django://beat.web.settings.ci"
     - export COVERAGE_FILE=.coverage.cmdline
     - export NOSE_WITH_COVERAGE=1
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/ambv/black
    rev: stable
    hooks:
      - id: black
        language_version: python3.6
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: debug-statements
      - id: check-added-large-files
      - id: check-docstring-first
      - id: flake8
      - id: check-yaml
        exclude: conda/meta.yaml
  - repo: https://github.com/PyCQA/bandit
    rev: 'master'  # Update me!
    hooks:
      - id: bandit
        exclude: beat/editor/test
@@ -27,6 +27,7 @@
 # Django settings for tests on the CI server

-from .test import *
+from .test import *  # noqa

 RUNNING_ON_CI = True
+
+DATABASES["default"]["OPTIONS"]["timeout"] = 60  # noqa
@@ -26,48 +26,64 @@
 ###############################################################################
 # Django settings for tests

-from .settings import *
+import os
+import platform
+import sys
+
+from .settings import *  # noqa

 TEST_CONFIGURATION = True

 RUNNING_ON_CI = False

 DEBUG = False
-TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
+TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG  # noqa

-ALLOWED_HOSTS = [
-    'testserver',
-]
+ALLOWED_HOSTS = ["testserver"]

-DATABASES['default']['NAME'] = 'test.sql3'
-DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
-DATABASES['default']['OPTIONS']['timeout'] = 30
+if platform.system() == "Linux":
+    shm_path = "/dev/shm/beatweb"  # nosec
+    if not os.path.exists(shm_path):
+        os.makedirs(shm_path)
+    database_name = os.path.join(shm_path, "test.sqlite3")
+else:
+    database_name = "test.sqlite3"
+
+DATABASES["default"]["NAME"] = database_name  # noqa
+DATABASES["default"]["TEST"] = {"NAME": DATABASES["default"]["NAME"]}  # noqa
+DATABASES["default"]["OPTIONS"]["timeout"] = 30  # noqa
+DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa
+
+# Timeout used in test when waiting for an update
+DB_REFRESH_TIMEOUT = int(os.environ.get("DB_REFRESH_TIMEOUT", 10))

-import sys
-if 'beat.cmdline' in sys.argv:
+if "beat.cmdline" in sys.argv:
     # make it in-memory for cmdline app tests
-    DATABASES['default']['NAME'] = ':memory:'
+    DATABASES["default"]["NAME"] = ":memory:"  # noqa

-LOGGING['handlers']['console']['level'] = 'DEBUG'
-LOGGING['loggers']['beat.core']['handlers'] = ['discard']
-LOGGING['loggers']['beat.web']['handlers'] = ['discard']
-LOGGING['loggers']['beat.web.utils.management.commands']['handlers'] = ['discard']
+LOGGING["handlers"]["console"]["level"] = "DEBUG"  # noqa
+LOGGING["loggers"]["beat.core"]["handlers"] = ["discard"]  # noqa
+LOGGING["loggers"]["beat.web"]["handlers"] = ["discard"]  # noqa
+LOGGING["loggers"]["beat.web.utils.management.commands"]["handlers"] = [  # noqa
+    "discard"
+]

 BASE_DIR = os.path.dirname(os.path.abspath(__name__))

-PREFIX = os.environ.get('BEAT_TEST_PREFIX', os.path.realpath('./test_prefix'))
-ALGORITHMS_ROOT = os.path.join(PREFIX, 'algorithms')
-PLOTTERS_ROOT = os.path.join(PREFIX, 'plotters')
-LIBRARIES_ROOT = os.path.join(PREFIX, 'libraries')
-DATABASES_ROOT = os.path.join(PREFIX, 'databases')
-DATAFORMATS_ROOT = os.path.join(PREFIX, 'dataformats')
-TOOLCHAINS_ROOT = os.path.join(PREFIX, 'toolchains')
-EXPERIMENTS_ROOT = os.path.join(PREFIX, 'experiments')
-CACHE_ROOT = os.path.join(PREFIX, 'cache')
+if platform.system() == "Linux":
+    default_prefix = os.path.join(shm_path, "test_prefix")  # nosec
+else:
+    default_prefix = os.path.realpath("./test_prefix")
+
+PREFIX = os.environ.get("BEAT_TEST_PREFIX", default_prefix)
+ALGORITHMS_ROOT = os.path.join(PREFIX, "algorithms")
+PLOTTERS_ROOT = os.path.join(PREFIX, "plotters")
+LIBRARIES_ROOT = os.path.join(PREFIX, "libraries")
+DATABASES_ROOT = os.path.join(PREFIX, "databases")
+DATAFORMATS_ROOT = os.path.join(PREFIX, "dataformats")
+TOOLCHAINS_ROOT = os.path.join(PREFIX, "toolchains")
+EXPERIMENTS_ROOT = os.path.join(PREFIX, "experiments")
+CACHE_ROOT = os.path.join(PREFIX, "cache")

 LOCAL_SCHEDULER_VERBOSITY = None

 LOCAL_SCHEDULER_USE_DOCKER = False

 # To speed-up tests, don't put this in production
-PASSWORD_HASHERS = [
-    'django.contrib.auth.hashers.MD5PasswordHasher',
-]
+PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
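The new DB_REFRESH_TIMEOUT knob is only described by its comment. A hypothetical polling helper of the kind such a setting serves, for illustration only (the helper name and interval are ours, not code from this merge request):

    import time

    from django.conf import settings

    def wait_for(predicate, timeout=None, interval=0.5):
        """Polls until predicate() returns True or the timeout (seconds) elapses."""
        timeout = timeout if timeout is not None else settings.DB_REFRESH_TIMEOUT
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return False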
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# encoding: utf-8
###############################################################################
# #
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/ #
# Contact: beat.support@idiap.ch #
# #
# This file is part of the beat.web module of the BEAT platform. #
# #
# Commercial License Usage #
# Licensees holding valid commercial BEAT licenses may use this file in #
# accordance with the terms contained in a written agreement between you #
# and Idiap. For further information contact tto@idiap.ch #
# #
# Alternatively, this file may be used under the terms of the GNU Affero #
# Public License version 3 as published by the Free Software and appearing #
# in the file LICENSE.AGPL included in the packaging of this file. #
# The BEAT platform is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY #
# or FITNESS FOR A PARTICULAR PURPOSE. #
# #
# You should have received a copy of the GNU Affero Public License along #
# with the BEAT platform. If not, see http://www.gnu.org/licenses/. #
# #
###############################################################################
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from beat.web.backend.models import Worker
from beat.core.bcpapi.broker import BeatComputationBroker
from beat.core.utils import setup_logging
logger = logging.getLogger(__name__)
# ----------------------------------------------------------
def onWorkerReady(name):
    logger.info("Worker '%s' is ready", name)
    try:
        worker = Worker.objects.get(name=name)
    except Worker.DoesNotExist:
        logger.error("No worker named '%s' found in the database", name)
    else:
        with transaction.atomic():
            worker.active = True
            worker.info = "Connected to the scheduler"
            worker.save()


# ----------------------------------------------------------


def onWorkerGone(name):
    logger.info("Worker '%s' is gone", name)
    try:
        worker = Worker.objects.get(name=name)
    except Worker.DoesNotExist:
        logger.error("No worker named '%s' found in the database", name)
    else:
        with transaction.atomic():
            worker.active = False
            worker.info = "Disconnected from the scheduler"
            worker.save()


class Command(BaseCommand):

    help = "Start zmq broker"

    def add_arguments(self, parser):
        parser.add_argument(
            "--port",
            "-p",
            type=int,
            dest="port",
            default=5555,
            help="Port of the broker",
        )

    def handle(self, *args, **options):
        verbosity = int(options["verbosity"])

        logger = setup_logging(verbosity, "Broker", __name__)

        if verbosity >= 1:
            if verbosity == 1:
                logger.setLevel(logging.INFO)
            elif verbosity >= 2:
                logger.setLevel(logging.DEBUG)

        logger.info("starting broker")

        address = "tcp://*:{}".format(options["port"])
        broker = BeatComputationBroker(verbosity >= 2)
        broker.set_worker_callbacks(onWorkerReady, onWorkerGone)
        broker.bind(address)
        broker.mediate()
        broker.purge_workers()

        logger.info("broker stopped")
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# encoding: utf-8
###############################################################################
# #
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/ #
# Contact: beat.support@idiap.ch #
# #
# This file is part of the beat.web module of the BEAT platform. #
# #
# Commercial License Usage #
# Licensees holding valid commercial BEAT licenses may use this file in #
# accordance with the terms contained in a written agreement between you #
# and Idiap. For further information contact tto@idiap.ch #
# #
# Alternatively, this file may be used under the terms of the GNU Affero #
# Public License version 3 as published by the Free Software and appearing #
# in the file LICENSE.AGPL included in the packaging of this file. #
# The BEAT platform is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY #
# or FITNESS FOR A PARTICULAR PURPOSE. #
# #
# You should have received a copy of the GNU Affero Public License along #
# with the BEAT platform. If not, see http://www.gnu.org/licenses/. #
# #
###############################################################################
import logging
import multiprocessing
import signal
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.conf import settings
from django import db
from beat.core.utils import find_free_port
logger = logging.getLogger(__name__)
def start_broker(port, verbosity=0):
    db.connections.close_all()
    call_command("broker", port=port, verbosity=verbosity)


def start_scheduler(broker_address, verbosity=0):
    db.connections.close_all()
    call_command("scheduler", broker_address=broker_address, verbosity=verbosity)


def start_worker(broker_address, prefix, cache, use_docker, verbosity=0):
    call_command(
        "worker",
        broker_address=broker_address,
        prefix=prefix,
        cache=cache,
        use_docker=use_docker,
        verbosity=verbosity,
    )


class Command(BaseCommand):

    help = "Run a complete local scheduler/broker/worker setup"

    def __init__(self):
        super(Command, self).__init__()

        self.broker = None
        self.worker = None
        self.scheduler = None

    def __signal_handler(self, signum, frame):
        self.scheduler.terminate()
        self.worker.terminate()
        self.broker.terminate()

    def add_arguments(self, parser):
        parser.add_argument(
            "--docker",
            "-d",
            action="store_true",
            dest="use_docker",
            default=False,
            help="Use docker",
        )

    def handle(self, *args, **options):
        signal.signal(signal.SIGTERM, self.__signal_handler)
        signal.signal(signal.SIGINT, self.__signal_handler)

        verbosity = options["verbosity"]

        port = find_free_port()
        broker_address = "tcp://localhost:{}".format(port)

        self.broker = multiprocessing.Process(
            target=start_broker, args=(port, verbosity)
        )
        self.worker = multiprocessing.Process(
            target=start_worker,
            args=(
                broker_address,
                settings.PREFIX,
                settings.CACHE_ROOT,
                options["use_docker"],
                verbosity,
            ),
        )
        self.scheduler = multiprocessing.Process(
            target=start_scheduler, args=(broker_address, verbosity)
        )

        self.broker.start()
        self.worker.start()
        self.scheduler.start()

        self.broker.join()
        self.scheduler.join()
        self.worker.join()
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# encoding: utf-8
###############################################################################
# #
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/ #
# Contact: beat.support@idiap.ch #
# #
# This file is part of the beat.web module of the BEAT platform. #
# #
# Commercial License Usage #
# Licensees holding valid commercial BEAT licenses may use this file in #
# accordance with the terms contained in a written agreement between you #
# and Idiap. For further information contact tto@idiap.ch #
# #
# Alternatively, this file may be used under the terms of the GNU Affero #
# Public License version 3 as published by the Free Software and appearing #
# in the file LICENSE.AGPL included in the packaging of this file. #
# The BEAT platform is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY #
# or FITNESS FOR A PARTICULAR PURPOSE. #
# #
# You should have received a copy of the GNU Affero Public License along #
# with the BEAT platform. If not, see http://www.gnu.org/licenses/. #
# #
###############################################################################
import logging
import signal
import json
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
from beat.core.bcpapi import BCP
from beat.core.bcpapi.client import BeatComputationClient
from ....backend.models import JobSplit
from ....backend.helpers import split_new_jobs
from ....backend.helpers import process_newly_cancelled_experiments
from ....backend.helpers import assign_splits_to_workers
from ....backend.helpers import get_configuration_for_split
from ....backend.helpers import on_split_started
from ....backend.helpers import on_split_done
from ....backend.helpers import on_split_fail
from ....backend.helpers import on_split_cancelled
logger = logging.getLogger(__name__)
def get_split(split_id, status):
    try:
        return JobSplit.objects.get(id=split_id)
    except JobSplit.DoesNotExist:
        logger.warning(
            "Received message '%s' for unknown job split #%d", status, split_id
        )
        return None
def remove_split_id_from(split_list, split_id):
    """Removes split_id from split_list if present, ignoring it otherwise."""
    try:
        split_list.remove(split_id)
    except ValueError:
        pass
class Command(BaseCommand):

    help = "Start scheduler"

    def __init__(self):
        super(Command, self).__init__()

        self.continue_ = True

    def __signal_handler(self, signum, frame):
        self.continue_ = False

    def add_arguments(self, parser):
        parser.add_argument(
            "--broker-address",
            "-b",
            type=str,
            dest="broker_address",
            default="tcp://localhost:5555",
            help="Address of the broker",
        )
        parser.add_argument(
            "--interval",
            "-i",
            type=int,
            dest="interval",
            default=settings.SCHEDULING_INTERVAL,
            help="Polling interval",
        )

    def handle(self, *args, **options):
        signal.signal(signal.SIGTERM, self.__signal_handler)
        signal.signal(signal.SIGINT, self.__signal_handler)

        client = BeatComputationClient(
            options["broker_address"], options["verbosity"] >= 2
        )
        # client.timeout = 100

        running_job_splits = []
        cancelling_jobs = []

        logger.info("starting scheduler")

        while self.continue_:
            splits_to_cancel = []

            # Process all the incoming messages
            reply = client.recv()

            if reply is not None:
                logger.info("Received: {}".format(reply))

                worker_id, status = reply[:2]
                split_id = int(reply[2])
                if status == BCP.BCPP_JOB_RECEIVED:
                    logger.info(
                        "Job split {} was received by worker {}".format(
                            split_id, worker_id
                        )
                    )
                elif status == BCP.BCPP_JOB_STARTED:
                    logger.info(
                        "Job split {} was started by worker {}".format(
                            split_id, worker_id
                        )
                    )
                    split = get_split(split_id, status)
                    if split is not None:
                        on_split_started(split)
                elif status == BCP.BCPP_JOB_DONE:
                    output = reply[3]
                    if sys.version_info < (3, 6):
                        output = output.decode("utf-8")

                    logger.info(
                        "Job split {} was done by worker {}".format(
                            split_id, worker_id
                        )
                    )
                    split = get_split(split_id, status)
                    if split is not None:
                        on_split_done(split, json.loads(output))
                        remove_split_id_from(running_job_splits, split_id)
                elif status == BCP.BCPP_JOB_CANCELLED:
                    split = get_split(split_id, status)
                    if split is not None:
                        logger.info(
                            "Job split #%d (%s %d/%d @ %s) on '%s' is CANCELLED",
                            split.id,
                            split.job.block.name,
                            split.split_index,
                            split.job.splits.count(),
                            split.job.block.experiment.fullname(),
                            split.worker.name,
                        )
                        on_split_cancelled(split)
                        remove_split_id_from(running_job_splits, split_id)
                elif status == BCP.BCPP_JOB_ERROR:
                    message = reply[3]
                    if sys.version_info < (3, 6):
                        message = message.decode("utf-8")

                    logger.info(
                        "Job split {} processed by worker {} failed:\n{}".format(
                            split_id, worker_id, message
                        )
                    )
                    split = get_split(split_id, status)
                    if split is not None:
                        try:
                            error = json.loads(message)
                        except json.decoder.JSONDecodeError:
                            error = message
                        splits_to_cancel.extend(on_split_fail(split, error))
                        remove_split_id_from(running_job_splits, split_id)
                elif status == BCP.BCPP_ERROR:
                    message = reply[3]
                    logger.info(
                        "Worker {} error for job split {}:\n{}".format(
                            worker_id, split_id, message
                        )
                    )
                    if split_id in running_job_splits:
                        split = get_split(split_id, status)
                        if split is not None:
                            splits_to_cancel.extend(on_split_fail(split, message))
                        remove_split_id_from(running_job_splits, split_id)

            # Effectively cancel newly-cancelled experiments
            splits_to_cancel.extend(process_newly_cancelled_experiments())

            # Cancel the necessary jobs (if any)
            for split_to_cancel in splits_to_cancel:
                if split_to_cancel.id in running_job_splits:
                    logger.info(
                        "Cancelling job split #%d (%s %d/%d @ %s) on '%s'",
                        split_to_cancel.id,
                        split_to_cancel.job.block.name,
                        split_to_cancel.split_index,
                        split_to_cancel.job.splits.count(),
                        split_to_cancel.job.block.experiment.fullname(),
                        split_to_cancel.worker.name,
                    )
                    request = [
                        BCP.BCPE_CANCEL,
                        str(split_to_cancel.id).encode("utf-8"),
                    ]
                    client.send(split_to_cancel.worker.name.encode("utf-8"), request)
                    remove_split_id_from(running_job_splits, split_to_cancel.id)
                    cancelling_jobs.append(split_to_cancel.id)

            if not self.continue_:
                break

            # Start new jobs
            split_new_jobs()
            assigned_splits = assign_splits_to_workers()
            for split in assigned_splits:
                running_job_splits.append(split.id)
                configuration = get_configuration_for_split(split)
                logger.info(
                    "Sending job split #%d (%s %d/%d @ %s) on '%s'",
                    split.id,
                    split.job.block.name,
                    split.split_index,
                    split.job.splits.count(),
                    split.job.block.experiment.fullname(),
                    split.worker.name,
                )
                request = [
                    BCP.BCPE_EXECUTE,
                    str(split.id).encode("utf-8"),
                    json.dumps(configuration).encode("utf-8"),
                ]
                client.send(split.worker.name.encode("utf-8"), request)

        logger.info("scheduler stopped")
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# encoding: utf-8
###############################################################################
# #
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/ #
# Contact: beat.support@idiap.ch #
# #
# This file is part of the beat.web module of the BEAT platform. #
# #
# Commercial License Usage #
# Licensees holding valid commercial BEAT licenses may use this file in #
# accordance with the terms contained in a written agreement between you #
# and Idiap. For further information contact tto@idiap.ch #
# #
# Alternatively, this file may be used under the terms of the GNU Affero #
# Public License version 3 as published by the Free Software and appearing #
# in the file LICENSE.AGPL included in the packaging of this file. #
# The BEAT platform is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY #
# or FITNESS FOR A PARTICULAR PURPOSE. #
# #
# You should have received a copy of the GNU Affero Public License along #
# with the BEAT platform. If not, see http://www.gnu.org/licenses/. #
# #
###############################################################################
import socket
from django.core.management.base import BaseCommand
from django.conf import settings
from beat.core.bcp import worker
from beat.core.utils import setup_logging
class Command(BaseCommand):

    help = "Start zmq worker"

    def add_arguments(self, parser):
        parser.add_argument(
            "--name",
            "-n",
            type=str,
            dest="service_name",
            default=socket.gethostname(),
            help="Service name",
        )
        parser.add_argument(
            "--broker",
            "-b",
            type=str,
            dest="broker_address",
            default="tcp://localhost:5555",
            help="Broker address",
        )
        parser.add_argument(
            "--docker",
            "-d",
            action="store_true",
            dest="use_docker",
            default=False,
            help="Use docker",
        )
        parser.add_argument(
            "--prefix", "-p", dest="prefix", default=settings.PREFIX, help="Prefix path"
        )
        parser.add_argument(
            "--cache",
            "-c",
            type=str,
            dest="cache",
            default=settings.CACHE_ROOT,
            help="Cache path",
        )
        parser.add_argument(
            "--network-name",
            dest="network_name",
            type=str,
            default=None,
            help="Docker network name",
        )
        parser.add_argument(
            "--port-range",
            dest="port_range",
            type=str,
            default=None,
            help="Docker port range",
        )
        parser.add_argument(
            "--docker-images-cache",
            dest="docker_images_cache",
            type=str,
            default=None,
            help="Docker image cache",
        )

    def handle(self, *args, **options):
        verbosity = options["verbosity"]

        logger = setup_logging(verbosity, "Worker", __name__)

        logger.info("starting worker")

        argv = [
            "--name=" + options["service_name"],
            "--prefix=" + options["prefix"],
            "--cache=" + options["cache"],
        ]
        argv.extend(["-v" for i in range(options["verbosity"])])

        if options["use_docker"]:
            argv.append("--docker")

            network_name = options.get("network_name")
            if network_name:
                argv.append("--docker-network=" + network_name)

            port_range = options.get("port_range")
            if port_range:
                argv.append("--port-range=" + port_range)

            docker_images_cache = options.get("docker_images_cache")
            if docker_images_cache:
                argv.append("--docker-images-cache=" + docker_images_cache)

        argv.append(options["broker_address"])

        status = worker.main(argv)

        logger.info("worker stopped")

        return status
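To make the hand-off concrete, this is roughly the argument vector handle() assembles for beat.core.bcp.worker.main when invoked with -v -v and --docker (host name and paths are illustrative stand-ins for the defaults):

    argv = [
        "--name=myhost",                   # socket.gethostname() by default
        "--prefix=/opt/beat/prefix",       # settings.PREFIX by default
        "--cache=/opt/beat/prefix/cache",  # settings.CACHE_ROOT by default
        "-v",
        "-v",
        "--docker",
        "tcp://localhost:5555",            # broker address always comes last
    ]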
 [buildout]
 extends = common.cfg
+parts += docker

 develop = .

 [sources]
@@ -11,3 +12,9 @@ scripts += protractor webdriver-manager

 [bower]
 base-directory = beat/web
+
+[docker]
+recipe = collective.recipe.cmd
+cmds = ./bin/python -c 'from beat.core.test.utils import pull_docker_test_images as f; f()'
+on_install = true
+on_update = true
@@ -7,6 +7,15 @@ eggs = beat.web
        beat.cmdline
        beat.core
        beat.backend.python
+versions = versions
+
+[versions]
+django = >=1.11,<2.0
+django-rest-swagger = >2.1
+django-guardian = >=1.3
+djangorestframework = >3.7
+django-activity-stream = >= 0.6.5
+django-jsonfield = >= 1.0.1

 [scripts]
 recipe = bob.buildout:scripts
@@ -10,11 +10,10 @@ dependencies:
   - beat-devel=2019.03.07

   # requirements.txt, they are indirectly pinned through the above
-  - beat.core
+  - beat.core=1.8
   - docopt
   - docutils
   - jinja2
-  - graphviz
   - matplotlib
   - psutil
   - psycopg2
[buildout]
extends = common.cfg
always-checkout = force
versions = versions
develop = src/beat.backend.python
          src/beat.core
          src/beat.cmdline
          .

[scripts]
eggs = ${buildout:eggs}
interpreter = python

[sources]
beat.core = git git@gitlab.idiap.ch:beat/beat.core egg=false
beat.cmdline = git git@gitlab.idiap.ch:beat/beat.cmdline egg=false
beat.backend.python = git git@gitlab.idiap.ch:beat/beat.backend.python egg=false
beat.examples = git git@gitlab.idiap.ch:beat/beat.examples egg=false

[bower]
base-directory = ./beat/web/
@@ -440,13 +440,26 @@ Here is how to do it.

       $ ./bin/django runserver

+2. Start the full scheduling setup::
+
+      $ ./bin/django full_scheduling
+
+   This will start all elements of the scheduling/working process. Docker
+   can be used for the worker node by passing the ``--docker`` option.
+
+   Each element composing the scheduling can also be started separately:
+
+   1. Start the broker node::
+
+       $ ./bin/django broker -v 2
+
 2. Start a single scheduling node::

-      $ ./bin/scheduler -vv
+      $ ./bin/django scheduler -v 2

 3. Start a worker for your current node::

-      $ ./bin/worker -vv
+      $ ./bin/django worker -v 2

 By default, the applications are configured to figure out paths and
 configuration options by themselves. You can override some defaults via the
@@ -457,7 +470,7 @@ command line. Just check the output of each of those commands running the
 Mixing and matching
 ===================

-You can mix and match any of the above techniques to run a 3-node system
+You can mix and match any of the above techniques to run a 4-node system
 (all-in-one or discrete) to build a test system to suit your needs. For
 example, it is possible to launch the scheduling activities using the web
 server and the page reload trick while launching the worker process separately
@@ -6,9 +6,9 @@ eggs-directory = .buildout/eggs
 download-cache = .buildout/download-cache
 abi-tag-eggs = true
 versions = versions
-develop = beat.backend.python
-          beat.core
-          beat.cmdline
+develop = src/beat.backend.python
+          src/beat.core
+          src/beat.cmdline
           .

 [versions]
-1.4.2b0
+1.5.0b0
\ No newline at end of file