[flake8]
max-line-length = 80
select = B,C,E,F,W,T4,B9,B950
ignore = E501, W503
ignore = E501, W503, E203
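Context for the newly ignored E203: black formats slices containing complex expressions with a space on either side of the colon, which flake8 reports as E203 ("whitespace before ':'"), so the two tools only coexist when E203 is ignored. A minimal illustration, with a hypothetical function:

def middle(seq, lower, upper, offset):
    # black emits `lower + offset : upper + offset` with spaces around
    # the colon; flake8 flags this as E203 unless the code is ignored.
    return seq[lower + offset : upper + offset]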
......@@ -5,7 +5,6 @@ repos:
rev: stable
hooks:
- id: black
language_version: python3.6
exclude: beat/core/test/prefix/algorithms/errors/syntax_error/1.py|beat/core/test/prefix/databases/invalid/1.py
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.0.0
......
......@@ -31,9 +31,9 @@
.. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===========================================================
Authors of the Biometrics Evaluation and Testing Platform
===========================================================
==============================
Authors of the BEAT Platform
==============================
Andre Anjos <andre.anjos@idiap.ch>
Flavio Tarsetti <flavio.tarsetti@idiap.ch>
......
......@@ -31,18 +31,14 @@
.. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.. image:: https://img.shields.io/badge/docs-stable-yellow.svg
:target: https://www.idiap.ch/software/beat/docs/beat/beat.core/stable/index.html
.. image:: https://img.shields.io/badge/docs-latest-orange.svg
.. image:: https://img.shields.io/badge/docs-available-orange.svg
:target: https://www.idiap.ch/software/beat/docs/beat/beat.core/master/index.html
.. image:: https://gitlab.idiap.ch/beat/beat.core/badges/master/build.svg
.. image:: https://gitlab.idiap.ch/beat/beat.core/badges/master/pipeline.svg
:target: https://gitlab.idiap.ch/beat/beat.core/commits/master
.. image:: https://gitlab.idiap.ch/beat/beat.core/badges/master/coverage.svg
:target: https://gitlab.idiap.ch/beat/beat.core/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/beat/beat.core
.. image:: https://img.shields.io/pypi/v/beat.core.svg
:target: https://pypi.python.org/pypi/beat.core
==========================
......
......@@ -182,6 +182,8 @@ class Algorithm(BackendAlgorithm):
"""
dataformat_klass = dataformat.DataFormat
def __init__(self, prefix, data, dataformat_cache=None, library_cache=None):
super(Algorithm, self).__init__(prefix, data, dataformat_cache, library_cache)
......@@ -303,89 +305,52 @@ class Algorithm(BackendAlgorithm):
"declaration: %s" % (self.name, ", ".join(all_output_names))
)
def _validate_format(self, type_name, group_name, entry_name, dataformat):
if dataformat.errors:
self.errors.append(
"found error validating data format `%s' "
"for %s `%s' on algorithm `%s': %s"
% (
type_name,
group_name,
entry_name,
self.name,
"\n".join(dataformat.errors),
)
)
def _validate_dataformats(self, group, group_name, dataformat_cache):
for name, entry in group[group_name].items():
type_name = entry["type"]
thisformat = self._update_dataformat_cache(type_name, dataformat_cache)
self._validate_format(type_name, group_name, name, thisformat)
def _validate_required_dataformats(self, dataformat_cache):
"""Makes sure we can load all requested formats
"""
for group in self.groups:
for name, input in group["inputs"].items():
if input["type"] in self.dataformats:
continue
if dataformat_cache and input["type"] in dataformat_cache: # reuse
thisformat = dataformat_cache[input["type"]]
else: # load it
thisformat = dataformat.DataFormat(self.prefix, input["type"])
if dataformat_cache is not None: # update it
dataformat_cache[input["type"]] = thisformat
self.dataformats[input["type"]] = thisformat
if thisformat.errors:
self.errors.append(
"found error validating data format `%s' "
"for input `%s' on algorithm `%s': %s"
% (input["type"], name, self.name, "\n".join(thisformat.errors))
)
if "outputs" not in group:
continue
for name, output in group["outputs"].items():
if output["type"] in self.dataformats:
continue
for name, input_ in group["inputs"].items():
self._validate_dataformats(group, "inputs", dataformat_cache)
if dataformat_cache and output["type"] in dataformat_cache: # reuse
thisformat = dataformat_cache[output["type"]]
else: # load it
thisformat = dataformat.DataFormat(self.prefix, output["type"])
if dataformat_cache is not None: # update it
dataformat_cache[output["type"]] = thisformat
if "outputs" in group:
self._validate_dataformats(group, "outputs", dataformat_cache)
self.dataformats[output["type"]] = thisformat
if thisformat.errors:
self.errors.append(
"found error validating data format `%s' "
"for output `%s' on algorithm `%s': %s"
% (
output["type"],
name,
self.name,
"\n".join(thisformat.errors),
)
)
if "loop" in group:
self._validate_dataformats(group, "loop", dataformat_cache)
if self.results:
for name, result in self.results.items():
if result["type"].find("/") != -1:
if result["type"] in self.dataformats:
continue
if dataformat_cache and result["type"] in dataformat_cache: # reuse
thisformat = dataformat_cache[result["type"]]
else:
thisformat = dataformat.DataFormat(self.prefix, result["type"])
if dataformat_cache is not None: # update it
dataformat_cache[result["type"]] = thisformat
self.dataformats[result["type"]] = thisformat
if thisformat.errors:
self.errors.append(
"found error validating data format `%s' "
"for result `%s' on algorithm `%s': %s"
% (
result["type"],
name,
self.name,
"\n".join(thisformat.errors),
)
)
result_type = result["type"]
# results can only contain base types and plots; therefore, only
# process plots
if result_type.find("/") != -1:
thisformat = self._update_dataformat_cache(
result_type, dataformat_cache
)
self._validate_format(result_type, "result", name, thisformat)
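The refactoring above replaces three near-identical load-or-reuse loops with the shared _update_dataformat_cache/_validate_format helpers. A standalone sketch of the caching pattern (all names below are hypothetical):

def update_cache(type_name, cache, loader):
    """Return the object for type_name, loading it at most once."""
    if cache is not None and type_name in cache:  # reuse
        return cache[type_name]
    obj = loader(type_name)  # load it
    if cache is not None:  # update the shared cache
        cache[type_name] = obj
    return obj

# The inputs, outputs and results call sites all share one loader.
cache = {}
assert update_cache("user/format/1", cache, str.upper) == "USER/FORMAT/1"
assert "user/format/1" in cache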
def _convert_parameter_types(self):
"""Converts types to numpy equivalents, checks defaults, ranges and
......
......@@ -41,22 +41,24 @@ Based on the Majordomo Protocol worker example of the ZMQ Guide.
Usage:
%(prog)s [-v ... | --verbose ...] [ --name=<name>] [--prefix=<path>]
[--cache=<path>] [--docker] [--docker-network=<name>]
[--port-range=<range>] <broker_address>
[--port-range=<range>] [--cache-mount-point=<cache_mount_point>]
<broker_address>
%(prog)s (--help | -h)
%(prog)s (--version | -V)
Options:
-h, --help Show this screen
-V, --version Show version
-v, --verbose Increases the output verbosity level
-n <name>, --name=<name> The unique name of this worker on the database.
This is typically the assigned hostname of the node,
but not necessarily [default: %(hostname)s]
-p, --prefix=<path> Comma-separated list of the prefix(es) of your local data [default: .]
-c, --cache=<path> Cache prefix, otherwise defaults to '<prefix>/cache'
--docker-network=<name> Name of the docker network to use
--port-range=<range> Range of ports usable for communication with containers
-h, --help Show this screen
-V, --version Show version
-v, --verbose Increases the output verbosity level
-n <name>, --name=<name> The unique name of this worker on the database.
This is typically the assigned hostname of the node,
but not necessarily [default: %(hostname)s]
-p, --prefix=<path> Comma-separated list of the prefix(es) of your local data [default: .]
-c, --cache=<path> Cache prefix, otherwise defaults to '<prefix>/cache'
--docker-network=<name> Name of the docker network to use
--port-range=<range> Range of ports usable for communication with containers
--cache-mount-point=<cache_mount_point> NFS mount point to use for cache setup
"""
......@@ -107,6 +109,7 @@ def run(
docker_network_name=None,
docker_port_range=None,
docker_images_cache=None,
docker_cache_mount_point=None,
):
"""Start the worker
......@@ -202,6 +205,8 @@ def run(
data["network_name"] = docker_network_name
if docker_port_range:
data["port_range"] = docker_port_range
if docker_cache_mount_point:
data["cache_mount_point"] = docker_cache_mount_point
# Start the execution
logger.info("Running '%s' with job id #%s", data["algorithm"], job_id)
......@@ -248,6 +253,7 @@ def run(
logger.info("The scheduler shut down, we will wait for it")
worker.destroy()
worker.send_to_broker(BCP.BCPW_DISCONNECT)
worker.destroy()
# Cleanup
for execution_process in execution_processes:
......@@ -301,6 +307,8 @@ def main(argv=None):
docker_images_cache = None
docker_network_name = None
docker_port_range = None
docker_cache_mount_point = None
if args["--docker"]:
docker_images_cache = os.path.join(
tempfile.gettempdir(), "beat-docker-images.json"
......@@ -319,6 +327,14 @@ def main(argv=None):
return 1
logger.info("Using port range %s", docker_port_range)
docker_cache_mount_point = args.get("--cache-mount-point", None)
if docker_cache_mount_point:
if not docker_cache_mount_point.startswith("nfs://"):
raise RuntimeError(
"Invalid nfs mount point: {}".format(docker_cache_mount_point)
)
logger.info("Using volume cache mount point %s", docker_cache_mount_point)
return run(
broker_address,
service_name=args.get("--name"),
......@@ -329,6 +345,7 @@ def main(argv=None):
docker_network_name=docker_network_name,
docker_port_range=docker_port_range,
docker_images_cache=docker_images_cache,
docker_cache_mount_point=docker_cache_mount_point,
)
......
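The nfs:// guard added above fails fast on malformed mount points. The same check in isolation (a sketch; the function name is hypothetical):

def check_cache_mount_point(mount_point):
    """Accept only nfs:// URLs, as the worker startup code above does."""
    if mount_point and not mount_point.startswith("nfs://"):
        raise RuntimeError("Invalid nfs mount point: {}".format(mount_point))

check_cache_mount_point("nfs://server/beat-cache")  # accepted
try:
    check_cache_mount_point("/local/path")
except RuntimeError as error:
    print(error)  # Invalid nfs mount point: /local/path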
......@@ -95,6 +95,7 @@ class BeatComputationBroker(object):
def __init__(self, verbose=False):
"""Initialize broker state."""
self.verbose = verbose
self.continue_ = True
self.services = {}
......@@ -163,14 +164,17 @@ class BeatComputationBroker(object):
def process_client(self, sender, msg):
"""Process a request coming from a client."""
# Service name + body
assert len(msg) >= 2 # nosec
service = msg.pop(0)
# Set reply return address to client sender
msg = [sender, b""] + msg
self.dispatch(self.require_service(service), msg)
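On the reply envelope built above: ZeroMQ ROUTER sockets prepend the sender identity frame to every incoming message, so the broker re-creates that envelope by hand when routing the request onward. A tiny demonstration of the frame layout:

# ROUTER sockets deliver [identity, empty delimiter, body...]; the
# broker rebuilds that envelope so the reply finds its way back.
sender = b"client-1"
msg = [b"service", b"payload"]
service = msg.pop(0)
reply = [sender, b""] + msg
assert service == b"service"
assert reply == [b"client-1", b"", b"payload"]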
def process_worker(self, sender, msg):
"""Process message sent to us by a worker."""
# At least, command
assert len(msg) >= 1 # nosec
......@@ -222,12 +226,14 @@ class BeatComputationBroker(object):
def delete_worker(self, worker, disconnect):
"""Deletes worker from all data structures, and deletes worker."""
assert worker is not None # nosec
if disconnect:
self.send_to_worker(worker, BCP.BCPW_DISCONNECT, None, None)
if worker.service is not None:
worker.service.waiting.remove(worker)
if worker in worker.service.waiting:
worker.service.waiting.remove(worker)
on_disconnection = self.callbacks.get("on_disconnection", None)
if on_disconnection:
......@@ -236,8 +242,12 @@ class BeatComputationBroker(object):
if worker.identity in self.workers:
self.workers.pop(worker.identity)
if worker in self.waiting:
self.waiting.pop(self.waiting.index(worker))
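A side note on the idiom above: waiting.pop(waiting.index(worker)) has the same effect as waiting.remove(worker), and both raise ValueError for an absent worker, which is exactly what the new membership guards prevent:

waiting = ["w1", "w2"]
waiting.pop(waiting.index("w1"))  # equivalent to waiting.remove("w1")
assert waiting == ["w2"]

if "w1" in waiting:  # the guard added above: no ValueError possible
    waiting.remove("w1")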
def require_worker(self, address):
"""Finds the worker (creates if necessary)."""
assert address is not None # nosec
identity = hexlify(address)
worker = self.workers.get(identity)
......@@ -251,6 +261,7 @@ class BeatComputationBroker(object):
def require_service(self, name):
"""Locates the service (creates if necessary)."""
assert name is not None # nosec
service = self.services.get(name)
if service is None:
......@@ -264,11 +275,13 @@ class BeatComputationBroker(object):
We use a single socket for both clients and workers.
"""
self.socket.bind(endpoint)
logger.info("I: BCP broker/0.0.1 is active at %s", endpoint)
def send_heartbeats(self):
"""Send heartbeats to idle workers if it's time"""
if time.time() > self.heartbeat_at:
for worker in self.waiting:
self.send_to_worker(worker, BCP.BCPW_HEARTBEAT, None, None)
......@@ -276,16 +289,16 @@ class BeatComputationBroker(object):
self.heartbeat_at = time.time() + 1e-3 * self.HEARTBEAT_INTERVAL
def purge_workers(self):
"""Look for & kill expired workers.
"""
"""Look for & kill expired workers."""
for item in self.waiting:
if item.expiry < time.time():
logger.info("I: deleting expired worker: %s", item.identity)
self.delete_worker(item, False)
self.waiting.pop(self.waiting.index(item))
def worker_waiting(self, worker):
"""This worker is now waiting for work."""
# Queue to broker and service waiting lists
if worker not in self.waiting:
......@@ -298,10 +311,14 @@ class BeatComputationBroker(object):
def dispatch(self, service, msg):
"""Dispatch requests to waiting workers as possible"""
assert service is not None # nosec
if msg is not None: # Queue message if any
service.requests.append(msg)
self.purge_workers()
while service.waiting and service.requests:
msg = service.requests.pop(0)
worker = service.waiting.pop(0)
......
......@@ -85,7 +85,7 @@ class BeatComputationClient(object):
request = [b"", BCP.BCPC_CLIENT, service] + request
if self.verbose:
logger.warn("I: send request to '%s' service: ", service)
logger.warning("I: send request to '%s' service: ", service)
dump(request)
self.client.send_multipart(request)
......
......@@ -210,7 +210,7 @@ class BeatComputationWorker(object):
self.liveness -= 1
if self.liveness == 0:
if self.verbose:
logger.warn("W: disconnected from broker - retrying…")
logger.warning("W: disconnected from broker - retrying…")
try:
time.sleep(1e-3 * self.reconnect)
except KeyboardInterrupt:
......@@ -222,7 +222,7 @@ class BeatComputationWorker(object):
self.send_to_broker(BCP.BCPW_HEARTBEAT)
self.heartbeat_at = time.time() + 1e-3 * self.heartbeat
logger.warn("W: interrupt received, killing worker…")
logger.warning("W: interrupt received, killing worker…")
return None
def destroy(self):
......
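The logger.warn to logger.warning substitutions throughout this commit track the standard library: Logger.warn has been a deprecated alias since Python 3.3 and emits a DeprecationWarning at call time. A quick check:

import logging
import warnings

logger = logging.getLogger("demo")
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")  # deprecated alias
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
logger.warning("preferred spelling")  # no warning emitted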
......@@ -52,6 +52,8 @@ import six
from . import schema
from .dataformat import DataFormat
from .protocoltemplate import ProtocolTemplate
from . import prototypes
from beat.backend.python.database import Storage
......@@ -61,17 +63,27 @@ from beat.backend.python.protocoltemplate import Storage as PTStorage
def get_first_procotol_template(prefix):
pt_root_folder = os.path.join(prefix, PTStorage.asset_folder)
pts_available = os.listdir(pt_root_folder)
pts_available = sorted(os.listdir(pt_root_folder))
if not pts_available:
raise RuntimeError("Invalid prefix content, no protocol template available")
procotol_template_folder = pts_available[0]
protocol_template_versions = sorted(
os.listdir(os.path.join(pt_root_folder, procotol_template_folder))
)
version = protocol_template_versions[-1].split(".")[0]
return "{}/{}".format(procotol_template_folder, version)
selected_protocol_template = None
for procotol_template_folder in pts_available:
protocol_template_versions = sorted(
os.listdir(os.path.join(pt_root_folder, procotol_template_folder))
)
version = protocol_template_versions[-1].split(".")[0]
protocol_template_name = "{}/{}".format(procotol_template_folder, version)
protocol_template = ProtocolTemplate(prefix, protocol_template_name)
if protocol_template.valid:
selected_protocol_template = protocol_template_name
break
if selected_protocol_template is None:
raise RuntimeError("No valid protocol template found")
return selected_protocol_template
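The rewrite above no longer returns the first directory entry blindly: it sorts the listing for deterministic results and keeps scanning until a template actually validates. The same select-first-valid pattern in isolation (the predicate is hypothetical):

def first_valid(candidates, is_valid):
    """Return the first sorted candidate accepted by is_valid."""
    for candidate in sorted(candidates):
        if is_valid(candidate):
            return candidate
    raise RuntimeError("No valid protocol template found")

assert first_valid(["b", "a"], lambda name: name == "b") == "b"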
class Database(BackendDatabase):
......
......@@ -43,6 +43,7 @@ Docker helper classes
"""
import ast
import os
import simplejson as json
import socket
......@@ -52,8 +53,12 @@ import docker
import subprocess as sp # nosec
import logging
from packaging import version
from beat.core import stats
from .utils import build_env_name
logger = logging.getLogger(__name__)
......@@ -85,7 +90,13 @@ class Host(object):
(
self.processing_environments,
self.db_environments,
) = self._discover_environments()
) = self._discover_environments_using_labels()
if not self.db_environments and not self.processing_environments:
(
self.processing_environments,
self.db_environments,
) = self._discover_environments_using_describe()
# (If necessary) Save the known infos about the images
if self.images_cache_filename is not None:
......@@ -123,6 +134,12 @@ class Host(object):
return attrs["image"]
def dbenv2docker(self, key):
"""Returns a nice docker image name given a BEAT database environment key"""
attrs = self.db_environments[key]
return attrs["image"]
def teardown(self):
for container in self.containers:
self.rm(container)
......@@ -157,7 +174,7 @@ class Host(object):
s.connect(("8.8.8.8", 1)) # connecting to a UDP address doesn't send packets
return s.getsockname()[0]
def _discover_environments(self):
def _discover_environments_using_describe(self):
"""Returns a dictionary containing information about docker environments
Raises:
......@@ -184,14 +201,14 @@ class Host(object):
Host.images_cache[image] = infos
return infos
except Exception as e:
logger.warn(
logger.warning(
"Ignoring potential environment at `%s' since "
"`describe' output cannot be parsed: %s",
image,
str(e),
)
else:
logger.warn(
logger.warning(
"Execution failed with status {}: \n"
"stdout: '{}'\n"
"stderr: '{}'".format(status, stdout, stderr)
......@@ -270,7 +287,7 @@ class Host(object):
)
return False
else:
logger.warn(
logger.warning(
"Overriding **existing** environment '%s' image "
"with '%s'. To avoid this warning make "
"sure your docker images do not contain environments "
......@@ -299,10 +316,12 @@ class Host(object):
for image in images:
# Call the "describe" application on each existing image
description = _describe(image)
if not description:
logger.debug("Description not found for", image)
continue
key = description["name"] + " (" + description["version"] + ")"
key = build_env_name(description)
if "databases" in description:
if (key in db_environments) and not _must_replace(
......@@ -331,6 +350,115 @@ class Host(object):
return (environments, db_environments)
def _discover_environments_using_labels(self):
"""Search BEAT runtime environments using docker labels"""
def _must_replace(key, image, environments):
environment = environments[key]
if environment["image"] not in image.tags:
logger.warning(
"Different images providing the same environment: {} VS {}".format(
environment["image"], image.tags
)
)
if self.raise_on_errors:
raise RuntimeError(
"Environments at '%s' and '%s' have the "
"same name ('%s'). Distinct environments must be "
"uniquely named. Fix this and re-start."
% (image.tags[0], environments[key]["image"], key)
)
else:
logger.debug("Keeping more recent")
current_version = "{}{}".format(
environment["version"], environment["revision"]
)
new_version = "{}{}".format(
image.labels["beat.env.version"], image.labels["beat.env.revision"]
)
current_version = version.parse(current_version)
new_version = version.parse(new_version)
return new_version > current_version
def _parse_image_info(image):
labels = image.labels
data = {
"image": image.tags[0],
"name": labels["beat.env.name"],
"version": labels["beat.env.version"],
"revision": labels["beat.env.revision"],
}
database_list = labels.get("beat.env.databases")
if database_list:
data["databases"] = ast.literal_eval(database_list)
capabilities = labels.get("beat.env.capabilities")
if capabilities:
data["capabilities"] = ast.literal_eval(capabilities)
return data
def _process_image_list(image_list):
environments = {}
for image in image_list:
if not len(image.tags):
logger.warning("Untagged image, skipping")
continue
image_info = _parse_image_info(image)
key = build_env_name(image_info)
image_name = image_info["image"]
if key in environments:
if _must_replace(key, image, environments):
environments[key] = image_info
logger.info("Updated '%s' -> '%s'", key, image_name)
else:
environments[key] = image_info
Host.images_cache[image_name] = environments[key]
logger.info("Registered '%s' -> '%s'", key, image_name)
return environments
client = docker.from_env()
try:
databases = client.images.list(
filters={"label": ["beat.env.type=database"]}
)
except Exception as e:
if self.raise_on_errors:
raise
else:
logger.error("Docker error: {}".format(e))
return {}, {}
else:
db_environments = _process_image_list(databases)
try:
executors = client.images.list(
filters={"label": ["beat.env.type=execution"]}
)
except Exception as e:
if self.raise_on_errors:
raise
else:
logger.error("Docker error: {}".format(e))
return {}, {}
else:
environments = _process_image_list(executors)
logger.debug(
"Found %d environments and %d database environments",
len(environments),
len(db_environments),
)
return environments, db_environments
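For reference, the label-based discovery above relies on the docker SDK's image filters. A minimal sketch of the same query (assumes a running Docker daemon and the docker package):

import docker

client = docker.from_env()
# BEAT images advertise their role through labels; this mirrors the
# filter used by _discover_environments_using_labels() above.
for image in client.images.list(filters={"label": ["beat.env.type=database"]}):
    if image.tags:  # untagged images are skipped, as above
        print(image.tags[0], image.labels.get("beat.env.name"))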
def create_container(self, image, command):
if image in self: # Replace by a real image name
......@@ -360,7 +488,7 @@ class Host(object):
limitation is not put in place.
"""
cmd = ["docker", "run", "-tid"]
cmd = ["docker", "run", "--tty", "--interactive", "--detach", "--read-only"]
network = container.network
if network:
......@@ -387,7 +515,8 @@ class Host(object):
if ("capabilities" in image_infos) and (
"gpu" in image_infos["capabilities"]
):
cmd.append("--runtime=nvidia")
if os.path.exists("/proc/driver/nvidia"):
cmd.append("--gpus=all")
if virtual_memory_in_megabytes:
# For this to work properly, memory swap limitation has to be
......@@ -541,7 +670,7 @@ class Host(object):
status = self.status(container)
if status not in ("created", "exited"):
logger.warn(
logger.warning(
"Killing container '%s' which is on state '%s'", container.id, status
)
self._exec(["docker", "container", "stop", container.id])
......
......@@ -230,6 +230,17 @@ class BaseExecutor(object):
"The input '%s' doesn't exist in the loop algorithm" % name
)
if len(loop["outputs"]) != len(self.loop_algorithm.output_map):
self.errors.append(
"The number of outputs of the loop algorithm doesn't correspond"
)
for name in self.data["outputs"].keys():
if name not in self.algorithm.output_map.keys():
self.errors.append(
"The output '%s' doesn't exist in the loop algorithm" % name
)
# Check that the mapping is coherent
if len(self.data["inputs"]) != len(self.algorithm.input_map):
self.errors.append(
......
......@@ -188,14 +188,24 @@ class LocalExecutor(BaseExecutor):
self.zmq_context = None
def __cleanup(self):
def __cleanup(self, early=False):
if self.loop_executor:
if early:
self.loop_socket.send_string("don")
self.loop_socket.recv() # ack
self.loop_executor.wait()
self.loop_executor.close()
for handler in [self.message_handler, self.loop_message_handler]:
if handler:
handler.kill()
handler.join()
try:
handler.join()
except RuntimeError:
# The handler was not started
pass
handler.destroy()
for socket in [self.executor_socket, self.loop_socket]:
......@@ -310,15 +320,35 @@ class LocalExecutor(BaseExecutor):
cache_root=self.cache,
)
retval = self.loop_executor.setup()
try:
retval = self.loop_executor.setup()
except Exception as e:
message = _process_exception(e, self.prefix, "algorithms")
retval = False
else:
message = None
if not retval:
self.__cleanup()
raise RuntimeError("Loop algorithm setup failed")
error = "Loop algorithm {} setup failed".format(self.algorithm.name)
if message:
error += ": {}".format(message)
raise RuntimeError(error)
try:
prepared = self.loop_executor.prepare()
except Exception as e:
message = _process_exception(e, self.prefix, "algorithms")
prepared = False
else:
message = None
prepared = self.loop_executor.prepare()
if not prepared:
self.__cleanup()
raise RuntimeError("Loop algorithm prepare failed")
error = "Loop algorithm {} prepare failed".format(self.algorithm.name)
if message:
error += ": {}".format(message)
raise RuntimeError(error)
self.loop_executor.process()
......@@ -330,28 +360,50 @@ class LocalExecutor(BaseExecutor):
loop_socket=self.loop_socket,
)
retval = self.executor.setup()
if not retval:
self.__cleanup()
raise RuntimeError("Algorithm setup failed")
try:
status = self.executor.setup()
except Exception as e:
message = _process_exception(e, self.prefix, "algorithms")
status = 0
else:
message = None
if not status:
self.__cleanup(early=True)
error = "Algorithm {} setup failed".format(self.algorithm.name)
if message:
error += ": {}".format(message)
raise RuntimeError(error)
try:
prepared = self.executor.prepare()
except Exception as e:
message = _process_exception(e, self.prefix, "algorithms")
prepared = 0
else:
message = None
prepared = self.executor.prepare()
if not prepared:
self.__cleanup()
raise RuntimeError("Algorithm prepare failed")
self.__cleanup(early=True)
error = "Algorithm {} prepare failed".format(self.algorithm.name)
if message:
error += ": {}".format(message)
raise RuntimeError(error)
_start = time.time()
try:
processed = self.executor.process()
except Exception as e:
message = _process_exception(e, self.prefix, "databases")
message = _process_exception(e, self.prefix, "algorithms")
self.__cleanup()
return _create_result(1, message)
if not processed:
self.__cleanup()
raise RuntimeError("Algorithm process failed")
raise RuntimeError(
"Algorithm {} process failed".format(self.algorithm.name)
)
proc_time = time.time() - _start
......
......@@ -42,18 +42,12 @@ remote
Execution utilities
"""
import logging
from .base import BaseExecutor
from beat.backend.python.helpers import create_inputs_from_configuration
from beat.backend.python.helpers import create_outputs_from_configuration
logger = logging.getLogger(__name__)
class RemoteExecutor(BaseExecutor):
"""Base class for Executors that communicate with a message handler
......
......@@ -123,6 +123,12 @@ class SubprocessExecutor(RemoteExecutor):
guarantee that the cache is refreshed as appropriate in case the
underlying libraries change.
custom_root_folders (dict): A dictionary mapping database names to
their location on disk
ip_address (str): IP address of the machine to connect to for the database
execution and message handlers.
python_path (str): Path to the python executable of the environment to use
for experiment execution.
Attributes:
......@@ -172,8 +178,8 @@ class SubprocessExecutor(RemoteExecutor):
library_cache=None,
custom_root_folders=None,
ip_address="127.0.0.1",
python_path=None,
):
super(SubprocessExecutor, self).__init__(
prefix,
data,
......@@ -186,14 +192,30 @@ class SubprocessExecutor(RemoteExecutor):
custom_root_folders=custom_root_folders,
)
# We need three apps to run this function: execute, loop_execute and databases_provider
self.EXECUTE_BIN = _which(os.path.join(os.path.dirname(sys.argv[0]), "execute"))
self.LOOP_EXECUTE_BIN = _which(
os.path.join(os.path.dirname(sys.argv[0]), "loop_execute")
)
self.DBPROVIDER_BIN = _which(
os.path.join(os.path.dirname(sys.argv[0]), "databases_provider")
)
if python_path is None:
base_path = os.path.dirname(sys.argv[0])
# We need three apps to run this function: execute, loop_execute and databases_provider
self.EXECUTE_BIN = _which(os.path.join(base_path, "execute"))
self.LOOP_EXECUTE_BIN = _which(os.path.join(base_path, "loop_execute"))
self.DBPROVIDER_BIN = _which(os.path.join(base_path, "databases_provider"))
else:
base_path = os.path.dirname(python_path)
self.EXECUTE_BIN = os.path.join(base_path, "execute")
self.LOOP_EXECUTE_BIN = os.path.join(base_path, "loop_execute")
self.DBPROVIDER_BIN = os.path.join(base_path, "databases_provider")
if any(
[
not os.path.exists(executable)
for executable in [
self.EXECUTE_BIN,
self.LOOP_EXECUTE_BIN,
self.DBPROVIDER_BIN,
]
]
):
raise RuntimeError("Invalid environment")
def __create_db_process(self, configuration_name=None):
databases_process = None
......@@ -384,7 +406,9 @@ class SubprocessExecutor(RemoteExecutor):
)
if self.loop_algorithm is not None:
cmd.append("tcp://" + self.ip_address + (":%d" % loop_algorithm_port))
cmd.append(
"--loop=tcp://" + self.ip_address + (":%d" % loop_algorithm_port)
)
if logger.getEffectiveLevel() <= logging.DEBUG:
cmd.insert(1, "--debug")
......
......@@ -67,7 +67,9 @@ class Storage(utils.Storage):
def __init__(self, prefix, name):
if name.count("/") != 2:
raise RuntimeError(f"invalid plotterparameter name: {name}")
raise RuntimeError(
"invalid plotterparameter name: {name}".format(name=name)
)
self.username, self.name, self.version = name.split("/")
self.fullname = name
......@@ -170,7 +172,9 @@ class Plotterparameter(object):
self.storage = Storage(self.prefix, self._name)
if not self.storage.json.exists():
self.errors.append(
f"Plotterparameter declaration file not found: {data}"
"Plotterparameter declaration file not found: {data}".format(
data=data
)
)
return
data = self.storage.json.path # loads data from JSON declaration
......@@ -219,12 +223,16 @@ class Plotterparameter(object):
self.plotter.clean_parameter(key, val)
except KeyError:
self.errors.append(
f"'{key}' isn't a parameter for plotter {self.plotter.name}"
"'{key}' isn't a parameter for plotter {name}".format(
key=key, name=self.plotter.name
)
)
return
except ValueError:
self.errors.append(
f"'{val}' is invalid for parameter {key} of plotter {self.plotter.name}"
"'{val}' is invalid for parameter {key} of plotter {name}".format(
val=val, key=key, name=self.plotter.name
)
)
return
......
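These f-string to str.format rewrites presumably target interpreters older than 3.6: f-strings are a Python 3.6+ syntax feature, so they fail at parse time rather than at run time. The two forms side by side:

key, name = "xrange", "scatter"
message = "'{key}' isn't a parameter for plotter {name}".format(key=key, name=name)
# f"'{key}' isn't a parameter for plotter {name}"  # SyntaxError before 3.6
assert message == "'xrange' isn't a parameter for plotter scatter"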
......@@ -7,15 +7,15 @@ from beat.backend.python.database import View
class FooView(View):
def setup(
self,
root_folder,
outputs,
parameters,
force_start_index=None,
force_end_index=None,
self, root_folder, outputs, parameters, start_index=None, end_index=None,
):
"""Initializes the database"""
# DO NOT REMOVE: this is required to setup the view internal state
super().setup(root_folder, outputs, parameters, start_index, end_index)
# Add custom setup code here
return True
def index(self, root_folder, parameters):
......
......@@ -55,7 +55,6 @@
"language": { "$ref": "../common/1.json#/definitions/language" },
"description": { "$ref": "../common/1.json#/definitions/description" },
"groups": { "$ref": "common.json#/definitions/analyzer_groups" },
"parameters": { "$ref": "common.json#/definitions/parameters" },
"results": { "$ref": "common.json#/definitions/results" },
"uses": { "$ref": "../common/1.json#/definitions/uses" },
"schema_version": { "$ref": "common.json#/definitions/schema_version" },
......
......@@ -6,8 +6,8 @@
"oneOf": [
{ "$ref": "2.json#/definitions/block" },
{ "$ref": "2.json#/definitions/analyzer" },
{ "$ref": "#/definitions/loop_user" },
{ "$ref": "#/definitions/loop" }
{ "$ref": "#/definitions/loop_evaluator" },
{ "$ref": "#/definitions/loop_processor" }
],
"definitions": {
......@@ -71,11 +71,13 @@
"properties": {
"name": { "type": "string" },
"inputs": { "$ref": "common.json#/definitions/endpoints" },
"outputs": { "$ref": "common.json#/definitions/endpoints" },
"loop": { "$ref": "#/definitions/loop_io_group" }
},
"required": [
"inputs",
"outputs",
"loop"
],
......@@ -99,7 +101,7 @@
},
"loop_user": {
"loop_processor": {
"type": "object",
......@@ -113,9 +115,8 @@
"schema_version": { "$ref": "common.json#/definitions/schema_version" },
"api_version": { "$ref": "common.json#/definitions/api_version" },
"type": {
"$comment": "Change enum to const when tools allow v6 json schema",
"type": "string",
"enum": ["loop_user"]
"enum": ["sequential_loop_processor", "autonomous_loop_processor"]
}
},
......@@ -129,7 +130,7 @@
},
"loop": {
"loop_evaluator": {
"type": "object",
......@@ -142,9 +143,8 @@
"schema_version": { "$ref": "common.json#/definitions/schema_version" },
"api_version": { "$ref": "common.json#/definitions/api_version" },
"type": {
"$comment": "Change enum to const when tools allow v6 json schema",
"type": "string",
"enum": ["loop"]
"enum": ["sequential_loop_evaluator", "autonomous_loop_evaluator"]
}
},
......
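The schema change above splits the old loop_user/loop types into processor and evaluator variants, each with sequential and autonomous flavors. A sketch validating a declaration against the new enum with the jsonschema package (schema fragment abridged):

import jsonschema

type_schema = {
    "type": "string",
    "enum": ["sequential_loop_processor", "autonomous_loop_processor"],
}
jsonschema.validate("sequential_loop_processor", type_schema)  # accepted
try:
    jsonschema.validate("loop_user", type_schema)  # retired type name
except jsonschema.ValidationError as error:
    print(error.message)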
......@@ -39,7 +39,8 @@
},
"description": {
"type": "string"
"type": "string",
"maxLength": 100
},
"language": {
......
......@@ -19,6 +19,19 @@
"items": { "$ref": "#/definitions/protocol" }
},
"environment": {
"type": "object",
"properties": {
"name": { "type": "string" },
"version": { "type": "string" }
},
"required": [
"name",
"version"
],
"additionalProperties": false
},
"description": { "$ref": "../common/1.json#/definitions/description" },
"schema_version": { "$ref": "../common/1.json#/definitions/version" }
......
......@@ -21,6 +21,19 @@
"description": { "$ref": "../common/1.json#/definitions/description" },
"environment": {
"type": "object",
"properties": {
"name": { "type": "string" },
"version": { "type": "string" }
},
"required": [
"name",
"version"
],
"additionalProperties": false
},
"schema_version": { "const": 2 }
},
......
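Both hunks above add the same environment object: an explicit {name, version} pair with no extra keys allowed. Checking that shape with jsonschema (a sketch):

import jsonschema

environment = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "version": {"type": "string"}},
    "required": ["name", "version"],
    "additionalProperties": False,
}
jsonschema.validate({"name": "Python", "version": "3.6"}, environment)  # ok
try:
    jsonschema.validate({"name": "Python"}, environment)  # version missing
except jsonschema.ValidationError as error:
    print(error.message)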
......@@ -52,13 +52,28 @@
"loop": {
"type": "object",
"properties": {
"algorithm": { "$ref": "../common/1.json#/definitions/reference" },
"parameters": { "$ref": "common.json#/definitions/parameter_set" },
"inputs": { "$ref": "common.json#/definitions/connection_map" },
"processor_algorithm": { "$ref": "../common/1.json#/definitions/reference" },
"processor_parameters": { "$ref": "common.json#/definitions/parameter_set" },
"processor_inputs": { "$ref": "common.json#/definitions/connection_map" },
"processor_outputs": { "$ref": "common.json#/definitions/connection_map" },
"queue": { "$ref": "common.json#/definitions/queue" },
"environment": { "$ref": "common.json#/definitions/environment" },
"nb_slots": { "$ref": "common.json#/definitions/slots" }
}
"processor_environment": { "$ref": "common.json#/definitions/environment" },
"nb_slots": { "$ref": "common.json#/definitions/slots" },
"evaluator_algorithm": { "$ref": "../common/1.json#/definitions/reference" },
"evaluator_parameters": { "$ref": "common.json#/definitions/parameter_set" },
"evaluator_inputs": { "$ref": "common.json#/definitions/connection_map" },
"evaluator_outputs": { "$ref": "common.json#/definitions/connection_map" },
"evaluator_environment": { "$ref": "common.json#/definitions/environment" }
},
"required": [
"processor_algorithm",
"processor_inputs",
"processor_outputs",
"evaluator_algorithm",
"evaluator_inputs",
"evaluator_outputs"
],
"additionalProperties": false
}
}
......
......@@ -74,7 +74,7 @@
"versioned_database": {
"type": "string",
"pattern": "^[a-zA-Z_][a-zA-Z0-9_]*/[0-9]+$"
"pattern": "^[a-zA-Z_][a-zA-Z0-9_-]+[a-zA-Z0-9]+/[0-9]+$"
},
"dataset": {
......
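The tightened versioned_database pattern above admits hyphens inside a database name but requires at least three characters and an alphanumeric character just before the version. Probing it directly (examples are illustrative):

import re

pattern = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_-]+[a-zA-Z0-9]+/[0-9]+$")
assert pattern.match("atnt-faces/2")  # hyphens now allowed inside the name
assert not pattern.match("atnt-/2")   # but not immediately before the version
assert not pattern.match("db/1")      # very short names no longer match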
......@@ -30,12 +30,6 @@
"$ref": "common.json#/definitions/connections"
},
"loop_connections": {
"type": "array",
"uniqueItems": true,
"items": { "$ref": "#/definitions/loop_connection" }
},
"representation": {
"type": "object",
......@@ -45,9 +39,6 @@
"connections": {
"$ref": "common.json#/definitions/representation/connection_list"
},
"loop_connections": {
"$ref": "common.json#/definitions/representation/connection_list"
},
"blocks": {
"$ref": "common.json#/definitions/representation/block_list"
},
......@@ -74,6 +65,7 @@
"required": [
"datasets",
"blocks",
"loops",
"analyzers",
"connections",
"representation"
......@@ -90,27 +82,40 @@
"synchronized_channel": {
"$ref": "../database/1.json#/definitions/identifier"
},
"inputs": {
"processor_inputs": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"items": { "$ref": "common.json#/definitions/identifier" }
},
"processor_outputs": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"items": { "$ref": "common.json#/definitions/identifier" }
},
"evaluator_inputs": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"items": { "$ref": "common.json#/definitions/identifier" }
},
"evaluator_outputs": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"items": { "$ref": "common.json#/definitions/identifier" }
}
},
"required": ["name", "synchronized_channel", "inputs"],
"required": [
"name",
"synchronized_channel",
"processor_inputs",
"processor_outputs",
"evaluator_inputs",
"evaluator_outputs"
],