Commit 85f2ddf8 authored by Flavio TARSETTI's avatar Flavio TARSETTI

Merge branch '94_improve_complex_setup_handling' into 'master'

improve complex setup handling

See merge request !111
parents 83d806eb 24b4b8dc
Pipeline #37580 passed with stages
in 17 minutes and 35 seconds
......@@ -41,22 +41,24 @@ Based on the Majordomo Protocol worker example of the ZMQ Guide.
Usage:
%(prog)s [-v ... | --verbose ...] [ --name=<name>] [--prefix=<path>]
[--cache=<path>] [--docker] [--docker-network=<name>]
[--port-range=<range>] <broker_address>
[--port-range=<range>] [--cache-mount-point=<cache_mount_point>]
<broker_address>
%(prog)s (--help | -h)
%(prog)s (--version | -V)
Options:
-h, --help Show this screen
-V, --version Show version
-v, --verbose Increases the output verbosity level
-n <name>, --name=<name> The unique name of this worker on the database.
This is typically the assigned hostname of the node,
but not necessarily [default: %(hostname)s]
-p, --prefix=<path> Comma-separated list of the prefix(es) of your local data [default: .]
-c, --cache=<path> Cache prefix, otherwise defaults to '<prefix>/cache'
--docker-network=<name> Name of the docker network to use
  --port-range=<range>     Range of ports usable for communication with containers
-h, --help Show this screen
-V, --version Show version
-v, --verbose Increases the output verbosity level
-n <name>, --name=<name> The unique name of this worker on the database.
This is typically the assigned hostname of the node,
but not necessarily [default: %(hostname)s]
-p, --prefix=<path> Comma-separated list of the prefix(es) of your local data [default: .]
-c, --cache=<path> Cache prefix, otherwise defaults to '<prefix>/cache'
--docker-network=<name> Name of the docker network to use
  --port-range=<range>     Range of ports usable for communication with containers
--cache-mount-point=<cache_mount_point> NFS mount point to use for cache setup
"""
......@@ -107,6 +109,7 @@ def run(
docker_network_name=None,
docker_port_range=None,
docker_images_cache=None,
docker_cache_mount_point=None,
):
"""Start the worker
......@@ -202,6 +205,8 @@ def run(
data["network_name"] = docker_network_name
if docker_port_range:
data["port_range"] = docker_port_range
if docker_cache_mount_point:
data["cache_mount_point"] = docker_cache_mount_point
# Start the execution
logger.info("Running '%s' with job id #%s", data["algorithm"], job_id)
......@@ -319,6 +324,14 @@ def main(argv=None):
return 1
logger.info("Using port range %s", docker_port_range)
docker_cache_mount_point = args.get("--cache-mount-point", None)
if docker_cache_mount_point:
if not docker_cache_mount_point.startswith("nfs://"):
raise RuntimeError(
"Invalid nfs mount point: {}".format(docker_cache_mount_point)
)
logger.info("Using volume cache mount point %s", docker_cache_mount_point)
return run(
broker_address,
service_name=args.get("--name"),
......@@ -329,6 +342,7 @@ def main(argv=None):
docker_network_name=docker_network_name,
docker_port_range=docker_port_range,
docker_images_cache=docker_images_cache,
docker_cache_mount_point=docker_cache_mount_point,
)
......
......@@ -279,7 +279,9 @@ class DockerExecutor(RemoteExecutor):
)
return retval
def __setup_io_volumes(self, algorithm_container, configuration):
def __setup_io_volumes(
self, algorithm_container, docker_cache_mount_point, configuration
):
"""Setup all the volumes for input and output files.
Parameters:
......@@ -293,25 +295,35 @@ class DockerExecutor(RemoteExecutor):
file_path = item["path"]
source_path = os.path.join(self.cache, file_path)
if os.path.isfile(source_path):
algorithm_container.add_volume(
source_path, os.path.join(self.CONTAINER_CACHE_PATH, file_path)
)
if docker_cache_mount_point is None:
if os.path.isfile(source_path):
algorithm_container.add_volume(
source_path, os.path.join(self.CONTAINER_CACHE_PATH, file_path)
)
else:
all_files = getAllFilenames(source_path)
for file_list in all_files:
for file_ in file_list:
target_path = file_[len(self.cache) + 1 :]
cache_path = os.path.join(
self.CONTAINER_CACHE_PATH, target_path
)
algorithm_container.add_volume(file_, cache_path)
else:
all_files = getAllFilenames(source_path)
for file_list in all_files:
for file_ in file_list:
target_path = file_[len(self.cache) + 1 :]
cache_path = os.path.join(
self.CONTAINER_CACHE_PATH, target_path
)
algorithm_container.add_volume(file_, cache_path)
input_folder = file_path[: file_path.rfind("/")]
source_folder = os.path.join(docker_cache_mount_point, input_folder)
target_folder = os.path.join(self.CONTAINER_CACHE_PATH, input_folder)
algorithm_container.add_volume(source_folder, target_folder)
def __add_writable_volume(file_path):
output_folder = file_path[: file_path.rfind("/")]
source_folder = os.path.join(self.cache, output_folder)
if not os.path.exists(source_folder):
os.makedirs(source_folder)
if docker_cache_mount_point is not None:
source_folder = os.path.join(docker_cache_mount_point, output_folder)
algorithm_container.add_volume(
source_folder,
os.path.join(self.CONTAINER_CACHE_PATH, output_folder),
......@@ -405,6 +417,8 @@ class DockerExecutor(RemoteExecutor):
port = utils.find_free_port_in_range(int(min_port), int(max_port))
address += ":{}".format(port)
volume_cache_mount_point = self.data.pop("cache_mount_point", None)
self.message_handler = MessageHandler(address, kill_callback=_kill)
# ----- (If necessary) Instantiate the docker container that provide the databases
......@@ -463,7 +477,9 @@ class DockerExecutor(RemoteExecutor):
loop_algorithm_container.add_volume(
configuration_path, self.CONTAINER_PREFIX_PATH
)
self.__setup_io_volumes(loop_algorithm_container, self.data["loop"])
self.__setup_io_volumes(
loop_algorithm_container, volume_cache_mount_point, self.data["loop"]
)
# Start the container
self.host.start(
......@@ -505,7 +521,9 @@ class DockerExecutor(RemoteExecutor):
# Volumes
algorithm_container.add_volume(configuration_path, self.CONTAINER_PREFIX_PATH)
self.__setup_io_volumes(algorithm_container, self.data)
self.__setup_io_volumes(
algorithm_container, volume_cache_mount_point, self.data
)
# Start the container
self.host.start(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment