Commit 436b5a42 authored by Samuel GAIST's avatar Samuel GAIST
Browse files

[utils][commands][install] Implement asset subset installation

This refactoring makes it possible to install
only a subset of assets. This will allow fully
installing a single asset.

Note that dependencies must be provided correctly.
For example, if Protocol Templates are to be installed
on an empty database, the dataformats must be listed
along with them, e.g.:
django install --assets dataformats protocoltemplates
parent 0b0238dc
......@@ -126,7 +126,11 @@ class BackendUtilitiesMixin(object):
for contribution in ["system", "test"]:
install.install_contributions(
source_prefix, contribution, template_data, db_root_file_path
source_prefix=source_prefix,
project=contribution,
assets=list(install.ASSET_UPLOADER_MAP.keys()),
template_data=template_data,
db_root_file=db_root_file_path,
)
if not os.path.exists(settings.CACHE_ROOT):
......
......@@ -919,6 +919,18 @@ def upload_plotter(prefix, name, data):
return True
# Registry mapping each asset type to its upload function. The insertion
# order is significant: it reflects the dependency order in which assets
# should be installed (e.g. dataformats before the assets that use them).
ASSET_UPLOADER_MAP = collections.OrderedDict(
    [
        ("dataformats", upload_dataformat),
        ("protocoltemplates", upload_protocoltemplate),
        ("databases", upload_database),
        ("toolchains", upload_toolchain),
        ("libraries", upload_library),
        ("algorithms", upload_algorithm),
        ("plotters", upload_plotter),
        ("experiments", upload_experiment),
    ]
)
def upload_dispatcher(prefix, project, type, name, data):
"""Uploads the experiment to the running platform
......@@ -940,27 +952,18 @@ def upload_dispatcher(prefix, project, type, name, data):
Returns:
bool: Indicates if the operation was succesful
bool: Indicates if the operation was successful
"""
base_subdir = os.path.join(prefix, project)
valid_types = {
"dataformats": upload_dataformat,
"protocoltemplates": upload_protocoltemplate,
"databases": upload_database,
"libraries": upload_library,
"algorithms": upload_algorithm,
"toolchains": upload_toolchain,
"experiments": upload_experiment,
"plotters": upload_plotter,
}
if type not in valid_types:
raise KeyError("Type must be one of `%s'" % ", ".join(valid_types.keys()))
if type not in ASSET_UPLOADER_MAP:
raise KeyError(
"Type must be one of `%s'" % ", ".join(ASSET_UPLOADER_MAP.keys())
)
upload_function = valid_types[type]
upload_function = ASSET_UPLOADER_MAP[type]
try:
......@@ -979,6 +982,9 @@ def upload_dispatcher(prefix, project, type, name, data):
def link_contribution_versions(klass):
"""Link object versions together"""
if not hasattr(klass, "version"):
return
for obj in klass.objects.all():
if obj.version > 1:
# search for similar
......@@ -1002,7 +1008,9 @@ def link_contribution_versions(klass):
pass # ignores
def _upload_with_retries(source_prefix, project, asset, template_data):
    """Upload every object of ``asset``, retrying to resolve dependencies.

    Some asset types (dataformats, libraries) have dependencies between
    objects of the same type. Recipe: try to upload all of them one after
    the other; objects that fail are retried on the next pass, until all
    have been uploaded or a full pass makes no progress.

    Parameters:

        source_prefix (str): Path to the folder containing the contributions.

        project (str): The project within ``source_prefix`` to install
            objects from.

        asset (str): The asset type being installed (a key of
            ``ASSET_UPLOADER_MAP``).

        template_data (dict): Standard template data used to complete
            template objects installed on the project.
    """

    pending = list_objects(source_prefix, project, asset, "*.json")
    previous = []

    # Stop when everything uploaded, or when a whole pass fixed nothing
    # (len unchanged means the remaining objects can never succeed).
    while pending and len(previous) != len(pending):
        previous = pending
        pending = []
        for name in previous:
            if not upload_dispatcher(
                source_prefix, project, asset, name, template_data
            ):
                pending.append(name)


def install_contributions(
    source_prefix, project, assets, template_data, db_root_file=None
):
    """Installs all contributions for a given project

    Parameters:

        source_prefix (str): Path to the folder containing the contributions.

        project (str): The project within the ``source_prefix`` where to install
            objects from.

        assets (list): The list of assets to install; this allows installing
            only a reduced number of them, for testing purposes, or on
            production systems to only add a new type of asset. Order matters:
            an asset's dependencies (e.g. dataformats) must be listed before
            the assets that use them.

        template_data (dict): A dictionary containing standard template data for
            completing template objects installed on the project.

        db_root_file (str): Optional path to a database root file; when given,
            it provides the ``root_folder`` for the databases listed in it.
    """

    # Model imports are loop-invariant: hoist them (and the lookup table)
    # out of the per-asset loop below.
    from ....algorithms.models import Algorithm
    from ....databases.models import Database
    from ....dataformats.models import DataFormat
    from ....experiments.models import Experiment
    from ....libraries.models import Library
    from ....plotters.models import Plotter
    from ....protocoltemplates.models import ProtocolTemplate
    from ....toolchains.models import Toolchain

    klass_map = {
        "dataformats": DataFormat,
        "protocoltemplates": ProtocolTemplate,
        "databases": Database,
        "libraries": Library,
        "algorithms": Algorithm,
        "toolchains": Toolchain,
        "experiments": Experiment,
        "plotters": Plotter,
    }

    for asset in assets:
        if asset in ("dataformats", "libraries"):
            # Both asset types have intra-asset dependencies and share the
            # same retry recipe.
            _upload_with_retries(source_prefix, project, asset, template_data)
        elif asset == "databases":
            # Reads the database root file, if provided
            db_root = {}
            if db_root_file:
                db_root.update(load_database_folders(db_root_file))

            for object_ in list_objects(source_prefix, project, asset, "*.json"):
                if object_ in db_root:
                    # NOTE(review): this mutates the caller's template_data and
                    # a previously-set root_folder persists for databases not
                    # listed in db_root — confirm this carry-over is intended.
                    template_data["root_folder"] = db_root[object_]
                upload_dispatcher(
                    source_prefix, project, asset, object_, template_data
                )
        else:
            for object_ in list_objects(source_prefix, project, asset, "*.json"):
                upload_dispatcher(
                    source_prefix, project, asset, object_, template_data
                )

        # Link successive versions of the freshly installed objects together
        link_contribution_versions(klass_map[asset])
class Command(BaseCommand):
......@@ -1123,6 +1125,7 @@ class Command(BaseCommand):
super(Command, self).__init__()
self.assets = list(ASSET_UPLOADER_MAP.keys())
self.prefix = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))),
"src",
......@@ -1223,6 +1226,16 @@ class Command(BaseCommand):
"%s]" % ", ".join(self.projects),
)
parser.add_argument(
"--assets",
nargs="*",
type=str,
default=self.assets,
help="The assets that you wish to install [default: %(default)s] "
"WARNING: order is important, dataformats before any other assets "
"that uses them, protocol templates before databases, etc.",
)
def handle(self, *ignored, **arguments):
# Setup this command's logging level
......@@ -1246,6 +1259,14 @@ class Command(BaseCommand):
)
sys.exit(1)
assets = arguments["assets"]
if not all(item in ASSET_UPLOADER_MAP.keys() for item in assets):
logger.error(f"Invalid asset list {assets}")
sys.exit(1)
assets = sorted(assets, key=self.assets.index)
# Creates the prefix directory
if not os.path.exists(settings.PREFIX):
logger.info("Creating prefix directory `%s'...", settings.PREFIX)
......@@ -1295,5 +1316,9 @@ class Command(BaseCommand):
logger.info("Adding objects for project `%s'...", project)
install_contributions(
self.prefix, project, template_data, arguments["database_root_file"]
self.prefix,
project,
assets,
template_data,
arguments["database_root_file"],
)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment