diff --git a/beat/web/utils/management/commands/install.py b/beat/web/utils/management/commands/install.py
index e00aface14c4516ab320bad121011792df183a94..2fd0772e730ea6320585f0cd575472e04495a57c 100644
--- a/beat/web/utils/management/commands/install.py
+++ b/beat/web/utils/management/commands/install.py
@@ -153,6 +153,16 @@ def create_sites():
     _setup_site(3, 'Production System', 'www.beat-eu.org')
 
 
+def create_users(username, passwd):
+
+    # Sets up initial users, if not already there.
+    system_user = add_user(settings.SYSTEM_ACCOUNT, None, '1')
+    plot_user = add_user(settings.PLOT_ACCOUNT, None, '2')
+    user = add_user(username, passwd, '3')
+
+    return system_user, plot_user, user
+
+
 def list_objects(prefix, project, category, fnfilter):
     """Lists all objects matching a certain filter"""
 
@@ -813,6 +823,104 @@ def link_contribution_versions(klass):
             pass #ignores
 
 
+def install_contributions(source_prefix, project, template_data,
+        db_root_file=None):
+    '''Installs all contributions for a given project
+
+
+    Parameters:
+
+      source_prefix (str): The path to the base directory containing the
+        projects from which objects must be installed.
+
+      project (str): The project within ``source_prefix`` from which to
+        install objects.
+
+      template_data (dict): A dictionary containing standard template data
+        for completing template objects installed on the project.
+
+      db_root_file (str, Optional): Path to a JSON file describing the
+        root folders of databases to be inserted. Database names not
+        present in the project directory are ignored.
+
+    '''
+
+    # Dataformat adding requires a special trick as there are dependencies
+    # between different dataformats. Our recipe: we try to upload all of them
+    # one after the other. If one fails, we retry on the next loop, until all
+    # formats have been uploaded.
+    dataformat_filenames_next = list_objects(source_prefix, project,
+        'dataformats', '*.json')
+    dataformat_filenames_cur = []
+
+    while True:
+        if not dataformat_filenames_next: break
+        if len(dataformat_filenames_cur) == len(dataformat_filenames_next):
+            break
+        dataformat_filenames_cur = dataformat_filenames_next
+        dataformat_filenames_next = []
+        for k in dataformat_filenames_cur:
+            if not upload_dispatcher(source_prefix, project, 'dataformats', k,
+                template_data):
+                dataformat_filenames_next.append(k)
+    from ....dataformats.models import DataFormat
+    link_contribution_versions(DataFormat)
+
+    # Reads database root file, if provided
+    db_root = {}
+    if db_root_file: db_root.update(load_database_folders(db_root_file))
+
+    for k in list_objects(source_prefix, project, 'databases', '*.json'):
+        if k in db_root: template_data['root_folder'] = db_root[k]
+        upload_dispatcher(source_prefix, project, 'databases', k,
+            template_data)
+    link_database_versions()
+
+    for k in list_objects(source_prefix, project, 'toolchains',
+        '*.json'):
+        upload_dispatcher(source_prefix, project, 'toolchains', k,
+            template_data)
+    from ....toolchains.models import Toolchain
+    link_contribution_versions(Toolchain)
+
+    # Libraries adding requires a special trick as there are
+    # dependencies between different libraries and algorithms. Our
+    # recipe: we use the same technique as for dataformats.
+    library_filenames_next = list_objects(source_prefix, project,
+        'libraries', '*.json')
+    library_filenames_cur = []
+
+    while True:
+        if not library_filenames_next: break
+        if len(library_filenames_cur) == len(library_filenames_next):
+            break
+        library_filenames_cur = library_filenames_next
+        library_filenames_next = []
+        for k in library_filenames_cur:
+            if not upload_dispatcher(source_prefix, project,
+                'libraries', k, template_data):
+                library_filenames_next.append(k)
+    from ....libraries.models import Library
+    link_contribution_versions(Library)
+
+    for k in list_objects(source_prefix, project, 'algorithms',
+        '*.json'):
+        upload_dispatcher(source_prefix, project, 'algorithms', k,
+            template_data)
+    from ....algorithms.models import Algorithm
+    link_contribution_versions(Algorithm)
+
+    for k in list_objects(source_prefix, project, 'plotters', '*.json'):
+        upload_dispatcher(source_prefix, project, 'plotters', k,
+            template_data)
+    from ....plotters.models import Plotter
+    link_contribution_versions(Plotter)
+
+    for k in list_objects(source_prefix, project, 'experiments',
+        '*.json'):
+        upload_dispatcher(source_prefix, project, 'experiments', k,
+            template_data)
+
+
 class Command(BaseCommand):
 
@@ -925,10 +1033,9 @@ class Command(BaseCommand):
         # Setup sites: 1.Development; 2.Staging; 3.Production
         create_sites()
 
-        # Sets up initial users, if not already there.
-        system_user = add_user(settings.SYSTEM_ACCOUNT, None, '1')
-        plot_user = add_user(settings.PLOT_ACCOUNT, None, '2')
-        user = add_user(arguments['username'], arguments['password'], '4')
+        system_user, plot_user, user = create_users(arguments['username'],
+            arguments['password'])
+
 
         # Sets up initial groups
         add_group('Default')
@@ -953,89 +1060,5 @@ class Command(BaseCommand):
 
             logger.info("Adding objects for project `%s'...", project)
 
-            # Dataformat adding requires a special trick as there are
-            # dependencies between different dataformats. Our recipe: we try
-            # to upload all of them one after the other. If one fails, we
-            # retry on the next loop, until all formats have been uploaded.
-            dataformat_filenames_next = list_objects(self.prefix, project,
-                'dataformats', '*.json')
-            dataformat_filenames_cur = []
-
-            while True:
-                if not dataformat_filenames_next: break
-                if len(dataformat_filenames_cur) == \
-                        len(dataformat_filenames_next): break
-                dataformat_filenames_cur = dataformat_filenames_next
-                dataformat_filenames_next = []
-                for k in dataformat_filenames_cur:
-                    if not upload_dispatcher(self.prefix, project,
-                        'dataformats', k, template_data):
-                        dataformat_filenames_next.append(k)
-            from ....dataformats.models import DataFormat
-            link_contribution_versions(DataFormat)
-
-            # Reads database root file, if provided
-            db_root = {}
-            if arguments['database_root_file']:
-                db_root.update(load_database_folders(
-                    arguments['database_root_file']))
-
-            for k in list_objects(self.prefix, project, 'databases', '*.json'):
-                if k in db_root: template_data['root_folder'] = db_root[k]
-                upload_dispatcher(self.prefix, project, 'databases', k,
-                    template_data)
-            link_database_versions()
-
-            for k in list_objects(self.prefix, project, 'toolchains',
-                '*.json'):
-                upload_dispatcher(self.prefix, project, 'toolchains', k,
-                    template_data)
-            from ....toolchains.models import Toolchain
-            link_contribution_versions(Toolchain)
-
-            # Libraries adding requires a special trick as there are
-            # dependencies between different libraries and algorithms. Our
-            # recipe: we use the same technique as for dataformats.
-            library_filenames_next = list_objects(self.prefix, project,
-                'libraries', '*.json')
-            library_filenames_cur = []
-
-            while True:
-                if not library_filenames_next: break
-                if len(library_filenames_cur) == len(library_filenames_next):
-                    break
-                library_filenames_cur = library_filenames_next
-                library_filenames_next = []
-                for k in library_filenames_cur:
-                    if not upload_dispatcher(self.prefix, project,
-                        'libraries', k, template_data):
-                        library_filenames_next.append(k)
-            from ....libraries.models import Library
-            link_contribution_versions(Library)
-
-            for k in list_objects(self.prefix, project, 'algorithms',
-                '*.json'):
-                upload_dispatcher(self.prefix, project, 'algorithms', k,
-                    template_data)
-            from ....algorithms.models import Algorithm
-            link_contribution_versions(Algorithm)
-
-            for k in list_objects(self.prefix, project, 'plotters', '*.json'):
-                upload_dispatcher(self.prefix, project, 'plotters', k,
-                    template_data)
-            from ....plotters.models import Plotter
-            link_contribution_versions(Plotter)
-
-            for k in list_objects(self.prefix, project, 'experiments',
-                '*.json'):
-                upload_dispatcher(self.prefix, project, 'experiments', k,
-                    template_data)
-
-            '''
-            for k in list_objects(self.prefix, project, 'plotter_parameters',
-                '*.json'):
-                upload_dispatcher(self.prefix, project, 'plotter_parameters',
-                    k, template_data)
-            from ....plotters.models import PlotterParameter
-            link_contribution_versions(PlotterParameter)
-            '''
+            install_contributions(self.prefix, project, template_data,
+                arguments['database_root_file'])
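
Note (not part of the patch): the dataformat and library passes in
``install_contributions`` share the same fixed-point retry strategy: every
failed upload is re-attempted until a full pass over the remaining files
makes no progress, which resolves arbitrary dependency orders without ever
computing them explicitly. Below is a minimal, self-contained sketch of that
strategy; ``try_upload`` is a hypothetical stand-in for ``upload_dispatcher``,
assumed to return True on success and False on a retryable failure.

    def upload_until_stable(filenames, try_upload):
        """Retries failed uploads until everything succeeds or a whole
        pass makes no progress (dependency cycle or broken object)."""
        pending = list(filenames)
        previous = []
        while pending and len(previous) != len(pending):
            previous = pending
            # keep only the names whose upload still fails on this pass
            pending = [k for k in previous if not try_upload(k)]
        return pending  # names that could not be installed

    if __name__ == '__main__':
        # Toy dependency chain: 'b' only uploads after 'a', 'c' after 'b'.
        done = set()
        deps = {'a': None, 'b': 'a', 'c': 'b'}

        def try_upload(name):
            if deps[name] is None or deps[name] in done:
                done.add(name)
                return True
            return False

        # worst-case input order still converges in three passes
        assert upload_until_stable(['c', 'b', 'a'], try_upload) == []

Stopping when the pending list stops shrinking guarantees termination even on
circular dependencies; the sketch returns the leftovers for reporting, whereas
the loops in the patch simply leave such objects uninstalled.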