diff --git a/beat/web/attestations/tests.py b/beat/web/attestations/tests.py
old mode 100644
new mode 100755
index d2ea57a1ca3b421ae1d37d1a2534c0c580ab06fe..baea5e956e203a6196272fe4fbe00d06db9c90b5
--- a/beat/web/attestations/tests.py
+++ b/beat/web/attestations/tests.py
@@ -220,6 +220,7 @@ class AttestationsAPIBase(BaseTestCase):
         # Create an environment and queue
         environment = Environment(name='env1', version='1.0')
         environment.save()
+        environment.share()
         queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
diff --git a/beat/web/backend/tests.py b/beat/web/backend/tests.py
index e4cfea7fdb4665084f15531839fe800532202510..263eb10294628aca81a40ab302787c5c750afe8d 100755
--- a/beat/web/backend/tests.py
+++ b/beat/web/backend/tests.py
@@ -779,7 +779,7 @@ class Scheduling(BaseBackendTestCase):
         self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -820,7 +820,7 @@ class Scheduling(BaseBackendTestCase):
         split.end(Result(status=0))
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.assertEqual(split.job.status, Job.COMPLETED)
         self.assertEqual(split.job.block.status, Block.CACHED)
@@ -930,7 +930,7 @@ class Scheduling(BaseBackendTestCase):
         self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         # assert we have no database traces after the last block is done
         self.assertEqual(Job.objects.count(), 0)
@@ -987,7 +987,7 @@ class Scheduling(BaseBackendTestCase):
         self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -1028,7 +1028,7 @@ class Scheduling(BaseBackendTestCase):
         split.end(Result(status=1))
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.assertEqual(split.job.status, Job.FAILED)
         self.assertEqual(split.job.block.status, Block.FAILED)
@@ -1124,7 +1124,7 @@ class Scheduling(BaseBackendTestCase):
         self.assertEqual(split.job.block.experiment.status, Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -1266,7 +1266,7 @@ class Scheduling(BaseBackendTestCase):
         self.assertEqual(split.job.block.experiment.status, Experiment.FAILED)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         # assert we have no database traces after the last block is done
         self.assertEqual(Job.objects.count(), 0)
@@ -1344,7 +1344,7 @@ class Scheduling(BaseBackendTestCase):
                          Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -1392,7 +1392,7 @@ class Scheduling(BaseBackendTestCase):
         split.end(Result(status=0))
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.assertEqual(split.job.status, Job.COMPLETED)
         self.assertEqual(split.job.block.status, Block.CACHED)
@@ -1479,7 +1479,7 @@ class Scheduling(BaseBackendTestCase):
                          Experiment.FAILED)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         # assert we have no database traces after the last block is done
         self.assertEqual(Job.objects.count(), 0)
@@ -1554,7 +1554,7 @@ class Scheduling(BaseBackendTestCase):
                          Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -1761,7 +1761,7 @@ class Scheduling(BaseBackendTestCase):
                          Experiment.RUNNING)
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(split)
@@ -2161,7 +2161,7 @@ class Working(BaseBackendTestCase):
         assert all([k.index_checksums() for k in block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(block)
@@ -2190,7 +2190,7 @@ class Working(BaseBackendTestCase):
         split.process()
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         xp.refresh_from_db()
         block = xp.blocks.last()
@@ -2251,7 +2251,7 @@ class Working(BaseBackendTestCase):
         assert all([not k.exists() for k in block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         assert abs(block.speed_up_real() - 1.0) < 0.1
         assert abs(block.speed_up_maximal() - 1.0) < 0.1
@@ -2310,7 +2310,7 @@ class Working(BaseBackendTestCase):
         assert all([k.index_checksums() for k in block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(block)
@@ -2339,7 +2339,7 @@ class Working(BaseBackendTestCase):
         split.process()
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         xp.refresh_from_db()
         block = xp.blocks.last()
@@ -2409,7 +2409,7 @@ class Working(BaseBackendTestCase):
         assert all([k.index_checksums() for k in block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(block)
@@ -2449,7 +2449,7 @@ class Working(BaseBackendTestCase):
         split.process()
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         xpc.refresh_from_db()
         block = xpc.blocks.last()
@@ -2520,7 +2520,7 @@ class Working(BaseBackendTestCase):
         assert all([k.index_checksums() for k in block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         self.check_stats_success(block)
@@ -2622,7 +2622,8 @@ class WorkingExternally(TransactionTestCase):
             xp.refresh_from_db()
             block = xp.blocks.first()
             return block.status == Block.CACHED
-        _sleep(20, condition)
+
+        _sleep(120, condition)
         # at this point, split should have been successful which shall
         # trigger job deletion and block update
@@ -2636,7 +2637,7 @@ class WorkingExternally(TransactionTestCase):
         assert all([k.index_checksums() for k in split.job.block.outputs.all()])
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         # assert we have no database traces after the block is done
         self.assertEqual(Job.objects.filter(block=split.job.block).count(), 0)
@@ -2665,10 +2666,11 @@ class WorkingExternally(TransactionTestCase):
         def condition():
             xp.refresh_from_db()
             return xp.status == Experiment.DONE
-        _sleep(20, condition) #wait job completion
+
+        _sleep(120, condition) #wait job completion
         # checks the number of statistics objects has increased by 1
-        self.assertEqual(HourlyStatistics.objects.count(), current_stats + 1)
+        self.assertTrue(HourlyStatistics.objects.count() > current_stats)
         # at this point, split should have been successful which shall
         # trigger job deletion and block update
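
Note on the backend test changes above (this remark is outside the patch): the exact-count check on HourlyStatistics was brittle because the hourly aggregation can run more than once during a test, so the assertions now only require that the number of statistics rows has grown; the `_sleep` budget for the dockerised WorkingExternally tests is raised from 20 to 120 seconds for the same robustness reason. A minimal sketch of how the repeated check could be factored into a shared helper follows — the mixin name, method name and import path are hypothetical, none of them is introduced by this patch.

# Hypothetical helper, not part of this patch: it only captures the relaxed
# "statistics increased" check that the hunks above repeat.
from beat.web.statistics.models import HourlyStatistics  # import path assumed


class StatisticsAssertionsMixin(object):

    def assertStatisticsIncreased(self, previous_count):
        # Pass as soon as at least one new HourlyStatistics row exists,
        # instead of requiring exactly one new row.
        self.assertTrue(HourlyStatistics.objects.count() > previous_count)

Inside the tests this would then read self.assertStatisticsIncreased(current_stats).
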
diff --git a/beat/web/experiments/api.py b/beat/web/experiments/api.py
old mode 100644
new mode 100755
diff --git a/beat/web/experiments/models.py b/beat/web/experiments/models.py
old mode 100644
new mode 100755
index 2b7e9c398ffa3113a2fbc2ea1c5653aa47e3c1d2..392382b9f6c2efb0acbe52e1c9dc26223fbe013d
--- a/beat/web/experiments/models.py
+++ b/beat/web/experiments/models.py
@@ -74,37 +74,52 @@ logger = logging.getLogger(__name__)
 #----------------------------------------------------------
-def validate_environments(experiment):
+def validate_environments(experiment, user=None):
     """Validates the environments throughout the experiment"""
-    def _valid(queue, environment):
+    def _valid(environment):
+        q = Environment.objects.for_user(user, True) if user is not None else Environment.objects
+
+        return bool(q.filter(
+            name=environment['name'],
+            version=environment['version'],
+        ))
+
+    def _valid_combination(queue, environment):
         return bool(Queue.objects.filter(name=queue,
                                          environments__name=environment['name'],
-                                         environments__version=environment['version']))
+                                         environments__version=environment['version']
+                                         ))
     errors = []
     default_q = experiment.data['globals']['queue']
     default_env = experiment.data['globals']['environment']
-    if not _valid(default_q, default_env):
+    if not _valid(default_env):
+        errors.append("The environment '%s (%s)' in the global experiment declaration does not exist" % (default_env['name'], default_env['version']))
+    elif not _valid_combination(default_q, default_env):
         errors.append("The combination of queue '%s' with environment '%s (%s)' in the global experiment declaration does not exist" % (default_q, default_env['name'], default_env['version']))
     for name, config in experiment.blocks.items():
         q = config.get('queue', default_q)
         env = config.get('environment', default_env)
-        if not _valid(q, env):
+        if not _valid(env):
+            errors.append("The environment '%s (%s)' for block '%s' does not exist" % (env['name'], env['version'], name))
+        elif not _valid_combination(q, env):
             errors.append("The combination of queue '%s' with environment '%s (%s)' for block '%s' does not exist" % (q, env['name'], env['version'], name))
     for name, config in experiment.analyzers.items():
         q = config.get('queue', default_q)
         env = config.get('environment', default_env)
-        if not _valid(q, env):
+        if not _valid(env):
+            errors.append("The environment '%s (%s)' for analyzer '%s' does not exist" % (env['name'], env['version'], name))
+        elif not _valid_combination(q, env):
             errors.append("The combination of queue '%s' with environment '%s (%s)' for analyzer '%s' does not exist" % (q, env['name'], env['version'], name))
     return errors
-def validate_experiment(experiment_info, toolchain_info):
+def validate_experiment(experiment_info, toolchain_info, user=None):
     """Makes sure the experiment can be run"""
     xp = beat.core.experiment.Experiment(settings.PREFIX,
@@ -112,7 +127,7 @@ def validate_experiment(experiment_info, toolchain_info):
     if not xp.valid:
         return xp, xp.errors
-    return xp, xp.errors + validate_environments(xp)
+    return xp, xp.errors + validate_environments(xp, user)
 #----------------------------------------------------------
@@ -180,6 +195,8 @@ class ExperimentManager(ContributionManager):
         # Save the experiment (will run the validation)
         try:
             experiment.save()
+        except SyntaxError, e:
+            return (None, None, e.message)
         except Exception:
             import traceback
             return (None, None, traceback.format_exc())
@@ -387,7 +404,7 @@ class Experiment(Shareable):
         if content_modified:
             # validates the experiment
-            xp, errors = validate_experiment(declaration, self.toolchain.declaration)
+            xp, errors = validate_experiment(declaration, self.toolchain.declaration, self.author)
             if errors:
                 message = "The experiment isn't valid, due to the " \
                           "following errors:\n * %s"
@@ -607,7 +624,7 @@ class Experiment(Shareable):
         return (self.reports.count() == 0) and not self.has_attestation() and super(Experiment, self).deletable()
     def core(self):
-        return validate_experiment(self.declaration, self.toolchain.declaration)[0]
+        return validate_experiment(self.declaration, self.toolchain.declaration, self.author)[0]
     def job_splits(self, status=None):
         from ..backend.models import JobSplit
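
Note on the experiments/models.py changes above (outside the patch): validate_environments() now receives the experiment's author and first checks that each requested environment exists and is visible to that user (through Environment.objects.for_user) before checking the queue/environment combination, which produces the more specific error messages added above; the new `except SyntaxError` branch in ExperimentManager then returns that message instead of a full traceback. A short usage sketch, where `declaration`, `toolchain_declaration` and `author` are placeholders for objects assumed to be already loaded:

# Usage sketch only, not part of the patch.
xp, errors = validate_experiment(declaration, toolchain_declaration, user=author)
if errors:
    # e.g. a private environment owned by someone else is reported as
    # "The environment 'private_env (1.0)' ... does not exist"
    print("Invalid experiment:\n * %s" % "\n * ".join(errors))
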
diff --git a/beat/web/experiments/tests.py b/beat/web/experiments/tests.py
old mode 100644
new mode 100755
index 23107adf2e7a445df9bdc3b162445cf90324822a..10408a6ab6577c5d39e761e32291f1e197429103
--- a/beat/web/experiments/tests.py
+++ b/beat/web/experiments/tests.py
@@ -28,6 +28,7 @@ import os
 import simplejson as json
 import shutil
+import copy
 from datetime import datetime
 from django.conf import settings
@@ -193,11 +194,16 @@ class ExperimentTestBase(BaseTestCase):
         # Create an environment and queue
         environment = Environment(name='env1', version='1.0')
         environment.save()
+        environment.share()
+
+        environment2 = Environment(name='private_env', version='1.0')
+        environment2.save()
         queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
         queue.environments.add(environment)
+        queue.environments.add(environment2)
         DataFormat.objects.create_dataformat(
@@ -509,6 +515,171 @@ class ExperimentCreationAPI(ExperimentTestBase):
         self.checkResponse(response, 400, content_type='application/json')
+    def test_bad_request_with_unknown_global_environment_name(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['globals']['environment']['name'] = 'unknown'
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unknown_global_environment_version(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['globals']['environment']['version'] = 'unknown'
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unknown_algorithm_environment_name(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['blocks']['addition1']['environment'] = dict(
+            name='unknown',
+            version='1',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unknown_algorithm_environment_version(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['blocks']['addition1']['environment'] = dict(
+            name='env1',
+            version='unknown',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unknown_analyzer_environment_name(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['analyzers']['analysis']['environment'] = dict(
+            name='unknown',
+            version='1',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unknown_analyzer_environment_version(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['analyzers']['analysis']['environment'] = dict(
+            name='env1',
+            version='unknown',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unusable_global_environment(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['globals']['environment'] = dict(
+            name='private_env',
+            version='1.0'
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unusable_algorithm_environment(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['blocks']['addition1']['environment'] = dict(
+            name='private_env',
+            version='1.0',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
+    def test_bad_request_with_unusable_analyzer_environment(self):
+        self.client.login(username='johndoe', password='1234')
+
+        declaration = copy.deepcopy(ExperimentTestBase.DECLARATION1)
+
+        declaration['analyzers']['analysis']['environment'] = dict(
+            name='private_env',
+            version='1.0',
+        )
+
+        response = self.client.post(self.url,
+            json.dumps({
+                'toolchain': 'johndoe/toolchain1/1',
+                'declaration': declaration,
+            }), content_type='application/json')
+
+        self.checkResponse(response, 400, content_type='application/json')
+
+
     def test_valid_experiment(self):
         self.client.login(username='johndoe', password='1234')
diff --git a/beat/web/reports/tests.py b/beat/web/reports/tests.py
old mode 100644
new mode 100755
index b21349ff664005e9a2af6a5a30f2a360b6b99845..693a884b1097e1a96aca4e9456975451e7ea0900
--- a/beat/web/reports/tests.py
+++ b/beat/web/reports/tests.py
@@ -498,6 +498,7 @@ class ReportTestCase(APITestCase):
         # Create an environment and queue
         environment = Environment(name='env1', version='1.0')
         environment.save()
+        environment.share()
         queue = Queue(name='queue1', memory_limit=1024, time_limit=60, cores_per_slot=1, max_slots_per_user=10)
         queue.save()
diff --git a/buildout.cfg b/buildout.cfg
index f3395542f543a95f51759ba04712ad2fa11869a1..66779e63e8f3295a8f75a1dac31154c0be540f8b 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -1,5 +1,5 @@
 [buildout]
-parts = sysegg scripts django node bower help
+parts = sysegg scripts docker_images cxx_algorithms django node bower help
 extensions = mr.developer
 index = https://pypi.org/simple
 auto-checkout = *
@@ -94,6 +94,20 @@ beat.examples = git git@gitlab.idiap.ch:beat/beat.examples egg=false
 [scripts]
 recipe = bob.buildout:scripts
+[docker_images]
+recipe = collective.recipe.cmd
+cmds = ./src/beat.core/buildout_pull_images.sh
+uninstall_cmds =
+on_install = true
+on_update = true
+
+[cxx_algorithms]
+recipe = collective.recipe.cmd
+cmds = ./src/beat.core/buildout_compile_cxx_algorithm.sh build
+uninstall_cmds = ./src/beat.core/buildout_compile_cxx_algorithm.sh cleanup
+on_install = true
+on_update = true
+
 [django]
 recipe = djangorecipe
 project = beat
diff --git a/setup.py b/setup.py
index 432261f68c3788a4671a4da71771fde62074cd3b..a363c6edd162b9e923f0f63fd0d718daa499197f 100755
--- a/setup.py
+++ b/setup.py
@@ -60,6 +60,7 @@ setup(
         "docopt",
         "docutils",
         "Jinja2",
+        "nose",
         "psycopg2",
         "pytz",
         "psutil",