Commit 75fc7d7b authored by Samuel GAIST

[experiment] Rename loop_XXX fields to evaluator_XXX

This makes them consistent with the algorithm types (see the before/after sketch below).
parent d96dcefe
Pipeline #33033 passed with stage in 48 minutes and 29 seconds
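
For orientation, a loop block in an experiment declaration changes shape roughly as follows. This is a minimal before/after sketch assembled from the test data further down in this diff (the field values are illustrative, not part of the commit message):

    Before:
        "loop_algorithm": "user/db_input_loop_evaluator/1",
        "loop_parameters": { "threshold": 1 },
        "loop_inputs": { "in_loop": "in_loop" },
        "loop_outputs": { "out_loop": "out_loop" }

    After:
        "evaluator_algorithm": "user/db_input_loop_evaluator/1",
        "evaluator_parameters": { "threshold": 1 },
        "evaluator_inputs": { "in_loop": "in_loop" },
        "evaluator_outputs": { "out_loop": "out_loop" }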
@@ -54,6 +54,8 @@ from . import database
 from . import toolchain
 from . import hash
 
+EVALUATOR_PREFIX = "evaluator_"
+
 
 class Storage(utils.Storage):
     """Resolves paths for experiments
@@ -438,7 +440,7 @@ class Experiment(object):
         loops = self.data.get("loops", {})
 
         for loopname, loop in loops.items():
-            for key in ["", "loop_"]:
+            for key in ["", EVALUATOR_PREFIX]:
                 algoname = loop[key + "algorithm"]
 
                 if algoname not in self.algorithms:
@@ -728,7 +730,7 @@ class Experiment(object):
 
         # the number of block endpoints and the toolchain's must match
         for block_name, block in self.loops.items():
-            for prefix in ["", "loop_"]:
+            for prefix in ["", EVALUATOR_PREFIX]:
                 block_input_count = len(block[prefix + "inputs"])
                 toolchain_input_block = len(
                     toolchain_loops[block_name][prefix + "inputs"]
@@ -801,7 +803,7 @@ class Experiment(object):
 
             elif from_endpt[0] in self.loops:
                 loop = self.loops[from_endpt[0]]
-                for prefix in ["", "loop_"]:
+                for prefix in ["", EVALUATOR_PREFIX]:
                     mapping = loop[prefix + "outputs"]
                     imapping = dict(zip(mapping.values(), mapping.keys()))
                     if from_endpt[1] in imapping:
@@ -828,7 +830,7 @@ class Experiment(object):
 
             elif to_endpt[0] in self.loops:
                 loop = self.loops[to_endpt[0]]
-                for prefix in ["", "loop_"]:
+                for prefix in ["", EVALUATOR_PREFIX]:
                     mapping = loop[prefix + "inputs"]
                     imapping = dict(zip(mapping.values(), mapping.keys()))
                     if to_endpt[1] in imapping:
@@ -968,18 +970,20 @@ class Experiment(object):
 
             # now check the algorithms for conformance
             algorithm_name = loop["algorithm"]
-            loop_algorithm_name = loop["loop_algorithm"]
+            evaluator_algorithm_name = loop[EVALUATOR_PREFIX + "algorithm"]
 
             algo_groups_list = self.algorithms[algorithm_name].groups
-            loop_algo_groups_list = self.algorithms[loop_algorithm_name].groups
+            evaluator_algo_groups_list = self.algorithms[
+                evaluator_algorithm_name
+            ].groups
 
             groups_count = [
                 (
-                    len(algo_groups["inputs"]) + len(loop_algo_groups["inputs"]),
-                    len(algo_groups["outputs"]) + len(loop_algo_groups["outputs"]),
+                    len(algo_groups["inputs"]) + len(evaluator_algo_groups["inputs"]),
+                    len(algo_groups["outputs"]) + len(evaluator_algo_groups["outputs"]),
                 )
-                for algo_groups, loop_algo_groups in zip(
-                    algo_groups_list, loop_algo_groups_list
+                for algo_groups, evaluator_algo_groups in zip(
+                    algo_groups_list, evaluator_algo_groups_list
                 )
             ]
 
@@ -990,21 +994,24 @@ class Experiment(object):
                 self.errors.append(
                     "synchronization mismatch in input/output "
                     "grouping between loop `{}', algorithm `{}' "
                    "and loop algorithm `{}'".format(
-                        name, algorithm_name, loop_algorithm_name
+                        name, algorithm_name, evaluator_algorithm_name
                     )
                 )
 
-            for algo_groups, loop_algo_groups in zip(
-                algo_groups_list, loop_algo_groups_list
+            for algo_groups, evaluator_algo_groups in zip(
+                algo_groups_list, evaluator_algo_groups_list
             ):
                 algo_loop = algo_groups["loop"]
-                loop_algo_loop = loop_algo_groups["loop"]
+                evaluator_algo_loop = evaluator_algo_groups["loop"]
 
                 for channel in ["request", "answer"]:
-                    if algo_loop[channel]["type"] != loop_algo_loop[channel]["type"]:
+                    if (
+                        algo_loop[channel]["type"]
+                        != evaluator_algo_loop[channel]["type"]
+                    ):
                         self.errors.append(
                             "{} loop channel type incompatible between {} and {}".format(
-                                channel, algorithm_name, loop_algorithm_name
+                                channel, algorithm_name, evaluator_algorithm_name
                             )
                         )
@@ -1101,7 +1108,7 @@ class Experiment(object):
             raise KeyError("did not find `%s' among blocks, loops or analyzers" % name)
 
         if get_loop_data:
-            inputs = config_data["loop_inputs"]
+            inputs = config_data[EVALUATOR_PREFIX + "inputs"]
         else:
            inputs = config_data["inputs"]
@@ -1177,7 +1184,7 @@ class Experiment(object):
 
         # notice: there can be multiply connected outputs
         if get_loop_data:
-            outputs = config_data["loop_outputs"]
+            outputs = config_data[EVALUATOR_PREFIX + "outputs"]
         else:
             outputs = config_data["outputs"]
@@ -1246,14 +1253,14 @@ class Experiment(object):
         if name in self.loops:
             loop_environment = config_data.get(
-                "loop_environment", self.data["globals"]["environment"]
+                EVALUATOR_PREFIX + "environment", self.data["globals"]["environment"]
             )
 
             loop_data = dict(
                 inputs=self._inputs(name, get_loop_data=True),
                 outputs=self._block_outputs(name, get_loop_data=True),
                 channel=retval["channel"],
-                algorithm=config_data["loop_algorithm"],
-                parameters=config_data["loop_parameters"],
+                algorithm=config_data[EVALUATOR_PREFIX + "algorithm"],
+                parameters=config_data[EVALUATOR_PREFIX + "parameters"],
                 queue=queue,
                 environment=loop_environment,
             )
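The validation code above walks both field families with a single loop: the empty prefix selects the base fields ("algorithm", "inputs", "outputs") and EVALUATOR_PREFIX selects the renamed evaluator fields. A minimal, runnable sketch of that lookup pattern, not part of the commit (the loop declaration below is illustrative; the processor algorithm name is hypothetical, the evaluator name is taken from the test data):

    EVALUATOR_PREFIX = "evaluator_"

    # Illustrative loop declaration; only the algorithm fields are shown.
    loop = {
        "algorithm": "user/some_processor/1",  # hypothetical name
        "evaluator_algorithm": "user/db_input_loop_evaluator/1",
    }

    for prefix in ["", EVALUATOR_PREFIX]:
        # "" resolves "algorithm"; "evaluator_" resolves "evaluator_algorithm"
        algoname = loop[prefix + "algorithm"]
        print(prefix + "algorithm", "->", algoname)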
@@ -59,19 +59,19 @@
       "queue": { "$ref": "common.json#/definitions/queue" },
       "environment": { "$ref": "common.json#/definitions/environment" },
       "nb_slots": { "$ref": "common.json#/definitions/slots" },
-      "loop_algorithm": { "$ref": "../common/1.json#/definitions/reference" },
-      "loop_parameters": { "$ref": "common.json#/definitions/parameter_set" },
-      "loop_inputs": { "$ref": "common.json#/definitions/connection_map" },
-      "loop_outputs": { "$ref": "common.json#/definitions/connection_map" },
-      "loop_environment": { "$ref": "common.json#/definitions/environment" }
+      "evaluator_algorithm": { "$ref": "../common/1.json#/definitions/reference" },
+      "evaluator_parameters": { "$ref": "common.json#/definitions/parameter_set" },
+      "evaluator_inputs": { "$ref": "common.json#/definitions/connection_map" },
+      "evaluator_outputs": { "$ref": "common.json#/definitions/connection_map" },
+      "evaluator_environment": { "$ref": "common.json#/definitions/environment" }
     },
     "required": [
       "algorithm",
       "inputs",
       "outputs",
-      "loop_algorithm",
-      "loop_inputs",
-      "loop_outputs"
+      "evaluator_algorithm",
+      "evaluator_inputs",
+      "evaluator_outputs"
     ],
     "additionalProperties": false
   }
@@ -14,14 +14,14 @@
                "out": "out"
            },
-            "loop_algorithm": "user/db_input_loop_evaluator/1",
-            "loop_parameters": {
+            "evaluator_algorithm": "user/db_input_loop_evaluator/1",
+            "evaluator_parameters": {
                "threshold": 1
            },
-            "loop_inputs": {
+            "evaluator_inputs": {
                "in_loop": "in_loop"
            },
-            "loop_outputs": {
+            "evaluator_outputs": {
                "out_loop": "out_loop"
            }
        }
@@ -33,14 +33,14 @@
                "out": "out"
            },
-            "loop_algorithm": "user/db_input_loop_evaluator/1",
-            "loop_parameters": {
+            "evaluator_algorithm": "user/db_input_loop_evaluator/1",
+            "evaluator_parameters": {
                "threshold": 1
            },
-            "loop_inputs": {
+            "evaluator_inputs": {
                "in_loop": "in_loop"
            },
-            "loop_outputs": {
+            "evaluator_outputs": {
                "out_loop": "out_loop"
            }
        },
@@ -53,14 +53,14 @@
                "out": "out"
            },
-            "loop_algorithm": "user/block_input_loop_evaluator/1",
-            "loop_parameters": {
+            "evaluator_algorithm": "user/block_input_loop_evaluator/1",
+            "evaluator_parameters": {
                "threshold": 1
            },
-            "loop_inputs": {
+            "evaluator_inputs": {
                "in_loop": "in_loop"
            },
-            "loop_outputs": {
+            "evaluator_outputs": {
                "out_loop": "out_loop"
            }
        }
@@ -149,6 +149,29 @@ def test_no_description():
     nose.tools.eq_(experiment.description, description)
 
 
+def test_load_experiment_with_loop():
+    experiment = Experiment(prefix, "user/two_loops/1/two_loops")
+
+    nose.tools.assert_true(
+        experiment.valid, "\n * %s" % "\n * ".join(experiment.errors)
+    )
+    nose.tools.eq_(experiment.label, "user/user/two_loops/1/two_loops")
+
+    nose.tools.assert_true(
+        experiment.toolchain.valid,
+        "\n * %s" % "\n * ".join(experiment.toolchain.errors),
+    )
+    nose.tools.eq_(experiment.toolchain.name, "user/two_loops/1")
+
+    nose.tools.eq_(len(experiment.datasets), 2)
+    nose.tools.eq_(len(experiment.databases), 2)
+    nose.tools.eq_(len(experiment.blocks), 2)
+    nose.tools.eq_(len(experiment.loops), 2)
+    nose.tools.eq_(len(experiment.analyzers), 2)
+    nose.tools.eq_(len(experiment.algorithms), 6)
+
+
 @nose.tools.with_setup(teardown=cleanup)
 def test_export():