Commit e8bfebf9 authored by Samuel GAIST's avatar Samuel GAIST

[widgets][experimenteditor] Implement creation of a new experiment

This implementation auto-fills the experiment content with
whatever it can find that matches the inputs/outputs in number.

It currently doesn't check for compatibility.

Part of #185
parent 425f0298
......@@ -24,6 +24,7 @@
###############################################################################
import re
import os
import copy
import simplejson as json
import numpy as np
......@@ -70,6 +71,7 @@ from .scrollwidget import EditorListWidget
from .spinboxes import NumpySpinBox
from .editor import AbstractAssetEditor
from .dialogs import AssetCreationDialog
PARAMETER_TYPE_KEY = "parameter_type"
......@@ -125,6 +127,7 @@ class FieldPresenceFilterProxyModel(QSortFilterProxyModel):
self.must_have = must_have
self.value = value
self.prog = re.compile(value) if value is not None else None
self.invalidate()
def filterAcceptsRow(self, source_row, source_parent):
"""Filter assets based on whether the field configured must or must not
......@@ -1108,6 +1111,186 @@ class ExperimentEditor(AbstractAssetEditor):
algorithms.add(editor.evaluator_properties_editor.algorithm)
self.globalparameters_widget.setup(algorithms)
def _createNewAsset(self, creation_type, asset_info):
    """Re-implementation of AbstractAssetEditor._createNewAsset.

    For AssetCreationDialog.NEW, build a fresh experiment declaration from
    the selected toolchain, auto-filling each dataset, block, analyzer and
    loop with the first prefix asset whose inputs/outputs match in number.
    Data-format compatibility is NOT checked (see #185).

    :param creation_type: one of the AssetCreationDialog creation modes
    :param asset_info: for NEW, a ``(toolchain_name, experiment_name)`` tuple;
        otherwise whatever the base implementation expects
    :return: ``(Asset, declaration)`` tuple for the experiment to create
    """
    if creation_type != AssetCreationDialog.NEW:
        # Anything other than NEW is handled by the base editor.
        return super(ExperimentEditor, self)._createNewAsset(
            creation_type, asset_info
        )

    toolchain_name, experiment_name = asset_info
    toolchain = Asset(self.prefix_path, AssetType.TOOLCHAIN, toolchain_name)
    toolchain_declaration = toolchain.declaration

    experiment_declaration = {
        "schema_version": 1,
        "datasets": {},
        "blocks": {},
        "analyzers": {},
        "globals": {
            # Placeholder environment/queue; the user is expected to fix
            # these up in the editor afterwards.
            "environment": {"name": "dummy", "version": "0.0.0"},
            "queue": "queue",
        },
    }

    # datasets: pick the first database protocol/set whose outputs are all
    # present (by name) in the toolchain dataset's outputs.
    database_model = AssetModel()
    database_model.asset_type = AssetType.DATABASE
    database_model.prefix_path = self.prefix_path

    for dataset in toolchain_declaration["datasets"]:
        dataset_data = {"database": None, "protocol": None, "set": None}
        dataset_outputs = dataset["outputs"]
        done = False
        for row in range(database_model.rowCount()):
            database_name = database_model.index(row, 0).data()
            database = AssetType.DATABASE.klass(self.prefix_path, database_name)
            if not database.valid:
                continue
            for db_protocol in database.protocols:
                for set_name, set_data in database.sets(db_protocol).items():
                    set_outputs = list(set_data["outputs"])
                    if all(output in dataset_outputs for output in set_outputs):
                        dataset_data["database"] = database_name
                        dataset_data["protocol"] = db_protocol
                        dataset_data["set"] = set_name
                        done = True
                        break
                if done:
                    break
            if done:
                break
        experiment_declaration["datasets"][dataset["name"]] = dataset_data

    def build_block_data(block, algorithm_filter):
        """Map the first algorithm with matching input/output counts onto
        *block*, pairing endpoints purely by declaration order (no type
        compatibility check)."""
        block_data = {"algorithm": None, "inputs": {}}
        input_count = len(block["inputs"])
        output_count = len(block.get("outputs", []))
        if output_count:
            block_data["outputs"] = {}

        for row in range(algorithm_filter.rowCount()):
            asset_name = algorithm_filter.index(row, 0).data()
            asset = Asset(self.prefix_path, AssetType.ALGORITHM, asset_name)
            asset_declaration = asset.declaration

            inputs = {}
            outputs = {}
            for group in asset_declaration["groups"]:
                inputs.update(group["inputs"])
                outputs.update(group.get("outputs", {}))

            if len(inputs) == input_count and len(outputs) == output_count:
                input_keys = list(inputs)
                for index, input_ in enumerate(block["inputs"]):
                    block_data["inputs"][input_keys[index]] = input_
                if output_count:
                    output_keys = list(outputs)
                    for index, output in enumerate(block["outputs"]):
                        block_data["outputs"][output_keys[index]] = output
                block_data["algorithm"] = asset_name
                break

        return block_data

    # blocks: only sequential/autonomous algorithms qualify.
    # NOTE: fixed from "[sequential|autonomous]" — a character class matches
    # any single listed character, not the alternation intended here.
    algorithm_filter = FieldPresenceFilterProxyModel(
        "type", True, "(sequential|autonomous)"
    )
    algorithm_filter.setSourceModel(self.algorithm_model)

    for block in toolchain_declaration["blocks"]:
        experiment_declaration["blocks"][block["name"]] = build_block_data(
            block, algorithm_filter
        )

    # analyzers: reuse the same filter, now requiring a "results" field.
    algorithm_filter.setFieldFilter("results", True)
    for analyzer in toolchain_declaration["analyzers"]:
        experiment_declaration["analyzers"][analyzer["name"]] = build_block_data(
            analyzer, algorithm_filter
        )

    # loops: only present in schema version 2 experiments.
    if "loops" in toolchain_declaration:
        experiment_declaration["schema_version"] = 2
        experiment_declaration["loops"] = {}

        processor_filter = FieldPresenceFilterProxyModel(
            "type", True, ".*loop_processor"
        )
        processor_filter.setSourceModel(self.algorithm_model)
        evaluator_filter = FieldPresenceFilterProxyModel(
            "type", True, ".*loop_evaluator"
        )
        evaluator_filter.setSourceModel(self.algorithm_model)

        def _strip_prefix(loop, prefix):
            """Extract the prefixed inputs/outputs/algorithm entries of a
            toolchain loop as an un-prefixed pseudo-block."""
            wanted = {
                f"{prefix}inputs",
                f"{prefix}outputs",
                f"{prefix}algorithm",
            }
            return {
                key[len(prefix):]: value
                for key, value in loop.items()
                if key in wanted
            }

        for loop in toolchain_declaration["loops"]:
            processor_data = build_block_data(
                _strip_prefix(loop, PROCESSOR_PREFIX), processor_filter
            )
            evaluator_data = build_block_data(
                _strip_prefix(loop, EVALUATOR_PREFIX), evaluator_filter
            )

            loop_data = {}
            loop_data.update(
                {
                    f"{PROCESSOR_PREFIX}{key}": value
                    for key, value in processor_data.items()
                }
            )
            loop_data.update(
                {
                    f"{EVALUATOR_PREFIX}{key}": value
                    for key, value in evaluator_data.items()
                }
            )
            experiment_declaration["loops"][loop["name"]] = loop_data

    # Experiments live under <user>/<toolchain>/<name> in the prefix.
    user = self.context.meta["config"].user
    asset = Asset(
        self.prefix_path,
        AssetType.EXPERIMENT,
        os.path.join(user, toolchain_name, experiment_name),
    )
    return asset, experiment_declaration
def _load_json(self, json_object):
"""Load the json object passed as parameter"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment