experiment.py 61.3 KB
Newer Older
André Anjos's avatar
André Anjos committed
1
2
3
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

Samuel GAIST's avatar
Samuel GAIST committed
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
###################################################################################
#                                                                                 #
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/               #
# Contact: beat.support@idiap.ch                                                  #
#                                                                                 #
# Redistribution and use in source and binary forms, with or without              #
# modification, are permitted provided that the following conditions are met:     #
#                                                                                 #
# 1. Redistributions of source code must retain the above copyright notice, this  #
# list of conditions and the following disclaimer.                                #
#                                                                                 #
# 2. Redistributions in binary form must reproduce the above copyright notice,    #
# this list of conditions and the following disclaimer in the documentation       #
# and/or other materials provided with the distribution.                          #
#                                                                                 #
# 3. Neither the name of the copyright holder nor the names of its contributors   #
# may be used to endorse or promote products derived from this software without   #
# specific prior written permission.                                              #
#                                                                                 #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED   #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE    #
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL      #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR      #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER      #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,   #
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE   #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.            #
#                                                                                 #
###################################################################################
André Anjos's avatar
André Anjos committed
35
36


37
38
39
40
41
42
43
"""
==========
experiment
==========

Validation for experiments
"""
André Anjos's avatar
André Anjos committed
44
import collections
45
import itertools
Samuel GAIST's avatar
Samuel GAIST committed
46
47
import os

48
import simplejson as json
André Anjos's avatar
André Anjos committed
49
50
51
52

from . import algorithm
from . import database
from . import hash
Samuel GAIST's avatar
Samuel GAIST committed
53
54
55
from . import schema
from . import toolchain
from . import utils
André Anjos's avatar
André Anjos committed
56

57
EVALUATOR_PREFIX = "evaluator_"
58
PROCESSOR_PREFIX = "processor_"
59

André Anjos's avatar
André Anjos committed
60
61

class Storage(utils.Storage):
    """Resolves paths for experiments

    Parameters:

      prefix (str): Establishes the prefix of your installation.

      name (str): The name of the experiment object in the format
        ``<user>/<toolchain-user>/<toolchain-name>/<version>/<name>`` or
        ``<user>/<toolchain-name>/<version>/<name>``, in case ``<user>`` and
        ``<toolchain-user>`` are the same.
    """

    asset_type = "experiment"
    asset_folder = "experiments"

    def __init__(self, prefix, name):

        # valid labels have either 4 or 5 path components (3 or 4 separators)
        if name.count(os.sep) not in (3, 4):
            raise RuntimeError("invalid experiment label: `%s'" % name)

        # short form: <user> doubles as <toolchain-user>, so duplicate it to
        # obtain the canonical 5-component form
        parts = name.split(os.sep)
        if len(parts) == 4:
            name = os.path.join(parts[0], name)

        (
            self.username,
            self.toolchain_username,
            self.toolchain,
            self.version,
            self.name,
        ) = name.split(os.sep)

        self.label = name
        # replace the bare toolchain name by its full
        # <toolchain-user>/<toolchain-name>/<version> reference
        self.toolchain = os.path.join(
            self.toolchain_username, self.toolchain, self.version
        )
        self.prefix = prefix

        path = utils.hashed_or_simple(
            self.prefix, self.asset_folder, name, suffix=".json"
        )
        # drop the trailing ".json" - the base class appends per-file suffixes
        super(Storage, self).__init__(path[:-5])
André Anjos's avatar
André Anjos committed
106
107
108


class Experiment(object):
    """Experiments define the complete workflow for user test on the platform.

    Parameters:

      prefix (str): Establishes the prefix of your installation.

      data (:py:class:`object`, Optional): The piece of data representing the
        experiment. It must validate against the schema defined for toolchains.
        If a string is passed, it is supposed to be a valid path to an
        experiment in the designated prefix area. If ``None`` is passed, loads
        our default prototype for toolchains. If a tuple is passed (or a list),
        then we consider that the first element represents the experiment,
        while the second, the toolchain definition. The toolchain bit can be
        defined as a dictionary or as a string (pointing to a valid path in the
        designated prefix area).

      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
        dataformat names to loaded dataformats. This parameter is optional and,
        if passed, may greatly speed-up experiment loading times as dataformats
        that are already loaded may be re-used. If you use this parameter, you
        must guarantee that the cache is refreshed as appropriate in case the
        underlying dataformats change.

      database_cache (:py:class:`dict`, Optional): A dictionary mapping
        database names to loaded databases. This parameter is optional and, if
        passed, may greatly speed-up experiment loading times as databases that
        are already loaded may be re-used. If you use this parameter, you must
        guarantee that the cache is refreshed as appropriate in case the
        underlying databases change.

      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
        algorithm names to loaded algorithms. This parameter is optional and,
        if passed, may greatly speed-up experiment loading times as algorithms
        that are already loaded may be re-used. If you use this parameter, you
        must guarantee that the cache is refreshed as appropriate in case the
        underlying algorithms change.

      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
        names to loaded libraries. This parameter is optional and, if passed,
        may greatly speed-up library loading times as libraries that are
        already loaded may be re-used. If you use this parameter, you must
        guarantee that the cache is refreshed as appropriate in case the
        underlying libraries change.


    Attributes:

      storage (object): A simple object that provides information about file
        paths for this toolchain

      toolchain (beat.core.toolchain.Toolchain): The toolchain in which this
        experiment is based.

      databases (dict): A dictionary containing the names and
        :py:class:`beat.core.database.Database` pointers for all referenced
        databases.

      algorithms (dict): A dictionary containing the names and
        :py:class:`beat.core.algorithm.Algorithm` pointers for all referenced
        algorithms.

      datasets (dict): A dictionary containing the names and
        :py:class:`beat.core.database.Database` pointers for all datasets in
        this experiment.

      blocks (dict): A dictionary containing the names and
        :py:class:`beat.core.algorithm.Algorithm` pointers for all blocks in
        this experiment.

      analyzers (dict): A dictionary containing the names and
        :py:class:`beat.core.algorithm.Algorithm` pointers for all analyzers in
        this experiment.

      errors (list): A list of strings containing errors found while loading
        this experiment.

      data (dict): The original data for this experiment, as loaded by our JSON
        decoder.
    """
André Anjos's avatar
André Anjos committed
188

Samuel GAIST's avatar
Samuel GAIST committed
189
190
191
192
193
194
195
196
197
    def __init__(
        self,
        prefix,
        data,
        dataformat_cache=None,
        database_cache=None,
        algorithm_cache=None,
        library_cache=None,
    ):

        self.prefix = prefix

        # initializes the internal object cache
        self.toolchain = None

        self._label = None
        self.data = None
        self.errors = []
        self.storage = None

        # containers filled in by the various _check_* validation passes
        self.datasets = {}
        self.blocks = {}
        self.loops = {}
        self.analyzers = {}

        self.databases = {}
        self.algorithms = {}

        # when the caller did not supply shared caches, fall back to private
        # throw-away ones so assets are at least re-used within this load
        if database_cache is None:
            database_cache = {}
        if dataformat_cache is None:
            dataformat_cache = {}
        if algorithm_cache is None:
            algorithm_cache = {}
        if library_cache is None:
            library_cache = {}

        self._load(
            data, database_cache, dataformat_cache, algorithm_cache, library_cache
        )
Philip ABBET's avatar
Philip ABBET committed
225

Samuel GAIST's avatar
Samuel GAIST committed
226
227
228
    def _load(
        self, data, database_cache, dataformat_cache, algorithm_cache, library_cache
    ):
        """Loads the experiment"""

        self._label = None
        self.data = None
        self.errors = []

        if data is None:  # Invalid case
            # There can't be a prototype for experiments they must be
            # filled based on the toolchain and the content of the prefix
            raise RuntimeError("Experiments can't have default implementation")

        elif isinstance(data, (tuple, list)):  # the user has passed a tuple
            experiment_data, toolchain_data = data

        else:  # the user has passed a path-like object
            self.storage = Storage(self.prefix, data)
            self._label = self.storage.label
            experiment_data = self.storage.json.path
            toolchain_data = self.storage.toolchain
            if not self.storage.exists():
                self.errors.append("Experiment declaration file not found: %s" % data)
                return

        # this runs basic validation, including JSON loading if required
        self.data, self.errors = schema.validate("experiment", experiment_data)
        if self.errors:
            return  # don't proceed with the rest of validation

        # checks all internal aspects of the experiment
        self._check_datasets(database_cache, dataformat_cache)
        self._check_blocks(algorithm_cache, dataformat_cache, library_cache)
        self._check_loops(algorithm_cache, dataformat_cache, library_cache)
        self._check_analyzers(algorithm_cache, dataformat_cache, library_cache)
        self._check_global_parameters()
        self._load_toolchain(toolchain_data)
        if self.errors:
            return  # stop, if up to here there were problems

        # cross-checks all aspects of the experiment against the related
        # toolchain, bailing out at the first step that produces errors
        crosschecks = (
            self._crosscheck_toolchain_datasets,
            self._crosscheck_toolchain_blocks,
            self._crosscheck_toolchain_loops,
            self._crosscheck_toolchain_analyzers,
            lambda: self._crosscheck_connection_dataformats(dataformat_cache),
            self._crosscheck_block_algorithm_pertinence,
            self._crosscheck_loop_algorithm_pertinence,
        )
        for crosscheck in crosschecks:
            crosscheck()
            if self.errors:
                return
Philip ABBET's avatar
Philip ABBET committed
295
296
297
298

    def _check_datasets(self, database_cache, dataformat_cache):
        """checks all datasets are valid"""

        for dataset, properties in self.data["datasets"].items():

            dbname = properties["database"]

            if dbname in self.databases:
                # database already seen through another dataset
                db = self.databases[dbname]
                if db.errors:
                    continue  # problem already reported
            else:
                # load the database, re-using the user cache when possible
                db = database_cache.get(dbname)
                if db is None:
                    db = database.Database(self.prefix, dbname, dataformat_cache)
                    database_cache[dbname] = db

                self.databases[dbname] = db
                if db.errors:
                    self.errors.append(
                        "/datasets/%s: database `%s' is invalid" % (dataset, dbname)
                    )
                    continue

            # the referred protocol must exist on that database
            protoname = properties["protocol"]
            if protoname not in db.protocols:
                self.errors.append(
                    "/datasets/%s: cannot find protocol `%s' on "
                    "database `%s' - valid protocols are %s"
                    % (dataset, protoname, dbname, ", ".join(db.protocols.keys()))
                )
                continue

            # finally, the referred set must be inside that protocol
            setname = properties["set"]
            if setname not in db.sets(protoname):
                self.errors.append(
                    "/datasets/%s: cannot find set `%s' on "
                    "protocol `%s' from database `%s' - valid set names "
                    "are %s"
                    % (
                        dataset,
                        setname,
                        protoname,
                        dbname,
                        ", ".join(db.sets(protoname).keys()),
                    )
                )
                continue

            # everything checks out: register the dataset in our cache
            self.datasets[dataset] = dict(database=db, protocol=protoname, set=setname)
André Anjos's avatar
André Anjos committed
352

Philip ABBET's avatar
Philip ABBET committed
353
    def _check_blocks(self, algorithm_cache, dataformat_cache, library_cache):
        """checks all blocks are valid"""

        for blockname, block in self.data["blocks"].items():

            algoname = block["algorithm"]
            if algoname in self.algorithms:
                thisalgo = self.algorithms[algoname]
                if thisalgo.errors:
                    continue  # errors already reported on first encounter
            else:
                # loads the algorithm, re-using the user cache when possible
                if algoname in algorithm_cache:
                    thisalgo = algorithm_cache[algoname]
                else:
                    thisalgo = algorithm.Algorithm(
                        self.prefix, algoname, dataformat_cache, library_cache
                    )
                    algorithm_cache[algoname] = thisalgo

                self.algorithms[algoname] = thisalgo
                if thisalgo.errors:
                    self.errors.append(
                        "/blocks/%s: algorithm `%s' is invalid: %s"
                        % (blockname, algoname, "\n".join(thisalgo.errors))
                    )

            # every connected input must exist on the algorithm declaration
            # (the hasattr() guard protects against invalid algorithms)
            for algoin, blockin in block["inputs"].items():
                if hasattr(thisalgo, "input_map") and algoin not in thisalgo.input_map:
                    self.errors.append(
                        "/blocks/%s/inputs/%s: algorithm `%s' does not "
                        "have an input named `%s' - valid algorithm inputs are %s"
                        % (
                            blockname,
                            blockin,
                            algoname,
                            algoin,
                            ", ".join(thisalgo.input_map.keys()),
                        )
                    )

            # every connected output must exist on the algorithm declaration
            for algout, blockout in block["outputs"].items():
                if (
                    hasattr(thisalgo, "output_map")
                    and algout not in thisalgo.output_map
                ):
                    self.errors.append(
                        "/blocks/%s/outputs/%s: algorithm `%s' does not "
                        "have an output named `%s' - valid algorithm outputs are "
                        "%s"
                        % (
                            blockname,
                            blockout,
                            algoname,
                            algout,
                            ", ".join(thisalgo.output_map.keys()),
                        )
                    )

            # parallelization only makes sense for splittable algorithms
            if block.get("nb_slots", 1) > 1 and not thisalgo.splittable:
                self.errors.append(
                    "/blocks/%s/nb_slots: you have set the number of "
                    "slots for algorithm `%s' to %d, but it is not splittable"
                    % (blockname, thisalgo.name, block["nb_slots"])
                )

            # every user-set parameter value must convert to its declared type
            for parameter, value in block.get("parameters", {}).items():
                try:
                    thisalgo.clean_parameter(parameter, value)
                except Exception as e:
                    self.errors.append(
                        "/blocks/%s/parameters/%s: cannot convert "
                        "value `%s' to required type: %s"
                        % (blockname, parameter, value, e)
                    )

            self.blocks[blockname] = block

436
437
    def _check_loops(self, algorithm_cache, dataformat_cache, library_cache):
        """checks all loops are valid

        A loop declares two algorithms (a processor and an evaluator), named
        by the ``processor_``/``evaluator_`` key prefixes.  Both are loaded
        and their input/output connections validated.
        """

        loops = self.data.get("loops", {})

        for loopname, loop in loops.items():
            for key in [PROCESSOR_PREFIX, EVALUATOR_PREFIX]:
                algoname = loop[key + "algorithm"]
                if algoname not in self.algorithms:

                    # loads the algorithm, re-using the user cache if possible
                    if algoname in algorithm_cache:
                        thisalgo = algorithm_cache[algoname]
                    else:
                        thisalgo = algorithm.Algorithm(
                            self.prefix, algoname, dataformat_cache, library_cache
                        )
                        algorithm_cache[algoname] = thisalgo

                    self.algorithms[algoname] = thisalgo
                    if thisalgo.errors:
                        self.errors.append(
                            "/loops/%s: algorithm `%s' is invalid:\n%s"
                            % (loopname, algoname, "\n".join(thisalgo.errors))
                        )
                        continue

                else:
                    thisalgo = self.algorithms[algoname]
                    if thisalgo.errors:
                        continue  # already done

                # checks all inputs correspond
                for algoin, loop_input in loop[key + "inputs"].items():
                    if algoin not in thisalgo.input_map:
                        self.errors.append(
                            "/loop/%s/inputs/%s: algorithm `%s' does "
                            "not have an input named `%s' - valid algorithm inputs "
                            "are %s"
                            % (
                                loopname,
                                loop_input,
                                algoname,
                                algoin,
                                ", ".join(thisalgo.input_map.keys()),
                            )
                        )

                # checks all outputs correspond
                for algout, loop_output in loop[key + "outputs"].items():
                    if (
                        hasattr(thisalgo, "output_map")
                        and algout not in thisalgo.output_map
                    ):
                        self.errors.append(
                            "/loops/%s/outputs/%s: algorithm `%s' does not "
                            "have an output named `%s' - valid algorithm outputs are "
                            "%s"
                            % (
                                loopname,
                                loop_output,
                                algoname,
                                algout,
                                ", ".join(thisalgo.output_map.keys()),
                            )
                        )

            # checks if parallelization makes sense
            # NOTE(review): the two checks below run on the algorithm loaded
            # last by the loop above (the evaluator) - presumably intentional,
            # confirm against the loop execution model
            # fix: the first placeholder is the error path ("/loop/<name>"),
            # so it must be the loop name, not the algorithm name (compare
            # with _check_blocks and _check_analyzers)
            if loop.get("nb_slots", 1) > 1 and not thisalgo.splittable:
                self.errors.append(
                    "/loop/%s/nb_slots: you have set the number "
                    "of slots for algorithm `%s' to %d, but it is not "
                    "splittable" % (loopname, thisalgo.name, loop["nb_slots"])
                )

            # check parameter consistence
            for parameter, value in loop.get("parameters", {}).items():
                try:
                    thisalgo.clean_parameter(parameter, value)
                except Exception as e:
                    self.errors.append(
                        "/loop/%s/parameters/%s: cannot convert "
                        "value `%s' to required type: %s"
                        % (loopname, parameter, value, e)
                    )

            self.loops[loopname] = loop

Philip ABBET's avatar
Philip ABBET committed
523
    def _check_analyzers(self, algorithm_cache, dataformat_cache, library_cache):
        """checks all analyzers are valid"""

        for analyzername, analyzer in self.data["analyzers"].items():

            algoname = analyzer["algorithm"]
            if algoname in self.algorithms:
                thisalgo = self.algorithms[algoname]
                if thisalgo.errors:
                    continue  # errors already reported on first encounter
            else:
                # loads the algorithm, re-using the user cache when possible
                if algoname in algorithm_cache:
                    thisalgo = algorithm_cache[algoname]
                else:
                    thisalgo = algorithm.Algorithm(
                        self.prefix, algoname, dataformat_cache, library_cache
                    )
                    algorithm_cache[algoname] = thisalgo

                self.algorithms[algoname] = thisalgo
                if thisalgo.errors:
                    self.errors.append(
                        "/analyzers/%s: algorithm `%s' is invalid:\n%s"
                        % (analyzername, algoname, "\n".join(thisalgo.errors))
                    )
                    continue

            # every connected input must exist on the algorithm declaration
            for algoin, analyzerin in analyzer["inputs"].items():
                if algoin not in thisalgo.input_map:
                    self.errors.append(
                        "/analyzers/%s/inputs/%s: algorithm `%s' does "
                        "not have an input named `%s' - valid algorithm inputs "
                        "are %s"
                        % (
                            analyzername,
                            analyzerin,
                            algoname,
                            algoin,
                            ", ".join(thisalgo.input_map.keys()),
                        )
                    )

            # parallelization only makes sense for splittable algorithms
            if analyzer.get("nb_slots", 1) > 1 and not thisalgo.splittable:
                self.errors.append(
                    "/analyzer/%s/nb_slots: you have set the number "
                    "of slots for algorithm `%s' to %d, but it is not "
                    "splittable" % (analyzername, thisalgo.name, analyzer["nb_slots"])
                )

            # every user-set parameter value must convert to its declared type
            for parameter, value in analyzer.get("parameters", {}).items():
                try:
                    thisalgo.clean_parameter(parameter, value)
                except Exception as e:
                    self.errors.append(
                        "/analyzer/%s/parameters/%s: cannot convert "
                        "value `%s' to required type: %s"
                        % (analyzername, parameter, value, e)
                    )

            self.analyzers[analyzername] = analyzer

    def _check_global_parameters(self):
591
        """checks global parameters"""
Philip ABBET's avatar
Philip ABBET committed
592

Samuel GAIST's avatar
Samuel GAIST committed
593
594
595
        for algoname, parameters in self.data["globals"].items():
            if algoname in ["queue", "environment"]:
                continue  # skip that
Philip ABBET's avatar
Philip ABBET committed
596
597
598

            # else, algorithms must be loaded in memory already
            if algoname not in self.algorithms:
Samuel GAIST's avatar
Samuel GAIST committed
599
600
601
602
603
                self.errors.append(
                    "/globals/%s: found parameter section for "
                    "algorithm `%s' which is not used anywhere in the "
                    "experiment" % (algoname, algoname)
                )
Philip ABBET's avatar
Philip ABBET committed
604
605
606
607
                continue

            # ...and each parameter must validate
            thisalgo = self.algorithms[algoname]
Samuel GAIST's avatar
Samuel GAIST committed
608
609
            if not thisalgo.valid:
                continue  # doesn't even check
Philip ABBET's avatar
Philip ABBET committed
610
611
612
613
            for parameter, value in parameters.items():
                try:
                    thisalgo.clean_parameter(parameter, value)
                except Exception as e:
Samuel GAIST's avatar
Samuel GAIST committed
614
615
616
617
618
                    self.errors.append(
                        "/globals/%s/%s: cannot convert "
                        "value `%s' to required type: %s"
                        % (algoname, parameter, value, e)
                    )
Philip ABBET's avatar
Philip ABBET committed
619
620

    def _load_toolchain(self, data):
        """Instantiates the toolchain object and records validation errors.

        Parameters:
            data: toolchain declaration handed over to
                ``toolchain.Toolchain`` together with ``self.prefix``.
        """

        # finally, we load the toolchain and cross-validate it
        self.toolchain = toolchain.Toolchain(self.prefix, data)

        if not self.toolchain.errors:
            return

        # format the toolchain's own error list as an indented bullet list
        details = "\n  * ".join(self.toolchain.errors)

        if self.storage is not None:
            # a storage is available, so the toolchain can be named
            self.errors.append(
                "toolchain `%s' is not valid, because:\n  * %s"
                % (self.storage.toolchain, details)
            )
        else:
            self.errors.append(
                "toolchain data is not valid, because:\n  * %s" % details
            )

    def _crosscheck_toolchain_datasets(self):
640
        """There must exist a 1-to-1 relation to existing datasets"""
Philip ABBET's avatar
Philip ABBET committed
641
642
643
644
645

        toolchain_datasets = self.toolchain.datasets

        if sorted(toolchain_datasets.keys()) != sorted(self.datasets.keys()):

Samuel GAIST's avatar
Samuel GAIST committed
646
647
648
649
650
651
652
653
            self.errors.append(
                "mismatch between the toolchain dataset names (%s)"
                " and the experiment's (%s)"
                % (
                    ", ".join(sorted(toolchain_datasets.keys())),
                    ", ".join(sorted(self.datasets.keys())),
                )
            )
Philip ABBET's avatar
Philip ABBET committed
654
655
656
657

        # toolchain must use a subset of the dataset endpoints
        for dataset_name, dataset in self.datasets.items():

Samuel GAIST's avatar
Samuel GAIST committed
658
659
660
661
662
663
            db_endpts = set(
                dataset["database"]
                .set(dataset["protocol"], dataset["set"])["outputs"]
                .keys()
            )
            tc_endpts = set(toolchain_datasets[dataset_name]["outputs"])
Philip ABBET's avatar
Philip ABBET committed
664
665
666

            if not tc_endpts.issubset(db_endpts):

Samuel GAIST's avatar
Samuel GAIST committed
667
668
669
670
671
672
673
674
675
676
677
678
679
680
                self.errors.append(
                    "/datasets/%s: toolchain endpoints (%s) must "
                    "be a subset of what is available on database `%s', "
                    "protocol `%s', "
                    "set `%s' outputs (%s)"
                    % (
                        dataset_name,
                        ", ".join(tc_endpts),
                        dataset["database"].name,
                        dataset["protocol"],
                        dataset["set"],
                        ", ".join(db_endpts),
                    )
                )
Philip ABBET's avatar
Philip ABBET committed
681
682

    def _crosscheck_toolchain_blocks(self):
683
        """There must exist a 1-to-1 relation to existing blocks"""
Philip ABBET's avatar
Philip ABBET committed
684
685
686
687
688

        toolchain_blocks = self.toolchain.blocks

        if sorted(toolchain_blocks.keys()) != sorted(self.blocks.keys()):

Samuel GAIST's avatar
Samuel GAIST committed
689
690
691
692
693
694
695
696
            self.errors.append(
                "mismatch between the toolchain block names (%s)"
                " and the experiment's (%s)"
                % (
                    ", ".join(sorted(toolchain_blocks.keys())),
                    ", ".join(sorted(self.blocks.keys())),
                )
            )
Philip ABBET's avatar
Philip ABBET committed
697
698
699
700

        # the number of block endpoints and the toolchain's must match
        for block_name, block in self.blocks.items():

Samuel GAIST's avatar
Samuel GAIST committed
701
            if len(block["inputs"]) != len(toolchain_blocks[block_name]["inputs"]):
Philip ABBET's avatar
Philip ABBET committed
702

Samuel GAIST's avatar
Samuel GAIST committed
703
704
705
706
707
708
709
710
711
                self.errors.append(
                    "/blocks/%s: toolchain blocks has %d inputs "
                    "while the experiment has %d inputs"
                    % (
                        block_name,
                        len(toolchain_blocks[block_name]["inputs"]),
                        len(block["inputs"]),
                    )
                )
Philip ABBET's avatar
Philip ABBET committed
712

713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
    def _crosscheck_toolchain_loops(self):
        """There must exist a 1-to-1 relation to existing loops.

        Verifies the toolchain and the experiment declare the same loop
        names, and that the processor and evaluator input counts match on
        both sides.  All problems are appended to ``self.errors``.
        """

        toolchain_loops = self.toolchain.loops

        if sorted(toolchain_loops.keys()) != sorted(self.loops.keys()):

            self.errors.append(
                "mismatch between the toolchain loop names (%s)"
                " and the experiment's (%s)"
                % (
                    ", ".join(sorted(toolchain_loops.keys())),
                    ", ".join(sorted(self.loops.keys())),
                )
            )

        # the number of block endpoints and the toolchain's must match
        for block_name, block in self.loops.items():

            # a name mismatch was already reported above - indexing the
            # toolchain with an unknown name would raise KeyError below
            if block_name not in toolchain_loops:
                continue

            # loops carry two algorithm halves; check each half's inputs
            for prefix in [PROCESSOR_PREFIX, EVALUATOR_PREFIX]:
                block_input_count = len(block[prefix + "inputs"])
                toolchain_input_block = len(
                    toolchain_loops[block_name][prefix + "inputs"]
                )
                if block_input_count != toolchain_input_block:

                    self.errors.append(
                        "/loops/{}: toolchain loops has {} {}inputs "
                        "while the experiment has {} inputs".format(
                            block_name, toolchain_input_block, prefix, block_input_count
                        )
                    )

Philip ABBET's avatar
Philip ABBET committed
745
    def _crosscheck_toolchain_analyzers(self):
746
        """There must exist a 1-to-1 relation to existing analyzers"""
Philip ABBET's avatar
Philip ABBET committed
747
748
749
750
751

        toolchain_analyzers = self.toolchain.analyzers

        if sorted(toolchain_analyzers.keys()) != sorted(self.analyzers.keys()):

Samuel GAIST's avatar
Samuel GAIST committed
752
753
754
755
756
757
758
759
            self.errors.append(
                "mismatch between the toolchain analyzer names "
                "(%s) and the experiment's (%s)"
                % (
                    ", ".join(sorted(toolchain_analyzers.keys())),
                    ", ".join(sorted(self.analyzers.keys())),
                )
            )
Philip ABBET's avatar
Philip ABBET committed
760
761
762
763

        # the number of analyzer endpoints and the toolchain's must match
        for analyzer_name, analyzer in self.analyzers.items():

Samuel GAIST's avatar
Samuel GAIST committed
764
765
766
767
768
769
770
771
772
773
774
775
776
            if len(analyzer["inputs"]) != len(
                toolchain_analyzers[analyzer_name]["inputs"]
            ):

                self.errors.append(
                    "/analyzers/%s: toolchain analyzers has %d "
                    "inputs while the experiment has %d inputs"
                    % (
                        analyzer_name,
                        len(toolchain_analyzers[analyzer_name]["inputs"]),
                        len(analyzer["inputs"]),
                    )
                )
Philip ABBET's avatar
Philip ABBET committed
777
778

    def _crosscheck_connection_dataformats(self, dataformat_cache):
        """Connected endpoints must use the same dataformat as defined by the
        generator and receptor algorithms.

        For every toolchain connection, resolves the dataformat produced at
        the source endpoint (dataset output, block output or loop output)
        and the dataformat expected at the destination endpoint (block,
        loop or analyzer input).  A connection is accepted when both formats
        are equal, or when the receiving format is a parent of the producing
        one; anything else is recorded on ``self.errors``.

        Parameters:
            dataformat_cache (dict): maps dataformat names to already-loaded
                dataformat objects; both endpoint formats are looked up here.
        """

        for connection in self.toolchain.connections:

            # endpoint addresses are "<node>.<port>"; maxsplit=1 keeps any
            # extra dots inside the port name intact
            from_endpt = connection["from"].split(".", 1)

            if from_endpt[0] in self.datasets:
                dataset = self.datasets[from_endpt[0]]
                # dataset outputs carry the dataformat name directly
                from_dtype = dataset["database"].set(
                    dataset["protocol"], dataset["set"]
                )["outputs"][from_endpt[1]]
                from_name = "dataset"

            elif from_endpt[0] in self.blocks:  # it is a block
                block = self.blocks[from_endpt[0]]
                mapping = block["outputs"]
                # invert algorithm-output -> toolchain-output to look up by
                # the toolchain-side name
                imapping = dict(zip(mapping.values(), mapping.keys()))
                algout = imapping[from_endpt[1]]  # name of output on algorithm
                from_dtype = self.algorithms[block["algorithm"]].output_map[algout]
                from_name = "block"

            elif from_endpt[0] in self.loops:
                loop = self.loops[from_endpt[0]]
                # a loop has two algorithm halves; probe each half's output
                # mapping until the endpoint name is found
                for prefix in [PROCESSOR_PREFIX, EVALUATOR_PREFIX]:
                    mapping = loop[prefix + "outputs"]
                    imapping = dict(zip(mapping.values(), mapping.keys()))
                    if from_endpt[1] in imapping:
                        algout = imapping[from_endpt[1]]  # name of output on algorithm
                        from_dtype = self.algorithms[
                            loop[prefix + "algorithm"]
                        ].output_map[algout]
                        break
                # NOTE(review): if neither half declares this output,
                # from_dtype stays unbound and raises below - presumably an
                # earlier crosscheck guarantees a match; confirm
                from_name = "loop"

            else:
                self.errors.append("Unknown endpoint %s" % from_endpt[0])
                continue

            to_endpt = connection["to"].split(".", 1)

            if to_endpt[0] in self.blocks:
                block = self.blocks[to_endpt[0]]
                mapping = block["inputs"]
                # invert algorithm-input -> toolchain-input mapping
                imapping = dict(zip(mapping.values(), mapping.keys()))
                algoin = imapping[to_endpt[1]]  # name of input on algorithm
                to_dtype = self.algorithms[block["algorithm"]].input_map[algoin]
                to_name = "block"

            elif to_endpt[0] in self.loops:
                loop = self.loops[to_endpt[0]]
                # probe the processor then the evaluator input mappings
                for prefix in [PROCESSOR_PREFIX, EVALUATOR_PREFIX]:
                    mapping = loop[prefix + "inputs"]
                    imapping = dict(zip(mapping.values(), mapping.keys()))
                    if to_endpt[1] in imapping:
                        algoin = imapping[to_endpt[1]]  # name of input on algorithm
                        to_dtype = self.algorithms[
                            loop[prefix + "algorithm"]
                        ].input_map[algoin]
                        break
                # NOTE(review): same unbound-variable caveat as for loop
                # outputs above if no half declares this input
                to_name = "loop"

            elif to_endpt[0] in self.analyzers:  # it is an analyzer
                analyzer = self.analyzers[to_endpt[0]]
                mapping = analyzer["inputs"]
                imapping = dict(zip(mapping.values(), mapping.keys()))
                algoin = imapping[to_endpt[1]]  # name of input on algorithm
                to_dtype = self.algorithms[analyzer["algorithm"]].input_map[algoin]
                to_name = "analyzer"

            else:
                self.errors.append("Unknown endpoint %s" % to_endpt[0])
                continue

            if from_dtype == to_dtype:
                continue  # OK

            # The other acceptable condition is that the receiving end is a
            #  subset of the producing end. This can happen if the producing end
            # is a subclass of the receiving end - that is, the receiving end
            # uses a data format that is a parent of the producing end.

            from_format = dataformat_cache[from_dtype]
            to_format = dataformat_cache[to_dtype]

            if to_format.isparent(from_format):
                continue  # OK

            # If you get to this point, then an error must be issued
            self.errors.append(
                "mismatch in data type at connection (%s) %s "
                "-> (%s) %s - start point uses `%s' while end point "
                "uses `%s' (must be equal or a parent format)"
                % (
                    from_name,
                    ".".join(from_endpt),
                    to_name,
                    ".".join(to_endpt),
                    from_dtype,
                    to_dtype,
                )
            )
André Anjos's avatar
André Anjos committed
881

Philip ABBET's avatar
Philip ABBET committed
882
    def _crosscheck_block_algorithm_pertinence(self):
883
884
885
        """The number of groups and the input-output connectivity must respect
        the individual synchronization channels and the block's.
        """
André Anjos's avatar
André Anjos committed
886

Samuel GAIST's avatar
Samuel GAIST committed
887
        for name, block in self.data["blocks"].items():
André Anjos's avatar
André Anjos committed
888

889
890
891
892
            # filter connections that end on the visited block - remember, each
            # input is checked for receiving a single input connection. It is
            # illegal to connect an input multiple times. At this point, you
            # already know that is not the case.
Samuel GAIST's avatar
Samuel GAIST committed
893
894
895
896
897
            input_connections = [
                k["channel"]
                for k in self.toolchain.connections
                if k["to"].startswith(name + ".")
            ]
André Anjos's avatar
André Anjos committed
898

899
900
            # filter connections that start on the visited block, retain output
            # name so we can check synchronization and then group
Samuel GAIST's avatar
Samuel GAIST committed
901
902
903
904
905
906
907
            output_connections = set(
                [
                    (k["from"].replace(name + ".", ""), k["channel"])
                    for k in self.toolchain.connections
                    if k["from"].startswith(name + ".")
                ]
            )
Philip ABBET's avatar
Philip ABBET committed
908
909

            output_connections = [k[1] for k in output_connections]
André Anjos's avatar
André Anjos committed
910

911
912
            # note: dataformats have already been checked - only need to check
            # for the grouping properties between inputs and outputs
Philip ABBET's avatar
Philip ABBET committed
913
914

            # create channel groups
Samuel GAIST's avatar
Samuel GAIST committed
915
            chain_in = collections.Counter(input_connections)
Philip ABBET's avatar
Philip ABBET committed
916
            chain_out = collections.Counter(output_connections)
Samuel GAIST's avatar
Samuel GAIST committed
917
            chain_groups = [(v, chain_out.get(k, 0)) for k, v in chain_in.items()]
Philip ABBET's avatar
Philip ABBET committed
918
919

            # now check the algorithm for conformance
Samuel GAIST's avatar
Samuel GAIST committed
920
921
922
923
            algo_groups = self.algorithms[self.blocks[name]["algorithm"]].groups
            algo_groups = [
                (len(k["inputs"]), len(k.get("outputs", []))) for k in algo_groups
            ]
Philip ABBET's avatar
Philip ABBET committed
924
            if collections.Counter(chain_groups) != collections.Counter(algo_groups):
Samuel GAIST's avatar
Samuel GAIST committed
925
926
927
928
929
                self.errors.append(
                    "synchronization mismatch in input/output "
                    "grouping between block `%s' and algorithm `%s'"
                    % (name, self.blocks[name]["algorithm"])
                )
Philip ABBET's avatar
Philip ABBET committed
930

931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
    def _crosscheck_loop_algorithm_pertinence(self):
        """The number of groups and the input-output connectivity must respect
        the individual synchronization channels and the block's.
        """

        loops = self.data.get("loops", {})
        for name, loop in loops.items():

            # filter connections that end on the visited block - remember, each
            # input is checked for receiving a single input connection. It is
            # illegal to connect an input multiple times. At this point, you
            # already know that is not the case.
            input_connections = [
                k["channel"]
                for k in self.toolchain.connections
                if k["to"].startswith(name + ".")
            ]

            # filter connections that start on the visited block, retain output
            # name so we can check synchronization and then group
            output_connections = set(
                [
                    (k["from"].replace(name + ".", ""), k["channel"])
                    for k in self.toolchain.connections
                    if k["from"].startswith(name + ".")
                ]
            )

            output_connections = [k[1] for k in output_connections]

            # note: dataformats have already been checked - only need to check
            # for the grouping properties between inputs and outputs

            # create channel groups
            chain_in = collections.Counter(input_connections)
            chain_out = collections.Counter(output_connections)
            chain_groups_count = [(v, chain_out.get(k, 0)) for k, v in chain_in.items()]

            # now check the algorithms for conformance
970
            processor_algorithm_name = loop[PROCESSOR_PREFIX + "algorithm"]
971
            evaluator_algorithm_name = loop[EVALUATOR_PREFIX + "algorithm"]
972

973
974
975
            processor_algo_groups_list = self.algorithms[
                processor_algorithm_name
            ].groups
976
977
978
            evaluator_algo_groups_list = self.algorithms[
                evaluator_algorithm_name
            ].groups
979

980
            groups_count = []
981
982
            for processor_algo_groups, evaluator_algo_groups in itertools.zip_longest(
                processor_algo_groups_list, evaluator_algo_groups_list
983
984
985
            ):
                inputs = 0
                outputs = 0
986
987
988
                if processor_algo_groups:
                    inputs = len(processor_algo_groups["inputs"])
                    outputs = len(processor_algo_groups.get("outputs", []))
989
990
991
992
993
994

                if evaluator_algo_groups:
                    inputs += len(evaluator_algo_groups["inputs"])
                    outputs += len(evaluator_algo_groups.get("outputs", []))

                groups_count.append((inputs, outputs))
995
996
997
998
999
1000

            if collections.Counter(chain_groups_count) != collections.Counter(
                groups_count
            ):
                self.errors.append(
                    "synchronization mismatch in input/output "