Commit bcccc06b authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Add a command for multi protocol (N-fold cross validation) analysis

parent 0ef7907e
Pipeline #21320 passed with stage
in 25 minutes and 49 seconds
......@@ -11,10 +11,12 @@ SCORE_FORMAT = (
CRITERIA = ('eer', 'min-hter', 'far')
names='FtA, FAR, FRR, FMR, FMNR, HTER',
criteria=CRITERIA, score_format=SCORE_FORMAT,
command='bob measure metrics'), criteria=CRITERIA)
command='bob measure metrics'),
def metrics(ctx, scores, evaluation, **kwargs):
process = figure.Metrics(ctx, scores, evaluation, load.split)
......@@ -59,3 +61,15 @@ def hist(ctx, scores, evaluation, **kwargs):
def evaluate(ctx, scores, evaluation, **kwargs):
ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs)
names='FtA, FAR, FRR, FMR, FMNR, HTER',
criteria=CRITERIA, score_format=SCORE_FORMAT,
command='bob measure multi-metrics'),
def multi_metrics(ctx, scores, evaluation, protocols_number, **kwargs):
ctx.meta['min_arg'] = protocols_number * (2 if evaluation else 1)
process = figure.MultiMetrics(ctx, scores, evaluation, load.split)
......@@ -21,7 +21,7 @@ def scores_argument(min_arg=1, force_eval=False, **kwargs):
min_arg : int
the minimum number of file needed to evaluate a system. For example,
PAD functionalities needs licit abd spoof and therefore min_arg = 2
vulnerability analysis needs licit and spoof and therefore min_arg = 2
......@@ -920,3 +920,67 @@ def evaluate_flow(ctx, scores, evaluation, metrics, roc, det, epc, hist,
click.echo("Evaluate successfully completed!")
click.echo("[plots] => %s" % (ctx.meta['output']))
def n_protocols_option(required=True, **kwargs):
    """Return a decorator that adds the ``--protocols-number`` option.

    The option value is normalized to its absolute value, stored in
    ``ctx.meta['protocols_number']`` for downstream commands, and then
    returned to click.

    Parameters
    ----------
    required : bool
        Whether the option must be supplied on the command line.
    **kwargs
        Extra keyword arguments forwarded to :func:`click.option`.
    """
    def decorator(func):
        def store_protocols(ctx, param, value):
            # Normalize negative input and remember the count on the context.
            count = abs(value)
            ctx.meta['protocols_number'] = count
            return count

        option = click.option(
            '-pn', '--protocols-number', type=click.INT,
            show_default=True, required=required,
            help='The number of protocols of cross validation.',
            callback=store_protocols, **kwargs)
        return option(func)
    return decorator
def multi_metrics_command(docstring, criteria=('eer', 'min-hter', 'far')):
    """Build a decorator for multi-protocol metrics commands.

    Parameters
    ----------
    docstring : str
        Help text to attach as the decorated command's docstring.
    criteria : tuple of str
        Accepted threshold criteria. Not used inside this factory; kept
        for interface compatibility with existing callers that pass it.

    Returns
    -------
    callable
        A decorator that sets ``docstring`` on the wrapped function.
    """
    import functools

    def custom_metrics_command(func):
        func.__doc__ = docstring

        # BUG FIX: the original returned a bare wrapper, so the docstring
        # assigned above (and func's __name__) never reached the object
        # handed back to the caller — the generated help text was lost.
        # functools.wraps copies __doc__, __name__, etc. onto the wrapper.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            return func(*args, **kwds)
        return wrapper
    return custom_metrics_command
MULTI_METRICS_HELP = """Multi protocol (cross-validation) metrics.
Prints a table that contains {names} for a given threshold criterion
({criteria}). The metrics are averaged over several protocols. The idea is
that each protocol corresponds to one fold in your cross-validation.
You need to provide as many development score files as the number of
protocols per system. You can also provide evaluation files along with dev
files. If evaluation scores are provided, you must use flag `--eval`. The
number of protocols must be provided using the `--protocols-number` option.
Resulting table format can be changed using the `--tablefmt`.
$ {command} -v {{p1,p2,p3}}/scores-dev
$ {command} -v -e {{p1,p2,p3}}/scores-{{dev,eval}}
$ {command} -v -e {{sys1,sys2}}/{{p1,p2,p3}}/scores-{{dev,eval}}
This diff is collapsed.
......@@ -74,6 +74,7 @@ setup(
'bob.measure.cli': [
'evaluate = bob.measure.script.commands:evaluate',
'metrics = bob.measure.script.commands:metrics',
'multi-metrics = bob.measure.script.commands:multi_metrics',
'roc = bob.measure.script.commands:roc',
'det = bob.measure.script.commands:det',
'epc = bob.measure.script.commands:epc',
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment