Commit 244983ae authored by Amir MOHAMMADI

Small improvements to the new metrics code

parent 25a71f54
@@ -168,20 +168,19 @@ def epc(ctx, scores, **kwargs):
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_ncols_option()
@common_options.criterion_option()
@common_options.no_line_option()
@common_options.hide_dev_option()
@common_options.far_option()
@common_options.no_line_option()
@common_options.thresholds_option()
@common_options.subplot_option()
@common_options.const_layout_option()
@common_options.print_filenames_option()
@common_options.legends_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.no_legend_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
@@ -190,13 +189,11 @@ def hist(ctx, scores, evaluation, **kwargs):
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with dev files. If
evaluation scores are provided, you must use flag `--eval`.
By default, when eval-scores are given, only eval-scores histograms are
displayed, with the threshold line computed from dev-scores.
evaluation scores are provided, you must use the `--eval` flag. The
threshold is always computed from development score files.
Examples:
$ bob measure hist -v dev-scores
$ bob measure hist -e -v dev-scores1 eval-scores1 dev-scores2
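Every @common_options decorator stacked on hist above follows the same factory pattern that appears later in this diff for criterion_option, far_option, and legend_loc_option: a function that builds a click.option whose callback mirrors the parsed value into ctx.meta. Below is a minimal sketch of that pattern as a hypothetical reconstruction of n_bins_option; the flag letters, default, and help text are assumptions, not the actual bob.measure definition:

import click

def n_bins_option(dflt=20, **kwargs):
    '''Attach an --n-bins option and mirror its value into ctx.meta.'''
    def custom_n_bins_option(func):
        def callback(ctx, param, value):
            # the command body reads this value back from ctx.meta later
            ctx.meta['n_bins'] = value
            return value
        return click.option(
            '-b', '--n-bins', type=click.INT, default=dflt,
            show_default=True, help='Number of bins for the histograms.',
            callback=callback, **kwargs)(func)
    return custom_n_bins_option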
@@ -215,10 +212,11 @@ def hist(ctx, scores, evaluation, **kwargs):
@common_options.table_option()
@common_options.eval_option()
@common_options.criterion_option()
@common_options.far_option()
@common_options.output_log_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.lines_at_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@@ -229,22 +227,23 @@ def evaluate(ctx, scores, evaluation, **kwargs):
'''Runs error analysis on score sets
\b
1. Computes the threshold using either EER or min. HTER criteria on
1. Computes the threshold using a criterion (EER by default) on
development set scores
2. Applies the above threshold on evaluation set scores to compute the
HTER, if a eval-score set is provided
3. Reports error rates on the console
HTER if an eval-score set (use `--eval`) is provided.
3. Reports error rates on the console or in a log file.
4. Plots ROC, EPC, DET curves and score distributions to a multi-page PDF
file
You need to provide 2 score files for each biometric system in this order:
You need to provide 1 or 2 score files for each biometric system in this
order:
\b
* development scores
* evaluation scores
Examples:
$ bob measure evaluate -v dev-scores
$ bob measure evaluate -e -v scores-dev1 scores-eval1 scores-dev2
@@ -252,9 +251,9 @@ def evaluate(ctx, scores, evaluation, **kwargs):
$ bob measure evaluate -e -v /path/to/sys-{1,2,3}/scores-{dev,eval}
$ bob measure evaluate -e -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
$ bob measure evaluate -v -l metrics.txt -o my_plots.pdf dev-scores
'''
# first time erase if existing file
# open_mode is always write in this command.
ctx.meta['open_mode'] = 'w'
criterion = ctx.meta.get('criterion')
if criterion is not None:
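For readers unfamiliar with the library, the three numbered steps in the evaluate docstring map onto the bob.measure Python API roughly as follows. This is a hand-written sketch, assuming four-column score files and the eer_threshold/farfrr functions; the file names are placeholders:

import bob.measure
from bob.measure.load import split_four_column

# 1. compute the threshold on the development set (EER criterion here)
dev_neg, dev_pos = split_four_column('scores-dev')
threshold = bob.measure.eer_threshold(dev_neg, dev_pos)

# 2. apply that same threshold to the evaluation set
eval_neg, eval_pos = split_four_column('scores-eval')
far, frr = bob.measure.farfrr(eval_neg, eval_pos, threshold)

# 3. report the half total error rate on the console
print('HTER: %.2f%%' % (50 * (far + frr)))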
@@ -165,7 +165,8 @@ def lines_at_option(dflt='1e-3', **kwargs):
return list_float_option(
name='lines-at', short_name='la',
desc='If given, draw vertical lines at the given axis positions. '
'Your values must be separated with a comma (,) without space.',
'Your values must be separated with a comma (,) without spaces. '
'This option only applies to ROC and DET curves.',
nitems=None, dflt=dflt, **kwargs
)
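list_float_option itself is not shown in this diff; here is a plausible sketch of how such an option can parse its comma-separated value in a click callback (the function name and error message are assumptions):

import click

def parse_lines_at(ctx, param, value):
    '''Parse a string such as "1e-3,1e-2" into [0.001, 0.01].'''
    if value is None:
        return None
    try:
        floats = [float(v) for v in value.split(',')]
    except ValueError:
        raise click.BadParameter(
            'values must be floats separated by a comma (,) without spaces')
    ctx.meta['lines_at'] = floats  # commands read the list back from ctx.meta
    return floats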
@@ -354,6 +355,7 @@ def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
def custom_criterion_option(func):
list_accepted_crit = lcriteria if lcriteria is not None else \
['eer', 'min-hter', 'far']
def callback(ctx, param, value):
if value not in list_accepted_crit:
raise click.BadParameter('Incorrect value for `--criterion`. '
@@ -365,7 +367,8 @@ def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
'-c', '--criterion', default='eer',
help='Criterion to compute plots and '
'metrics: %s' % ', '.join(list_accepted_crit),
callback=callback, is_eager=True, **kwargs)(func)
callback=callback, is_eager=True, show_default=True,
**kwargs)(func)
return custom_criterion_option
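The is_eager=True flag matters here: click processes eager options before regular ones, so the --criterion callback has already stored its value in ctx.meta (the same ctx.meta.get('criterion') read in the evaluate command above) by the time other callbacks run. A minimal sketch of a callback that relies on that ordering; the validation logic is an assumption, not bob.measure code:

import click

def far_value_callback(ctx, param, value):
    # --criterion is eager, so ctx.meta['criterion'] is already filled in
    if value is not None and ctx.meta.get('criterion') != 'far':
        raise click.BadParameter(
            '`--far-value` only makes sense with `--criterion far`')
    ctx.meta['far_value'] = value
    return value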
@@ -379,7 +382,8 @@ def far_option(**kwargs):
return value
return click.option(
'-f', '--far-value', type=click.FLOAT, default=None,
help='The FAR value for which to compute threshold',
help='The FAR value for which to compute the threshold. This option '
'must be used alongside `--criterion far`.',
callback=callback, show_default=True, **kwargs)(func)
return custom_far_option
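Putting the two options together, a usage example in the style of the docstrings above (the score file name is a placeholder):

$ bob measure evaluate -v --criterion far --far-value 0.001 dev-scores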
@@ -434,7 +438,8 @@ def legend_loc_option(dflt='best', **kwargs):
'''Get the legend location of the plot'''
def custom_legend_loc_option(func):
def callback(ctx, param, value):
ctx.meta['legend_loc'] = value.replace('-', ' ') if value else value
ctx.meta['legend_loc'] = value.replace(
'-', ' ') if value else value
return value
return click.option(
'-lc', '--legend-loc', default=dflt, show_default=True,
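The replace('-', ' ') call exists because the CLI takes dash-separated legend locations while matplotlib expects space-separated ones. A small illustration of the conversion; the plot itself is made up:

import matplotlib.pyplot as plt

cli_value = 'upper-right'            # as typed after -lc on the command line
loc = cli_value.replace('-', ' ')    # 'upper right', as matplotlib expects

plt.plot([0, 1], [0, 1], label='system 1')
plt.legend(loc=loc)
plt.savefig('legend_loc_example.pdf')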
@@ -52,7 +52,6 @@ class MeasureBase(object):
func_load : Function that is used to load the input files
"""
self._scores = scores
self._min_arg = ctx.meta.get('min_arg', 1)
self._ctx = ctx
self.func_load = func_load
self._legends = ctx.meta.get('legends')