From 5df44007b75b1732dc3f66f736fd770942957725 Mon Sep 17 00:00:00 2001
From: Theophile GENTILHOMME
Date: Thu, 3 May 2018 08:54:49 +0200
Subject: [PATCH] rename hter -> min-hter

---
 bob/measure/script/commands.py       | 8 ++++----
 bob/measure/script/common_options.py | 6 +++---
 bob/measure/test_script.py           | 3 ++-
 bob/measure/utils.py                 | 4 ++--
 doc/guide.rst                        | 2 +-
 5 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/bob/measure/script/commands.py b/bob/measure/script/commands.py
index 61238c1..d2c6333 100644
--- a/bob/measure/script/commands.py
+++ b/bob/measure/script/commands.py
@@ -22,7 +22,7 @@ from bob.extension.scripts.click_helper import (verbosity_option,
 @click.pass_context
 def metrics(ctx, scores, evaluation, **kwargs):
     """Prints a table that contains FtA, FAR, FRR, FMR, FMNR, HTER for a given
-    threshold criterion (eer or hter).
+    threshold criterion (eer or min-hter).
 
     You need to provide one or more development score file(s) for each experiment.
     You can also provide evaluation files along with dev files. If only dev scores
@@ -189,7 +189,7 @@ def hist(ctx, scores, evaluation, **kwargs):
 
         $ bob measure hist dev-scores1 eval-scores1 dev-scores2 eval-scores2
 
-        $ bob measure hist --criterion hter --show-dev dev-scores1 eval-scores1
+        $ bob measure hist --criterion min-hter --show-dev dev-scores1 eval-scores1
     """
     process = figure.Hist(ctx, scores, evaluation, load.split)
     process.run()
@@ -247,8 +247,8 @@ def evaluate(ctx, scores, evaluation, **kwargs):
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     # second time, appends the content
     ctx.meta['open_mode'] = 'a'
-    click.echo("Computing metrics with HTER...")
-    ctx.meta['criterion'] = 'hter'  # no criterion passed in evaluate
+    click.echo("Computing metrics with min-HTER...")
+    ctx.meta['criterion'] = 'min-hter'  # no criterion passed in evaluate
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     if 'log' in ctx.meta:
         click.echo("[metrics] => %s" % ctx.meta['log'])
diff --git a/bob/measure/script/common_options.py b/bob/measure/script/common_options.py
index 764dbdb..ccaa0f7 100644
--- a/bob/measure/script/common_options.py
+++ b/bob/measure/script/common_options.py
@@ -273,7 +273,7 @@ def output_log_metric_option(**kwargs):
             callback=callback, **kwargs)(func)
     return custom_output_log_file_option
 
-def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
+def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
     """Get option flag to tell which criteriom is used (default:eer)
 
     Parameters
@@ -284,7 +284,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
     def custom_criterion_option(func):
         def callback(ctx, param, value):
             list_accepted_crit = lcriteria if lcriteria is not None else \
-                ['eer', 'hter', 'far']
+                ['eer', 'min-hter', 'far']
             if value not in list_accepted_crit:
                 raise click.BadParameter('Incorrect value for `--criterion`. '
                                          'Must be one of [`%s`]' %
@@ -294,7 +294,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
         return click.option(
             '-c', '--criterion', default='eer',
             help='Criterion to compute plots and '
-            'metrics: `eer` (default), `hter`',
+            'metrics: `eer`, `min-hter` or `far`',
             callback=callback, is_eager=True ,**kwargs)(func)
     return custom_criterion_option
 
diff --git a/bob/measure/test_script.py b/bob/measure/test_script.py
index 15311ba..307afe0 100644
--- a/bob/measure/test_script.py
+++ b/bob/measure/test_script.py
@@ -130,7 +130,8 @@ def test_hist():
         assert result.exit_code == 0, (result.exit_code, result.output)
 
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion', 'hter',
+        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion',
+                                               'min-hter',
                                                '--output', 'HISTO.pdf', '-b',
                                                '30,100', dev1, dev2])
         if result.output:
diff --git a/bob/measure/utils.py b/bob/measure/utils.py
index 15ff8a8..64c5527 100644
--- a/bob/measure/utils.py
+++ b/bob/measure/utils.py
@@ -90,7 +90,7 @@ def get_thres(criter, neg, pos, far=None):
     Parameters
     ----------
     criter :
-        Criterion (`eer` or `hter`)
+        Criterion (`eer` or `min-hter` or `far`)
     neg : :py:class:`numpy.ndarray`:
         array of negative scores
     pos : :py:class:`numpy.ndarray`::
@@ -104,7 +104,7 @@ def get_thres(criter, neg, pos, far=None):
     if criter == 'eer':
         from . import eer_threshold
         return eer_threshold(neg, pos)
-    elif criter == 'hter':
+    elif criter == 'min-hter':
         from . import min_hter_threshold
         return min_hter_threshold(neg, pos)
     elif criter == 'far':
diff --git a/doc/guide.rst b/doc/guide.rst
index 95a4614..c27e661 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -588,7 +588,7 @@ Evaluate
 
 A convenient command ``evaluate`` is provided to generate multiple metrics and
 plots for a list of experiments. It generates two ``metrics`` outputs with ERR
-and HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
+and min-HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
 experiment. For example:
 
 .. code-block:: sh
-- 
2.21.0
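
A minimal usage sketch of the renamed criterion through the Python helper
touched by this patch (bob.measure.utils.get_thres). The numpy arrays below are
illustrative placeholders, not data from the test suite:

    # Illustrative only: random scores stand in for real negative (impostor)
    # and positive (genuine) score sets.
    import numpy
    from bob.measure.utils import get_thres

    negatives = numpy.random.normal(-1.0, 1.0, 100)
    positives = numpy.random.normal(1.0, 1.0, 100)

    # 'min-hter' replaces the former 'hter' criterion name; 'eer' and 'far'
    # remain accepted values.
    threshold = get_thres('min-hter', negatives, positives)

On the command line, the corresponding flag value is the one shown in the hist
example above: `--criterion min-hter`.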