Commit 5df44007 authored by Theophile GENTILHOMME's avatar Theophile GENTILHOMME
Browse files

rename hter -> min-hter

parent b6c7d0dd
Pipeline #19736 passed with stage
in 122 minutes and 2 seconds
......@@ -22,7 +22,7 @@ from bob.extension.scripts.click_helper import (verbosity_option,
def metrics(ctx, scores, evaluation, **kwargs):
    """Prints a table that contains FtA, FAR, FRR, FMR, FNMR, HTER for a given
threshold criterion (eer or hter).
threshold criterion (eer or min-hter).
You need to provide one or more development score file(s) for each experiment.
You can also provide evaluation files along with dev files. If only dev scores
......@@ -189,7 +189,7 @@ def hist(ctx, scores, evaluation, **kwargs):
$ bob measure hist dev-scores1 eval-scores1 dev-scores2
$ bob measure hist --criterion hter --show-dev dev-scores1 eval-scores1
$ bob measure hist --criterion min-hter --show-dev dev-scores1 eval-scores1
process = figure.Hist(ctx, scores, evaluation, load.split)
......@@ -247,8 +247,8 @@ def evaluate(ctx, scores, evaluation, **kwargs):
ctx.invoke(metrics, scores=scores, evaluation=evaluation)
# second time, appends the content
ctx.meta['open_mode'] = 'a'
click.echo("Computing metrics with HTER...")
ctx.meta['criterion'] = 'hter' # no criterion passed in evaluate
click.echo("Computing metrics with min-HTER...")
ctx.meta['criterion'] = 'min-hter' # no criterion passed in evaluate
ctx.invoke(metrics, scores=scores, evaluation=evaluation)
if 'log' in ctx.meta:
click.echo("[metrics] => %s" % ctx.meta['log'])
......@@ -273,7 +273,7 @@ def output_log_metric_option(**kwargs):
callback=callback, **kwargs)(func)
return custom_output_log_file_option
def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
    """Get option flag to tell which criterion is used (default: eer)
......@@ -284,7 +284,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
def custom_criterion_option(func):
def callback(ctx, param, value):
list_accepted_crit = lcriteria if lcriteria is not None else \
['eer', 'hter', 'far']
['eer', 'min-hter', 'far']
if value not in list_accepted_crit:
raise click.BadParameter('Incorrect value for `--criterion`. '
'Must be one of [`%s`]' %
......@@ -294,7 +294,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
return click.option(
'-c', '--criterion', default='eer',
help='Criterion to compute plots and '
'metrics: `eer` (default), `hter`',
'metrics: `eer`, `min-hter` or `far`',
callback=callback, is_eager=True ,**kwargs)(func)
return custom_criterion_option
......@@ -130,7 +130,8 @@ def test_hist():
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion', 'hter',
        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion', 'min-hter',
'--output', 'HISTO.pdf', '-b',
'30,100', dev1, dev2])
if result.output:
......@@ -90,7 +90,7 @@ def get_thres(criter, neg, pos, far=None):
criter :
Criterion (`eer` or `hter`)
        Criterion (`eer`, `min-hter` or `far`)
neg : :py:class:`numpy.ndarray`:
array of negative scores
pos : :py:class:`numpy.ndarray`::
......@@ -104,7 +104,7 @@ def get_thres(criter, neg, pos, far=None):
if criter == 'eer':
from . import eer_threshold
return eer_threshold(neg, pos)
elif criter == 'hter':
elif criter == 'min-hter':
from . import min_hter_threshold
return min_hter_threshold(neg, pos)
elif criter == 'far':
......@@ -588,7 +588,7 @@ Evaluate
A convenient command ``evaluate`` is provided to generate multiple metrics and
plots for a list of experiments. It generates two ``metrics`` outputs with EER
and HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
and min-HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
experiment. For example:
.. code-block:: sh
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment