Commit cb3964bd authored by Tiago de Freitas Pereira

Merge branch 'variousfixes' into 'master'

Change --eval option default and Various fixes

Closes bob.bio.base#112

See merge request !68
parents af833d63 5edb7da6
@@ -27,16 +27,16 @@ def metrics(ctx, scores, evaluation, **kwargs):
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with dev files. If
-only dev scores are provided, you must use flag `--no-evaluation`.
+evaluation scores are provided, you must use flag `--eval`.
The resulting table format can be changed using the `--tablefmt` option.
Examples:
$ bob measure metrics dev-scores
-$ bob measure metrics -l results.txt dev-scores1 eval-scores1
+$ bob measure metrics -e -l results.txt dev-scores1 eval-scores1
-$ bob measure metrics {dev,eval}-scores1 {dev,eval}-scores2
+$ bob measure metrics -e {dev,eval}-scores1 {dev,eval}-scores2
"""
process = figure.Metrics(ctx, scores, evaluation, load.split)
process.run()
@@ -72,15 +72,15 @@ def roc(ctx, scores, evaluation, **kwargs):
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with dev files. If
-only dev scores are provided, you must use flag `--no-evaluation`.
+evaluation scores are provided, you must use flag `--eval`.
Examples:
$ bob measure roc -v dev-scores
-$ bob measure roc -v dev-scores1 eval-scores1 dev-scores2
+$ bob measure roc -e -v dev-scores1 eval-scores1 dev-scores2
eval-scores2
-$ bob measure roc -v -o my_roc.pdf dev-scores1 eval-scores1
+$ bob measure roc -e -v -o my_roc.pdf dev-scores1 eval-scores1
"""
process = figure.Roc(ctx, scores, evaluation, load.split)
process.run()
@@ -115,15 +115,15 @@ def det(ctx, scores, evaluation, **kwargs):
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with dev files. If
-only dev scores are provided, you must use flag `--no-evaluation`.
+evaluation scores are provided, you must use flag `--eval`.
Examples:
$ bob measure det -v dev-scores
-$ bob measure det -v dev-scores1 eval-scores1 dev-scores2
+$ bob measure det -e -v dev-scores1 eval-scores1 dev-scores2
eval-scores2
-$ bob measure det -v -o my_det.pdf dev-scores1 eval-scores1
+$ bob measure det -e -v -o my_det.pdf dev-scores1 eval-scores1
"""
process = figure.Det(ctx, scores, evaluation, load.split)
process.run()
@@ -168,14 +168,16 @@ def epc(ctx, scores, **kwargs):
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.no_legend_option()
@common_options.criterion_option()
@common_options.no_line_option()
@common_options.far_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.print_filenames_option()
@common_options.legends_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.linestyles_option()
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.no_legend_option()
@@ -187,20 +189,19 @@ def hist(ctx, scores, evaluation, **kwargs):
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with dev files. If
-only dev scores are provided, you must use flag `--no-evaluation`.
+evaluation scores are provided, you must use flag `--eval`.
By default, when eval-scores are given, only eval-scores histograms are
displayed with threshold line
-computed from dev-scores. If you want to display dev-scores distributions
-as well, use ``--show-dev`` option.
+computed from dev-scores.
Examples:
$ bob measure hist -v dev-scores
-$ bob measure hist -v dev-scores1 eval-scores1 dev-scores2
+$ bob measure hist -e -v dev-scores1 eval-scores1 dev-scores2
eval-scores2
-$ bob measure hist -v --criterion min-hter --show-dev dev-scores1 eval-scores1
+$ bob measure hist -e -v --criterion min-hter dev-scores1 eval-scores1
"""
process = figure.Hist(ctx, scores, evaluation, load.split)
process.run()
@@ -244,12 +245,12 @@ def evaluate(ctx, scores, evaluation, **kwargs):
Examples:
$ bob measure evaluate -v dev-scores
-$ bob measure evaluate -v scores-dev1 scores-eval1 scores-dev2
+$ bob measure evaluate -e -v scores-dev1 scores-eval1 scores-dev2
scores-eval2
-$ bob measure evaluate -v /path/to/sys-{1,2,3}/scores-{dev,eval}
+$ bob measure evaluate -e -v /path/to/sys-{1,2,3}/scores-{dev,eval}
-$ bob measure evaluate -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
+$ bob measure evaluate -e -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
'''
# first time erase if existing file
ctx.meta['open_mode'] = 'w'
@@ -13,7 +13,7 @@ LOGGER = logging.getLogger(__name__)
def scores_argument(min_arg=1, force_eval=False, **kwargs):
    """Get the argument for scores, and add `dev-scores` and `eval-scores` in
-    the context when `--evaluation` flag is on (default)
+    the context when `--eval` flag is on (default)
Parameters
----------
@@ -67,10 +67,18 @@ def no_legend_option(dflt=True, **kwargs):
def eval_option(**kwargs):
    '''Get option flag to say if eval-scores are provided'''
-    return bool_option(
-        'evaluation', 'e', 'If set, evaluation scores must be provided',
-        dflt=True
-    )
+    def custom_eval_option(func):
+        def callback(ctx, param, value):
+            ctx.meta['evaluation'] = value
+            return value
+        return click.option(
+            '-e', '--eval', 'evaluation', is_flag=True, default=False,
+            show_default=True,
+            help='If set, evaluation scores must be provided',
+            callback=callback, **kwargs)(func)
+    return custom_eval_option
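For reference outside of bob.measure, the same decorator-factory pattern can be exercised with a plain click command. In this minimal sketch the `demo` command and its body are hypothetical; the point it illustrates is that the option callback mirrors the parsed flag into `ctx.meta`, which is where the processing classes read it:

.. code-block:: python

    import click

    def eval_option(**kwargs):
        '''Get option flag to say if eval-scores are provided'''
        def custom_eval_option(func):
            def callback(ctx, param, value):
                # mirror the flag into ctx.meta for downstream consumers
                ctx.meta['evaluation'] = value
                return value
            return click.option(
                '-e', '--eval', 'evaluation', is_flag=True, default=False,
                show_default=True,
                help='If set, evaluation scores must be provided',
                callback=callback, **kwargs)(func)
        return custom_eval_option

    @click.command()
    @eval_option()
    @click.pass_context
    def demo(ctx, evaluation):
        # the value is visible both as a parameter and via ctx.meta
        click.echo('evaluation=%r meta=%r' % (evaluation, ctx.meta['evaluation']))

    if __name__ == '__main__':
        demo()

The new `no_line_option` below follows the same shape, differing only in the flag name and the `ctx.meta` key it writes.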
def sep_dev_eval_option(dflt=True, **kwargs):
@@ -301,6 +309,20 @@ def output_log_metric_option(**kwargs):
return custom_output_log_file_option
+def no_line_option(**kwargs):
+    '''Get option flag to say if no line should be displayed'''
+    def custom_no_line_option(func):
+        def callback(ctx, param, value):
+            ctx.meta['no_line'] = value
+            return value
+        return click.option(
+            '--no-line', is_flag=True, default=False,
+            show_default=True,
+            help='If set, does not display vertical lines',
+            callback=callback, **kwargs)(func)
+    return custom_no_line_option
def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
    """Get option flag to tell which criterion is used (default: eer)
@@ -337,7 +359,7 @@ def far_option(**kwargs):
            return value
        return click.option(
            '-f', '--far-value', type=click.FLOAT, default=None,
-            help='The FAR value for which to compute metrics',
+            help='The FAR value for which to compute threshold',
            callback=callback, show_default=True, **kwargs)(func)
    return custom_far_option
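The amended help string reflects what the value actually drives downstream: selecting an operating threshold at a fixed FAR on the development scores, from which the reported metrics follow. A hedged sketch of that computation on synthetic scores (exact keyword names may differ slightly across bob.measure versions):

.. code-block:: python

    import numpy
    import bob.measure

    # synthetic development scores: impostor (negative) and genuine (positive)
    negatives = numpy.random.normal(-1.0, 1.0, 500)
    positives = numpy.random.normal(+1.0, 1.0, 500)

    # threshold chosen so that the development-set FAR is at most 1%
    threshold = bob.measure.far_threshold(negatives, positives, 0.01)

    # the metrics then follow from applying that threshold
    far, frr = bob.measure.farfrr(negatives, positives, threshold)
    print('threshold=%.4f FAR=%.4f FRR=%.4f' % (threshold, far, frr))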
@@ -558,6 +558,7 @@ class Hist(PlotBase):
                % self.n_systems
            )
        self._criterion = ctx.meta.get('criterion')
+        self._no_line = ctx.meta.get('no_line', False)
        self._nrows = ctx.meta.get('n_row', 1)
        self._ncols = ctx.meta.get('n_col', 1)
        self._nlegends = ctx.meta.get('legends_ncol', 10)
@@ -593,6 +594,7 @@
            '' if self._criterion is None else
            self._criterion.upper(), ' (dev)' if self._eval else ''
        )
+        if self._eval and not self._no_line:
            self._lines(threshold, label, neg, pos, idx)
        if sub_plot_idx == 1:
            self._plot_legends()
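The effect of the two new attributes is easiest to see in isolation: the threshold marker is drawn only when evaluation histograms are shown and ``--no-line`` was not passed. A standalone matplotlib sketch of that gating (illustrative only, not bob.measure's actual plotting code):

.. code-block:: python

    import numpy
    import matplotlib.pyplot as plt

    def plot_hist(scores, threshold, evaluation=True, no_line=False):
        plt.hist(scores, bins=30)
        # mirrors the `if self._eval and not self._no_line` gate above
        if evaluation and not no_line:
            plt.axvline(threshold, linestyle='--', label='threshold (dev)')
            plt.legend()
        plt.savefig('hist.pdf')

    plot_hist(numpy.random.normal(0.0, 1.0, 1000), threshold=0.0)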
@@ -10,7 +10,7 @@ from .script import commands
def test_metrics():
dev1 = bob.io.base.test_utils.datafile('dev-1.txt', 'bob.measure')
runner = CliRunner()
-result = runner.invoke(commands.metrics, ['--no-evaluation', dev1])
+result = runner.invoke(commands.metrics, [dev1])
with runner.isolated_filesystem():
with open('tmp', 'w') as f:
f.write(result.output)
@@ -22,7 +22,7 @@ def test_metrics():
test2 = bob.io.base.test_utils.datafile('test-2.txt', 'bob.measure')
with runner.isolated_filesystem():
result = runner.invoke(
-commands.metrics, [dev1, test1, dev2, test2]
+commands.metrics, ['-e', dev1, test1, dev2, test2]
)
with open('tmp', 'w') as f:
f.write(result.output)
@@ -30,14 +30,14 @@ def test_metrics():
assert result.exit_code == 0
with runner.isolated_filesystem():
result = runner.invoke(
-commands.metrics, ['-l', 'tmp', dev1, test1, dev2, test2, '-lg',
+commands.metrics, ['-e', '-l', 'tmp', dev1, test1, dev2, test2, '-lg',
'A,B']
)
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(
-commands.metrics, ['-l', 'tmp', '--no-evaluation', dev1, dev2]
+commands.metrics, ['-l', 'tmp', dev1, dev2]
)
assert result.exit_code == 0, (result.exit_code, result.output)
@@ -45,8 +45,7 @@ def test_roc():
dev1 = bob.io.base.test_utils.datafile('dev-1.txt', 'bob.measure')
runner = CliRunner()
with runner.isolated_filesystem():
-result = runner.invoke(commands.roc, ['--no-evaluation', '--output',
-'test.pdf', dev1])
+result = runner.invoke(commands.roc, ['--output', 'test.pdf', dev1])
if result.output:
click.echo(result.output)
assert result.exit_code == 0
@@ -55,7 +54,7 @@ def test_roc():
test2 = bob.io.base.test_utils.datafile('test-2.txt', 'bob.measure')
with runner.isolated_filesystem():
result = runner.invoke(commands.roc, ['--split', '--output',
-'test.pdf',
+'test.pdf', '-e',
'-ts', 'A,',
dev1, test1, dev2, test2])
if result.output:
@@ -63,7 +62,7 @@ def test_roc():
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
-result = runner.invoke(commands.roc, ['--output',
+result = runner.invoke(commands.roc, ['-e', '--output',
'test.pdf', '--legends', 'A,B',
dev1, test1, dev2, test2])
if result.output:
@@ -75,7 +74,7 @@ def test_det():
dev1 = bob.io.base.test_utils.datafile('dev-1.txt', 'bob.measure')
runner = CliRunner()
with runner.isolated_filesystem():
-result = runner.invoke(commands.det, ['--no-evaluation', dev1])
+result = runner.invoke(commands.det, [dev1])
if result.output:
click.echo(result.output)
assert result.exit_code == 0
@@ -83,7 +82,7 @@ def test_det():
test1 = bob.io.base.test_utils.datafile('test-1.txt', 'bob.measure')
test2 = bob.io.base.test_utils.datafile('test-2.txt', 'bob.measure')
with runner.isolated_filesystem():
-result = runner.invoke(commands.det, ['--split', '--output',
+result = runner.invoke(commands.det, ['-e', '--split', '--output',
'test.pdf', '--legends', 'A,B',
'-lc', 'upper-right',
dev1, test1, dev2, test2])
@@ -93,7 +92,7 @@ def test_det():
with runner.isolated_filesystem():
result = runner.invoke(commands.det, ['--output',
-'test.pdf',
+'test.pdf', '-e',
dev1, test1, dev2, test2])
if result.output:
click.echo(result.output)
@@ -128,14 +127,14 @@ def test_hist():
test2 = bob.io.base.test_utils.datafile('test-2.txt', 'bob.measure')
runner = CliRunner()
with runner.isolated_filesystem():
-result = runner.invoke(commands.hist, ['--no-evaluation', dev1])
+result = runner.invoke(commands.hist, [dev1])
if result.output:
click.echo(result.output)
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
-result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion',
-'min-hter',
+result = runner.invoke(commands.hist, ['--criterion',
+'min-hter', '--no-line',
'--output', 'HISTO.pdf', '-b',
'30,100', dev1, dev2])
if result.output:
@@ -143,7 +142,7 @@ def test_hist():
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
-result = runner.invoke(commands.hist, ['--criterion', 'eer', '--output',
+result = runner.invoke(commands.hist, ['-e', '--criterion', 'eer', '--output',
'HISTO.pdf', '-b', '30,20',
'-sp', 221, '-lg', 'A,B',
dev1, test1, dev2, test2])
@@ -159,16 +158,16 @@ def test_evaluate():
test2 = bob.io.base.test_utils.datafile('test-2.txt', 'bob.measure')
runner = CliRunner()
with runner.isolated_filesystem():
-result = runner.invoke(commands.evaluate, ['--no-evaluation', dev1])
+result = runner.invoke(commands.evaluate, [dev1])
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(
-commands.evaluate, ['--no-evaluation', '--output', 'my_plots.pdf',
+commands.evaluate, ['--output', 'my_plots.pdf',
'-n', 300, dev1, dev2])
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(
-commands.evaluate, [dev1, test1, dev2, test2])
+commands.evaluate, ['-e', dev1, test1, dev2, test2])
assert result.exit_code == 0, (result.exit_code, result.output)
@@ -508,7 +508,7 @@ and FRs are also displayed between parenthesis.
.. note::
Several score files can be given at once and the metrics will be computed
for each of them separately. Development and evaluation files must be given in
-pairs. When only development files are provided, the ``--no-evaluation`` flag
+pairs. When evaluation files are provided, the ``--eval`` flag
must be given.
@@ -535,7 +535,7 @@ on an evaluation set:
.. code-block:: sh
-$ bob measure metrics dev-1.txt eval-1.txt
+$ bob measure metrics -e dev-1.txt eval-1.txt
[Min. criterion: EER] Threshold on Development set `dev-1`: -8.025286e-03
==== =================== ===============
.. Development dev-1 Eval. eval-1
@@ -575,7 +575,7 @@ For example, to generate a DET curve from development and evaluation datasets:
.. code-block:: sh
-$ bob measure det -v --output "my_det.pdf" -ts "DetDev1,DetEval1,DetDev2,DetEval2"
+$ bob measure det -e -v --output "my_det.pdf" -ts "DetDev1,DetEval1,DetDev2,DetEval2"
dev-1.txt eval-1.txt dev-2.txt eval-2.txt
where `my_det.pdf` will contain DET plots for the two experiments.
@@ -600,7 +600,7 @@ experiment. For example:
.. code-block:: sh
-$ bob measure evaluate -v -l 'my_metrics.txt' -o 'my_plots.pdf' {sys1,sys2}/{dev,eval}
+$ bob measure evaluate -e -v -l 'my_metrics.txt' -o 'my_plots.pdf' {sys1,sys2}/{dev,eval}
will output metrics and plots for the two experiments (dev and eval pairs) in
`my_metrics.txt` and `my_plots.pdf`, respectively.