bob.measure · Commit 5df44007

rename hter -> min-hter

Authored May 03, 2018 by Theophile GENTILHOMME
Parent: b6c7d0dd
Pipeline #19736 passed in 122 minutes and 2 seconds
Showing 5 changed files with 12 additions and 11 deletions:

    bob/measure/script/commands.py        +4  -4
    bob/measure/script/common_options.py  +3  -3
    bob/measure/test_script.py            +2  -1
    bob/measure/utils.py                  +2  -2
    doc/guide.rst                         +1  -1
bob/measure/script/commands.py

@@ -22,7 +22,7 @@ from bob.extension.scripts.click_helper import (verbosity_option,
 @click.pass_context
 def metrics(ctx, scores, evaluation, **kwargs):
     """Prints a table that contains FtA, FAR, FRR, FMR, FMNR, HTER for a given
-    threshold criterion (eer or hter).
+    threshold criterion (eer or min-hter).

     You need to provide one or more development score file(s) for each experiment.
     You can also provide evaluation files along with dev files. If only dev scores

@@ -189,7 +189,7 @@ def hist(ctx, scores, evaluation, **kwargs):
         $ bob measure hist dev-scores1 eval-scores1 dev-scores2
         eval-scores2

-        $ bob measure hist --criterion hter --show-dev dev-scores1 eval-scores1
+        $ bob measure hist --criterion min-hter --show-dev dev-scores1 eval-scores1
     """
     process = figure.Hist(ctx, scores, evaluation, load.split)
     process.run()

@@ -247,8 +247,8 @@ def evaluate(ctx, scores, evaluation, **kwargs):
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     # second time, appends the content
     ctx.meta['open_mode'] = 'a'
-    click.echo("Computing metrics with HTER...")
-    ctx.meta['criterion'] = 'hter'  # no criterion passed in evaluate
+    click.echo("Computing metrics with min-HTER...")
+    ctx.meta['criterion'] = 'min-hter'  # no criterion passed in evaluate
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     if 'log' in ctx.meta:
         click.echo("[metrics] => %s" % ctx.meta['log'])
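The `evaluate` hunk above relies on Click's shared context: it invokes `metrics` twice, once with the default criterion and once after storing the renamed criterion in `ctx.meta`, setting `ctx.meta['open_mode'] = 'a'` so the second table is appended to the same output. A minimal, self-contained sketch of that pattern (command names and messages here are illustrative, not bob.measure's actual code):

    import click

    @click.group()
    def cli():
        pass

    @cli.command()
    @click.pass_context
    def metrics(ctx):
        # read whatever criterion the caller stored in the shared context
        criterion = ctx.meta.get('criterion', 'eer')
        mode = ctx.meta.get('open_mode', 'w')
        click.echo("metrics: criterion=%s, open_mode=%s" % (criterion, mode))

    @cli.command()
    @click.pass_context
    def evaluate(ctx):
        # first pass: default criterion, output opened for writing
        ctx.invoke(metrics)
        # second pass: append the min-hter table to the same output
        ctx.meta['open_mode'] = 'a'
        ctx.meta['criterion'] = 'min-hter'
        ctx.invoke(metrics)

    if __name__ == '__main__':
        cli()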
bob/measure/script/common_options.py

@@ -273,7 +273,7 @@ def output_log_metric_option(**kwargs):
         callback=callback, **kwargs)(func)
     return custom_output_log_file_option

-def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
+def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
     """Get option flag to tell which criteriom is used (default:eer)

     Parameters

@@ -284,7 +284,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
     def custom_criterion_option(func):
         def callback(ctx, param, value):
             list_accepted_crit = lcriteria if lcriteria is not None else \
-                ['eer', 'hter', 'far']
+                ['eer', 'min-hter', 'far']
             if value not in list_accepted_crit:
                 raise click.BadParameter('Incorrect value for `--criterion`. '
                                          'Must be one of [`%s`]' %

@@ -294,7 +294,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
         return click.option(
             '-c', '--criterion', default='eer',
             help='Criterion to compute plots and '
-                 'metrics: `eer` (default), `hter`',
+                 'metrics: `eer`, `min-hter` or `far`',
             callback=callback, is_eager=True, **kwargs)(func)
     return custom_criterion_option
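`criterion_option` is a decorator factory: it attaches a `-c/--criterion` option whose eager callback validates the value against the accepted list before other options are processed. A minimal standalone sketch of the same pattern (illustrative, not the actual helper):

    import click

    def criterion_option(lcriteria=('eer', 'min-hter', 'far'), **kwargs):
        """Return a decorator adding a validated -c/--criterion option."""
        def custom_criterion_option(func):
            def callback(ctx, param, value):
                if value not in lcriteria:
                    raise click.BadParameter(
                        'Incorrect value for `--criterion`. '
                        'Must be one of [`%s`]' % '`, `'.join(lcriteria))
                return value
            return click.option(
                '-c', '--criterion', default='eer',
                help='Criterion to compute plots and metrics: '
                     '`eer`, `min-hter` or `far`',
                callback=callback, is_eager=True, **kwargs)(func)
        return custom_criterion_option

    @click.command()
    @criterion_option()
    def metrics(criterion):
        click.echo('selected criterion: %s' % criterion)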
bob/measure/test_script.py

@@ -130,7 +130,8 @@ def test_hist():
     assert result.exit_code == 0, (result.exit_code, result.output)
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion', 'hter',
+        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion',
+                                               'min-hter',
                                                '--output', 'HISTO.pdf',
                                                '-b', '30,100', dev1, dev2])
     if result.output:
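The test exercises the renamed flag through Click's test runner; `isolated_filesystem()` gives the command a scratch directory so `--output HISTO.pdf` does not pollute the working tree. A reduced sketch of the pattern with a stand-in command (not the real `bob measure hist`):

    import click
    from click.testing import CliRunner

    @click.command()
    @click.option('--criterion', default='eer')
    @click.option('--output', default='out.pdf')
    def hist(criterion, output):
        # stand-in for `bob measure hist`: just record what would be plotted
        with open(output, 'w') as f:
            f.write('histogram thresholded with %s\n' % criterion)

    def test_hist():
        runner = CliRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(hist, ['--criterion', 'min-hter',
                                          '--output', 'HISTO.pdf'])
            assert result.exit_code == 0, (result.exit_code, result.output)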
bob/measure/utils.py

@@ -90,7 +90,7 @@ def get_thres(criter, neg, pos, far=None):
     Parameters
     ----------
     criter :
-        Criterion (`eer` or `hter`)
+        Criterion (`eer` or `hter` or `far`)
     neg : :py:class:`numpy.ndarray`:
         array of negative scores
     pos : :py:class:`numpy.ndarray`::

@@ -104,7 +104,7 @@ def get_thres(criter, neg, pos, far=None):
     if criter == 'eer':
         from . import eer_threshold
         return eer_threshold(neg, pos)
-    elif criter == 'hter':
+    elif criter == 'min-hter':
         from . import min_hter_threshold
         return min_hter_threshold(neg, pos)
     elif criter == 'far':
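For reference, the `min-hter` branch picks the decision threshold minimizing the half total error rate (FAR + FRR) / 2, whereas the `eer` branch picks the point where FAR and FRR are closest to equal. A brute-force numpy sketch of the min-HTER criterion (illustrative only, not bob.measure's implementation):

    import numpy as np

    def min_hter_threshold(neg, pos):
        """Return the threshold minimizing (FAR + FRR) / 2 by exhaustive search."""
        best_thres, best_hter = None, np.inf
        for t in np.sort(np.concatenate([neg, pos])):
            far = np.mean(neg >= t)   # impostor scores wrongly accepted
            frr = np.mean(pos < t)    # genuine scores wrongly rejected
            hter = (far + frr) / 2.0
            if hter < best_hter:
                best_thres, best_hter = t, hter
        return best_thres

    neg = np.random.normal(-1.0, 1.0, 1000)  # impostor scores
    pos = np.random.normal(+1.0, 1.0, 1000)  # genuine scores
    print(min_hter_threshold(neg, pos))      # close to 0 for these distributions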
doc/guide.rst

@@ -588,7 +588,7 @@ Evaluate
 A convenient command ``evaluate`` is provided to generate multiple metrics and
 plots for a list of experiments. It generates two ``metrics`` outputs with ERR
-and HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
+and min-HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
 experiment. For example:

 .. code-block:: sh