Commit 93c8f5e7
Authored 6 years ago by Amir MOHAMMADI

    Reduce repetition between commands

Parent: 48bb8010
1 merge request: !163 Reduce repetition between commands

Showing 1 changed file: bob/bio/base/script/commands.py (42 additions, 304 deletions)
'''Click commands for ``bob.bio.base``'''
import click
import bob.bio.base.script.figure as bio_figure
from . import figure as bio_figure
import bob.measure.script.figure as measure_figure
from ..score import load
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import (
    verbosity_option, open_file_mode_option)
from bob.extension.scripts.click_helper import verbosity_option

SCORE_FORMAT = (
    "Files must be 4- or 5- columns format, see "
    ":py:func:`bob.bio.base.score.load.four_column` and "
    ":py:func:`bob.bio.base.score.load.five_column` for details.")
CRITERIA = ('eer', 'min-hter', 'far', 'mindcf', 'cllr', 'rr')


def rank_option(**kwargs):
...
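The SCORE_FORMAT string above describes the text score files consumed by every command in this module. As a hedged illustration (the identities and scores below are invented, and the return value of load.split is assumed from the way bio_figure uses it further down), a 4-column development score file and its loading look roughly like this:

# dev-scores: one probe trial per line, whitespace-separated
# (claimed identity, real identity, probe label, score)
#
#   001  001  probe_01   2.31
#   001  002  probe_07  -0.84
#   002  002  probe_03   1.05

from bob.bio.base.score import load

# four_column() iterates over (claimed_id, real_id, probe_label, score)
# tuples; split() is assumed here to separate the scores into impostor
# (negatives) and genuine (positives) arrays, which is how the plotting
# classes in this module consume it.
negatives, positives = load.split('dev-scores')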
@@ -23,43 +29,12 @@ def rank_option(**kwargs):
    return custom_rank_option


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.table_option()
@common_options.eval_option()
@common_options.output_log_metric_option()
@common_options.criterion_option(['eer', 'min-hter', 'far', 'mindcf', 'cllr', 'rr'])
@common_options.metrics_command(common_options.METRICS_HELP.format(
    names='FtA, FAR, FRR, FMR, FMNR, HTER', criteria=CRITERIA,
    score_format=SCORE_FORMAT, command='bob bio metrics'),
    criteria=CRITERIA)
@common_options.cost_option()
@common_options.thresholds_option()
@common_options.far_option()
@common_options.legends_option()
@open_file_mode_option()
@verbosity_option()
@click.pass_context
def metrics(ctx, scores, evaluation, **kargs):
    """Prints a single output line that contains all info for a given
    criterion (eer, min-hter, far, mindcf, cllr, rr).

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files; if
    eval-scores are used, the `--eval` flag is required. Files must be in
    4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    The resulting table format can be changed using `--tablefmt`. Default
    formats are `rst` when output goes to the terminal and `latex` when
    written to a log file (see `--log`).

    Examples:

        $ bob bio metrics dev-scores
        $ bob bio metrics dev-scores1 dev-scores2
        $ bob bio metrics -e -l results.txt dev-scores1 eval-scores1
        $ bob bio metrics -e {dev,eval}-scores1 {dev,eval}-scores2
    """
def metrics(ctx, scores, evaluation, **kwargs):
    if 'criterion' in ctx.meta and ctx.meta['criterion'] == 'rr':
        process = bio_figure.Metrics(ctx, scores, evaluation, load.cmc)
    else:
...
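Every criterion handled by `bob bio metrics` amounts to picking a threshold on the development scores and reporting error rates at that threshold. A minimal sketch of the `eer` case using `bob.measure` directly (the toy arrays are invented; only `eer_threshold` and `farfrr` are used):

import numpy
import bob.measure

# toy scores: negatives are impostor trials, positives are genuine trials
negatives = numpy.array([-2.0, -1.5, -0.3, 0.1])
positives = numpy.array([0.5, 1.2, 2.0, 3.1])

# threshold where FAR and FRR are (approximately) equal on the dev set
threshold = bob.measure.eer_threshold(negatives, positives)
far, frr = bob.measure.farfrr(negatives, positives, threshold)
hter = (far + frr) / 2.0
print("threshold=%.3f FAR=%.3f FRR=%.3f HTER=%.3f" % (threshold, far, frr, hter))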
@@ -67,134 +42,48 @@ def metrics(ctx, scores, evaluation, **kargs):
    process.run()


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.legends_option()
@common_options.legend_loc_option(dflt=None)
@common_options.no_legend_option()
@common_options.sep_dev_eval_option()
@common_options.output_plot_file_option(default_out='roc.pdf')
@common_options.eval_option()
@common_options.points_curve_option()
@common_options.semilogx_option(True)
@common_options.axes_val_option()
@common_options.x_rotation_option()
@common_options.lines_at_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option()
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
@common_options.roc_command(common_options.ROC_HELP.format(
    score_format=SCORE_FORMAT, command='bob bio roc'))
def roc(ctx, scores, evaluation, **kargs):
    """Plot ROC (receiver operating characteristic) curve.

    The plot represents the false match rate on the horizontal axis and the
    false non-match rate on the vertical axis. The values for the axes are
    computed using :py:func:`bob.measure.roc`.

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files; if
    eval-scores are used, the `--eval` flag is required. Files must be in
    4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob bio roc -v dev-scores
        $ bob bio roc -e -v dev-scores1 eval-scores1 dev-scores2 eval-scores2
        $ bob bio roc -e -v -o my_roc.pdf dev-scores1 eval-scores1
    """
    process = bio_figure.Roc(ctx, scores, evaluation, load.split)
    process.run()
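As the docstring notes, the curve points come from :py:func:`bob.measure.roc`. A hedged sketch of what the Roc figure class boils down to, with invented scores and the default semilog x-axis of `bob bio roc`:

import numpy
import bob.measure
import matplotlib.pyplot as plt

negatives = numpy.array([-2.0, -1.5, -0.3, 0.1])
positives = numpy.array([0.5, 1.2, 2.0, 3.1])

# roc() sweeps thresholds and returns a (2, n_points) array of
# false-match / false-non-match rate pairs
curve = bob.measure.roc(negatives, positives, 100)
plt.semilogx(curve[0], curve[1])
plt.xlabel('False Match Rate')
plt.ylabel('False Non-Match Rate')
plt.savefig('roc.pdf')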
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.no_legend_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.sep_dev_eval_option()
@common_options.eval_option()
@common_options.axes_val_option(dflt='0.01,95,0.01,95')
@common_options.x_rotation_option(dflt=45)
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option()
@common_options.lines_at_option()
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
@common_options.det_command(common_options.DET_HELP.format(
    score_format=SCORE_FORMAT, command='bob bio det'))
def det(ctx, scores, evaluation, **kargs):
    """Plot DET (detection error trade-off) curve.

    A DET curve is a modified ROC curve which plots error rates on both axes
    (false positives on the x-axis and false negatives on the y-axis).

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files; if
    eval-scores are used, the `--eval` flag is required. Files must be in
    4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob bio det -v dev-scores
        $ bob bio det -e -v dev-scores1 eval-scores1 dev-scores2 eval-scores2
        $ bob bio det -e -v -o my_det.pdf dev-scores1 eval-scores1
    """
    process = bio_figure.Det(ctx, scores, evaluation, load.split)
    process.run()
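A DET curve shows the same trade-off on normal-deviate axes. The sketch below assumes that `bob.measure.det` takes the same `(negatives, positives, n_points)` arguments as `bob.measure.roc` and returns rates already mapped through the inverse cumulative normal; treat the call signature as an assumption rather than documented fact:

import numpy
import bob.measure

negatives = numpy.array([-2.0, -1.5, -0.3, 0.1])
positives = numpy.array([0.5, 1.2, 2.0, 3.1])

# assumed: det() returns ppndf-transformed FAR/FRR pairs, which is what
# makes DET curves approximately straight lines
curve = bob.measure.det(negatives, positives, 100)
print(curve.shape)  # expected (2, 100)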
@click.command()
@common_options.scores_argument(min_arg=1, force_eval=True, nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-center')
@common_options.no_legend_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option()
@verbosity_option()
@click.pass_context
@common_options.epc_command(common_options.EPC_HELP.format(
    score_format=SCORE_FORMAT, command='bob bio epc'))
def epc(ctx, scores, **kargs):
    """Plot EPC (expected performance curve).

    Plots the error rate on the eval set depending on a threshold selected
    a priori on the development set, accounting for a varying relative cost
    in [0; 1] of FPR and FNR when calculating the threshold.

    You need to provide one or more development score and eval file(s)
    for each experiment. Files must be in 4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob bio epc -v dev-scores eval-scores
        $ bob bio epc -v -o my_epc.pdf dev-scores1 eval-scores1
    """
    process = measure_figure.Epc(ctx, scores, True, load.split)
    process.run()
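A do-it-yourself sketch of the quantity EPC plots, without the figure class: for each relative cost alpha in [0, 1], pick the development threshold that minimizes the weighted error, then measure the error on the evaluation set at that fixed threshold. The score arrays are invented; only `bob.measure.farfrr` is relied upon:

import numpy
import bob.measure

dev_neg = numpy.array([-2.0, -1.5, -0.3, 0.1])
dev_pos = numpy.array([0.5, 1.2, 2.0, 3.1])
eval_neg = numpy.array([-1.8, -1.0, -0.2, 0.3])
eval_pos = numpy.array([0.4, 1.0, 1.9, 2.8])

candidates = numpy.concatenate([dev_neg, dev_pos])
for alpha in numpy.linspace(0.0, 1.0, 5):
    # brute-force search: minimize alpha * FAR + (1 - alpha) * FRR on dev
    costs = [alpha * far + (1.0 - alpha) * frr
             for far, frr in (bob.measure.farfrr(dev_neg, dev_pos, t)
                              for t in candidates)]
    threshold = candidates[int(numpy.argmin(costs))]
    far, frr = bob.measure.farfrr(eval_neg, eval_pos, threshold)
    print("alpha=%.2f threshold=%.2f eval HTER=%.3f"
          % (alpha, threshold, (far + frr) / 2.0))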
@common_options.hist_command(common_options.HIST_HELP.format(
    score_format=SCORE_FORMAT, command='bob bio hist'))
def hist(ctx, scores, evaluation, **kwargs):
    process = bio_figure.Hist(ctx, scores, evaluation, load.split)
    process.run()


@common_options.evaluate_command(common_options.EVALUATE_HELP.format(
    score_format=SCORE_FORMAT, command='bob bio evaluate'),
    criteria=CRITERIA)
@common_options.cost_option()
def evaluate(ctx, scores, evaluation, **kwargs):
    common_options.evaluate_flow(
        ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs)


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
...
@@ -212,7 +101,7 @@ def epc(ctx, scores, **kargs):
@verbosity_option()
@click.pass_context
def cmc(ctx, scores, evaluation, **kargs):
    """Plot CMC (cumulative match characteristic curve):
    """Plot CMC (cumulative match characteristic curve).

    graphical presentation of results of an identification task eval, plotting
    rank values on the x-axis and the probability of correct identification at
    or below that rank on the y-axis. The values for the axis will be computed
...
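Unlike the threshold-based plots, a CMC curve only needs to know, per probe, how the genuine score ranks among that probe's scores. A pure-numpy sketch, assuming the per-probe structure produced by load.cmc is a sequence of (impostor_scores, genuine_scores) pairs (the data below is invented):

import numpy

# one (impostor_scores, genuine_scores) pair per probe
cmc_scores = [
    (numpy.array([-1.0, 0.2]), numpy.array([0.5])),
    (numpy.array([0.9, -0.4]), numpy.array([0.7])),
]

max_rank = 3
recognition_rate = numpy.zeros(max_rank)
for negatives, positives in cmc_scores:
    # rank of the best genuine score among all scores of this probe
    rank = int(numpy.sum(negatives >= positives.max())) + 1
    recognition_rate[rank - 1:] += 1.0
recognition_rate /= len(cmc_scores)
print(recognition_rate)  # probability of correct identification at rank <= r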
@@ -257,7 +146,7 @@ def cmc(ctx, scores, evaluation, **kargs):
@verbosity_option()
@click.pass_context
def dir(ctx, scores, evaluation, **kargs):
    """Plots the Detection & Identification Rate curve over the FAR
    """Plots the Detection & Identification Rate curve over the FAR.

    This curve is designed to be used in an open set identification protocol,
    and defined in Chapter 14.1 of [LiJain2005]_. It requires to have at least
...
@@ -289,154 +178,3 @@ def dir(ctx, scores, evaluation, **kargs):
    """
    process = bio_figure.Dir(ctx, scores, evaluation, load.cmc)
    process.run()


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.no_line_option()
@common_options.far_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.print_filenames_option()
@common_options.legends_option()
@common_options.style_option()
@common_options.figsize_option(dflt=None)
@common_options.subplot_option()
@common_options.legend_ncols_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
    """Plots histograms of positives and negatives along with the threshold
    criterion.

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files; if
    eval-scores are used, the `--eval` flag is required. Files must be in
    4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    By default, when eval-scores are given, only eval-scores histograms are
    displayed, with the threshold line computed from dev-scores.

    Examples:

        $ bob bio hist -v dev-scores
        $ bob bio hist -e -v dev-scores1 eval-scores1 dev-scores2 eval-scores2
        $ bob bio hist -e -v --criterion min-hter dev-scores1 eval-scores1
    """
    process = bio_figure.Hist(ctx, scores, evaluation, load.split)
    process.run()
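What `bob bio hist` renders is essentially two overlaid score histograms with a vertical line at the selected threshold. A hedged matplotlib sketch with synthetic scores (the real command reads scores from files and derives the threshold from `--criterion`):

import numpy
import matplotlib.pyplot as plt
import bob.measure

negatives = numpy.random.normal(-1.0, 1.0, 500)
positives = numpy.random.normal(2.0, 1.0, 500)

threshold = bob.measure.eer_threshold(negatives, positives)
plt.hist(negatives, bins=30, alpha=0.5, label='negatives')
plt.hist(positives, bins=30, alpha=0.5, label='positives')
plt.axvline(threshold, color='k', linestyle='--', label='EER threshold')
plt.legend()
plt.savefig('hist.pdf')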
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.legends_option()
@common_options.sep_dev_eval_option()
@common_options.table_option()
@common_options.eval_option()
@common_options.output_log_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.lines_at_option()
@common_options.cost_option()
@rank_option()
@common_options.far_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option()
@verbosity_option()
@click.pass_context
def evaluate(ctx, scores, evaluation, **kwargs):
    '''Evaluates score files, runs error analysis on score sets, and plots curves.

    \b
    1. Computes the threshold using either EER, min. HTER or a FAR value as
       criterion on the development set scores
    2. Applies the above threshold on eval set scores to compute the HTER, if
       an eval-score set is provided
    3. Computes Cllr, minCllr and minDCF
    4. Reports error metrics in the console or in a log file
    5. Plots ROC, EPC, DET and score distribution curves to a multi-page PDF
       file

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files; if
    eval-scores are used, the `--eval` flag is required. Files must be in
    4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    You need to provide 2 score files for each biometric system in this order:

    \b
    * development scores
    * evaluation scores

    Examples:

        $ bob bio evaluate -v dev-scores
        $ bob bio evaluate -e -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
        $ bob bio evaluate -e -v -o my_plots.pdf /path/to/syst-{1,2,3}/{dev,eval}-scores
    '''
    log_str = ''
    if 'log' in ctx.meta and ctx.meta['log'] is not None:
        log_str = '%s' % ctx.meta['log']

    # first time, erase the file if it already exists
    ctx.meta['open_mode'] = 'w'
    click.echo("Computing metrics with EER%s..." % log_str)
    ctx.meta['criterion'] = 'eer'  # no criterion passed to evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)

    # other times, append the content
    ctx.meta['open_mode'] = 'a'
    click.echo("Computing metrics with min-HTER%s..." % log_str)
    ctx.meta['criterion'] = 'min-hter'  # no criterion passed in evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)

    if 'far_value' in ctx.meta and ctx.meta['far_value'] is not None:
        click.echo("Computing metrics with FAR=%f%s..." %
                   (ctx.meta['far_value'], log_str))
        ctx.meta['criterion'] = 'far'  # no criterion passed in evaluate
        ctx.invoke(metrics, scores=scores, evaluation=evaluation)

    click.echo("Computing minDCF%s..." % log_str)
    ctx.meta['criterion'] = 'mindcf'  # no criterion passed in evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)

    click.echo("Computing Cllr and minCllr%s..." % log_str)
    ctx.meta['criterion'] = 'cllr'  # no criterion passed in evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)

    # avoid closing the pdf file before all figures are plotted
    ctx.meta['closef'] = False
    if evaluation:
        click.echo("Starting evaluate with dev and eval scores...")
    else:
        click.echo("Starting evaluate with dev scores only...")
    click.echo("Generating ROC in %s..." % ctx.meta['output'])
    ctx.forward(roc)  # use class default plot settings
    click.echo("Generating DET in %s..." % ctx.meta['output'])
    ctx.forward(det)  # use class default plot settings
    if evaluation:
        click.echo("Generating EPC in %s..." % ctx.meta['output'])
        ctx.forward(epc)  # use class default plot settings

    # the last one closes the file
    ctx.meta['closef'] = True
    click.echo("Generating score histograms in %s..." % ctx.meta['output'])
    ctx.meta['criterion'] = 'eer'  # no criterion passed in evaluate
    ctx.forward(hist)
    click.echo("Evaluate successfully completed!")
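The evaluate body above leans on two Click mechanisms: ctx.meta, a dictionary shared by all contexts of the same application, and ctx.invoke / ctx.forward, which run another command's callback programmatically. A minimal standalone illustration with hypothetical commands (not part of this package):

import click


@click.command()
@click.pass_context
def child(ctx):
    # reads state that the calling command stored in the shared meta dict
    click.echo("criterion = %s" % ctx.meta.get('criterion'))


@click.command()
@click.pass_context
def parent(ctx):
    ctx.meta['criterion'] = 'eer'  # shared state, not a CLI option
    ctx.invoke(child)              # call child's callback with its defaults


if __name__ == '__main__':
    parent()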