bob / bob.pad.base · Commits

Commit 77a00885, authored May 14, 2018 by Theophile GENTILHOMME

    Add bob pad and bob vuln groups of commands

Parent: b63237ce
Pipeline #20067 passed in 22 minutes and 5 seconds
Changes: 13
bob/pad/base/script/det.py  deleted 100644 → 0
"""Generates PAD ISO compliant EPC based on the score files
"""
import
click
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
verbosity_option
from
bob.bio.base.score
import
load
from
.
import
figure
@
click
.
command
()
@
common_options
.
scores_argument
(
min_arg
=
2
,
force_eval
=
True
,
nargs
=-
1
)
@
common_options
.
output_plot_file_option
(
default_out
=
'det.pdf'
)
@
common_options
.
legends_option
()
@
common_options
.
no_legend_option
()
@
common_options
.
legend_loc_option
(
dflt
=
'upper-right'
)
@
common_options
.
title_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
style_option
()
@
common_options
.
figsize_option
()
@
common_options
.
bool_option
(
'no-spoof'
,
'ns'
,
''
,
False
)
@
verbosity_option
()
@
common_options
.
axes_val_option
(
dflt
=
'0.01,95,0.01,95'
)
@
common_options
.
x_rotation_option
(
dflt
=
45
)
@
common_options
.
x_label_option
()
@
common_options
.
y_label_option
()
@
click
.
option
(
'-c'
,
'--criteria'
,
default
=
None
,
show_default
=
True
,
help
=
'Criteria for threshold selection'
,
type
=
click
.
Choice
((
'eer'
,
'min-hter'
,
'bpcer20'
)))
@
click
.
option
(
'--real-data/--no-real-data'
,
default
=
True
,
show_default
=
True
,
help
=
'If False, will annotate the plots hypothetically, instead '
'of with real data values of the calculated error rates.'
)
@
click
.
pass_context
def
det
(
ctx
,
scores
,
criteria
,
real_data
,
**
kwargs
):
"""Plot DET
You need to provide 2 or 4 scores
files for each PAD system in this order:
\b
* licit development scores
* licit evaluation scores
* spoof development scores (when ``--no-spoof`` is False (default))
* spoof evaluation scores (when ``--no-spoof`` is False (default))
Examples:
$ bob pad det --no-spoof dev-scores eval-scores
$ bob pad det {licit,spoof}/scores-{dev,eval}
"""
process
=
figure
.
Det
(
ctx
,
scores
,
True
,
load
.
split
,
criteria
,
real_data
)
process
.
run
()
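As a quick way to exercise a command like this outside the installed `bob pad` command group, click's built-in test runner can invoke it directly. This is a minimal sketch, not part of the commit: the score file paths are hypothetical placeholders and are assumed to exist.

# Sketch: driving the `det` command with click's test runner.
# 'licit/scores-dev' and 'licit/scores-eval' are hypothetical paths.
from click.testing import CliRunner

runner = CliRunner()
# Two files suffice when spoof curves are disabled via --no-spoof;
# otherwise pass the four files in the documented licit/spoof order.
result = runner.invoke(det, ['--no-spoof', '-o', 'det.pdf',
                             'licit/scores-dev', 'licit/scores-eval'])
print(result.exit_code, result.output)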
bob/pad/base/script/epc.py  deleted 100644 → 0
"""Generates PAD ISO compliant EPC based on the score files
"""
import
click
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
verbosity_option
from
bob.bio.base.score
import
load
from
.
import
figure
@
click
.
command
()
@
common_options
.
scores_argument
(
min_arg
=
2
,
force_eval
=
True
,
nargs
=-
1
)
@
common_options
.
output_plot_file_option
(
default_out
=
'epc.pdf'
)
@
common_options
.
legends_option
()
@
common_options
.
no_legend_option
()
@
common_options
.
legend_loc_option
()
@
common_options
.
title_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
x_label_option
()
@
common_options
.
y_label_option
()
@
common_options
.
figsize_option
()
@
common_options
.
style_option
()
@
common_options
.
bool_option
(
'iapmr'
,
'I'
,
'Whether to plot the IAPMR related lines or not.'
,
True
)
@
common_options
.
style_option
()
@
verbosity_option
()
@
click
.
pass_context
def
epc
(
ctx
,
scores
,
**
kwargs
):
"""Plot EPC (expected performance curve):
You need to provide 4 score
files for each biometric system in this order:
\b
* licit development scores
* licit evaluation scores
* spoof development scores
* spoof evaluation scores
See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
vulnerability analysis.
Examples:
$ bob pad epc dev-scores eval-scores
$ bob pad epc -o my_epc.pdf dev-scores1 eval-scores1
$ bob pad epc {licit,spoof}/scores-{dev,eval}
"""
process
=
figure
.
Epc
(
ctx
,
scores
,
True
,
load
.
split
)
process
.
run
()
@
click
.
command
()
@
common_options
.
scores_argument
(
min_arg
=
2
,
force_eval
=
True
,
nargs
=-
1
)
@
common_options
.
output_plot_file_option
(
default_out
=
'epsc.pdf'
)
@
common_options
.
legends_option
()
@
common_options
.
no_legend_option
()
@
common_options
.
legend_loc_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
x_label_option
()
@
common_options
.
y_label_option
()
@
common_options
.
figsize_option
()
@
common_options
.
style_option
()
@
common_options
.
bool_option
(
'wer'
,
'w'
,
'Whether to plot the WER related lines or not.'
,
True
)
@
common_options
.
bool_option
(
'three-d'
,
'D'
,
'If true, generate 3D plots'
,
False
)
@
common_options
.
bool_option
(
'iapmr'
,
'I'
,
'Whether to plot the IAPMR related lines or not.'
,
False
)
@
click
.
option
(
'-c'
,
'--criteria'
,
default
=
"eer"
,
show_default
=
True
,
help
=
'Criteria for threshold selection'
,
type
=
click
.
Choice
((
'eer'
,
'min-hter'
,
'bpcer20'
)))
@
click
.
option
(
'-vp'
,
'--var-param'
,
default
=
"omega"
,
show_default
=
True
,
help
=
'Name of the varying parameter'
,
type
=
click
.
Choice
((
'omega'
,
'beta'
)))
@
click
.
option
(
'-fp'
,
'--fixed-param'
,
default
=
0.5
,
show_default
=
True
,
help
=
'Value of the fixed parameter'
,
type
=
click
.
FLOAT
)
@
verbosity_option
()
@
click
.
pass_context
def
epsc
(
ctx
,
scores
,
criteria
,
var_param
,
fixed_param
,
three_d
,
**
kwargs
):
"""Plot EPSC (expected performance spoofing curve):
You need to provide 4 score
files for each biometric system in this order:
\b
* licit development scores
* licit evaluation scores
* spoof development scores
* spoof evaluation scores
See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
vulnerability analysis.
Note that when using 3D plots with option ``--three-d``, you cannot plot
both WER and IAPMR on the same figure (which is possible in 2D).
Examples:
$ bob pad epsc -o my_epsc.pdf dev-scores1 eval-scores1
$ bob pad epsc -D {licit,spoof}/scores-{dev,eval}
"""
if
three_d
:
if
(
ctx
.
meta
[
'wer'
]
and
ctx
.
meta
[
'iapmr'
]):
raise
click
.
BadParameter
(
'Cannot plot both WER and IAPMR in 3D'
)
process
=
figure
.
Epsc3D
(
ctx
,
scores
,
True
,
load
.
split
,
criteria
,
var_param
,
fixed_param
)
else
:
process
=
figure
.
Epsc
(
ctx
,
scores
,
True
,
load
.
split
,
criteria
,
var_param
,
fixed_param
)
process
.
run
()
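Both commands in this file hand `load.split` to the figure classes as their score loader. As a point of reference, here is a minimal sketch of the loader contract this code relies on; it assumes, based on the documented behaviour of `bob.bio.base.score.load` rather than anything in this commit, that `split` reads a 4- or 5-column score file and returns a (negatives, positives) pair of arrays. The path is a hypothetical placeholder.

# Sketch of the loader contract assumed by figure.Epc/Epsc.
from bob.bio.base.score import load

negatives, positives = load.split('licit/scores-dev')  # hypothetical path
print(len(negatives), len(positives))  # impostor and genuine score counts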
bob/pad/base/script/figure.py  modified
@@ -519,11 +519,11 @@ class Epsc3D(Epsc):
 class Det(PadPlot):
     '''DET for PAD'''
 
-    def __init__(self, ctx, scores, evaluation, func_load, criteria, real_data):
+    def __init__(self, ctx, scores, evaluation, func_load, criteria, real_data,
+                 no_spoof):
         super(Det, self).__init__(ctx, scores, evaluation, func_load)
-        self._no_spoof = False if 'no_spoof' not in ctx.meta else \
-            ctx.meta['no_spoof']
-        self._criteria = criteria
+        self._no_spoof = no_spoof
+        self._criteria = criteria or 'eer'
         self._real_data = True if real_data is None else real_data
 
     def compute(self, idx, input_scores, input_names):
@@ -534,12 +534,11 @@ class Det(PadPlot):
         licit_eval_pos = input_scores[1][1]
         spoof_eval_neg = input_scores[3][0] if len(input_scores) > 2 else None
         spoof_eval_pos = input_scores[3][1] if len(input_scores) > 2 else None
         det(
             licit_eval_neg,
             licit_eval_pos,
             self._points,
-            color=self._colors[idx],
+            color='C0',
             linestyle='-',
             label=self._label("licit", input_names[0], idx)
         )
@@ -548,12 +547,12 @@ class Det(PadPlot):
             spoof_eval_neg,
             spoof_eval_pos,
             self._points,
-            color=self._colors[idx],
-            linestyle='--',
+            color='C3',
+            linestyle=':',
             label=self._label("spoof", input_names[3], idx)
         )
-        if self._criteria is None:
+        if self._criteria is None or self._no_spoof:
             return
         thres_baseline = calc_threshold(
@@ -564,10 +563,12 @@ class Det(PadPlot):
         farfrr_licit = farfrr(
             licit_eval_neg,
             licit_eval_pos,
-            thres_baseline)  # calculate test frr @ EER (licit scenario)
+            thres_baseline
+        )  # calculate test frr @ EER (licit scenario)
         farfrr_spoof = farfrr(
             spoof_eval_neg,
             spoof_eval_pos,
-            thres_baseline)  # calculate test frr @ EER (spoof scenario)
+            thres_baseline
+        )  # calculate test frr @ EER (spoof scenario)
         farfrr_licit_det = [ppndf(i) for i in farfrr_licit]
@@ -585,7 +586,7 @@ class Det(PadPlot):
                 xmax=axlim[3],
                 color='k',
                 linestyle='--',
-                label="FRR @ EER")  # vertical FRR threshold
+                label="FRR @ EER")
         else:
             mpl.axhline(
                 y=farfrr_licit_det[1],
@@ -594,40 +595,42 @@ class Det(PadPlot):
                 color='k',
                 linestyle='--',
-                label="FRR = %.2f%%" % (farfrr_licit[1] * 100))  # vertical FRR threshold
+                label="FRR = %.2f%%" % (farfrr_licit[1] * 100))
         mpl.plot(
             farfrr_licit_det[0],
             farfrr_licit_det[1],
             'o',
-            color=self._colors[idx],
+            color='C0',
             markersize=9)  # FAR point, licit scenario
         mpl.plot(
             farfrr_spoof_det[0],
             farfrr_spoof_det[1],
             'o',
-            color=self._colors[idx],
+            color='C3',
             markersize=9)  # FAR point, spoof scenario
 
         # annotate the FAR points
         xyannotate_licit = [
-            ppndf(0.7 * farfrr_licit[0]),
-            ppndf(1.8 * farfrr_licit[1])
+            0.6 * farfrr_licit_det[0],
+            0.6 * farfrr_licit_det[1],
         ]
         xyannotate_spoof = [
-            ppndf(0.95 * farfrr_spoof[0]),
-            ppndf(1.8 * farfrr_licit[1])
+            0.6 * farfrr_spoof_det[0],
+            0.6 * farfrr_spoof_det[1],
         ]
 
         if not self._real_data:
-            mpl.annotate('FMR @\noperating point',
+            mpl.annotate('FMR @ operating point',
                          xy=(farfrr_licit_det[0], farfrr_licit_det[1]),
                          xycoords='data',
                          xytext=(xyannotate_licit[0], xyannotate_licit[1]),
                          color=self._colors[idx])
-            mpl.annotate('IAPMR @\noperating point',
+            mpl.annotate('IAPMR @ operating point',
                          xy=(farfrr_spoof_det[0], farfrr_spoof_det[1]),
                          xycoords='data',
                          xytext=(xyannotate_spoof[0], xyannotate_spoof[1]),
@@ -638,14 +641,14 @@ class Det(PadPlot):
                          xy=(farfrr_licit_det[0], farfrr_licit_det[1]),
                          xycoords='data',
                          xytext=(xyannotate_licit[0], xyannotate_licit[1]),
-                         color=self._colors[idx],
+                         color='C0',
                          size='large')
-            mpl.annotate('IAPMR=\n%.2f%%' % (farfrr_spoof[0] * 100),
+            mpl.annotate('IAPMR=%.2f%%' % (farfrr_spoof[0] * 100),
                          xy=(farfrr_spoof_det[0], farfrr_spoof_det[1]),
                          xycoords='data',
                          xytext=(xyannotate_spoof[0], xyannotate_spoof[1]),
-                         color=self._colors[idx],
+                         color='C3',
                          size='large')
 
     def end_process(self):
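The annotation hunks above change how the label offsets are computed: the old code scaled the raw error rates and then mapped them onto the DET axes (`ppndf(0.7 * farfrr_licit[0])`), while the new code maps first and scales the resulting DET coordinates (`0.6 * farfrr_licit_det[0]`). A small numeric sketch of the difference, assuming `ppndf` behaves like the standard-normal inverse CDF used for DET axes (as in `bob.measure`; this stub is an assumption, not the library's implementation):

# Hedged sketch only: bob.measure.ppndf is assumed to act like norm.ppf here.
from scipy.stats import norm

def ppndf(p):
    return norm.ppf(p)  # probability -> normal deviate (DET axis units)

far = 0.05                  # hypothetical FAR at the operating point
old_x = ppndf(0.7 * far)    # old: scale the rate, then transform
new_x = 0.6 * ppndf(far)    # new: transform the rate, then scale
print(old_x, new_x)         # both nudge the label away from the plotted point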
bob/pad/base/script/fmr_iapmr.py  deleted 100644 → 0
"""Generates PAD ISO compliant FMR vs IAPMR plots based on the score files
"""
import
click
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
verbosity_option
from
bob.bio.base.score
import
load
from
.
import
figure
@
click
.
command
()
@
common_options
.
scores_argument
(
min_arg
=
2
,
force_eval
=
True
,
nargs
=-
1
)
@
common_options
.
output_plot_file_option
(
default_out
=
'fmr_iapmr.pdf'
)
@
common_options
.
legends_option
()
@
common_options
.
no_legend_option
()
@
common_options
.
legend_loc_option
()
@
common_options
.
title_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
style_option
()
@
common_options
.
figsize_option
()
@
verbosity_option
()
@
common_options
.
axes_val_option
()
@
common_options
.
x_rotation_option
()
@
common_options
.
x_label_option
()
@
common_options
.
y_label_option
()
@
common_options
.
semilogx_option
()
@
click
.
pass_context
def
fmr_iapmr
(
ctx
,
scores
,
**
kwargs
):
"""Plot FMR vs IAPMR
You need to provide 2 or 4 scores
files for each PAD system in this order:
\b
* licit development scores
* licit evaluation scores
* spoof development scores (when ``--no-spoof`` is False (default))
* spoof evaluation scores (when ``--no-spoof`` is False (default))
Examples:
$ bob pad fmr_iapmr --no-spoof dev-scores eval-scores
$ bob pad fmr_iapmr {licit,spoof}/scores-{dev,eval}
"""
process
=
figure
.
FmrIapmr
(
ctx
,
scores
,
True
,
load
.
split
)
process
.
run
()
bob/pad/base/script/gen.py  deleted 100644 → 0
"""Generate random scores.
"""
import
os
import
logging
import
numpy
import
click
from
click.types
import
FLOAT
from
bob.extension.scripts.click_helper
import
verbosity_option
from
bob.core
import
random
from
bob.io.base
import
create_directories_safe
logger
=
logging
.
getLogger
(
__name__
)
NUM_GENUINE_ACCESS
=
5000
NUM_ZEIMPOSTORS
=
5000
NUM_PA
=
5000
def
gen_score_distr
(
mean_gen
,
mean_zei
,
mean_pa
,
sigma_gen
=
1
,
sigma_zei
=
1
,
sigma_pa
=
1
):
mt
=
random
.
mt19937
()
# initialise the random number generator
genuine_generator
=
random
.
normal
(
numpy
.
float32
,
mean_gen
,
sigma_gen
)
zei_generator
=
random
.
normal
(
numpy
.
float32
,
mean_zei
,
sigma_zei
)
pa_generator
=
random
.
normal
(
numpy
.
float32
,
mean_pa
,
sigma_pa
)
genuine_scores
=
[
genuine_generator
(
mt
)
for
i
in
range
(
NUM_GENUINE_ACCESS
)]
zei_scores
=
[
zei_generator
(
mt
)
for
i
in
range
(
NUM_ZEIMPOSTORS
)]
pa_scores
=
[
pa_generator
(
mt
)
for
i
in
range
(
NUM_PA
)]
return
genuine_scores
,
zei_scores
,
pa_scores
def
write_scores_to_file
(
neg
,
pos
,
filename
,
attack
=
False
):
"""Writes score distributions into 4-column score files. For the format of
the 4-column score files, please refer to Bob's documentation.
Parameters
----------
neg : array_like
Scores for negative samples.
pos : array_like
Scores for positive samples.
filename : str
The path to write the score to.
"""
create_directories_safe
(
os
.
path
.
dirname
(
filename
))
with
open
(
filename
,
'wt'
)
as
f
:
for
i
in
pos
:
f
.
write
(
'x x foo %f
\n
'
%
i
)
for
i
in
neg
:
if
attack
:
f
.
write
(
'x attack foo %f
\n
'
%
i
)
else
:
f
.
write
(
'x y foo %f
\n
'
%
i
)
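Each call above emits one line per score in Bob's 4-column format (claimed identity, real identity, a test label, then the score). Illustrative output, with made-up score values:

x x foo 9.218473        (positive: claimed and real identity match)
x y foo -0.871623       (negative, licit scenario: zero-effort impostor)
x attack foo 4.903112   (negative, spoof scenario, written when attack=True)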
@click.command()
@click.argument('outdir')
@click.option('--mean-gen', default=10, type=FLOAT, show_default=True)
@click.option('--mean-zei', default=0, type=FLOAT, show_default=True)
@click.option('--mean-pa', default=5, type=FLOAT, show_default=True)
@verbosity_option()
def gen(outdir, mean_gen, mean_zei, mean_pa):
    """Generate random scores.

    Generates random scores for three types of verification attempts:
    genuine users, zero-effort impostors and spoofing attacks, and writes them
    into 4-column score files for the so-called licit and spoof scenarios. The
    scores are generated using a Gaussian distribution whose mean is an input
    parameter. The generated scores can be used as hypothetical datasets.
    """
    # Generate the data
    genuine_dev, zei_dev, pa_dev = gen_score_distr(mean_gen, mean_zei, mean_pa)
    genuine_eval, zei_eval, pa_eval = gen_score_distr(mean_gen, mean_zei,
                                                      mean_pa)

    # Write the data into files
    write_scores_to_file(genuine_dev, zei_dev,
                         os.path.join(outdir, 'licit', 'scores-dev'))
    write_scores_to_file(genuine_eval, zei_eval,
                         os.path.join(outdir, 'licit', 'scores-eval'))
    write_scores_to_file(genuine_dev, pa_dev,
                         os.path.join(outdir, 'spoof', 'scores-dev'),
                         attack=True)
    write_scores_to_file(genuine_eval, pa_eval,
                         os.path.join(outdir, 'spoof', 'scores-eval'),
                         attack=True)
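A run such as `bob pad gen /tmp/sim_scores` (the output directory here is a hypothetical example) therefore produces the four score files the plotting commands expect:

/tmp/sim_scores/licit/scores-dev     genuine vs. zero-effort impostors (dev)
/tmp/sim_scores/licit/scores-eval    genuine vs. zero-effort impostors (eval)
/tmp/sim_scores/spoof/scores-dev     genuine vs. presentation attacks (dev)
/tmp/sim_scores/spoof/scores-eval    genuine vs. presentation attacks (eval)

which matches the `{licit,spoof}/scores-{dev,eval}` pattern used in the examples throughout this commit.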
bob/pad/base/script/histograms.py  deleted 100644 → 0
"""Generates PAD ISO compliant histograms based on the score files
"""
import
click
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
(
verbosity_option
,
bool_option
)
from
bob.bio.base.score
import
load
from
.
import
figure
@
click
.
command
()
@
common_options
.
scores_argument
(
nargs
=-
1
)
@
common_options
.
title_option
()
@
common_options
.
output_plot_file_option
(
default_out
=
'hist.pdf'
)
@
common_options
.
eval_option
()
@
common_options
.
n_bins_option
()
@
common_options
.
criterion_option
()
@
common_options
.
thresholds_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
print_filenames_option
(
dflt
=
False
)
@
common_options
.
legends_option
()
@
common_options
.
figsize_option
(
dflt
=
None
)
@
common_options
.
subplot_option
()
@
common_options
.
legend_ncols_option
()
@
common_options
.
style_option
()
@
verbosity_option
()
@
click
.
pass_context
def
hist
(
ctx
,
scores
,
evaluation
,
**
kwargs
):
""" Plots histograms of Bona fida and PA along with threshold
criterion.
You need to provide one or more development score file(s) for each
experiment. You can also provide eval files along with dev files. If only
dev scores are provided, you must use flag `--no-evaluation`.
By default, when eval-scores are given, only eval-scores histograms are
displayed with threshold line
computed from dev-scores. If you want to display dev-scores distributions
as well, use ``--show-dev`` option.
Examples:
$ bob pad hist dev-scores
$ bob pad hist dev-scores1 eval-scores1 dev-scores2
eval-scores2
$ bob pad hist --criterion min-hter dev-scores1 eval-scores1
"""
process
=
figure
.
HistPad
(
ctx
,
scores
,
evaluation
,
load
.
split
)
process
.
run
()
@
click
.
command
()
@
common_options
.
scores_argument
(
nargs
=-
1
,
min_arg
=
2
)
@
common_options
.
title_option
()
@
common_options
.
output_plot_file_option
(
default_out
=
'vuln.pdf'
)
@
common_options
.
eval_option
()
@
common_options
.
n_bins_option
()
@
common_options
.
criterion_option
()
@
common_options
.
thresholds_option
()
@
common_options
.
const_layout_option
()
@
common_options
.
print_filenames_option
(
dflt
=
False
)
@
bool_option
(
'iapmr-line'
,
'I'
,
'Whether to plot the IAPMR related lines or not.'
,
True
)
@
bool_option
(
'real-data'
,
'R'
,
'If False, will annotate the plots hypothetically, instead '
'of with real data values of the calculated error rates.'
,
True
)
@
common_options
.
legends_option
()
@
common_options
.
figsize_option
(
dflt
=
None
)
@
common_options
.
subplot_option
()
@
common_options
.
legend_ncols_option
()
@
common_options
.
style_option
()
@
verbosity_option
()
@
click
.
pass_context
def
vuln_hist
(
ctx
,
scores
,
evaluation
,
**
kwargs
):
'''Vulnerability analysis distributions.
Plots the histogram of score distributions. You need to provide 4 score
files for each biometric system in this order:
\b
* licit development scores
* licit evaluation scores
* spoof development scores
* spoof evaluation scores
See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
vulnerability analysis.
You need to provide one or more development score file(s) for each
experiment. You can also provide eval files along with dev files. If only
dev-scores are used set the flag `--no-evaluation` is required in that
case.
By default, when eval-scores are given, only eval-scores histograms are
displayed with threshold line
computed from dev-scores. If you want to display dev-scores distributions
as well, use ``--show-dev`` option.
Examples:
$ bob pad vuln_hist licit/scores-dev licit/scores-eval
\
spoof/scores-dev spoof/scores-eval
$ bob pad vuln_hist {licit,spoof}/scores-{dev,eval}
'''
process
=
figure
.
HistVuln
(
ctx
,
scores
,
evaluation
,
load
.
split
)
process
.
run
()
bob/pad/base/script/metrics.py  deleted 100644 → 0
"""Calculates PAD ISO compliant metrics based on the score files
"""
import
click
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
(
verbosity_option
,
open_file_mode_option
)
from
bob.bio.base.score
import
load
from
.
import
figure
@
click
.
command
(
context_settings
=
dict
(
token_normalize_func
=
lambda
x
:
x
.
lower
()))
@
common_options
.
scores_argument
(
nargs
=-
1
)
@
common_options
.
eval_option
()
@
common_options
.
table_option
()
@
open_file_mode_option
()
@
common_options
.
output_log_metric_option
()
@
common_options
.
legends_option
()
@
verbosity_option
()
@
click
.
pass_context
def
metrics
(
ctx
,
scores
,
evaluation
,
**
kwargs
):
"""PAD ISO compliant metrics.
Reports several metrics based on a selected thresholds on the development
set and apply them on evaluation sets (if provided). The used thresholds
are:
bpcer20 When APCER is set to 5%.
eer When BPCER == APCER.
min-hter When HTER is minimum.
This command produces one table per sytem. Format of the table can be
changed through option ``--tablefmt``.
Most metrics are according to the ISO/IEC 30107-3:2017 "Information
technology -- Biometric presentation attack detection -- Part 3: Testing
and reporting" standard. The reported metrics are:
APCER: Attack Presentation Classification Error Rate
BPCER: Bona-fide Presentation Classification Error Rate
HTER (non-ISO): Half Total Error Rate ((BPCER+APCER)/2)
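As a worked example of the HTER formula above: at a threshold where APCER = 5% and BPCER = 1%, the reported HTER is (1% + 5%) / 2 = 3%.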