bob / bob.pad.base / Commits / 861911a5

Commit 861911a5, authored Jun 26, 2018 by Amir MOHAMMADI
Merge branch 'finalplot' into 'master'

Finalization of plots. Closes #22. See merge request !43.

Parents: ed755c46 7ebd988a
Pipeline #21348 passed with stages in 8 minutes and 34 seconds
Changes: 10 files
bob/pad/base/script/error_utils.py  (view file @ 861911a5)
...
...
@@ -5,6 +5,44 @@
import bob.measure
import numpy
from bob.measure import (
    far_threshold, eer_threshold, min_hter_threshold)


def calc_threshold(method, neg, pos):
    """Calculates the threshold based on the given method.

    The scores should be sorted!

    Parameters
    ----------
    method : str
        One of ``bpcer20``, ``eer``, ``min-hter``.
    neg : array_like
        The negative scores. They should be sorted!
    pos : array_like
        The positive scores. They should be sorted!

    Returns
    -------
    float
        The calculated threshold.

    Raises
    ------
    ValueError
        If method is unknown.
    """
    method = method.lower()
    if method == 'bpcer20':
        threshold = far_threshold(neg, pos, 0.05, True)
    elif method == 'eer':
        threshold = eer_threshold(neg, pos, True)
    elif method == 'min-hter':
        threshold = min_hter_threshold(neg, pos, True)
    else:
        raise ValueError("Unknown threshold criteria: {}".format(method))
    return threshold


def calc_pass_rate(threshold, attacks):
...
...
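For illustration, a minimal sketch of how the new helper could be driven; the score arrays below are made up, and calc_threshold only assumes they are sorted, as its docstring requires:

    import numpy
    from bob.pad.base.script.error_utils import calc_threshold

    # hypothetical, pre-sorted development scores
    attacks = numpy.sort(numpy.random.normal(-5, 1, 1000))    # negatives
    bona_fide = numpy.sort(numpy.random.normal(5, 1, 1000))   # positives

    for criterion in ('bpcer20', 'eer', 'min-hter'):
        print(criterion, calc_threshold(criterion, attacks, bona_fide))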
bob/pad/base/script/pad.py  (view file @ 861911a5)
...
...
@@ -3,9 +3,10 @@
import click
import pkg_resources
from click_plugins import with_plugins
from bob.extension.scripts.click_helper import AliasedGroup


@with_plugins(pkg_resources.iter_entry_points('bob.pad.cli'))
@click.group()
@click.group(cls=AliasedGroup)
def pad():
    """Presentation Attack Detection related commands."""
    pass
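For context, subcommands attach to this group through the ``bob.pad.cli`` setuptools entry point that ``with_plugins`` iterates over; a package exposing an extra subcommand would register it roughly like this in its setup.py (hypothetical command name and module path):

    entry_points={
        'bob.pad.cli': [
            'my-plot = mypackage.script.commands:my_plot',
        ],
    },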
bob/pad/base/script/pad_commands.py  (view file @ 861911a5)
"""The main entry for bob
.
pad
and its (click-based) script
s.
"""The main entry for bob
pad
command
s.
"""
import
click
import
pkg_resources
from
click_plugins
import
with_plugins
from
bob.measure.script
import
common_options
from
bob.extension.scripts.click_helper
import
(
verbosity_option
,
open_file_mode_option
)
from
bob.extension.scripts.click_helper
import
verbosity_option
import
bob.bio.base.script.gen
as
bio_gen
import
bob.bio.base.script.figure
as
bio_figure
import
bob.measure.script.figure
as
measure_figure
from
bob.bio.base.score
import
load
from
.
import
figure
from
.
import
pad_figure
as
figure
SCORE_FORMAT
=
(
"Files must be 4-col format, see "
":py:func:`bob.bio.base.score.load.four_column`."
)
CRITERIA
=
(
'eer'
,
'min-hter'
,
'bpcer20'
)
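As a reminder of what the referenced 4-column format looks like, each line of a score file carries the claimed identity, the real identity, a test label and the score; the content below is purely illustrative:

    client1 client1 sample-001 12.37
    client1 attack  sample-042 -3.52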
@click.command()
@click.argument('outdir')
@click.option('-mm', '--mean-match', default=10, type=click.FLOAT,
              show_default=True)
@click.option('-mnm', '--mean-non-match', default=-10, type=click.FLOAT,
              show_default=True)
@click.option('-n', '--n-sys', default=1, type=click.INT, show_default=True)
@click.option('--five-col/--four-col', default=False, show_default=True)
@verbosity_option()
@click.pass_context
def gen(ctx, outdir, mean_match, mean_non_match, n_sys, five_col):
def gen(ctx, outdir, mean_match, mean_non_match, n_sys):
    """Generate random scores.

    Generates random scores in 4-col or 5-col format. The scores are
    generated using a Gaussian distribution whose mean is an input parameter.
    The generated scores can be used as hypothetical datasets.

    Invokes :py:func:`bob.bio.base.script.commands.gen`.
    """
    ctx.meta['five_col'] = False
    ctx.forward(bio_gen.gen)
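Conceptually, the forwarded ``bio_gen.gen`` draws the two score populations from Gaussians centred on the requested means; a rough stand-alone approximation (not the actual bob.bio.base implementation) is:

    import numpy

    def fake_scores(mean_match=10.0, mean_non_match=-10.0, n=5000):
        # one unit-variance Gaussian per class, as hinted by the docstring above
        pos = numpy.random.normal(mean_match, 1, n)
        neg = numpy.random.normal(mean_non_match, 1, n)
        return neg, pos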
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.title_option()
@common_options.legends_option()
@common_options.legend_loc_option(dflt='lower-right')
@common_options.no_legend_option()
@common_options.sep_dev_eval_option()
@common_options.output_plot_file_option(default_out='roc.pdf')
@common_options.eval_option()
@common_options.points_curve_option()
@common_options.semilogx_option(True)
@common_options.axes_val_option(dflt='1e-4,1,1e-4,1')
@common_options.x_rotation_option()
@common_options.lines_at_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option()
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
def roc(ctx, scores, evaluation, **kargs):
    """Plot ROC (receiver operating characteristic) curve.

    The plot will represent the false match rate on the horizontal axis and
    the false non-match rate on the vertical axis. The values for the axes
    will be computed using :py:func:`bob.measure.roc`.

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files. If only
    dev scores are used, the flag `--no-evaluation` is required. Files must
    be in 4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob pad roc -v dev-scores

        $ bob pad roc -v dev-scores1 eval-scores1 dev-scores2 eval-scores2

        $ bob pad roc -v -o my_roc.pdf dev-scores1 eval-scores1
    """
    process = bio_figure.Roc(ctx, scores, evaluation, load.split)


@common_options.metrics_command(
    common_options.METRICS_HELP.format(
        names='FtA, APCER, BPCER, FAR, FRR, ACER',
        criteria=CRITERIA, score_format=SCORE_FORMAT,
        command='bob pad metrics'),
    criteria=CRITERIA)
def metrics(ctx, scores, evaluation, **kwargs):
    process = figure.Metrics(ctx, scores, evaluation, load.split)
    process.run()
@click.command()
@common_options.scores_argument(min_arg=1, force_eval=True, nargs=-1)
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.title_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option()
@common_options.bool_option('no-spoof', 'ns', '', False)
@verbosity_option()
@common_options.axes_val_option(dflt='0.01,95,0.01,95')
@common_options.x_rotation_option(dflt=45)
@common_options.x_label_option()
@common_options.y_label_option()
@click.option('-c', '--criteria', default=None, show_default=True,
              help='Criteria for threshold selection',
              type=click.Choice(('eer', 'min-hter', 'bpcer20')))
@click.option('--real-data/--no-real-data', default=True, show_default=True,
              help='If False, will annotate the plots hypothetically, instead '
                   'of with real data values of the calculated error rates.')
@click.pass_context
def det(ctx, scores, criteria, real_data, **kwargs):
    """Plot DET

    You need to provide 2 score files for each PAD system in this order:

    \b
    * licit development scores
    * licit evaluation scores

    Examples:

        $ bob pad det --no-spoof dev-scores eval-scores

        $ bob pad det {licit,spoof}/scores-{dev,eval}
    """
    process = figure.Det(ctx, scores, True, load.split, criteria, real_data,
                         True)


@common_options.roc_command(
    common_options.ROC_HELP.format(
        score_format=SCORE_FORMAT, command='bob pad roc'))
def roc(ctx, scores, evaluation, **kwargs):
    process = figure.Roc(ctx, scores, evaluation, load.split)
    process.run()
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.title_option()
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.print_filenames_option(dflt=False)
@common_options.legends_option()
@common_options.figsize_option(dflt=None)
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
    """Plots histograms of bona fide and PA along with the threshold
    criterion.

    You need to provide one or more development score file(s) for each
    experiment. You can also provide eval files along with dev files. If only
    dev scores are provided, you must use the flag `--no-evaluation`.

    By default, when eval-scores are given, only eval-score histograms are
    displayed, with the threshold line computed from dev-scores. If you want
    to display dev-score distributions as well, use the ``--show-dev`` option.

    Examples:

        $ bob pad hist dev-scores

        $ bob pad hist dev-scores1 eval-scores1 dev-scores2 eval-scores2

        $ bob pad hist --criterion min-hter dev-scores1 eval-scores1
    """
    process = figure.HistPad(ctx, scores, evaluation, load.split)


@common_options.det_command(
    common_options.DET_HELP.format(
        score_format=SCORE_FORMAT, command='bob pad det'))
def det(ctx, scores, evaluation, **kwargs):
    process = figure.Det(ctx, scores, evaluation, load.split)
    process.run()
@click.command()
@common_options.scores_argument(min_arg=1, force_eval=True, nargs=-1)
@common_options.title_option()
@common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-center')
@common_options.no_legend_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option()
@verbosity_option()
@click.pass_context
def epc(ctx, scores, **kargs):
    """Plot EPC (expected performance curve).

    Plots the error rate on the eval set depending on a threshold selected
    a-priori on the development set, and accounts for a varying relative cost
    in [0; 1] of FPR and FNR when calculating the threshold.

    You need to provide one or more development score and eval file(s) for
    each experiment. Files must be in 4- or 5-column format, see
    :py:func:`bob.bio.base.score.load.four_column` and
    :py:func:`bob.bio.base.score.load.five_column` for details.

    Examples:

        $ bob bio epc -v dev-scores eval-scores

        $ bob bio epc -v -o my_epc.pdf dev-scores1 eval-scores1
    """
    process = measure_figure.Epc(ctx, scores, True, load.split)


@common_options.epc_command(
    common_options.EPC_HELP.format(
        score_format=SCORE_FORMAT, command='bob pad epc'))
def epc(ctx, scores, **kwargs):
    process = measure_figure.Epc(ctx, scores, True, load.split, hter='ACER')
    process.run()
@click.command(context_settings=dict(token_normalize_func=lambda x: x.lower()))
@common_options.scores_argument(nargs=-1)
@common_options.eval_option()
@common_options.table_option()
@open_file_mode_option()
@common_options.output_log_metric_option()
@common_options.legends_option()
@verbosity_option()
@click.pass_context
def metrics(ctx, scores, evaluation, **kwargs):
    """PAD ISO compliant metrics.

    Reports several metrics based on a threshold selected on the development
    set and applies it to the evaluation set(s), if provided. The supported
    thresholds are:

    bpcer20     When APCER is set to 5%.
    eer         When BPCER == APCER.
    min-hter    When HTER is minimum.

    This command produces one table per system. The format of the table can
    be changed through the option ``--tablefmt``.

    Most metrics follow the ISO/IEC 30107-3:2017 "Information technology --
    Biometric presentation attack detection -- Part 3: Testing and reporting"
    standard. The reported metrics are:

    APCER: Attack Presentation Classification Error Rate
    BPCER: Bona-fide Presentation Classification Error Rate
    HTER (non-ISO): Half Total Error Rate ((BPCER + APCER) / 2)

    Examples:

        $ bob pad metrics /path/to/scores-dev

        $ bob pad metrics /path/to/scores-dev /path/to/scores-eval

        $ bob pad metrics /path/to/system{1,2,3}/score-{dev,eval}
    """
    process = figure.Metrics(ctx, scores, evaluation, load.split)
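As a quick worked example of the reported numbers (counts made up): if, at the chosen threshold, 20 out of 400 attack presentations are accepted and 5 out of 200 bona-fide presentations are rejected, then

    APCER = 20 / 400 = 5.0%
    BPCER = 5 / 200 = 2.5%
    ACER  = (APCER + BPCER) / 2 = 3.75%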
@common_options.hist_command(
    common_options.HIST_HELP.format(
        score_format=SCORE_FORMAT, command='bob pad hist'))
def hist(ctx, scores, evaluation, **kwargs):
    process = figure.Hist(ctx, scores, evaluation, load.split)
    process.run()
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.legends_option()
@common_options.sep_dev_eval_option()
@common_options.table_option()
@common_options.eval_option()
@common_options.output_log_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.lines_at_option()
@common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@common_options.linestyles_option()
@verbosity_option()
@click.pass_context
@common_options.evaluate_command(
    common_options.EVALUATE_HELP.format(
        score_format=SCORE_FORMAT, command='bob pad evaluate'),
    criteria=CRITERIA)
def evaluate(ctx, scores, evaluation, **kwargs):
    '''Runs error analysis on score sets

    \b
    1. Computes the threshold using either EER or min. HTER criteria on
       development set scores
    2. Applies the above threshold on evaluation set scores to compute the
       HTER, if an eval-score set is provided
    3. Reports error rates on the console
    4. Plots ROC, EPC, DET curves and score distributions to a multi-page PDF
       file

    You need to provide 2 score files for each biometric system in this order:

    \b
    * development scores
    * evaluation scores

    Examples:

        $ bob pad evaluate -v dev-scores

        $ bob pad evaluate -v scores-dev1 scores-eval1 scores-dev2 scores-eval2

        $ bob pad evaluate -v /path/to/sys-{1,2,3}/scores-{dev,eval}

        $ bob pad evaluate -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
    '''
    # first time erase if existing file
    click.echo("Computing metrics...")
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
    if 'log' in ctx.meta and ctx.meta['log'] is not None:
        click.echo("[metrics] => %s" % ctx.meta['log'])

    # avoid closing pdf file before all figures are plotted
    ctx.meta['closef'] = False
    if evaluation:
        click.echo("Starting evaluate with dev and eval scores...")
    else:
        click.echo("Starting evaluate with dev scores only...")
    click.echo("Computing ROC...")
    # set axes limits for ROC
    ctx.forward(roc)  # use class default plot settings
    click.echo("Computing DET...")
    ctx.forward(det)  # use class default plot settings
    # the last one closes the file
    ctx.meta['closef'] = True
    click.echo("Computing score histograms...")
    ctx.meta['criterion'] = 'eer'  # no criterion passed in evaluate
    ctx.forward(hist)
    click.echo("Evaluate successfully completed!")
    click.echo("[plots] => %s" % (ctx.meta['output']))
    common_options.evaluate_flow(
        ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs)
@common_options.multi_metrics_command(
    common_options.MULTI_METRICS_HELP.format(
        names='FtA, APCER, BPCER, FAR, FRR, ACER',
        criteria=CRITERIA, score_format=SCORE_FORMAT,
        command='bob measure multi-metrics'),
    criteria=CRITERIA)
def multi_metrics(ctx, scores, evaluation, protocols_number, **kwargs):
    ctx.meta['min_arg'] = protocols_number * (2 if evaluation else 1)
    process = figure.MultiMetrics(ctx, scores, evaluation, load.split)
    process.run()
bob/pad/base/script/pad_figure.py  (new file: 0 → 100644, view file @ 861911a5)
'''Runs error analysis on score sets, outputs metrics and plots'''

import bob.measure.script.figure as measure_figure
import bob.bio.base.script.figure as bio_figure
from .error_utils import calc_threshold

ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter')


class Metrics(measure_figure.Metrics):
    '''Compute metrics from score files'''

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Metrics, self).__init__(
            ctx, scores, evaluation, func_load,
            names=('FtA', 'APCER', 'BPCER', 'FAR', 'FRR', 'ACER'))

    def get_thres(self, criterion, dev_neg, dev_pos, far):
        if self._criterion == 'bpcer20':
            return calc_threshold('bpcer20', dev_neg, dev_pos)
        else:
            return super(Metrics, self).get_thres(
                criterion, dev_neg, dev_pos, far)
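For context, the ``bpcer20`` branch above asks bob.measure.far_threshold (via calc_threshold) for the score at which at most 5% of the development attacks would be accepted; a minimal sketch of applying such a threshold afterwards, on made-up, pre-sorted arrays:

    import numpy
    from bob.measure import far_threshold, farfrr

    dev_neg = numpy.sort(numpy.random.normal(-5, 1, 1000))  # attack scores
    dev_pos = numpy.sort(numpy.random.normal(5, 1, 1000))   # bona-fide scores

    thr = far_threshold(dev_neg, dev_pos, 0.05, True)
    apcer, bpcer = farfrr(dev_neg, dev_pos, thr)  # error rates at that threshold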
class MultiMetrics(measure_figure.MultiMetrics):
    '''Compute metrics from score files'''

    def __init__(self, ctx, scores, evaluation, func_load):
        super(MultiMetrics, self).__init__(
            ctx, scores, evaluation, func_load,
            names=('FtA', 'APCER', 'BPCER', 'FAR', 'FRR', 'ACER'))

    def get_thres(self, criterion, dev_neg, dev_pos, far):
        if self._criterion == 'bpcer20':
            return calc_threshold('bpcer20', dev_neg, dev_pos)
        else:
            return super(MultiMetrics, self).get_thres(
                criterion, dev_neg, dev_pos, far)


class Roc(bio_figure.Roc):
    '''ROC for PAD'''

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Roc, self).__init__(ctx, scores, evaluation, func_load)
        self._x_label = ctx.meta.get('x_label') or 'APCER'
        self._y_label = ctx.meta.get('y_label') or '1 - BPCER'


class Det(bio_figure.Det):
    def __init__(self, ctx, scores, evaluation, func_load):
        super(Det, self).__init__(ctx, scores, evaluation, func_load)
        self._x_label = ctx.meta.get('x_label') or 'APCER (%)'
        self._y_label = ctx.meta.get('y_label') or 'BPCER (%)'


class Hist(measure_figure.Hist):
    '''Histograms for PAD'''

    def _setup_hist(self, neg, pos):
        self._title_base = 'PAD'
        self._density_hist(pos[0], n=0, label='Bona Fide', color='C1')
        self._density_hist(
            neg[0], n=1, label='Presentation attack', alpha=0.4, color='C7',
            hatch='\\\\')
bob/pad/base/script/vuln.py  (view file @ 861911a5)
...
...
@@ -3,9 +3,11 @@
import click
import pkg_resources
from click_plugins import with_plugins
from bob.extension.scripts.click_helper import AliasedGroup


@with_plugins(pkg_resources.iter_entry_points('bob.vuln.cli'))
@click.group()
@click.group(cls=AliasedGroup)
def vuln():
    """Presentation Attack Detection related commands."""
    """Vulnerability analysis related commands."""
    pass
bob/pad/base/script/vuln_commands.py  (view file @ 861911a5)
...
...
@@ -11,24 +11,27 @@ from click.types import FLOAT
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import (
    verbosity_option, open_file_mode_option, bool_option)
    verbosity_option, open_file_mode_option,
    bool_option, AliasedGroup, list_float_option)
from bob.core import random
from bob.io.base import create_directories_safe
from bob.bio.base.score import load
from . import figure
from . import vuln_figure as figure

NUM_GENUINE_ACCESS = 5000
NUM_ZEIMPOSTORS = 5000
NUM_PA = 5000


@with_plugins(pkg_resources.iter_entry_points('bob.vuln.cli'))
@click.group()
def vuln():
    """Presentation Vulnerability related commands."""
    pass


def hlines_at_option(dflt=' ', **kwargs):
    '''Get option to draw const FNMR lines'''
    return list_float_option(
        name='hlines-at', short_name='hla',
        desc='If given, draw horizontal lines at the given axis positions. '
             'Your values must be separated with a comma (,) without space. '
             'This option works in ROC and DET curves.',
        nitems=None, dflt=dflt, **kwargs)


def gen_score_distr(mean_gen, mean_zei, mean_pa, sigma_gen=1, sigma_zei=1,
...
...
@@ -74,8 +77,8 @@ def write_scores_to_file(neg, pos, filename, attack=False):
@click.command()
@click.argument('outdir')
@click.option('--mean-gen', default=10, type=FLOAT, show_default=True)
@click.option('--mean-zei', default=0, type=FLOAT, show_default=True)
@click.option('--mean-gen', default=7, type=FLOAT, show_default=True)
@click.option('--mean-zei', default=3, type=FLOAT, show_default=True)
@click.option('--mean-pa', default=5, type=FLOAT, show_default=True)
@verbosity_option()
def gen(outdir, mean_gen, mean_zei, mean_pa):
...
...
@@ -93,21 +96,61 @@ def gen(outdir, mean_gen, mean_zei, mean_pa):
        mean_gen, mean_zei, mean_pa)

    # Write the data into files
    write_scores_to_file(genuine_dev, zei_dev,
    write_scores_to_file(zei_dev, genuine_dev,
                         os.path.join(outdir, 'licit', 'scores-dev'))
    write_scores_to_file(genuine_eval, zei_eval,
    write_scores_to_file(zei_eval, genuine_eval,
                         os.path.join(outdir, 'licit', 'scores-eval'))
    write_scores_to_file(genuine_dev, pa_dev,
    write_scores_to_file(pa_dev, genuine_dev,
                         os.path.join(outdir, 'spoof', 'scores-dev'),
                         attack=True)
    write_scores_to_file(genuine_eval, pa_eval,
    write_scores_to_file(pa_eval, genuine_eval,
                         os.path.join(outdir, 'spoof', 'scores-eval'),
                         attack=True)
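For illustration only (the real ``write_scores_to_file`` lies outside this hunk), a stand-in with the same call signature could write 4-column lines compatible with ``bob.bio.base.score.load.four_column`` into the ``licit``/``spoof`` sub-directories used above:

    import os

    def write_scores_to_file(neg, pos, filename, attack=False):
        # hypothetical sketch: one 'claimed-id real-id label score' line per score
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as f:
            for s in pos:
                f.write('x x foo %f\n' % s)
            for s in neg:
                f.write('x %s foo %f\n' % ('attack' if attack else 'y', s))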
@click.command()
@common_options.scores_argument(min_arg=2, force_eval=True, nargs=-1)
@common_options.scores_argument(min_arg=2, nargs=-1)
@common_options.output_plot_file_option(default_out='vuln_roc.pdf')
@common_options.legends_option()
@common_options.no_legend_option()