bob / bob.pad.base · Commits · Commit a772a3d5

Compute APCER correctly

Authored Apr 29, 2019 by Amir MOHAMMADI
Parent: 9f28f82a
Pipeline #29681 passed in 14 minutes and 32 seconds

Showing 7 changed files with 10876 additions and 218 deletions (+10876, -218):
bob/pad/base/script/error_utils.py                        +295    -123
bob/pad/base/script/pad_commands.py                       +199    -57
bob/pad/base/script/pad_figure.py                         +307    -38
bob/pad/base/test/data/per_pai_scores/scores-dev          +10000  -0
bob/pad/base/test/data/per_pai_scores/scores-dev-0.hdf5   +0      -0
bob/pad/base/test/data/per_pai_scores/scores-dev-1.hdf5   +0      -0
bob/pad/base/test/test_error_utils.py                     +75     -0
bob/pad/base/script/error_utils.py @ a772a3d5
(diff collapsed on the original page; not shown here)
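The error_utils.py diff itself is collapsed, but the new test file at the bottom of this page exercises three of its functions: negatives_per_pai_and_positives, apcer_bpcer and calc_threshold. For orientation only, here is a minimal sketch of what the threshold criteria named in CRITERIA (see the pad_commands.py diff below) could look like, assuming the standard bob.measure helpers eer_threshold, min_hter_threshold and far_threshold; the actual implementation lives in the collapsed diff and may well differ.

# A minimal sketch, NOT the project's calc_threshold. Assumes bob.measure's
# eer_threshold / min_hter_threshold / far_threshold helpers. The negs
# argument (per-PAI negatives) is accepted for signature parity with the
# test's call but unused here.
import numpy
import bob.measure


def calc_threshold_sketch(method, pos, negs, all_negs, far_value=None):
    pos = numpy.asarray(pos, dtype=float)
    all_negs = numpy.asarray(all_negs, dtype=float)
    if method == "eer":
        return bob.measure.eer_threshold(all_negs, pos)
    if method == "min-hter":
        return bob.measure.min_hter_threshold(all_negs, pos)
    if method == "far":
        # fix the operating point by the requested FAR on the pooled attacks
        return bob.measure.far_threshold(all_negs, pos, far_value)
    if method.startswith("bpcer"):
        # "bpcerN" presumably reports BPCER at the point where APCER = 1/N,
        # e.g. "bpcer20" fixes APCER at 5%
        return bob.measure.far_threshold(all_negs, pos, 1.0 / float(method[5:]))
    raise ValueError(f"unknown threshold criterion: {method}")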
bob/pad/base/script/pad_commands.py @ a772a3d5

@@ -7,93 +7,235 @@ import bob.bio.base.script.gen as bio_gen
 import bob.measure.script.figure as measure_figure
 from bob.bio.base.score import load
 from . import pad_figure as figure
+from .error_utils import negatives_per_pai_and_positives
+from functools import partial

 SCORE_FORMAT = (
     "Files must be 4-col format, see "
     ":py:func:`bob.bio.base.score.load.four_column`."
 )
-CRITERIA = ('eer', 'min-hter', 'bpcer20')
+CRITERIA = (
+    "eer",
+    "min-hter",
+    "far",
+    "bpcer5000",
+    "bpcer2000",
+    "bpcer1000",
+    "bpcer500",
+    "bpcer200",
+    "bpcer100",
+    "bpcer50",
+    "bpcer20",
+    "bpcer10",
+    "bpcer5",
+    "bpcer2",
+    "bpcer1",
+)
+
+
+def metrics_option(
+    sname="-m",
+    lname="--metrics",
+    name="metrics",
+    help="List of metrics to print. Provide a string with comma separated metric "
+    "names. For possible values see the default value.",
+    default="apcer_pais,apcer,bpcer,acer,fta,fpr,fnr,hter,far,frr,precision,recall,f1_score",
+    **kwargs
+):
+    """The metrics option"""
+
+    def custom_metrics_option(func):
+        def callback(ctx, param, value):
+            if value is not None:
+                value = value.split(",")
+            ctx.meta[name] = value
+            return value
+
+        return click.option(
+            sname,
+            lname,
+            default=default,
+            help=help,
+            show_default=True,
+            callback=callback,
+            **kwargs
+        )(func)
+
+    return custom_metrics_option
+
+
+def regexps_option(
+    help="A list of regular expressions (by repeating this option) to be used to "
+    "categorize PAIs. Each regexp must match one type of PAI.",
+    **kwargs
+):
+    def custom_regexps_option(func):
+        def callback(ctx, param, value):
+            ctx.meta["regexps"] = value
+            return value
+
+        return click.option(
+            "-r",
+            "--regexps",
+            default=None,
+            multiple=True,
+            help=help,
+            callback=callback,
+            **kwargs
+        )(func)
+
+    return custom_regexps_option
+
+
+def regexp_column_option(
+    help="The column in the score files to match the regular expressions against.",
+    **kwargs
+):
+    def custom_regexp_column_option(func):
+        def callback(ctx, param, value):
+            ctx.meta["regexp_column"] = value
+            return value
+
+        return click.option(
+            "-rc",
+            "--regexp-column",
+            default="real_id",
+            type=click.Choice(("claimed_id", "real_id", "test_label")),
+            help=help,
+            show_default=True,
+            callback=callback,
+            **kwargs
+        )(func)
+
+    return custom_regexp_column_option


 @click.command()
-@click.argument('outdir')
-@click.option('-mm', '--mean-match', default=10, type=click.FLOAT, show_default=True)
-@click.option('-mnm', '--mean-non-match', default=-10, type=click.FLOAT, show_default=True)
-@click.option('-n', '--n-sys', default=1, type=click.INT, show_default=True)
+@click.argument("outdir")
+@click.option("-mm", "--mean-match", default=10, type=click.FLOAT, show_default=True)
+@click.option(
+    "-mnm", "--mean-non-match", default=-10, type=click.FLOAT, show_default=True
+)
+@click.option("-n", "--n-sys", default=1, type=click.INT, show_default=True)
 @verbosity_option()
 @click.pass_context
 def gen(ctx, outdir, mean_match, mean_non_match, n_sys, **kwargs):
     """Generate random scores.
     Generates random scores in 4col or 5col format. The scores are generated
     using Gaussian distribution whose mean is an input
     parameter. The generated scores can be used as hypothetical datasets.
     Invokes :py:func:`bob.bio.base.script.commands.gen`.
     """
-    ctx.meta['five_col'] = False
+    ctx.meta["five_col"] = False
     ctx.forward(bio_gen.gen)


 @common_options.metrics_command(
     common_options.METRICS_HELP.format(
-        names='FtA, APCER, BPCER, FAR, FRR, ACER',
-        criteria=CRITERIA, score_format=SCORE_FORMAT,
-        hter_note='Note that FAR = APCER * (1 - FtA), '
-        'FRR = FtA + BPCER * (1 - FtA) and ACER = (APCER + BPCER) / 2.',
-        command='bob pad metrics'),
-    criteria=CRITERIA)
-def metrics(ctx, scores, evaluation, **kwargs):
-    process = figure.Metrics(ctx, scores, evaluation, load.split)
-    process.run()
+        names="FtA, APCER, BPCER, FPR, FNR, FAR, FRR, ACER, HTER, precision, recall, f1_score",
+        criteria=CRITERIA,
+        score_format=SCORE_FORMAT,
+        hter_note="Note that APCER = max(APCER_pais), BPCER=FNR, "
+        "FAR = FPR * (1 - FtA), "
+        "FRR = FtA + FNR * (1 - FtA), "
+        "ACER = (APCER + BPCER) / 2, "
+        "and HTER = (FPR + FNR) / 2. "
+        "You can control which metrics are printed using the --metrics option. "
+        "You can use --regexps and --regexp_column options to change the behavior "
+        "of finding Presentation Attack Instrument (PAI) types",
+        command="bob pad metrics",
+    ),
+    criteria=CRITERIA,
+    epilog="""
+\b
+More Examples:
+\b
+bob pad metrics -vvv -e -lg IQM,LBP -r print -r video -m fta,apcer_pais,apcer,bpcer,acer,hter \
+/scores/oulunpu/{qm-svm,lbp-svm}/Protocol_1/scores/scores-{dev,eval}
+
+See also ``bob pad multi-metrics``.
+""",
+)
+@regexps_option()
+@regexp_column_option()
+@metrics_option()
+def metrics(ctx, scores, evaluation, regexps, regexp_column, metrics, **kwargs):
+    load_fn = partial(
+        negatives_per_pai_and_positives, regexps=regexps, regexp_column=regexp_column
+    )
+    process = figure.Metrics(ctx, scores, evaluation, load_fn, metrics)
+    process.run()


 @common_options.roc_command(
-    common_options.ROC_HELP.format(
-        score_format=SCORE_FORMAT, command='bob pad roc'))
+    common_options.ROC_HELP.format(score_format=SCORE_FORMAT, command="bob pad roc")
+)
 def roc(ctx, scores, evaluation, **kwargs):
     process = figure.Roc(ctx, scores, evaluation, load.split)
     process.run()


 @common_options.det_command(
-    common_options.DET_HELP.format(
-        score_format=SCORE_FORMAT, command='bob pad det'))
+    common_options.DET_HELP.format(score_format=SCORE_FORMAT, command="bob pad det")
+)
 def det(ctx, scores, evaluation, **kwargs):
     process = figure.Det(ctx, scores, evaluation, load.split)
     process.run()


 @common_options.epc_command(
-    common_options.EPC_HELP.format(
-        score_format=SCORE_FORMAT, command='bob pad epc'))
+    common_options.EPC_HELP.format(score_format=SCORE_FORMAT, command="bob pad epc")
+)
 def epc(ctx, scores, **kwargs):
-    process = measure_figure.Epc(ctx, scores, True, load.split, hter='ACER')
+    process = measure_figure.Epc(ctx, scores, True, load.split, hter="ACER")
     process.run()


 @common_options.hist_command(
-    common_options.HIST_HELP.format(
-        score_format=SCORE_FORMAT, command='bob pad hist'))
+    common_options.HIST_HELP.format(score_format=SCORE_FORMAT, command="bob pad hist")
+)
 def hist(ctx, scores, evaluation, **kwargs):
     process = figure.Hist(ctx, scores, evaluation, load.split)
     process.run()


 @common_options.evaluate_command(
     common_options.EVALUATE_HELP.format(
-        score_format=SCORE_FORMAT, command='bob pad evaluate'),
-    criteria=CRITERIA)
+        score_format=SCORE_FORMAT, command="bob pad evaluate"
+    ),
+    criteria=CRITERIA,
+)
 def evaluate(ctx, scores, evaluation, **kwargs):
     common_options.evaluate_flow(
-        ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs)
+        ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs
+    )


 @common_options.multi_metrics_command(
     common_options.MULTI_METRICS_HELP.format(
-        names='FtA, APCER, BPCER, FAR, FRR, ACER',
-        criteria=CRITERIA, score_format=SCORE_FORMAT,
-        command='bob pad multi-metrics'),
-    criteria=CRITERIA)
-def multi_metrics(ctx, scores, evaluation, protocols_number, **kwargs):
-    ctx.meta['min_arg'] = protocols_number * (2 if evaluation else 1)
-    process = figure.MultiMetrics(ctx, scores, evaluation, load.split)
-    process.run()
+        names="FtA, APCER, BPCER, FAR, FRR, ACER, HTER, precision, recall, f1_score",
+        criteria=CRITERIA,
+        score_format=SCORE_FORMAT,
+        command="bob pad multi-metrics",
+    ),
+    criteria=CRITERIA,
+    epilog="""
+\b
+More examples:
+\b
+bob pad multi-metrics -vvv -e -pn 6 -lg IQM,LBP -r print -r video \
+/scores/oulunpu/{qm-svm,lbp-svm}/Protocol_3_{1,2,3,4,5,6}/scores/scores-{dev,eval}
+
+See also ``bob pad metrics``.
+""",
+)
+@regexps_option()
+@regexp_column_option()
+@metrics_option(default="fta,apcer_pais,apcer,bpcer,acer,hter")
+def multi_metrics(
+    ctx, scores, evaluation, protocols_number, regexps, regexp_column, metrics, **kwargs
+):
+    ctx.meta["min_arg"] = protocols_number * (2 if evaluation else 1)
+    load_fn = partial(
+        negatives_per_pai_and_positives, regexps=regexps, regexp_column=regexp_column
+    )
+    process = figure.MultiMetrics(ctx, scores, evaluation, load_fn, metrics)
+    process.run()
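The hter_note in the new help text above pins down the metric relations: APCER = max(APCER_pais), BPCER = FNR, FAR = FPR * (1 - FtA), FRR = FtA + FNR * (1 - FtA), ACER = (APCER + BPCER) / 2, and HTER = (FPR + FNR) / 2. As a quick illustration, here is a self-contained NumPy sketch of the per-PAI computation, mirroring the apcer_bpcer(threshold, pos, *negs) call used by the new test further down; it is only a sketch of those stated relations, not the project's implementation (which sits in the collapsed error_utils.py diff).

# A sketch of the per-PAI APCER/BPCER computation, assuming the bob
# convention that higher scores mean "accept". Mirrors the test's call
# apcer_bpcer(threshold, pos, *negs.values()) but is illustrative only.
import numpy


def apcer_bpcer_sketch(threshold, pos, *negs):
    # one APCER per PAI type: the fraction of that PAI's attack scores
    # accepted at this threshold (score >= threshold)
    apcer_pais = [
        float(numpy.mean(numpy.asarray(neg) >= threshold)) for neg in negs
    ]
    apcer = max(apcer_pais)  # APCER = max(APCER_pais), per the hter_note
    # BPCER = FNR: bona-fide presentations rejected at this threshold
    bpcer = float(numpy.mean(numpy.asarray(pos) < threshold))
    return apcer_pais, apcer, bpcer


# ACER = (APCER + BPCER) / 2 then follows directly from the returned values.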
bob/pad/base/script/pad_figure.py @ a772a3d5
(diff collapsed on the original page; not shown here)
bob/pad/base/test/data/per_pai_scores/scores-dev @ a772a3d5 (new file, mode 100644)
(diff collapsed on the original page; not shown here)
bob/pad/base/test/data/per_pai_scores/scores-dev-0.hdf5 @ a772a3d5 (new file, mode 100644)
File added.
bob/pad/base/test/data/per_pai_scores/scores-dev-1.hdf5 @ a772a3d5 (new file, mode 100644)
File added.
bob/pad/base/test/test_error_utils.py @ a772a3d5 (new file, mode 100644)
from bob.io.base.test_utils import datafile
from bob.io.base import HDF5File
from bob.pad.base.script.error_utils import (
    negatives_per_pai_and_positives,
    apcer_bpcer,
    calc_threshold,
)
import nose
import numpy as np

GENERATE_REFERENCES = False

scores_dev = datafile("per_pai_scores/scores-dev", module=__name__)
scores_dev_reference_mask = datafile(
    "per_pai_scores/scores-dev-{i}.hdf5", module=__name__
)


def _dump_dict(f, d, name):
    f[f"{name}_len"] = len(d)
    for i, (k, v) in enumerate(d.items()):
        f[f"{name}_key_{i}"] = k
        f[f"{name}_value_{i}"] = v


def _read_dict(f, name):
    ret = dict()
    for i in range(f[f"{name}_len"]):
        k = f[f"{name}_key_{i}"]
        v = f[f"{name}_value_{i}"]
        if isinstance(v, np.ndarray):
            v = v.tolist()
        ret[k] = v
    return ret


def test_per_pai_apcer():
    for i, regexps in enumerate(
        (None, ["x[0-2]", "x[3-4]"], ["x[1-2]", "x[3-4]"])
    ):
        try:
            pos, negs = negatives_per_pai_and_positives(scores_dev, regexps)
        except ValueError:
            if i == 2:
                continue
            raise
        all_negs = [s for scores in negs.values() for s in scores]
        thresholds = dict()
        for method in ("bpcer20", "far", "eer", "min-hter"):
            thresholds[method] = calc_threshold(
                method, pos, negs.values(), all_negs, far_value=0.1
            )
        metrics = dict()
        for method, threshold in thresholds.items():
            apcers, apcer, bpcer = apcer_bpcer(threshold, pos, *negs.values())
            metrics[method] = apcers + [apcer, bpcer]

        scores_dev_reference = scores_dev_reference_mask.format(i=i)
        if GENERATE_REFERENCES:
            with HDF5File(scores_dev_reference, "w") as f:
                f["pos"] = pos
                _dump_dict(f, negs, "negs")
                _dump_dict(f, thresholds, "thresholds")
                _dump_dict(f, metrics, "metrics")

        with HDF5File(scores_dev_reference, "r") as f:
            ref_pos = f["pos"].tolist()
            ref_negs = _read_dict(f, "negs")
            ref_thresholds = _read_dict(f, "thresholds")
            ref_metrics = _read_dict(f, "metrics")

        nose.tools.assert_list_equal(pos, ref_pos)
        nose.tools.assert_dict_equal(negs, ref_negs)
        nose.tools.assert_dict_equal(thresholds, ref_thresholds)
        nose.tools.assert_dict_equal(metrics, ref_metrics)