bob / bob.measure

Commit eae2a343 authored Nov 11, 2019 by Amir MOHAMMADI
Merge branch 'roc-auc' into 'master'

Add Area Under ROC Curve (AUC). Closes #2

See merge request !97

Parents: 41a69e7f, a6400ba9
Pipeline #35291 canceled with stages in 14 minutes and 47 seconds

Showing 8 changed files with 89 additions and 11 deletions (+89 −11)
bob/measure/__init__.py          +38 −0
bob/measure/script/commands.py    +1 −1
bob/measure/script/figure.py     +21 −9
bob/measure/test_error.py        +19 −0
bob/measure/utils.py              +1 −1
doc/guide.rst                     +3 −0
doc/nitpick-exceptions.txt        +5 −0
doc/py_api.rst                    +1 −0
bob/measure/__init__.py (view file @ eae2a343)

@@ -474,6 +474,44 @@ def eer(negatives, positives, is_sorted=False, also_farfrr=False):
     return (far + frr) / 2.0
 
 
+def roc_auc_score(negatives, positives, npoints=2000, min_far=-8, log_scale=False):
+    """Area Under the ROC Curve.
+
+    Computes the area under the ROC curve. This is useful when you want to
+    report one number that represents an ROC curve. This implementation uses
+    the trapezoidal rule for the integration of the ROC curve. For more
+    information, see:
+    https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve
+
+    Parameters
+    ----------
+    negatives : array_like
+        The negative scores.
+    positives : array_like
+        The positive scores.
+    npoints : int, optional
+        Number of points in the ROC curve. Higher numbers lead to a more
+        accurate ROC.
+    min_far : float, optional
+        Minimum FAR and FRR values to consider when calculating the ROC.
+    log_scale : bool, optional
+        If True, converts the x axis (FPR) to log10 scale before calculating
+        the AUC. This is useful in cases where len(negatives) >> len(positives).
+
+    Returns
+    -------
+    float
+        The ROC AUC. If ``log_scale`` is False, the value should be between 0 and 1.
+    """
+    fpr, fnr = roc(negatives, positives, npoints, min_far=min_far)
+    tpr = 1 - fnr
+    if log_scale:
+        fpr_pos = fpr > 0
+        fpr, tpr = fpr[fpr_pos], tpr[fpr_pos]
+        fpr = numpy.log10(fpr)
+    area = -1 * numpy.trapz(tpr, fpr)
+    return area
+
+
 def get_config():
     """Returns a string containing the configuration information."""
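A minimal usage sketch (not part of the commit) of the new ``roc_auc_score`` API. The Gaussian score distributions below are illustrative assumptions, not the project's test data:

import numpy
import bob.measure

rng = numpy.random.RandomState(42)  # illustrative synthetic scores
negatives = rng.normal(0.0, 1.0, size=1000)  # impostor scores
positives = rng.normal(2.0, 1.0, size=100)   # genuine scores

# plain AUC: between 0 and 1, higher is better
auc = bob.measure.roc_auc_score(negatives, positives)
# log-scale AUC: the x axis becomes log10(FPR), so the value can exceed 1
auc_log = bob.measure.roc_auc_score(negatives, positives, log_scale=True)
print(auc, auc_log)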
bob/measure/script/commands.py (view file @ eae2a343)

@@ -13,7 +13,7 @@ CRITERIA = ('eer', 'min-hter', 'far')
 @common_options.metrics_command(
     common_options.METRICS_HELP.format(
-        names='FPR, FNR, precision, recall, F1-score',
+        names='FPR, FNR, precision, recall, F1-score, AUC ROC',
         criteria=CRITERIA, score_format=SCORE_FORMAT,
         hter_note=' ',
         command='bob measure metrics'),
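A sketch of how the new name reaches the ``bob measure metrics`` help text; the template string below is a hypothetical stand-in, only the ``format`` arguments come from the commit:

# hypothetical help template, for illustration only
METRICS_HELP = "Prints a table of {names} for the given criteria ({criteria}). Command: {command}"
print(METRICS_HELP.format(
    names='FPR, FNR, precision, recall, F1-score, AUC ROC',
    criteria=('eer', 'min-hter', 'far'),
    command='bob measure metrics'))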
bob/measure/script/figure.py (view file @ eae2a343)

@@ -183,7 +183,7 @@ class Metrics(MeasureBase):
     def __init__(self, ctx, scores, evaluation, func_load,
                  names=('False Positive Rate', 'False Negative Rate',
-                        'Precision', 'Recall', 'F1-score')):
+                        'Precision', 'Recall', 'F1-score',
+                        'Area Under ROC Curve',
+                        'Area Under ROC Curve (log scale)')):
         super(Metrics, self).__init__(ctx, scores, evaluation, func_load)
         self.names = names
         self._tablefmt = ctx.meta.get('tablefmt')

@@ -209,7 +209,7 @@ class Metrics(MeasureBase):
         return utils.get_thres(criterion, dev_neg, dev_pos, far)

     def _numbers(self, neg, pos, threshold, fta):
-        from .. import (farfrr, precision_recall, f_score)
+        from .. import (farfrr, precision_recall, f_score, roc_auc_score)
         # fpr and fnr
         fmr, fnmr = farfrr(neg, pos, threshold)
         hter = (fmr + fnmr) / 2.0

@@ -226,8 +226,12 @@ class Metrics(MeasureBase):
         # f_score
         f1_score = f_score(neg, pos, threshold, 1)
+        # AUC ROC
+        auc = roc_auc_score(neg, pos)
+        auc_log = roc_auc_score(neg, pos, log_scale=True)
         return (fta, fmr, fnmr, hter, far, frr, fm, ni, fnm, nc, precision,
-                recall, f1_score)
+                recall, f1_score, auc, auc_log)

     def _strings(self, metrics):
         n_dec = '.%df' % self._decimal

@@ -242,9 +246,11 @@ class Metrics(MeasureBase):
         prec_str = "%s" % format(metrics[10], n_dec)
         recall_str = "%s" % format(metrics[11], n_dec)
         f1_str = "%s" % format(metrics[12], n_dec)
+        auc_str = "%s" % format(metrics[13], n_dec)
+        auc_log_str = "%s" % format(metrics[14], n_dec)
         return (fta_str, fmr_str, fnmr_str, far_str, frr_str, hter_str,
-                prec_str, recall_str, f1_str)
+                prec_str, recall_str, f1_str, auc_str, auc_log_str)

     def _get_all_metrics(self, idx, input_scores, input_names):
         ''' Compute all metrics for dev and eval scores'''

@@ -297,11 +303,15 @@ class Metrics(MeasureBase):
             LOGGER.warn("NaNs scores (%s) were found in %s and removed",
                         all_metrics[0][0], dev_file)
         headers = [' ' or title, 'Development']
-        rows = [[self.names[0], all_metrics[0][1]],
-                [self.names[1], all_metrics[0][2]],
-                [self.names[2], all_metrics[0][6]],
-                [self.names[3], all_metrics[0][7]],
-                [self.names[4], all_metrics[0][8]]]
+        rows = [
+            [self.names[0], all_metrics[0][1]],
+            [self.names[1], all_metrics[0][2]],
+            [self.names[2], all_metrics[0][6]],
+            [self.names[3], all_metrics[0][7]],
+            [self.names[4], all_metrics[0][8]],
+            [self.names[5], all_metrics[0][9]],
+            [self.names[6], all_metrics[0][10]],
+        ]

         if self._eval:
             eval_file = input_names[1]

@@ -317,6 +327,8 @@ class Metrics(MeasureBase):
             rows[2].append(all_metrics[1][6])
             rows[3].append(all_metrics[1][7])
             rows[4].append(all_metrics[1][8])
+            rows[5].append(all_metrics[1][9])
+            rows[6].append(all_metrics[1][10])

         click.echo(tabulate(rows, headers, self._tablefmt), file=self.log_file)
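For reference, a standalone sketch (not from the commit; all values are made up) of how the two new rows end up in the printed table, mirroring the ``rows``/``tabulate`` logic above:

from tabulate import tabulate

names = ('False Positive Rate', 'False Negative Rate', 'Precision',
         'Recall', 'F1-score', 'Area Under ROC Curve',
         'Area Under ROC Curve (log scale)')
# illustrative development-set values only
dev_values = ['1.0%', '2.0%', '0.950', '0.900', '0.924', '0.9326', '1.4184']
rows = [[name, value] for name, value in zip(names, dev_values)]
print(tabulate(rows, headers=[' ', 'Development']))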
bob/measure/test_error.py (view file @ eae2a343)

@@ -503,3 +503,22 @@ def test_mindcf():
     assert mindcf < 1.0 + 1e-8
+
+
+def test_roc_auc_score():
+    from bob.measure import roc_auc_score
+    positives = bob.io.base.load(F('nonsep-positives.hdf5'))
+    negatives = bob.io.base.load(F('nonsep-negatives.hdf5'))
+    auc = roc_auc_score(negatives, positives)
+    # commented out sklearn computation to avoid adding an extra test dependency
+    # from sklearn.metrics import roc_auc_score as oracle_auc
+    # y_true = numpy.concatenate([numpy.ones_like(positives), numpy.zeros_like(negatives)], axis=0)
+    # y_score = numpy.concatenate([positives, negatives], axis=0)
+    # oracle = oracle_auc(y_true, y_score)
+    oracle = 0.9326
+    assert numpy.allclose(auc, oracle), f"Expected {oracle} but got {auc} instead."
+    # test the function on log scale as well
+    auc = roc_auc_score(negatives, positives, log_scale=True)
+    oracle = 1.4183699583300993
+    assert numpy.allclose(auc, oracle), f"Expected {oracle} but got {auc} instead."
bob/measure/utils.py (view file @ eae2a343)

@@ -115,7 +115,7 @@ def get_thres(criter, neg, pos, far=None):
     elif criter == 'far':
         if far is None:
             raise ValueError("FAR value must be provided through "
-                             "``--far-value`` option.")
+                             "``--far-value`` or ``--fpr-value`` option.")
         from . import far_threshold
         return far_threshold(neg, pos, far)
     else:
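A quick sketch (illustrative scores, not from the repository) of the 'far' criterion path this message guards: a FAR value must be supplied, and the threshold comes from ``far_threshold``:

import numpy
import bob.measure

rng = numpy.random.RandomState(0)  # illustrative synthetic scores
negatives = rng.normal(0.0, 1.0, 1000)
positives = rng.normal(2.0, 1.0, 100)

# threshold at which at most 1% of negatives are falsely accepted
threshold = bob.measure.far_threshold(negatives, positives, 0.01)
far, frr = bob.measure.farfrr(negatives, positives, threshold)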
doc/guide.rst (view file @ eae2a343)

@@ -284,6 +284,9 @@ town. To plot an ROC curve, in possession of your **negatives** and
     >>> pyplot.ylabel('FNR (%)')  # doctest: +SKIP
     >>> pyplot.grid(True)
     >>> pyplot.show()  # doctest: +SKIP
+    >>> # You can also compute the area under the ROC curve:
+    >>> bob.measure.roc_auc_score(negatives, positives)
+    0.8958

 You should see an image like the following one:
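Building on the guide's snippet, a sketch (assumes matplotlib and the ``negatives``/``positives`` arrays from the guide; the label format is an illustrative choice) that annotates the ROC plot with the new AUC value:

import bob.measure
from matplotlib import pyplot

npoints = 100
fpr, fnr = bob.measure.roc(negatives, positives, npoints)
auc = bob.measure.roc_auc_score(negatives, positives)
# plot in percent, as the guide does, and show the AUC in the legend
pyplot.plot(fpr * 100, fnr * 100, label='ROC (AUC = %.4f)' % auc)
pyplot.xlabel('FPR (%)')
pyplot.ylabel('FNR (%)')
pyplot.legend()
pyplot.grid(True)
pyplot.show()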
doc/nitpick-exceptions.txt (view file @ eae2a343)
# ignores stuff that does not exist in Python 2.7 manual
py:class list
# ignores stuff that does not exist but makes sense
py:class array
py:class array_like
py:class optional
py:class callable
doc/py_api.rst (view file @ eae2a343)

@@ -49,6 +49,7 @@ Curves
 .. autosummary::

    bob.measure.roc
+   bob.measure.roc_auc_score
    bob.measure.rocch
    bob.measure.roc_for_far
    bob.measure.det