Commit 861911a5 authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

Merge branch 'finalplot' into 'master'

Finalization of plots

Closes #22

See merge request !43
parents ed755c46 7ebd988a
Pipeline #21348 passed with stages
in 8 minutes and 34 seconds
......@@ -5,6 +5,44 @@
import bob.measure
import numpy
from bob.measure import (
far_threshold, eer_threshold, min_hter_threshold)
def calc_threshold(method, neg, pos):
    """Calculate a decision threshold for the requested criterion.

    The scores should be sorted!

    Parameters
    ----------
    method : str
        One of ``bpcer20``, ``eer``, ``min-hter`` (case-insensitive).
    neg : array_like
        The negative scores. They should be sorted!
    pos : array_like
        The positive scores. They should be sorted!

    Returns
    -------
    float
        The calculated threshold.

    Raises
    ------
    ValueError
        If method is unknown.
    """
    criterion = method.lower()
    # Dispatch on the (lowercased) criterion name; each branch delegates
    # to the corresponding bob.measure thresholding routine.
    if criterion == 'bpcer20':
        return far_threshold(neg, pos, 0.05, True)
    if criterion == 'eer':
        return eer_threshold(neg, pos, True)
    if criterion == 'min-hter':
        return min_hter_threshold(neg, pos, True)
    raise ValueError("Unknown threshold criteria: {}".format(criterion))
def calc_pass_rate(threshold, attacks):
......
......@@ -3,9 +3,10 @@
import click
import pkg_resources
from click_plugins import with_plugins
from bob.extension.scripts.click_helper import AliasedGroup
@with_plugins(pkg_resources.iter_entry_points('bob.pad.cli'))
@click.group(cls=AliasedGroup)
def pad():
    """Presentation Attack Detection related commands."""
    pass
This diff is collapsed.
'''Runs error analysis on score sets, outputs metrics and plots'''
import bob.measure.script.figure as measure_figure
import bob.bio.base.script.figure as bio_figure
from .error_utils import calc_threshold
ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter')
class Metrics(measure_figure.Metrics):
    '''Compute metrics from score files'''

    def __init__(self, ctx, scores, evaluation, func_load):
        # PAD-specific metric names reported in the output table.
        metric_names = ('FtA', 'APCER', 'BPCER', 'FAR', 'FRR', 'ACER')
        super(Metrics, self).__init__(
            ctx, scores, evaluation, func_load, names=metric_names)

    def get_thres(self, criterion, dev_neg, dev_pos, far):
        # bpcer20 is a PAD-specific criterion; everything else is
        # delegated to the base-class threshold selection.
        if self._criterion != 'bpcer20':
            return super(Metrics, self).get_thres(
                criterion, dev_neg, dev_pos, far)
        return calc_threshold('bpcer20', dev_neg, dev_pos)
class MultiMetrics(measure_figure.MultiMetrics):
    '''Compute metrics from score files'''

    def __init__(self, ctx, scores, evaluation, func_load):
        # Same PAD-specific metric names as the single-file Metrics class.
        metric_names = ('FtA', 'APCER', 'BPCER', 'FAR', 'FRR', 'ACER')
        super(MultiMetrics, self).__init__(
            ctx, scores, evaluation, func_load, names=metric_names)

    def get_thres(self, criterion, dev_neg, dev_pos, far):
        # bpcer20 is a PAD-specific criterion; everything else is
        # delegated to the base-class threshold selection.
        if self._criterion != 'bpcer20':
            return super(MultiMetrics, self).get_thres(
                criterion, dev_neg, dev_pos, far)
        return calc_threshold('bpcer20', dev_neg, dev_pos)
class Roc(bio_figure.Roc):
    '''ROC for PAD'''

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Roc, self).__init__(ctx, scores, evaluation, func_load)
        # Fall back to PAD-specific axis labels when none were supplied
        # (an empty string also triggers the fallback, matching ``or``).
        x_label = ctx.meta.get('x_label')
        y_label = ctx.meta.get('y_label')
        self._x_label = x_label if x_label else 'APCER'
        self._y_label = y_label if y_label else '1 - BPCER'
class Det(bio_figure.Det):
    '''DET for PAD'''

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Det, self).__init__(ctx, scores, evaluation, func_load)
        # Fall back to PAD-specific axis labels when none were supplied
        # (an empty string also triggers the fallback, matching ``or``).
        x_label = ctx.meta.get('x_label')
        y_label = ctx.meta.get('y_label')
        self._x_label = x_label if x_label else 'APCER (%)'
        self._y_label = y_label if y_label else 'BPCER (%)'
class Hist(measure_figure.Hist):
    ''' Histograms for PAD '''

    def _setup_hist(self, neg, pos):
        self._title_base = 'PAD'
        # Bona-fide scores are drawn first (index 0), presentation
        # attacks second (index 1), with hatching to stay readable in
        # grayscale.
        bona_fide_opts = dict(n=0, label='Bona Fide', color='C1')
        attack_opts = dict(
            n=1, label='Presentation attack', alpha=0.4, color='C7',
            hatch='\\\\')
        self._density_hist(pos[0], **bona_fide_opts)
        self._density_hist(neg[0], **attack_opts)
......@@ -3,9 +3,11 @@
import click
import pkg_resources
from click_plugins import with_plugins
from bob.extension.scripts.click_helper import AliasedGroup
@with_plugins(pkg_resources.iter_entry_points('bob.vuln.cli'))
@click.group(cls=AliasedGroup)
def vuln():
    """Vulnerability analysis related commands."""
    pass
This diff is collapsed.
......@@ -13,8 +13,7 @@ def test_det_pad():
'data/licit/scores-eval')
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(pad_commands.det, ['-c', 'min-hter',
'--output',
result = runner.invoke(pad_commands.det, ['-e', '--output',
'DET.pdf',
licit_dev, licit_test])
assert result.exit_code == 0, (result.exit_code, result.output)
......@@ -32,8 +31,8 @@ def test_det_vuln():
'data/spoof/scores-eval')
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(vuln_commands.det, ['-c', 'min-hter',
'--output',
result = runner.invoke(vuln_commands.det, ['-hla', '0.2',
'-o',
'DET.pdf',
licit_dev, licit_test,
spoof_dev, spoof_test])
......@@ -77,19 +76,20 @@ def test_hist_pad():
'data/spoof/scores-eval')
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(pad_commands.hist, ['--no-evaluation', licit_dev])
result = runner.invoke(pad_commands.hist, [licit_dev])
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(pad_commands.hist, ['--criterion', 'min-hter',
'--output',
'HISTO.pdf', '-b',
'30,auto', '--no-evaluation',
'30,20',
licit_dev, spoof_dev])
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(pad_commands.hist, ['--criterion', 'eer', '--output',
result = runner.invoke(pad_commands.hist, ['-e', '--criterion', 'eer',
'--output',
'HISTO.pdf', '-b', '30',
licit_dev, licit_test,
spoof_dev, spoof_test])
......@@ -110,13 +110,21 @@ def test_hist_vuln():
with runner.isolated_filesystem():
result = runner.invoke(vuln_commands.hist,
['--criterion', 'eer', '--output',
'HISTO.pdf', '-b', '30',
'HISTO.pdf', '-b', '30', '-ts', 'A,B',
licit_dev, licit_test])
assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem():
result = runner.invoke(vuln_commands.hist,
['--criterion', 'eer', '--output',
'HISTO.pdf', '-b', '2,20,30', '-e',
licit_dev, licit_test,
spoof_dev, spoof_test])
assert result.exit_code == 0, (result.exit_code, result.output)
def test_metrics_vuln():
licit_dev = pkg_resources.resource_filename('bob.pad.base.test',
'data/licit/scores-dev')
......@@ -146,7 +154,7 @@ def test_metrics_pad():
with runner.isolated_filesystem():
result = runner.invoke(
pad_commands.metrics,
[licit_dev, licit_test]
['-e', licit_dev, licit_test]
)
assert result.exit_code == 0, (result.exit_code, result.output)
......@@ -176,7 +184,6 @@ def test_epc_vuln():
assert result.exit_code == 0, (result.exit_code, result.output)
def test_epsc_vuln():
licit_dev = pkg_resources.resource_filename('bob.pad.base.test',
'data/licit/scores-dev')
......
......@@ -115,10 +115,14 @@ The scripts take as input either a 4-column or 5-column data format as specified
in the documentation of :py:func:`bob.bio.base.score.load.four_column` or
:py:func:`bob.bio.base.score.load.five_column`.
Two sets of commands, ``bob pad`` and ``bob vuln`` are available for
Presentation Attack Detection and
Vulnerability analysis, respectively.
Metrics
=======
Several PAD metrics based on a selected threshold (bpcer20: when APCER is set
to 5%, eer: when BPCER == APCER, and min-hter: when HTER is minimal) are
computed on the development set and applied to the evaluation sets (if
provided) using the ``metrics`` command. The reported `standard metrics`_ are:
......@@ -133,65 +137,114 @@ For example:
.. code-block:: sh
$ bob pad metrics scores-{dev,test} --titles ExpA
Threshold of 6.624767 selected with the bpcer20 criteria
======== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
======== ======================== ===================
BPCER20 5.0% 5.0%
EER 0.0% 0.0%
min-HTER 2.5% 2.5%
======== ======================== ===================
Threshold of 6.534215 selected with the eer criteria
======== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
======== ======================== ===================
BPCER20 6.1% 6.1%
EER 0.0% 0.0%
min-HTER 3.0% 3.0%
======== ======================== ===================
Threshold of 6.534215 selected with the min-hter criteria
======== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
======== ======================== ===================
BPCER20 6.1% 6.1%
EER 0.0% 0.0%
min-HTER 3.0% 3.0%
======== ======================== ===================
$ bob pad metrics -e scores-{dev,eval} --legends ExpA
Threshold of 11.639561 selected with the bpcer20 criteria
====== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
====== ======================== ===================
APCER 5.0% 5.0%
BPCER 100.0% 100.0%
ACER 52.5% 52.5%
====== ======================== ===================
Threshold of 3.969103 selected with the eer criteria
====== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
====== ======================== ===================
APCER 100.0% 100.0%
BPCER 100.0% 100.0%
ACER 100.0% 100.0%
====== ======================== ===================
Threshold of -0.870550 selected with the min-hter criteria
====== ======================== ===================
ExpA Development scores-dev Eval. scores-eval
====== ======================== ===================
APCER 100.0% 100.0%
BPCER 19.5% 19.5%
ACER 59.7% 59.7%
====== ======================== ===================
.. note::
You can compute analysis on development set(s) only by passing option
``--no-evaluation``. See metrics --help for further options.
When evaluation scores are provided, the ``--eval`` option must be passed.
See metrics --help for further options.
Metrics for vulnerability analysis are also available through:
.. code-block:: sh
$ bob vuln metrics -e .../{licit,spoof}/scores-{dev,test}
========= ===================
None EER (threshold=4)
========= ===================
APCER (%) 100.0%
BPCER (%) 100.0%
ACER (%) 100.0%
IAPMR (%) 100.0%
========= ===================
Plots
=====
Customizable plotting commands are available in the :py:mod:`bob.pad.base` module.
They take a list of development and/or evaluation files and generate a single PDF
file containing the plots. Available plots are:
file containing the plots.
Available plots for PAD are:
* ``hist`` (Bona fide and PA histograms along with threshold criterion)
* ``vuln`` (Vulnerability analysis distributions)
* ``epc`` (expected performance curve)
* ``gen`` (Generate random scores)
* ``roc`` (receiver operating characteristic)
* ``det`` (detection error trade-off)
* ``evaluate`` (Summarize all the above commands in one call)
Available plots for vulnerability analysis are:
* ``hist`` (Vulnerability analysis distributions)
* ``epc`` (expected performance curve)
* ``gen`` (Generate random scores)
* ``roc`` (receiver operating characteristic)
* ``det`` (detection error trade-off)
* ``epsc`` (expected performance spoofing curve)
* ``fmr_iapmr`` (Plot FMR vs IAPMR)
* ``evaluate`` (Summarize all the above commands in one call)
Use the ``--help`` option on the above-cited commands to find-out about more
options.
For example, to generate a EPSC curve from development and evaluation datasets:
For example, to generate a EPC curve from development and evaluation datasets:
.. code-block:: sh
$bob pad epc -e -o 'my_epc.pdf' scores-{dev,eval}
where `my_epc.pdf` will contain EPC curves for all the experiments.
Vulnerability commands require licit and spoof development and evaluation
datasets. For example, to generate an EPSC curve:
.. code-block:: sh
$bob pad epsc -o 'my_epsc.pdf' scores-{dev,test}
$bob vuln epsc -e .../{licit,spoof}/scores-{dev,eval}
where `my_epsc.pdf` will contain EPSC curves for all the experiments.
.. note::
IAPMR curve can be plotted along with EPC and EPSC using option
......
......@@ -140,9 +140,11 @@ setup(
# bob pad scripts
'bob.pad.cli': [
'metrics = bob.pad.base.script.pad_commands:metrics',
'multi-metrics = bob.pad.base.script.pad_commands:multi_metrics',
'hist = bob.pad.base.script.pad_commands:hist',
'det = bob.pad.base.script.pad_commands:det',
'roc = bob.pad.base.script.pad_commands:roc',
'epc = bob.pad.base.script.pad_commands:epc',
'gen = bob.pad.base.script.pad_commands:gen',
'evaluate = bob.pad.base.script.pad_commands:evaluate',
],
......@@ -152,6 +154,7 @@ setup(
'metrics = bob.pad.base.script.vuln_commands:metrics',
'hist = bob.pad.base.script.vuln_commands:hist',
'det = bob.pad.base.script.vuln_commands:det',
'roc = bob.pad.base.script.vuln_commands:roc',
'epc = bob.pad.base.script.vuln_commands:epc',
'epsc = bob.pad.base.script.vuln_commands:epsc',
'gen = bob.pad.base.script.vuln_commands:gen',
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment