Commit d27974c7 authored by Amir MOHAMMADI

move code around

parent 9e51851e
Pipeline #21208 failed in 32 minutes and 2 seconds
@@ -5,6 +5,44 @@
import bob.measure
import numpy
from bob.measure import (
far_threshold, eer_threshold, min_hter_threshold)
def calc_threshold(method, neg, pos):
"""Calculates the threshold based on the given method.
The scores should be sorted!
Parameters
----------
method : str
One of ``bpcer20``, ``eer``, ``min-hter``.
neg : array_like
The negative scores. They should be sorted!
pos : array_like
The positive scores. They should be sorted!
Returns
-------
float
The calculated threshold.
Raises
------
ValueError
If method is unknown.
"""
method = method.lower()
if method == 'bpcer20':
threshold = far_threshold(neg, pos, 0.05, True)
elif method == 'eer':
threshold = eer_threshold(neg, pos, True)
elif method == 'min-hter':
threshold = min_hter_threshold(neg, pos, True)
else:
raise ValueError("Unknown threshold criterion: {}".format(method))
return threshold
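
For illustration, a minimal usage sketch of ``calc_threshold`` (synthetic scores; the sorting requirement comes from the ``bob.measure`` threshold functions, whose last argument declares the input as pre-sorted):

import numpy

# synthetic, pre-sorted score sets: attacks as negatives, bona fide as positives
neg = numpy.sort(numpy.random.normal(-10., 1., 1000))
pos = numpy.sort(numpy.random.normal(10., 1., 1000))

threshold = calc_threshold('bpcer20', neg, pos)  # threshold where APCER == 5%
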
def calc_pass_rate(threshold, attacks):
......
"""The main entry for bob.pad and its (click-based) scripts.
"""The main entry for bob pad commands.
"""
import click
import pkg_resources
from click_plugins import with_plugins
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import (verbosity_option,
open_file_mode_option,
AliasedGroup)
from bob.extension.scripts.click_helper import verbosity_option
import bob.bio.base.script.gen as bio_gen
import bob.bio.base.script.figure as bio_figure
import bob.measure.script.figure as measure_figure
from bob.bio.base.score import load
from . import figure
from . import pad_figure as figure
SCORE_FORMAT = (
"Files must be 4-col format, see "
":py:func:`bob.bio.base.score.load.four_column`.")
CRITERIA = ('eer', 'min-hter', 'bpcer20')
@click.command()
@click.argument('outdir')
@click.option('-mm', '--mean-match', default=10, type=click.FLOAT, show_default=True)
@click.option('-mm', '--mean-match', default=10, type=click.FLOAT,
show_default=True)
@click.option('-mnm', '--mean-non-match', default=-10,
type=click.FLOAT, show_default=True)
@click.option('-n', '--n-sys', default=1, type=click.INT, show_default=True)
@click.option('--five-col/--four-col', default=False, show_default=True)
@verbosity_option()
@click.pass_context
def gen(ctx, outdir, mean_match, mean_non_match, n_sys, five_col):
def gen(ctx, outdir, mean_match, mean_non_match, n_sys):
"""Generate random scores.
Generates random scores in 4-col or 5-col format. The scores are generated
using a Gaussian distribution whose mean is an input
parameter. The generated scores can be used as hypothetical datasets.
Invokes :py:func:`bob.bio.base.script.gen.gen`.
"""
ctx.meta['five_col'] = False
ctx.forward(bio_gen.gen)
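
The actual generation is delegated to ``bob.bio.base``; conceptually it amounts to something like the following sketch (a hypothetical simplification, unit variance assumed):

import numpy

def fake_scores(mean_match=10., mean_non_match=-10., size=100):
    # one Gaussian per class; the means mirror the -mm/-mnm defaults above
    match = numpy.random.normal(mean_match, 1., size)
    non_match = numpy.random.normal(mean_non_match, 1., size)
    return non_match, match
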
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.legends_option()
@common_options.legend_loc_option(dflt='lower-right')
@common_options.no_legend_option()
@common_options.sep_dev_eval_option()
@common_options.output_plot_file_option(default_out='roc.pdf')
@common_options.eval_option()
@common_options.points_curve_option()
@common_options.semilogx_option(True)
@common_options.axes_val_option(dflt='1e-4,1,1e-4,1')
@common_options.x_rotation_option()
@common_options.lines_at_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
def roc(ctx, scores, evaluation, **kargs):
"""Plot ROC (receiver operating characteristic) curve:
The plot represents the false match rate on the horizontal axis and the
false non-match rate on the vertical axis. The values for the axes are
computed using :py:func:`bob.measure.roc`.
You need to provide one or more development score file(s) for each
experiment. You can also provide evaluation files along with development
files; in that case, the flag `--eval` must be used. Files must be in
4-col format, see :py:func:`bob.bio.base.score.load.four_column` for
details.
Examples:
$ bob pad roc -v dev-scores
$ bob pad roc -e -v dev-scores1 eval-scores1 dev-scores2
eval-scores2
$ bob pad roc -e -v -o my_roc.pdf dev-scores1 eval-scores1
"""
process = figure.Roc(ctx, scores, evaluation, load.split)
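
Standalone, the curve points can be obtained directly from ``bob.measure.roc``; a sketch, assuming its ``(negatives, positives, n_points)`` signature with FAR in row 0 and FRR in row 1:

import numpy
import bob.measure

neg = numpy.random.normal(-10., 1., 1000)  # attack scores
pos = numpy.random.normal(10., 1., 1000)   # bona fide scores

curve = bob.measure.roc(neg, pos, 100)  # 2 x 100 array of (FAR, FRR) points
apcer = curve[0]                 # FAR plays the role of APCER in PAD
one_minus_bpcer = 1. - curve[1]  # matches the '1 - BPCER' axis label below
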
@common_options.metrics_command(common_options.METRICS_HELP.format(
names='FtA, FAR, FRR, APCER, BPCER, ACER',
criteria=CRITERIA, score_format=SCORE_FORMAT,
command='bob pad metrics'), criteria=CRITERIA)
def metrics(ctx, scores, evaluation, **kwargs):
process = figure.Metrics(ctx, scores, evaluation, load.split)
process.run()
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.no_legend_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.sep_dev_eval_option()
@common_options.eval_option()
@common_options.axes_val_option(dflt='0.01,95,0.01,95')
@common_options.x_rotation_option(dflt=45)
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@common_options.lines_at_option()
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
def det(ctx, scores, evaluation, **kargs):
"""Plot DET (detection error trade-off) curve:
modified ROC curve which plots error rates on both axes
(false positives on the x-axis and false negatives on the y-axis)
You need to provide one or more development score file(s) for each
experiment. You can also provide eval files along with dev files. If
evale-scores are used, the flag `--eval` must be used. is required
in that case. Files must be 4-col format, see
:py:func:`bob.bio.base.score.load.four_column` for details.
Examples:
$ bob pad det -v dev-scores eval-scores
$ bob pad det -e -v scores-{dev,eval}
"""
process = figure.DetPad(ctx, scores, evaluation, load.split)
@common_options.roc_command(
common_options.ROC_HELP.format(
score_format=SCORE_FORMAT, command='bob pad roc'))
def roc(ctx, scores, evaluation, **kwargs):
process = figure.Roc(ctx, scores, evaluation, load.split)
process.run()
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.no_line_option()
@common_options.far_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.print_filenames_option(dflt=False)
@common_options.legends_option()
@common_options.figsize_option(dflt=None)
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
""" Plots histograms of Bona fida and PA along with threshold
criterion.
You need to provide one or more development score file(s) for each
experiment. You can also provide eval files along with dev files. If only
evaluation are provided, you must use flag `--eval`.
By default, when eval-scores are given, only eval-scores histograms are
displayed with threshold line
computed from dev-scores.
Examples:
$ bob pad hist -v dev-scores
$ bob pad hist -e -v dev-scores1 eval-scores1 dev-scores2
eval-scores2
$ bob pad hist -e -v --criterion min-hter dev-scores1 eval-scores1
"""
process = figure.HistPad(ctx, scores, evaluation, load.split)
@common_options.det_command(
common_options.DET_HELP.format(
score_format=SCORE_FORMAT, command='bob pad det'))
def det(ctx, scores, evaluation, **kwargs):
process = figure.Det(ctx, scores, evaluation, load.split)
process.run()
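
A standalone sketch of the same plot, using the ``bob.measure.plot`` helpers that the old ``figure.py`` imports further down in this diff (axis limits match the command's defaults):

import numpy
import matplotlib.pyplot as mpl
from bob.measure.plot import det, det_axis

neg = numpy.random.normal(-10., 1., 1000)  # attacks
pos = numpy.random.normal(10., 1., 1000)   # bona fide

det(neg, pos, 100)              # draw the DET curve on the current axes
det_axis([0.01, 95, 0.01, 95])  # same default limits as --axes-val above
mpl.xlabel('APCER (%)')
mpl.ylabel('BPCER (%)')
mpl.savefig('det.pdf')
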
@click.command()
@common_options.scores_argument(min_arg=1, force_eval=True, nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-center')
@common_options.no_legend_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@verbosity_option()
@click.pass_context
def epc(ctx, scores, **kargs):
"""Plot EPC (expected performance curve):
plots the error rate on the eval set depending on a threshold selected
a-priori on the development set and accounts for varying relative cost
in [0; 1] of FPR and FNR when calculating the threshold.
You need to provide one or more development score and eval file(s)
for each experiment. Files must be 4-columns format, see
:py:func:`bob.bio.base.score.load.four_column` for details.
Examples:
$ bob pad epc -v dev-scores eval-scores
$ bob pad epc -v -o my_epc.pdf dev-scores1 eval-scores1
"""
process = measure_figure.Epc(ctx, scores, True, load.split)
@common_options.epc_command(
common_options.EPC_HELP.format(
score_format=SCORE_FORMAT, command='bob pad epc'))
def epc(ctx, scores, **kwargs):
process = measure_figure.Epc(ctx, scores, True, load.split, hter='ACER')
process.run()
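
A sketch of what the EPC computes, assuming the ``bob.measure.epc`` ``(dev_neg, dev_pos, eval_neg, eval_pos, n_points)`` signature:

import numpy
import matplotlib.pyplot as mpl
import bob.measure

dev_neg = numpy.random.normal(-10., 1., 1000)
dev_pos = numpy.random.normal(10., 1., 1000)
eval_neg = numpy.random.normal(-9., 1., 1000)
eval_pos = numpy.random.normal(9., 1., 1000)

# row 0: relative cost values in [0, 1]; row 1: eval-set error rate at the
# dev-set threshold minimizing the weighted error for that cost
curve = bob.measure.epc(dev_neg, dev_pos, eval_neg, eval_pos, 100)
mpl.plot(curve[0], curve[1])
mpl.savefig('epc.pdf')
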
@click.command(context_settings=dict(token_normalize_func=lambda x: x.lower()))
@common_options.scores_argument(nargs=-1)
@common_options.eval_option()
@common_options.table_option()
@open_file_mode_option()
@common_options.output_log_metric_option()
@common_options.legends_option()
@verbosity_option()
@click.pass_context
def metrics(ctx, scores, evaluation, **kwargs):
"""PAD ISO compliant metrics.
Reports several metrics based on a threshold selected on the development
set and applies them to the evaluation set (if provided). The available
thresholds are:
bpcer20 When APCER is set to 5%.
eer When BPCER == APCER.
min-hter When HTER is minimum.
This command produces one table per system. The format of the table can
be changed through the option ``--tablefmt``.
Most metrics are according to the ISO/IEC 30107-3:2017 "Information
technology -- Biometric presentation attack detection -- Part 3: Testing
and reporting" standard. The reported metrics are:
APCER: Attack Presentation Classification Error Rate
BPCER: Bona-fide Presentation Classification Error Rate
HTER (non-ISO): Half Total Error Rate ((BPCER+APCER)/2)
Examples:
$ bob pad metrics /path/to/scores-dev
$ bob pad metrics /path/to/scores-dev /path/to/scores-eval
$ bob pad metrics /path/to/system{1,2,3}/score-{dev,eval}
"""
process = figure.Metrics(ctx, scores, evaluation, load.split)
@common_options.hist_command(
common_options.HIST_HELP.format(
score_format=SCORE_FORMAT, command='bob pad hist'))
def hist(ctx, scores, evaluation, **kwargs):
process = figure.Hist(ctx, scores, evaluation, load.split)
process.run()
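
The metrics command above boils down to picking a threshold on the development set and applying it everywhere, as the removed ``compute`` method further down in this diff also shows; a self-contained sketch using the same ``bob.measure`` calls:

import numpy
from bob.measure import eer_threshold, farfrr

dev_neg = numpy.sort(numpy.random.normal(-10., 1., 1000))
dev_pos = numpy.sort(numpy.random.normal(10., 1., 1000))
eval_neg = numpy.random.normal(-9., 1., 1000)
eval_pos = numpy.random.normal(9., 1., 1000)

threshold = eer_threshold(dev_neg, dev_pos, True)     # chosen on dev only
apcer, bpcer = farfrr(eval_neg, eval_pos, threshold)  # FAR -> APCER, FRR -> BPCER
acer = (apcer + bpcer) / 2.                           # HTER, called ACER for PAD
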
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.legends_option()
@common_options.sep_dev_eval_option()
@common_options.table_option()
@common_options.eval_option()
@common_options.output_log_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.lines_at_option()
@common_options.const_layout_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.linestyles_option()
@verbosity_option()
@click.pass_context
@common_options.evaluate_command(
common_options.EVALUATE_HELP.format(
score_format=SCORE_FORMAT, command='bob pad evaluate'),
criteria=CRITERIA)
def evaluate(ctx, scores, evaluation, **kwargs):
'''Runs error analysis on score sets
\b
1. Computes the threshold using either the EER or the min-HTER criterion
on the development set scores
2. Applies the above threshold on evaluation set scores to compute the
HTER, if an evaluation score set is provided
3. Reports error rates on the console
4. Plots ROC, EPC, DET curves and score distributions to a multi-page PDF
file
You need to provide 2 score files for each biometric system in this order:
\b
* development scores
* evaluation scores
When evaluation scores are provided, ``--eval`` must be passed.
Examples:
$ bob pad evaluate -v dev-scores
$ bob pad evaluate -e -v scores-dev1 scores-eval1 scores-dev2
scores-eval2
$ bob pad evaluate -e -v /path/to/sys-{1,2,3}/scores-{dev,eval}
$ bob pad evaluate -e -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
'''
# the first command erases the output file if it already exists
click.echo("Computing metrics...")
ctx.invoke(metrics, scores=scores, evaluation=evaluation)
if 'log' in ctx.meta and ctx.meta['log'] is not None:
click.echo("[metrics] => %s" % ctx.meta['log'])
# avoid closing pdf file before all figures are plotted
ctx.meta['closef'] = False
if evaluation:
click.echo("Starting evaluate with dev and eval scores...")
else:
click.echo("Starting evaluate with dev scores only...")
click.echo("Computing ROC...")
# set axes limits for ROC
ctx.forward(roc) # use class defaults plot settings
click.echo("Computing DET...")
ctx.forward(det) # use class defaults plot settings
# the last one closes the file
ctx.meta['closef'] = True
click.echo("Computing score histograms...")
ctx.meta['criterion'] = 'eer' # no criterion passed in evaluate
ctx.forward(hist)
click.echo("Evaluate successfully completed!")
click.echo("[plots] => %s" % (ctx.meta['output']))
common_options.evaluate_flow(
ctx, scores, evaluation, metrics, roc, det, epc, hist, **kwargs)
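
The removed inline body above and ``common_options.evaluate_flow`` both rest on click's command chaining; a minimal, hypothetical sketch of the pattern:

import click

@click.group()
def cli():
    pass

@cli.command()
@click.option('-v', '--verbose', count=True)
def step1(verbose):
    click.echo('metrics...')

@cli.command()
@click.option('-v', '--verbose', count=True)
def step2(verbose):
    click.echo('plots...')

@cli.command()
@click.option('-v', '--verbose', count=True)
@click.pass_context
def evaluate_all(ctx, verbose):
    ctx.invoke(step1, verbose=verbose)  # call with explicit arguments
    ctx.forward(step2)                  # re-use matching parameters implicitly

if __name__ == '__main__':
    cli()
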
'''Runs error analysis on score sets, outputs metrics and plots'''
import bob.measure.script.figure as measure_figure
import bob.bio.base.script.figure as bio_figure
from .error_utils import calc_threshold
ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter')
class Metrics(measure_figure.Metrics):
'''Compute metrics from score files'''
def __init__(self, ctx, scores, evaluation, func_load):
super(Metrics, self).__init__(
ctx, scores, evaluation, func_load,
names=('FtA', 'APCER', 'BPCER', 'FAR', 'FRR', 'ACER'))
def get_thres(self, criterion, dev_neg, dev_pos, far):
if self._criterion == 'bpcer20':
return calc_threshold('bpcer20', dev_neg, dev_pos)
else:
return super(Metrics, self).get_thres(
criterion, dev_neg, dev_pos, far)
class Roc(bio_figure.Roc):
'''ROC for PAD'''
def __init__(self, ctx, scores, evaluation, func_load):
super(Roc, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get('x_label') or 'APCER'
self._y_label = ctx.meta.get('y_label') or '1 - BPCER'
class Det(bio_figure.Det):
def __init__(self, ctx, scores, evaluation, func_load):
super(Det, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get('x_label') or 'APCER (%)'
self._y_label = ctx.meta.get('y_label') or 'BPCER (%)'
class Hist(measure_figure.Hist):
''' Histograms for PAD '''
def _setup_hist(self, neg, pos):
self._title_base = 'PAD'
self._density_hist(
pos[0], n=0, label='Bona Fide', color='C1'
)
self._density_hist(
neg[0], n=1, label='Presentation attack', alpha=0.4, color='C7',
hatch='\\\\'
)
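
Outside of the figure machinery, the same two-histogram layout can be sketched with plain matplotlib (synthetic scores; colors, alpha, and hatch mirror ``_setup_hist`` above):

import numpy
import matplotlib.pyplot as mpl

pos = numpy.random.normal(10., 1., 1000)   # bona fide
neg = numpy.random.normal(-10., 1., 1000)  # presentation attacks

mpl.hist(pos, bins=30, density=True, color='C1', label='Bona Fide')
mpl.hist(neg, bins=30, density=True, color='C7', alpha=0.4,
         hatch='\\\\', label='Presentation attack')
mpl.legend()
mpl.savefig('hist.pdf')
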
@@ -13,89 +13,11 @@ from bob.measure import (
from bob.measure.plot import (det, det_axis, roc_for_far, log_values)
from . import error_utils
ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter')
def calc_threshold(method, neg, pos):
"""Calculates the threshold based on the given method.
The scores should be sorted!
Parameters
----------
method : str
One of ``bpcer20``, ``eer``, ``min-hter``.
neg : array_like
The negative scores. They should be sorted!
pos : array_like
The positive scores. They should be sorted!
Returns
-------
float
The calculated threshold.
Raises
------
ValueError
If method is unknown.
"""
method = method.lower()
if method == 'bpcer20':
threshold = far_threshold(neg, pos, 0.05, True)
elif method == 'eer':
threshold = eer_threshold(neg, pos, True)
elif method == 'min-hter':
threshold = min_hter_threshold(neg, pos, True)
else:
raise ValueError("Unknown threshold criterion: {}".format(method))
return threshold
class Metrics(measure_figure.Metrics):
'''Compute metrics from score files'''
def __init__(self, ctx, scores, evaluation, func_load):
super(Metrics, self).__init__(ctx, scores, evaluation, func_load)
def compute(self, idx, input_scores, input_names):
''' Compute metrics for the given criteria'''
neg_list, pos_list, _ = get_fta_list(input_scores)
dev_neg, dev_pos = neg_list[0], pos_list[0]
dev_file = input_names[0]
if self._eval:
eval_neg, eval_pos = neg_list[1], pos_list[1]
eval_file = input_names[1]
title = self._legends[idx] if self._legends is not None else None
headers = ['' or title, 'Development %s' % dev_file]
if self._eval:
headers.append('Eval. %s' % eval_file)
for m in ALL_CRITERIA:
rows = []
threshold = calc_threshold(m, dev_neg, dev_pos)
click.echo("\nThreshold of %f selected with the %s criterion" % (
threshold, m), file=self.log_file)
apcer, bpcer = farfrr(dev_neg, dev_pos, threshold)
rows.append(['APCER', '{:>5.1f}%'.format(apcer * 100)])
rows.append(['BPCER', '{:>5.1f}%'.format(bpcer * 100)])
rows.append(['ACER', '{:>5.1f}%'.format((apcer + bpcer) * 50)])
if self._eval and eval_neg is not None:
apcer, bpcer = farfrr(eval_neg, eval_pos, threshold)
rows[0].append('{:>5.1f}%'.format(apcer * 100))
rows[1].append('{:>5.1f}%'.format(bpcer * 100))
rows[2].append('{:>5.1f}%'.format((apcer + bpcer) * 50))
click.echo(
tabulate(rows, headers, self._tablefmt),
file=self.log_file
)
class MetricsVuln(measure_figure.Metrics):
def __init__(self, ctx, scores, evaluation, func_load):
super(MetricsVuln, self).__init__(ctx, scores, evaluation, func_load)
''' Compute metrics from score files'''
def compute(self, idx, input_scores, input_names):
@@ -121,20 +43,6 @@ class MetricsVuln(measure_figure.Metrics):
)
class HistPad(measure_figure.Hist):
''' Histograms for PAD '''
def _setup_hist(self, neg, pos):
self._title_base = 'PAD'
self._density_hist(
pos[0], n=0, label='Bona Fide', color='C1'
)
self._density_hist(
neg[0], n=1, label='Presentation attack', alpha=0.4, color='C7',
hatch='\\\\'
)
def _iapmr_dot(threshold, iapmr, real_data, **kwargs):
# plot a dot on threshold versus IAPMR line and show IAPMR as a number
axlim = mpl.axis()
@@ -176,6 +84,10 @@ def _iapmr_plot(scores, threshold, iapmr, real_data, **kwargs):
class HistVuln(measure_figure.Hist):
''' Histograms for vulnerability '''
def __init__(self, ctx, scores, evaluation, func_load):
super(HistVuln, self).__init__(
ctx, scores, evaluation, func_load, nhist_per_system=3)
def _setup_hist(self, neg, pos):
self._title_base = 'Vulnerability'
self._density_hist(
@@ -519,28 +431,13 @@ class Epsc3D(Epsc):
self._pdf_page.savefig()
class Roc(bio_figure.Roc):
'''ROC for PAD'''
def __init__(self, ctx, scores, evaluation, func_load):
super(Roc, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get('x_label') or 'APCER'
self._y_label = ctx.meta.get('y_label') or '1 - BPCER'
class DetPad(bio_figure.Det):
def __init__(self, ctx, scores, evaluation, func_load):
super(DetPad, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get('x_label') or 'APCER'
self._y_label = ctx.meta.get('y_label') or 'BPCER'