Commit e477cb6f authored by Theophile GENTILHOMME's avatar Theophile GENTILHOMME
Browse files

Add DET for PAD, add, rename and remove options for some commands, fix some...

Add DET for PAD, add, rename and remove options for some commands, fix some typos and various small changes
parent f94c2c81
Pipeline #19183 failed with stage
in 41 minutes and 56 seconds
"""Generates PAD ISO compliant EPC based on the score files
"""
import click
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import verbosity_option
from bob.bio.base.score import load
from . import figure
def FUNC_SPLIT(x):
    """Load the given score file(s) and split each into (neg, pos) arrays.

    Kept as a module-level function (rather than the original lambda
    assignment, PEP 8 E731) so tracebacks show a useful name; the public
    name and call signature are unchanged.
    """
    return load.load_files(x, load.split)
@click.command()
@common_options.scores_argument(eval_mandatory=True, min_len=2, nargs=-1)
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.titles_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option()
@common_options.bool_option(
    'no-spoof', 'ns', '', False
)
@verbosity_option()
@common_options.axes_val_option(dflt=[0.01, 95, 0.01, 95])
@common_options.x_rotation_option(dflt=45)
@common_options.x_label_option()
@common_options.y_label_option()
@click.option('-c', '--criteria', default=None, show_default=True,
              help='Criteria for threshold selection',
              type=click.Choice(('eer', 'min-hter', 'bpcer20')))
@click.option('--real-data/--no-real-data', default=True, show_default=True,
              help='If False, will annotate the plots hypothetically, instead '
              'of with real data values of the calculated error rates.')
@click.pass_context
def det(ctx, scores, criteria, real_data, **kwargs):
    """Plot DET (Detection Error Tradeoff) curves for PAD systems.

    You need to provide 2 or 4 score files for each PAD system, in this
    order:

    \b
    * licit development scores
    * licit evaluation scores
    * spoof development scores (when ``--no-spoof`` is False (default))
    * spoof evaluation scores (when ``--no-spoof`` is False (default))

    Examples:

        $ bob pad det --no-spoof dev-scores eval-scores

        $ bob pad det {licit,spoof}/scores-{dev,eval}
    """
    # evaluation=True: eval-set files are mandatory for this command
    process = figure.Det(ctx, scores, True, FUNC_SPLIT, criteria, real_data)
    process.run()
...@@ -12,12 +12,13 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split) ...@@ -12,12 +12,13 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split)
@common_options.scores_argument(eval_mandatory=True, min_len=2, nargs=-1) @common_options.scores_argument(eval_mandatory=True, min_len=2, nargs=-1)
@common_options.output_plot_file_option(default_out='epc.pdf') @common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.titles_option() @common_options.titles_option()
@common_options.axis_fontsize_option()
@common_options.const_layout_option() @common_options.const_layout_option()
@common_options.figsize_option() @common_options.figsize_option()
@common_options.style_option()
@common_options.bool_option( @common_options.bool_option(
'iapmr', 'I', 'Whether to plot the IAPMR related lines or not.', True 'iapmr', 'I', 'Whether to plot the IAPMR related lines or not.', True
) )
@common_options.style_option()
@verbosity_option() @verbosity_option()
@click.pass_context @click.pass_context
def epc(ctx, scores, **kwargs): def epc(ctx, scores, **kwargs):
...@@ -49,9 +50,9 @@ def epc(ctx, scores, **kwargs): ...@@ -49,9 +50,9 @@ def epc(ctx, scores, **kwargs):
@common_options.scores_argument(eval_mandatory=True, min_len=2, nargs=-1) @common_options.scores_argument(eval_mandatory=True, min_len=2, nargs=-1)
@common_options.output_plot_file_option(default_out='epsc.pdf') @common_options.output_plot_file_option(default_out='epsc.pdf')
@common_options.titles_option() @common_options.titles_option()
@common_options.figsize_option()
@common_options.axis_fontsize_option()
@common_options.const_layout_option() @common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@common_options.bool_option( @common_options.bool_option(
'wer', 'w', 'Whether to plot the WER related lines or not.', True 'wer', 'w', 'Whether to plot the WER related lines or not.', True
) )
...@@ -63,7 +64,7 @@ def epc(ctx, scores, **kwargs): ...@@ -63,7 +64,7 @@ def epc(ctx, scores, **kwargs):
) )
@click.option('-c', '--criteria', default="eer", show_default=True, @click.option('-c', '--criteria', default="eer", show_default=True,
help='Criteria for threshold selection', help='Criteria for threshold selection',
type=click.Choice(('eer', 'hter', 'wer'))) type=click.Choice(('eer', 'min-hter', 'bpcer20')))
@click.option('-vp', '--var-param', default="omega", show_default=True, @click.option('-vp', '--var-param', default="omega", show_default=True,
help='Name of the varying parameter', help='Name of the varying parameter',
type=click.Choice(('omega', 'beta'))) type=click.Choice(('omega', 'beta')))
...@@ -73,7 +74,7 @@ def epc(ctx, scores, **kwargs): ...@@ -73,7 +74,7 @@ def epc(ctx, scores, **kwargs):
@verbosity_option() @verbosity_option()
@click.pass_context @click.pass_context
def epsc(ctx, scores, criteria, var_param, fixed_param, three_d, **kwargs): def epsc(ctx, scores, criteria, var_param, fixed_param, three_d, **kwargs):
"""Plot EPSC (expected performance curve): """Plot EPSC (expected performance spoofing curve):
You need to provide 4 score You need to provide 4 score
files for each biometric system in this order: files for each biometric system in this order:
......
...@@ -4,14 +4,15 @@ import pkg_resources # to make sure bob gets imported properly ...@@ -4,14 +4,15 @@ import pkg_resources # to make sure bob gets imported properly
import logging import logging
import click import click
import numpy as np import numpy as np
import matplotlib.pyplot as mpl
import bob.measure.script.figure as measure_figure import bob.measure.script.figure as measure_figure
from tabulate import tabulate from tabulate import tabulate
import matplotlib.pyplot as mpl
from bob.extension.scripts.click_helper import verbosity_option from bob.extension.scripts.click_helper import verbosity_option
from bob.measure.utils import (get_fta, get_thres) from bob.measure.utils import (get_fta, get_thres)
from bob.measure import ( from bob.measure import (
far_threshold, eer_threshold, min_hter_threshold, farfrr, epc far_threshold, eer_threshold, min_hter_threshold, farfrr, epc, ppndf
) )
from bob.measure.plot import (det, det_axis)
from . import error_utils from . import error_utils
ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter') ALL_CRITERIA = ('bpcer20', 'eer', 'min-hter')
...@@ -207,8 +208,7 @@ class PadPlot(measure_figure.PlotBase): ...@@ -207,8 +208,7 @@ class PadPlot(measure_figure.PlotBase):
'''Base class for PAD plots''' '''Base class for PAD plots'''
def __init__(self, ctx, scores, evaluation, func_load): def __init__(self, ctx, scores, evaluation, func_load):
super(PadPlot, self).__init__(ctx, scores, evaluation, func_load) super(PadPlot, self).__init__(ctx, scores, evaluation, func_load)
if 'figsize' in ctx.meta: mpl.rcParams['figure.constrained_layout.use'] = self._clayout
mpl.figure(figsize=ctx.meta['figsize'])
def _process_scores(self, dev_score, eval_score): def _process_scores(self, dev_score, eval_score):
'''Process score files and return neg/pos/fta for eval and dev''' '''Process score files and return neg/pos/fta for eval and dev'''
...@@ -244,7 +244,7 @@ class PadPlot(measure_figure.PlotBase): ...@@ -244,7 +244,7 @@ class PadPlot(measure_figure.PlotBase):
li, la = ax.get_legend_handles_labels() li, la = ax.get_legend_handles_labels()
lines += li lines += li
labels += la labels += la
mpl.gcf().legend(lines, labels, fancybox=True, framealpha=0.5) mpl.gca().legend(lines, labels, loc=0, fancybox=True, framealpha=0.5)
class Epc(PadPlot): class Epc(PadPlot):
''' Handles the plotting of EPC ''' ''' Handles the plotting of EPC '''
...@@ -313,7 +313,6 @@ class Epc(PadPlot): ...@@ -313,7 +313,6 @@ class Epc(PadPlot):
mpl.title(title) mpl.title(title)
#legends for all axes #legends for all axes
self._plot_legends() self._plot_legends()
mpl.gcf().set_tight_layout(True)
mpl.xticks(rotation=self._x_rotation) mpl.xticks(rotation=self._x_rotation)
self._pdf_page.savefig(mpl.gcf()) self._pdf_page.savefig(mpl.gcf())
...@@ -440,7 +439,7 @@ class Epsc(PadPlot): ...@@ -440,7 +439,7 @@ class Epsc(PadPlot):
ax1.set_xticklabels(ax1.get_xticks()) ax1.set_xticklabels(ax1.get_xticks())
ax1.set_yticklabels(ax1.get_yticks()) ax1.set_yticklabels(ax1.get_yticks())
mpl.xticks(rotation=self._x_rotation) mpl.xticks(rotation=self._x_rotation)
self._pdf_page.savefig(bbox_inches='tight') self._pdf_page.savefig()
class Epsc3D(Epsc): class Epsc3D(Epsc):
''' 3D EPSC plots for PAD''' ''' 3D EPSC plots for PAD'''
...@@ -458,6 +457,7 @@ class Epsc3D(Epsc): ...@@ -458,6 +457,7 @@ class Epsc3D(Epsc):
title = self._titles[idx] if self._titles is not None else None title = self._titles[idx] if self._titles is not None else None
mpl.gcf().clear() mpl.gcf().clear()
mpl.gcf().set_constrained_layout(self._clayout)
from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm from matplotlib import cm
...@@ -480,7 +480,8 @@ class Epsc3D(Epsc): ...@@ -480,7 +480,8 @@ class Epsc3D(Epsc):
# the following order: frr, far, IAPMR, far_w, wer_wb, hter_wb # the following order: frr, far, IAPMR, far_w, wer_wb, hter_wb
wer_errors = 100 * errors[2 if self._iapmr else 4] wer_errors = 100 * errors[2 if self._iapmr else 4]
ax1 = mpl.gcf().add_subplot(111, projection='3d') ax1 = mpl.gcf().add_subplot(111, projection='3d',
constrained_layout=self._clayout)
W, B = np.meshgrid(omega, beta) W, B = np.meshgrid(omega, beta)
...@@ -504,4 +505,172 @@ class Epsc3D(Epsc): ...@@ -504,4 +505,172 @@ class Epsc3D(Epsc):
ax1.set_yticklabels(ax1.get_yticks()) ax1.set_yticklabels(ax1.get_yticks())
ax1.set_zticklabels(ax1.get_zticks()) ax1.set_zticklabels(ax1.get_zticks())
self._pdf_page.savefig(bbox_inches='tight') self._pdf_page.savefig()
class Det(PadPlot):
    '''DET plots for PAD: licit scenario, optionally overlaid with spoof.'''

    def __init__(self, ctx, scores, evaluation, func_load, criteria, real_data):
        super(Det, self).__init__(ctx, scores, evaluation, func_load)
        # ``--no-spoof`` flag: when True only the licit curve is drawn
        self._no_spoof = ctx.meta.get('no_spoof', False)
        # threshold-selection criterion ('eer', 'min-hter', 'bpcer20') or None
        self._criteria = criteria
        # annotate with measured error rates unless explicitly disabled
        self._real_data = True if real_data is None else real_data

    def compute(self, idx, dev_score, dev_file, eval_score, eval_file=None):
        '''Plot the DET curve(s) for one system and, when a criterion was
        requested, mark and annotate the corresponding operating point(s).

        ``dev_score``/``eval_score`` are sequences of (neg, pos) pairs:
        index 0 is the licit scenario and index 1 (when present) the spoof
        scenario.
        '''
        licit_dev_neg = dev_score[0][0]
        licit_dev_pos = dev_score[0][1]
        licit_eval_neg = eval_score[0][0]
        licit_eval_pos = eval_score[0][1]
        spoof_eval_neg = eval_score[1][0] if len(dev_score) > 1 else None
        spoof_eval_pos = eval_score[1][1] if len(dev_score) > 1 else None
        det(
            licit_eval_neg,
            licit_eval_pos,
            self._points,
            color=self._colors[idx],
            linestyle='-',
            label=self._label("licit", dev_file[0], idx)
        )
        if not self._no_spoof and spoof_eval_neg is not None:
            det(
                spoof_eval_neg,
                spoof_eval_pos,
                self._points,
                color=self._colors[idx],
                linestyle='--',
                label=self._label("spoof", eval_file[0], idx)
            )

        if self._criteria is None:
            return

        # threshold selected on the licit development set
        thres_baseline = calc_threshold(
            self._criteria, licit_dev_neg, licit_dev_pos
        )
        axlim = mpl.axis()

        # evaluation FAR/FRR at that threshold (licit scenario), then
        # mapped onto the normal-deviate scale used by the DET plot
        farfrr_licit = farfrr(licit_eval_neg, licit_eval_pos, thres_baseline)
        farfrr_licit_det = [ppndf(i) for i in farfrr_licit]

        # same for the spoof scenario -- only when spoof scores were given
        # (fixes a crash: previously farfrr(None, None, ...) was called
        # when only licit files were provided together with a criterion)
        farfrr_spoof = farfrr_spoof_det = None
        if spoof_eval_neg is not None:
            farfrr_spoof = farfrr(
                spoof_eval_neg, spoof_eval_pos, thres_baseline)
            farfrr_spoof_det = [ppndf(i) for i in farfrr_spoof]

        # horizontal line marking the FRR at the selected threshold
        if not self._real_data:
            mpl.axhline(
                y=farfrr_licit_det[1],
                xmin=axlim[2],
                xmax=axlim[3],
                color='k',
                linestyle='--',
                label="FRR @ EER")
        else:
            mpl.axhline(
                y=farfrr_licit_det[1],
                xmin=axlim[0],
                xmax=axlim[1],
                color='k',
                linestyle='--',
                label="FRR = %.2f%%" %
                (farfrr_licit[1] * 100))

        # FAR operating point, licit scenario
        mpl.plot(
            farfrr_licit_det[0],
            farfrr_licit_det[1],
            'o',
            color=self._colors[idx],
            markersize=9)
        # IAPMR operating point, spoof scenario
        if farfrr_spoof_det is not None:
            mpl.plot(
                farfrr_spoof_det[0],
                farfrr_spoof_det[1],
                'o',
                color=self._colors[idx],
                markersize=9)

        # annotation anchor, slightly offset from the licit point itself
        xyannotate_licit = [
            ppndf(0.7 * farfrr_licit[0]),
            ppndf(1.8 * farfrr_licit[1])
        ]
        if not self._real_data:
            mpl.annotate(
                'FMR @\noperating point',
                xy=(farfrr_licit_det[0], farfrr_licit_det[1]),
                xycoords='data',
                xytext=(xyannotate_licit[0], xyannotate_licit[1]),
                color=self._colors[idx])
        else:
            mpl.annotate(
                'FAR=%.2f%%' % (farfrr_licit[0] * 100),
                xy=(farfrr_licit_det[0], farfrr_licit_det[1]),
                xycoords='data',
                xytext=(xyannotate_licit[0], xyannotate_licit[1]),
                color=self._colors[idx],
                size='large')

        if farfrr_spoof is not None:
            xyannotate_spoof = [
                ppndf(0.95 * farfrr_spoof[0]),
                ppndf(1.8 * farfrr_licit[1])
            ]
            if not self._real_data:
                mpl.annotate(
                    'IAPMR @\noperating point',
                    xy=(farfrr_spoof_det[0], farfrr_spoof_det[1]),
                    xycoords='data',
                    xytext=(xyannotate_spoof[0], xyannotate_spoof[1]),
                    color=self._colors[idx])
            else:
                mpl.annotate(
                    'IAPMR=\n%.2f%%' % (farfrr_spoof[0] * 100),
                    xy=(farfrr_spoof_det[0], farfrr_spoof_det[1]),
                    xycoords='data',
                    xytext=(xyannotate_spoof[0], xyannotate_spoof[1]),
                    color=self._colors[idx],
                    size='large')

    def end_process(self):
        '''Set title, legend, axis labels and grid, save the figure, and
        close the PDF when appropriate.'''
        add = ''
        if not self._no_spoof:
            add = " and overlaid SPOOF scenario"
        title = self._title if self._title is not None else \
            ('DET: LICIT' + add)
        mpl.title(title)
        mpl.xlabel(self._x_label or "False Acceptance Rate (%)")
        mpl.ylabel(self._y_label or "False Rejection Rate (%)")
        mpl.grid(True, color=self._grid_color)
        mpl.legend(loc='best')
        self._set_axis()
        fig = mpl.gcf()
        mpl.xticks(rotation=self._x_rotation)
        # shrink tick label fonts so the dense DET grid stays readable
        mpl.tick_params(axis='both', which='major', labelsize=4)
        for tick in mpl.gca().xaxis.get_major_ticks():
            tick.label.set_fontsize(6)
        for tick in mpl.gca().yaxis.get_major_ticks():
            tick.label.set_fontsize(6)
        self._pdf_page.savefig(fig)
        # do not close the PDF when running from evaluate (shared PdfPages)
        if 'PdfPages' in self._ctx.meta and \
                ('closef' not in self._ctx.meta or self._ctx.meta['closef']):
            self._pdf_page.close()

    def _set_axis(self):
        # user-provided limits win; otherwise use a sensible DET default
        if self._axlim is not None and None not in self._axlim:
            det_axis(self._axlim)
        else:
            det_axis([0.01, 99, 0.01, 99])
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
""" """
import click import click
from bob.measure.script import common_options from bob.measure.script import common_options
from bob.extension.scripts.click_helper import verbosity_option from bob.extension.scripts.click_helper import (verbosity_option, bool_option)
from bob.bio.base.score import load from bob.bio.base.score import load
from . import figure from . import figure
...@@ -14,19 +14,20 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split) ...@@ -14,19 +14,20 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split)
@common_options.eval_option() @common_options.eval_option()
@common_options.n_bins_option() @common_options.n_bins_option()
@common_options.criterion_option() @common_options.criterion_option()
@common_options.axis_fontsize_option()
@common_options.thresholds_option() @common_options.thresholds_option()
@common_options.const_layout_option() @common_options.const_layout_option()
@common_options.show_dev_option() @common_options.show_dev_option()
@common_options.print_filenames_option(dflt=False) @common_options.print_filenames_option(dflt=False)
@common_options.titles_option() @common_options.titles_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option() @verbosity_option()
@click.pass_context @click.pass_context
def hist(ctx, scores, evaluation, **kwargs): def hist(ctx, scores, evaluation, **kwargs):
""" Plots histograms of Bona fida and PA along with threshold """ Plots histograms of Bona fida and PA along with threshold
criterion. criterion.
You need provide one or more development score file(s) for each experiment. You need to provide one or more development score file(s) for each experiment.
You can also provide eval files along with dev files. If only dev scores You can also provide eval files along with dev files. If only dev scores
are provided, you must use flag `--no-evaluation`. are provided, you must use flag `--no-evaluation`.
...@@ -48,24 +49,25 @@ def hist(ctx, scores, evaluation, **kwargs): ...@@ -48,24 +49,25 @@ def hist(ctx, scores, evaluation, **kwargs):
@click.command() @click.command()
@common_options.scores_argument(nargs=-1, eval_mandatory=True, min_len=2) @common_options.scores_argument(nargs=-1, eval_mandatory=True, min_len=2)
@common_options.output_plot_file_option(default_out='hist.pdf') @common_options.output_plot_file_option(default_out='vuln.pdf')
@common_options.eval_option() @common_options.eval_option()
@common_options.n_bins_option() @common_options.n_bins_option()
@common_options.criterion_option() @common_options.criterion_option()
@common_options.axis_fontsize_option()
@common_options.thresholds_option() @common_options.thresholds_option()
@common_options.const_layout_option() @common_options.const_layout_option()
@common_options.show_dev_option() @common_options.show_dev_option()
@common_options.print_filenames_option(dflt=False) @common_options.print_filenames_option(dflt=False)
@common_options.bool_option( @bool_option(
'iapmr-line', 'I', 'Whether to plot the IAPMR related lines or not.', True 'iapmr-line', 'I', 'Whether to plot the IAPMR related lines or not.', True
) )
@common_options.bool_option( @bool_option(
'real-data', 'R', 'real-data', 'R',
'If False, will annotate the plots hypothetically, instead ' 'If False, will annotate the plots hypothetically, instead '
'of with real data values of the calculated error rates.', True 'of with real data values of the calculated error rates.', True
) )
@common_options.titles_option() @common_options.titles_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option() @verbosity_option()
@click.pass_context @click.pass_context
def vuln(ctx, scores, evaluation, **kwargs): def vuln(ctx, scores, evaluation, **kwargs):
...@@ -83,7 +85,7 @@ def vuln(ctx, scores, evaluation, **kwargs): ...@@ -83,7 +85,7 @@ def vuln(ctx, scores, evaluation, **kwargs):
See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
vulnerability analysis. vulnerability analysis.
You need provide one or more development score file(s) for each experiment. You need to provide one or more development score file(s) for each experiment.
You can also provide eval files along with dev files. If only dev-scores You can also provide eval files along with dev files. If only dev-scores
are used set the flag `--no-evaluation` are used set the flag `--no-evaluation`
is required in that case. is required in that case.
......
...@@ -2,7 +2,8 @@ ...@@ -2,7 +2,8 @@
""" """
import click import click
from bob.measure.script import common_options from bob.measure.script import common_options
from bob.extension.scripts.click_helper import verbosity_option from bob.extension.scripts.click_helper import (verbosity_option,
open_file_mode_option)
from bob.bio.base.score import load from bob.bio.base.score import load
from . import figure from . import figure
...@@ -12,8 +13,8 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split) ...@@ -12,8 +13,8 @@ FUNC_SPLIT = lambda x: load.load_files(x, load.split)
@common_options.scores_argument(nargs=-1) @common_options.scores_argument(nargs=-1)
@common_options.eval_option() @common_options.eval_option()
@common_options.table_option() @common_options.table_option()
@common_options.open_file_mode_option() @open_file_mode_option()
@common_options.output_plot_metric_option() @common_options.output_log_metric_option()
@common_options.titles_option() @common_options.titles_option()
@verbosity_option() @verbosity_option()
@click.pass_context @click.pass_context
......
...@@ -2,7 +2,26 @@ import sys ...@@ -2,7 +2,26 @@ import sys
import click import click
from click.testing import CliRunner from click.testing import CliRunner
import pkg_resources import pkg_resources
from ..script import (metrics, histograms, epc) from ..script import (metrics, histograms, epc, det)
def test_det():
    """Smoke-test the ``det`` command on the bundled licit/spoof scores."""
    resources = (
        'data/licit/scores-dev',
        'data/licit/scores-eval',
        'data/spoof/scores-dev',
        'data/spoof/scores-eval',
    )
    score_files = [
        pkg_resources.resource_filename('bob.pad.base.test', rc)
        for rc in resources
    ]
    runner = CliRunner()
    with runner.isolated_filesystem():
        cli_args = ['-c', 'min-hter', '--output', 'DET.pdf'] + score_files
        result = runner.invoke(det.det, cli_args)
        assert result.exit_code == 0, (result.exit_code, result.output)
def test_hist(): def test_hist():
licit_dev = pkg_resources.resource_filename('bob.pad.base.test', licit_dev = pkg_resources.resource_filename('bob.pad.base.test',
...@@ -16,21 +35,21 @@ def test_hist(): ...@@ -16,21 +35,21 @@ def test_hist():
runner = CliRunner() runner = CliRunner()
with runner.isolated_filesystem(): with runner.isolated_filesystem():
result = runner.invoke(histograms.hist, ['--no-evaluation', licit_dev]) result = runner.invoke(histograms.hist, ['--no-evaluation', licit_dev])
assert result.exit_code == 0 assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem(): with runner.isolated_filesystem():
result = runner.invoke(histograms.hist, ['--criter', 'hter', '--output', result = runner.invoke(histograms.hist, ['--criter', 'hter', '--output',
'HISTO.pdf', '-b', 'HISTO.pdf', '-b',
30, '--no-evaluation', 30, '--no-evaluation',
licit_dev, spoof_dev]) licit_dev, spoof_dev])
assert result.exit_code == 0 assert result.exit_code == 0, (result.exit_code, result.output)
with runner.isolated_filesystem(): with runner.isolated_filesystem():
result = runner.invoke(histograms.hist, ['--criter', 'eer', '--output', result = runner.invoke(histograms.hist, ['--criter', 'eer', '--output',
'HISTO.pdf', '-b', 30, '-F', 'HISTO.pdf', '-b', 30,
3, licit_dev