"""The main entry for bob.pad and its(click-based) scripts.
"""

import os
import numpy
import click
from click.types import FLOAT
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import (
    verbosity_option, bool_option, list_float_option, CONTEXT_SETTINGS
)
from bob.core import random
from bob.io.base import create_directories_safe
from bob.bio.base.score import load

from . import vuln_figure as figure


NUM_GENUINE_ACCESS = 5000
NUM_ZEIMPOSTORS = 5000
NUM_PA = 5000


def fnmr_at_option(dflt=' ', **kwargs):
  '''Get option to draw const FNMR lines'''
  return list_float_option(
      name='fnmr', short_name='fnmr',
      desc='If given, draw horizontal lines at the given FNMR position. '
      'Your values must be separated with a comma (,) without space. '
      'This option works in ROC and DET curves.',
      nitems=None, dflt=dflt, **kwargs
  )

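# Example (hypothetical values): passing ``--fnmr 0.001,0.01`` to the roc or
# det commands below draws horizontal lines at FNMR = 0.001 and FNMR = 0.01.
# As stated in the help text above, the values must be comma-separated,
# without spaces.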

def gen_score_distr(mean_gen, mean_zei, mean_pa, sigma_gen=1, sigma_zei=1,
                    sigma_pa=1):
  """Generates random scores for genuine accesses, zero-effort impostors and
  presentation attacks, drawn from normal distributions with the given means
  and standard deviations."""
  mt = random.mt19937()  # initialise the random number generator

  genuine_generator = random.normal(numpy.float32, mean_gen, sigma_gen)
  zei_generator = random.normal(numpy.float32, mean_zei, sigma_zei)
  pa_generator = random.normal(numpy.float32, mean_pa, sigma_pa)

  genuine_scores = [genuine_generator(mt) for i in range(NUM_GENUINE_ACCESS)]
  zei_scores = [zei_generator(mt) for i in range(NUM_ZEIMPOSTORS)]
  pa_scores = [pa_generator(mt) for i in range(NUM_PA)]

  return genuine_scores, zei_scores, pa_scores

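# A rough numpy-only sketch of what gen_score_distr() returns (illustration
# only; the real code goes through bob.core.random so that a single MT19937
# state is shared by the three generators):
#
#   genuine_scores = numpy.random.normal(mean_gen, sigma_gen, NUM_GENUINE_ACCESS)
#   zei_scores = numpy.random.normal(mean_zei, sigma_zei, NUM_ZEIMPOSTORS)
#   pa_scores = numpy.random.normal(mean_pa, sigma_pa, NUM_PA)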

def write_scores_to_file(neg, pos, filename, attack=False):
  """Writes score distributions into 4-column score files. For the format of
    the 4-column score files, please refer to Bob's documentation.

  Parameters
  ----------
  neg : array_like
      Scores for negative samples.
  pos : array_like
      Scores for positive samples.
  filename : str
      The path to write the scores to.
  attack : bool
      If True, the negative scores are written with the ``attack`` label
      instead of the default ``y`` label, marking them as presentation
      attacks rather than zero-effort impostors.
  """
  create_directories_safe(os.path.dirname(filename))
  with open(filename, 'wt') as f:
    for i in pos:
      f.write('x x foo %f\n' % i)
    for i in neg:
      if attack:
        f.write('x attack foo %f\n' % i)
      else:
        f.write('x y foo %f\n' % i)

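# For reference, each line written above follows Bob's 4-column score format
# (roughly ``<claimed-id> <real-id> <test-label> <score>``). A positive score
# of 7.2, for instance, comes out as (values are made up):
#
#   x x foo 7.200000
#
# and a negative score written with ``attack=True`` as:
#
#   x attack foo 7.200000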

@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('outdir')
@click.option('--mean-gen', default=7, type=FLOAT, show_default=True)
@click.option('--mean-zei', default=3, type=FLOAT, show_default=True)
@click.option('--mean-pa', default=5, type=FLOAT, show_default=True)
@verbosity_option()
def gen(outdir, mean_gen, mean_zei, mean_pa):
  """Generate random scores.
  Generates random scores for three types of verification attempts:
  genuine users, zero-effort impostors and spoofing attacks and writes them
  into 4-column score files for so called licit and spoof scenario. The
  scores are generated using Gaussian distribution whose mean is an input
  parameter. The generated scores can be used as hypothetical datasets.
  """
  # Generate the data
  genuine_dev, zei_dev, pa_dev = gen_score_distr(
      mean_gen, mean_zei, mean_pa)
  genuine_eval, zei_eval, pa_eval = gen_score_distr(
      mean_gen, mean_zei, mean_pa)

  # Write the data into files
  write_scores_to_file(zei_dev, genuine_dev,
                       os.path.join(outdir, 'licit', 'scores-dev'))
  write_scores_to_file(zei_eval, genuine_eval,
                       os.path.join(outdir, 'licit', 'scores-eval'))
  write_scores_to_file(pa_dev, genuine_dev,
                       os.path.join(outdir, 'spoof', 'scores-dev'),
                       attack=True)
  write_scores_to_file(pa_eval, genuine_eval,
                       os.path.join(outdir, 'spoof', 'scores-eval'),
                       attack=True)

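# For reference, running the ``gen`` command above (presumably exposed as
# ``bob vuln gen``) produces the following files, where ``<outdir>`` is the
# path given on the command line:
#
#   <outdir>/licit/scores-dev
#   <outdir>/licit/scores-eval
#   <outdir>/spoof/scores-dev
#   <outdir>/spoof/scores-eval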

@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(min_arg=2, nargs=-1)
@common_options.output_plot_file_option(default_out='vuln_roc.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.title_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option(dflt=None)
@common_options.min_far_option()
@common_options.axes_val_option()
@verbosity_option()
@common_options.x_rotation_option(dflt=45)
@common_options.x_label_option()
@common_options.y_label_option()
@click.option('--real-data/--no-real-data', default=True, show_default=True,
              help='If False, will annotate the plots hypothetically, instead '
              'of with real data values of the calculated error rates.')
@fnmr_at_option()
@click.pass_context
def roc(ctx, scores, real_data, **kwargs):
  """Plot ROC

  You need to provide 2 score files for each vulnerability system, in this
  order:

  \b
  * licit scores
  * spoof scores

  Examples:
      $ bob vuln roc -v licit-scores spoof-scores

      $ bob vuln roc -v scores-{licit,spoof}
  """
  process = figure.RocVuln(ctx, scores, True, load.split, real_data, False)
  process.run()


@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(min_arg=2, nargs=-1)
@common_options.output_plot_file_option(default_out='vuln_det.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.title_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option(dflt=None)
@verbosity_option()
@common_options.axes_val_option(dflt='0.01,95,0.01,95')
@common_options.x_rotation_option(dflt=45)
@common_options.x_label_option()
@common_options.y_label_option()
@click.option('--real-data/--no-real-data', default=True, show_default=True,
              help='If False, will annotate the plots hypothetically, instead '
              'of with real data values of the calculated error rates.')
@fnmr_at_option()
@click.pass_context
def det(ctx, scores, real_data, **kwargs):
  """Plot DET

  You need to provide 2 score files for each vulnerability system, in this
  order:

  \b
  * licit scores
  * spoof scores

  Examples:
      $ bob vuln det -v licit-scores spoof-scores

      $ bob vuln det -v scores-{licit,spoof}
  """
  process = figure.DetVuln(ctx, scores, True, load.split, real_data, False)
  process.run()


@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(min_arg=2, force_eval=True, nargs=-1)
@common_options.output_plot_file_option(default_out='vuln_epc.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option()
@common_options.title_option()
@common_options.const_layout_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.bool_option(
    'iapmr', 'I', 'Whether to plot the IAPMR related lines or not.', True
)
@verbosity_option()
@click.pass_context
def epc(ctx, scores, **kwargs):
  """Plot EPC (expected performance curve).

  You need to provide 4 score files for each biometric system, in this order:

  \b
  * licit development scores
  * licit evaluation scores
  * spoof development scores
  * spoof evaluation scores

  See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
  vulnerability analysis.

  Examples:
      $ bob vuln epc -v dev-scores eval-scores

      $ bob vuln epc -v -o my_epc.pdf dev-scores1 eval-scores1

      $ bob vuln epc -v {licit,spoof}/scores-{dev,eval}
  """
  process = figure.Epc(ctx, scores, True, load.split)
  process.run()


@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(min_arg=2, force_eval=True, nargs=-1)
@common_options.output_plot_file_option(default_out='vuln_epsc.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option()
@common_options.const_layout_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.bool_option(
    'wer', 'w', 'Whether to plot the WER related lines or not.', True
)
@common_options.bool_option(
    'three-d', 'D', 'If true, generate 3D plots', False
)
@common_options.bool_option(
    'iapmr', 'I', 'Whether to plot the IAPMR related lines or not.', False
)
@click.option('-c', '--criteria', default="eer", show_default=True,
              help='Criteria for threshold selection',
              type=click.Choice(('eer', 'min-hter')))
@click.option('-vp', '--var-param', default="omega", show_default=True,
              help='Name of the varying parameter',
              type=click.Choice(('omega', 'beta')))
@click.option('-fp', '--fixed-param', default=0.5, show_default=True,
              help='Value of the fixed parameter',
              type=click.FLOAT)
@click.option('-s', '--sampling', default=5, show_default=True,
              help='Sampling of the EPSC 3D surface', type=click.INT)
@verbosity_option()
@click.pass_context
def epsc(ctx, scores, criteria, var_param, fixed_param, three_d, sampling,
         **kwargs):
  """Plot EPSC (expected performance spoofing curve).

  You need to provide 4 score files for each biometric system, in this order:

  \b
  * licit development scores
  * licit evaluation scores
  * spoof development scores
  * spoof evaluation scores

  See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
  vulnerability analysis.

  Note that when using 3D plots with the option ``--three-d``, you cannot
  plot both WER and IAPMR on the same figure (which is possible in 2D).

  Examples:
      $ bob vuln epsc -v -o my_epsc.pdf dev-scores1 eval-scores1

      $ bob vuln epsc -v -D {licit,spoof}/scores-{dev,eval}
  """
  if three_d:
    if ctx.meta['wer'] and ctx.meta['iapmr']:
      raise click.BadParameter('Cannot plot both WER and IAPMR in 3D')
    ctx.meta['sampling'] = sampling
    process = figure.Epsc3D(
        ctx, scores, True, load.split,
        criteria, var_param, fixed_param
    )
  else:
    process = figure.Epsc(
        ctx, scores, True, load.split,
        criteria, var_param, fixed_param
    )
  process.run()

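# A hypothetical epsc call that varies beta instead of omega and fixes the
# other parameter at 0.7 (both values are made up):
#
#   $ bob vuln epsc -v -vp beta -fp 0.7 {licit,spoof}/scores-{dev,eval}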

@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(nargs=-1, min_arg=2)
@common_options.output_plot_file_option(default_out='vuln_hist.pdf')
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.thresholds_option()
@common_options.print_filenames_option(dflt=False)
@bool_option(
    'iapmr-line', 'I', 'Whether to plot the IAPMR related lines or not.', True
)
@bool_option(
    'real-data', 'R',
    'If False, will annotate the plots hypothetically, instead '
    'of with real data values of the calculated error rates.', True
)
@common_options.titles_option()
@common_options.const_layout_option()
@common_options.figsize_option(dflt=None)
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.style_option()
@common_options.hide_dev_option()
@common_options.eval_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
  '''Vulnerability analysis distributions.

  Plots the histogram of score distributions. You need to provide 2 or 4
  score files for each biometric system, in the order given below. When
  evaluation scores are provided, you must use the ``--eval`` option.

  \b
  * licit development scores
  * (optional) licit evaluation scores
  * spoof development scores
  * (optional) spoof evaluation scores

  See :ref:`bob.pad.base.vulnerability` in the documentation for a guide on
  vulnerability analysis.

  By default, when evaluation scores are given, only their histograms are
  displayed, with the threshold line computed from the development scores.

  Examples:

      $ bob vuln hist -e -v licit/scores-dev licit/scores-eval \
                          spoof/scores-dev spoof/scores-eval

      $ bob vuln hist -e -v {licit,spoof}/scores-{dev,eval}
  '''
  process = figure.HistVuln(ctx, scores, evaluation, load.split)
  process.run()

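# A hypothetical hist call with development scores only (no evaluation
# scores, hence no ``-e`` flag):
#
#   $ bob vuln hist -v licit/scores-dev spoof/scores-dev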

@click.command(context_settings=common_options.CONTEXT_SETTINGS)
@common_options.scores_argument(min_arg=2, force_eval=True, nargs=-1)
@common_options.output_plot_file_option(default_out='fmr_iapmr.pdf')
@common_options.legends_option()
@common_options.no_legend_option()
@common_options.legend_loc_option()
@common_options.title_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.figsize_option(dflt=None)
@verbosity_option()
@common_options.axes_val_option()
@common_options.x_rotation_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.semilogx_option()
@click.pass_context
def fmr_iapmr(ctx, scores, **kwargs):
  """Plot FMR vs IAPMR

  You need to provide 4 score files for each vulnerability system, in this
  order:

  \b
  * licit development scores
  * licit evaluation scores
  * spoof development scores
  * spoof evaluation scores

  Examples:
      $ bob vuln fmr_iapmr -v dev-scores eval-scores

      $ bob vuln fmr_iapmr -v {licit,spoof}/scores-{dev,eval}
  """
  process = figure.FmrIapmr(ctx, scores, True, load.split)
  process.run()