"""The main entry for bob.pad and its (click-based) scripts.
"""
import click
import pkg_resources
from click_plugins import with_plugins
from bob.measure.script import common_options
from bob.extension.scripts.click_helper import (verbosity_option,
                                                open_file_mode_option,
                                                AliasedGroup)
import bob.bio.base.script.gen as bio_gen
import bob.bio.base.script.figure as bio_figure
import bob.measure.script.figure as measure_figure
from bob.bio.base.score import load
from . import figure
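
# Note: the commands below all follow the same pattern: click collects the
# options into ``ctx``, score files are parsed into negative/positive score
# arrays, and a processing class from ``figure`` (or ``measure_figure``)
# renders the requested table or plot.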



@click.command()
@click.argument('outdir')
@click.option('-mm', '--mean-match', default=10, type=click.FLOAT, show_default=True)
@click.option('-mnm', '--mean-non-match', default=-10,
              type=click.FLOAT, show_default=True)
@click.option('-n', '--n-sys', default=1, type=click.INT, show_default=True)
@click.option('--five-col/--four-col', default=False, show_default=True)
@verbosity_option()
@click.pass_context
def gen(ctx, outdir, mean_match, mean_non_match, n_sys, five_col):
  """Generate random scores.
  Generates random scores in 4col or 5col format. The scores are generated
  using Gaussian distribution whose mean is an input
  parameter. The generated scores can be used as hypothetical datasets.
  Invokes :py:func:`bob.bio.base.script.commands.gen`.
  """
  ctx.forward(bio_gen.gen)



@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.legends_option()
@common_options.legend_loc_option(dflt='lower-right')
@common_options.no_legend_option()
@common_options.sep_dev_eval_option()
@common_options.output_plot_file_option(default_out='roc.pdf')
@common_options.eval_option()
@common_options.points_curve_option()
@common_options.semilogx_option(True)
@common_options.axes_val_option(dflt='1e-4,1,1e-4,1')
@common_options.x_rotation_option()
@common_options.lines_at_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
def roc(ctx, scores, evaluation, **kargs):
  """Plot ROC (receiver operating characteristic) curve:
  The plot will represent the false match rate on the horizontal axis and the
  false non match rate on the vertical axis.  The values for the axis will be
  computed using :py:func:`bob.measure.roc`.

  You need to provide one or more development score file(s) for each
68 69
  experiment. You can also provide eval files along with dev files. If
  evaluation scores are used, the flag `--eval` must be used. is required
70 71
  in that case. Files must be 4-col format, see
  :py:func:`bob.bio.base.score.load.four_column`
72 73 74
  Examples:
      $ bob pad roc -v dev-scores

      $ bob pad roc -e -v dev-scores1 eval-scores1 dev-scores2 eval-scores2

      $ bob pad roc -e -v -o my_roc.pdf dev-scores1 eval-scores1
  """
  process = figure.Roc(ctx, scores, evaluation, load.split)
  process.run()


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-right')
@common_options.no_legend_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.sep_dev_eval_option()
@common_options.eval_option()
@common_options.axes_val_option(dflt='0.01,95,0.01,95')
@common_options.x_rotation_option(dflt=45)
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@common_options.lines_at_option()
@common_options.min_far_option()
@verbosity_option()
@click.pass_context
def det(ctx, scores, evaluation, **kargs):
  """Plot DET (detection error trade-off) curve:
  modified ROC curve which plots error rates on both axes
  (false positives on the x-axis and false negatives on the y-axis)
110

111
  You need to provide one or more development score file(s) for each
112 113
  experiment. You can also provide eval files along with dev files. If
  evale-scores are used, the flag `--eval` must be used. is required
114 115
  in that case. Files must be 4-col format, see
  :py:func:`bob.bio.base.score.load.four_column` for details.
116 117

  Examples:
    $ bob pad det -v dev-scores eval-scores

    $ bob pad det -e -v scores-{dev,eval}
  """
  process = figure.DetPad(ctx, scores, evaluation, load.split)
  process.run()


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.no_line_option()
@common_options.far_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.print_filenames_option(dflt=False)
@common_options.legends_option()
@common_options.figsize_option(dflt=None)
@common_options.subplot_option()
@common_options.legend_ncols_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
  """ Plots histograms of Bona fida and PA along with threshold
  criterion.

  You need to provide one or more development score file(s) for each
  experiment. You can also provide eval files along with dev files. If only
151
  evaluation are provided, you must use flag `--eval`.
152 153 154

  By default, when eval-scores are given, only eval-scores histograms are
  displayed with threshold line
155
  computed from dev-scores.
156 157

  Examples:
      $ bob pad hist -v dev-scores

      $ bob pad hist -e -v dev-scores1 eval-scores1 dev-scores2 eval-scores2

      $ bob pad hist -e -v --criterion min-hter dev-scores1 eval-scores1
  """
  process = figure.HistPad(ctx, scores, evaluation, load.split)
  process.run()



@click.command()
@common_options.scores_argument(min_arg=1, force_eval=True, nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.legends_option()
@common_options.legend_loc_option(dflt='upper-center')
@common_options.no_legend_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.style_option()
@common_options.linestyles_option()
@common_options.figsize_option(dflt=None)
@verbosity_option()
@click.pass_context
def epc(ctx, scores, **kargs):
  """Plot EPC (expected performance curve):
  plots the error rate on the eval set depending on a threshold selected
  a-priori on the development set and accounts for varying relative cost
  in [0; 1] of FPR and FNR when calculating the threshold.

  You need to provide one or more development score and eval file(s)
191 192
  for each experiment. Files must be 4-columns format, see
  :py:func:`bob.bio.base.score.load.four_column` for details.
193 194

  Examples:
      $ bob pad epc -v dev-scores eval-scores

      $ bob pad epc -v -o my_epc.pdf dev-scores1 eval-scores1
  """
  process = measure_figure.Epc(ctx, scores, True, load.split)
  process.run()



@click.command(context_settings=dict(token_normalize_func=lambda x: x.lower()))
@common_options.scores_argument(nargs=-1)
@common_options.eval_option()
@common_options.table_option()
@open_file_mode_option()
@common_options.output_log_metric_option()
@common_options.legends_option()
@verbosity_option()
@click.pass_context
def metrics(ctx, scores, evaluation, **kwargs):
  """PAD ISO compliant metrics.

  Reports several metrics based on thresholds selected on the development
  set and applies them to the evaluation set (if provided). The thresholds
  used are:

      bpcer20     When APCER is set to 5%.

      eer         When BPCER == APCER.

      min-hter    When HTER is minimum.

  This command produces one table per system. The format of the table can
  be changed through the option ``--tablefmt``.

  Most metrics are according to the ISO/IEC 30107-3:2017 "Information
  technology -- Biometric presentation attack detection -- Part 3: Testing
  and reporting" standard. The reported metrics are:

      APCER: Attack Presentation Classification Error Rate

      BPCER: Bona-fide Presentation Classification Error Rate

      HTER (non-ISO): Half Total Error Rate ((BPCER+APCER)/2)

  Examples:

      $ bob pad metrics /path/to/scores-dev

      $ bob pad metrics /path/to/scores-dev /path/to/scores-eval

      $ bob pad metrics /path/to/system{1,2,3}/score-{dev,eval}
  """
  process = figure.Metrics(ctx, scores, evaluation, load.split)
  process.run()


@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.legends_option()
@common_options.sep_dev_eval_option()
@common_options.table_option()
@common_options.eval_option()
@common_options.output_log_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.lines_at_option()
@common_options.const_layout_option()
@common_options.figsize_option(dflt=None)
@common_options.style_option()
@common_options.linestyles_option()
@verbosity_option()
@click.pass_context
def evaluate(ctx, scores, evaluation, **kwargs):
  '''Runs error analysis on score sets

  \b
  1. Computes the threshold using either the EER or the min. HTER criterion
     on development set scores
  2. Applies the above threshold to evaluation set scores to compute the
     HTER, if an evaluation score set is provided
  3. Reports error rates on the console
  4. Plots ROC, EPC, and DET curves and score distributions to a
     multi-page PDF file


  You need to provide 2 score files for each biometric system in this order:

  \b
  * development scores
  * evaluation scores

  When evaluation scores are provided, ``--eval`` must be passed.

  Examples:
      $ bob pad evaluate -v dev-scores

      $ bob pad evaluate -e -v scores-dev1 scores-eval1 scores-dev2 scores-eval2

      $ bob pad evaluate -e -v /path/to/sys-{1,2,3}/scores-{dev,eval}

      $ bob pad evaluate -e -v -l metrics.txt -o my_plots.pdf dev-scores eval-scores
  '''
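  # ``ctx.invoke`` runs ``metrics`` with only the arguments passed here,
  # while ``ctx.forward`` (used below) also re-uses this command's own
  # parsed parameters for the matching options of the target command.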
  # the first command run truncates the output log file if it already exists
  click.echo("Computing metrics...")
  ctx.invoke(metrics, scores=scores, evaluation=evaluation)
  if 'log' in ctx.meta and ctx.meta['log'] is not None:
      click.echo("[metrics] => %s" % ctx.meta['log'])

  # avoid closing pdf file before all figures are plotted
  ctx.meta['closef'] = False
  if evaluation:
      click.echo("Starting evaluate with dev and eval scores...")
  else:
      click.echo("Starting evaluate with dev scores only...")
  click.echo("Computing ROC...")
  # set axes limits for ROC
  ctx.forward(roc)  # use class defaults plot settings
  click.echo("Computing DET...")
  ctx.forward(det)  # use class defaults plot settings
  # the last one closes the file
  ctx.meta['closef'] = True
  click.echo("Computing score histograms...")
  ctx.meta['criterion'] = 'eer'  # no criterion passed in evaluate
  ctx.forward(hist)
  click.echo("Evaluate successfully completed!")
  click.echo("[plots] => %s" % (ctx.meta['output']))