''' Click commands for ``bob.measure`` '''


import click
from .. import load
from . import figure
from . import common_options
from bob.extension.scripts.click_helper import (verbosity_option,
                                                open_file_mode_option)
@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.eval_option()
@common_options.table_option()
@common_options.output_plot_metric_option()
@common_options.criterion_option()
@common_options.thresholds_option()
@common_options.far_option()
@common_options.titles_option()
@open_file_mode_option()
@verbosity_option()
@click.pass_context
def metrics(ctx, scores, evaluation, **kwargs):
    """Prints a table that contains FtA, FAR, FRR, FMR, FNMR, HTER for a given
    threshold criterion (eer or hter).

    You need to provide one or more development score file(s) for each experiment.
    You can also provide evaluation files along with dev files. If only dev scores
    are provided, you must use flag `--no-evaluation`.

    Resulting table format can be changed using the `--tablefmt`.

    Examples:
        $ bob measure metrics dev-scores

        $ bob measure metrics -l results.txt dev-scores1 eval-scores1

        $ bob measure metrics {dev,eval}-scores1 {dev,eval}-scores2
    """
    # All option values travel through ctx.meta; the Metrics processor reads
    # them there and prints/writes the table when run.
    process = figure.Metrics(ctx, scores, evaluation, load.split_files)
    process.run()

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.title_option()
@common_options.titles_option()
@common_options.sep_dev_eval_option()
@common_options.output_plot_file_option(default_out='roc.pdf')
@common_options.eval_option()
@common_options.points_curve_option()
@common_options.semilogx_option(True)
@common_options.axes_val_option(dflt=[1e-4, 1, 1e-4, 1])
@common_options.x_rotation_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.lines_at_option()
@common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def roc(ctx, scores, evaluation, **kwargs):
    """Plot ROC (receiver operating characteristic) curve:
    The plot will represent the false match rate on the horizontal axis and the
    false non match rate on the vertical axis.  The values for the axis will be
    computed using :py:func:`bob.measure.roc`.

    You need to provide one or more development score file(s) for each experiment.
    You can also provide evaluation files along with dev files. If only dev scores
    are provided, you must use flag `--no-evaluation`.

    Examples:
        $ bob measure roc dev-scores

        $ bob measure roc dev-scores1 eval-scores1 dev-scores2
        eval-scores2

        $ bob measure roc -o my_roc.pdf dev-scores1 eval-scores1
    """
    # Hand everything to the Roc figure processor, which consumes the option
    # values stashed in ctx.meta and renders the plot.
    figure.Roc(ctx, scores, evaluation, load.split_files).run()

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.output_plot_file_option(default_out='det.pdf')
@common_options.title_option()
@common_options.titles_option()
@common_options.sep_dev_eval_option()
@common_options.eval_option()
@common_options.axes_val_option(dflt=[0.01, 95, 0.01, 95])
@common_options.x_rotation_option(dflt=45)
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def det(ctx, scores, evaluation, **kwargs):
    """Plot DET (detection error trade-off) curve:
    modified ROC curve which plots error rates on both axes
    (false positives on the x-axis and false negatives on the y-axis)

    You need to provide one or more development score file(s) for each experiment.
    You can also provide evaluation files along with dev files. If only dev scores
    are provided, you must use flag `--no-evaluation`.

    Examples:
        $ bob measure det dev-scores

        $ bob measure det dev-scores1 eval-scores1 dev-scores2
        eval-scores2

        $ bob measure det -o my_det.pdf dev-scores1 eval-scores1
    """
    # The Det figure processor picks its settings up from ctx.meta and
    # produces the plot when run.
    figure.Det(ctx, scores, evaluation, load.split_files).run()

@click.command()
@common_options.scores_argument(eval_mandatory=True, nargs=-1)
@common_options.output_plot_file_option(default_out='epc.pdf')
@common_options.title_option()
@common_options.titles_option()
@common_options.points_curve_option()
@common_options.const_layout_option()
@common_options.x_label_option()
@common_options.y_label_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def epc(ctx, scores, **kwargs):
    """Plot EPC (expected performance curve):
    plots the error rate on the eval set depending on a threshold selected
    a-priori on the development set and accounts for varying relative cost
    in [0; 1] of FPR and FNR when calculating the threshold.

    You need to provide one or more development score and eval file(s)
    for each experiment.

    Examples:
        $ bob measure epc dev-scores eval-scores

        $ bob measure epc -o my_epc.pdf dev-scores1 eval-scores1
    """
    # EPC always needs eval scores (eval_mandatory=True above), hence the
    # hard-coded True evaluation flag passed to the processor.
    figure.Epc(ctx, scores, True, load.split_files).run()

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.output_plot_file_option(default_out='hist.pdf')
@common_options.eval_option()
@common_options.n_bins_option()
@common_options.criterion_option()
@common_options.thresholds_option()
@common_options.const_layout_option()
@common_options.show_dev_option()
@common_options.print_filenames_option()
@common_options.title_option()
@common_options.titles_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def hist(ctx, scores, evaluation, **kwargs):
    """ Plots histograms of positive and negatives along with threshold
    criterion.

    You need to provide one or more development score file(s) for each experiment.
    You can also provide evaluation files along with dev files. If only dev scores
    are provided, you must use flag `--no-evaluation`.

    By default, when eval-scores are given, only eval-scores histograms are
    displayed with threshold line
    computed from dev-scores. If you want to display dev-scores distributions
    as well, use ``--show-dev`` option.

    Examples:
        $ bob measure hist dev-scores

        $ bob measure hist dev-scores1 eval-scores1 dev-scores2
        eval-scores2

        $ bob measure hist --criter hter --show-dev dev-scores1 eval-scores1
    """
    # The Hist figure processor reads the histogram options from ctx.meta
    # and draws the distributions (plus threshold line) when run.
    figure.Hist(ctx, scores, evaluation, load.split_files).run()

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.sep_dev_eval_option()
@common_options.table_option()
@common_options.eval_option()
@common_options.output_plot_metric_option()
@common_options.output_plot_file_option(default_out='eval_plots.pdf')
@common_options.points_curve_option()
@common_options.semilogx_option(dflt=True)
@common_options.n_bins_option()
@common_options.lines_at_option()
@common_options.const_layout_option()
@common_options.figsize_option()
@common_options.style_option()
@verbosity_option()
@click.pass_context
def evaluate(ctx, scores, evaluation, **kwargs):
    '''Runs error analysis on score sets

    \b
    1. Computes the threshold using either EER or min. HTER criteria on
       development set scores
    2. Applies the above threshold on evaluation set scores to compute the HTER, if a
       eval-score set is provided
    3. Reports error rates on the console
    4. Plots ROC, EPC, DET curves and score distributions to a multi-page PDF
       file


    You need to provide 2 score files for each biometric system in this order:

    \b
    * development scores
    * evaluation scores

    Examples:
        $ bob measure evaluate dev-scores

        $ bob measure evaluate scores-dev1 scores-eval1 scores-dev2
        scores-eval2

        $ bob measure evaluate /path/to/sys-{1,2,3}/scores-{dev,eval}

        $ bob measure evaluate -l metrics.txt -o my_plots.pdf dev-scores eval-scores
    '''
    # First `metrics` pass: open the (optional) log file in 'w' mode so any
    # previous content is erased, and force the EER criterion (the `evaluate`
    # command itself exposes no --criterion option).
    ctx.meta['open_mode'] = 'w'
    click.echo("Computing metrics with EER...")
    ctx.meta['criter'] = 'eer'  # no criterion passed to evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
    # Second `metrics` pass: switch to 'a' so the HTER table is appended to
    # the same log instead of overwriting the EER table.
    ctx.meta['open_mode'] = 'a'
    click.echo("Computing metrics with HTER...")
    ctx.meta['criter'] = 'hter'  # no criterion passed in evaluate
    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
    if 'log' in ctx.meta:
        click.echo("[metrics] => %s" % ctx.meta['log'])

    # All following plots share one multi-page PDF; keep it open between
    # sub-commands by disabling the close-on-finish flag.
    ctx.meta['closef'] = False
    if evaluation:
        click.echo("Starting evaluate with dev and eval scores...")
    else:
        click.echo("Starting evaluate with dev scores only...")
    click.echo("Computing ROC...")
    # `forward` re-dispatches with this context's parameters; each figure
    # class falls back to its own default plot settings (axes limits, etc.).
    ctx.forward(roc) # use class defaults plot settings
    click.echo("Computing DET...")
    ctx.forward(det) # use class defaults plot settings
    if evaluation:
        # EPC requires eval scores, so it is only produced when they exist.
        click.echo("Computing EPC...")
        ctx.forward(epc) # use class defaults plot settings
    # The histogram is the final figure: re-enable closing so the PDF is
    # flushed and closed when it finishes.
    ctx.meta['closef'] = True
    click.echo("Computing score histograms...")
    ctx.meta['criter'] = 'eer'  # no criterion passed in evaluate
    ctx.forward(hist)
    click.echo("Evaluate successfully completed!")
    click.echo("[plots] => %s" % (ctx.meta['output']))