"""Runs error analysis on score sets, outputs metrics and plots"""

import bob.measure.script.figure as measure_figure
from bob.measure.utils import get_fta_list
from bob.measure import farfrr, precision_recall, f_score
import bob.bio.base.script.figure as bio_figure
from .error_utils import calc_threshold, apcer_bpcer
import click
from tabulate import tabulate
import numpy as np


def _normalize_input_scores(input_score, input_name):
    pos, negs = input_score
    # convert scores to sorted numpy arrays and keep a copy of all negatives
    pos = np.ascontiguousarray(sorted(pos))
    all_negs = np.ascontiguousarray(sorted(s for neg in negs.values() for s in neg))
    # FTA is calculated on pos and all_negs so we remove nans from negs
    for k, v in negs.items():
        v = np.ascontiguousarray(sorted(v))
        negs[k] = v[~np.isnan(v)]
    neg_list, pos_list, fta_list = get_fta_list([(all_negs, pos)])
    all_negs, pos, fta = neg_list[0], pos_list[0], fta_list[0]
    return input_name, pos, negs, all_negs, fta
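
# Illustrative call (hypothetical score values): NaN entries count towards the
# failure-to-acquire (FTA) rate and are stripped from the per-PAI negatives.
#
#   pos = [0.9, 0.7, float("nan")]
#   negs = {"print": [0.1, 0.2], "replay": [0.4]}
#   name, pos, negs, all_negs, fta = _normalize_input_scores((pos, negs), "dev")
#   # all_negs is the sorted concatenation of all PAI scores; fta is the
#   # fraction of NaN scores among pos and all_negs.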


class Metrics(bio_figure.Metrics):
    """Compute metrics from score files"""

    def __init__(self, ctx, scores, evaluation, func_load, names):
        if isinstance(names, str):
            names = names.split(",")
        super(Metrics, self).__init__(ctx, scores, evaluation, func_load, names)

    def get_thres(self, criterion, pos, negs, all_negs, far_value):
        return calc_threshold(
            criterion, pos, negs.values(), all_negs, far_value, is_sorted=True
        )
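
    # Minimal sketch of threshold selection on the development set; "far" is
    # the criterion handled explicitly in this module, other criterion names
    # (e.g. "eer") are an assumption about error_utils.calc_threshold:
    #
    #   threshold = self.get_thres("far", dev_pos, dev_negs, dev_all_negs, 0.01)
    #   # the same threshold is later applied unchanged to the evaluation set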

    def _numbers(self, threshold, pos, negs, all_negs, fta):
        pais = list(negs.keys())
        apcer_pais, apcer, bpcer = apcer_bpcer(threshold, pos, *[negs[k] for k in pais])
        apcer_pais = {k: apcer_pais[i] for i, k in enumerate(pais)}
        acer = (apcer + bpcer) / 2.0
        fpr, fnr = farfrr(all_negs, pos, threshold)
        hter = (fpr + fnr) / 2.0
        far = fpr * (1 - fta)
        frr = fta + fnr * (1 - fta)

        nn = all_negs.shape[0]  # total number of attack (negative) scores
        fp = int(round(fpr * nn))  # number of false positives (accepted attacks)
        np = pos.shape[0]  # number of bona fide scores (shadows the numpy alias locally)
        fn = int(round(fnr * np))  # number of false negatives (rejected bona fide)

        # precision and recall
        precision, recall = precision_recall(all_negs, pos, threshold)

        # f_score
        f1_score = f_score(all_negs, pos, threshold, 1)
        metrics = dict(
            apcer_pais=apcer_pais,
            apcer=apcer,
            bpcer=bpcer,
            acer=acer,
            fta=fta,
            fpr=fpr,
            fnr=fnr,
            hter=hter,
            far=far,
            frr=frr,
            fp=fp,
            nn=nn,
            fn=fn,
            np=np,
            precision=precision,
            recall=recall,
            f1_score=f1_score,
        )
        return metrics
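
    # How the returned numbers relate to each other (example PAI names and
    # values are hypothetical):
    #
    #   apcer_pais  -> one APCER per PAI, e.g. {"print": 0.10, "replay": 0.20}
    #   apcer       -> single APCER aggregated over the PAIs by apcer_bpcer()
    #   acer        -> (apcer + bpcer) / 2
    #   hter        -> (fpr + fnr) / 2, computed on bona fide vs. all attacks
    #   far, frr    -> fpr and fnr corrected for the failure-to-acquire rate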

    def _strings(self, metrics):
        n_dec = ".%df" % self._decimal
        for k, v in metrics.items():
            if k in ("precision", "recall", "f1_score"):
                metrics[k] = "%s" % format(v, n_dec)
            elif k in ("np", "nn", "fp", "fn"):
                continue
            elif k in ("fpr", "fnr"):
                if "fp" in metrics:
                    metrics[k] = "%s%% (%d/%d)" % (
                        format(100 * v, n_dec),
                        metrics["fp" if k == "fpr" else "fn"],
                        metrics["np" if k == "fpr" else "nn"],
                    )
                else:
                    metrics[k] = "%s%%" % format(100 * v, n_dec)
            elif k == "apcer_pais":
                metrics[k] = {
                    k1: "%s%%" % format(100 * v1, n_dec) for k1, v1 in v.items()
                }
            else:
                metrics[k] = "%s%%" % format(100 * v, n_dec)

        return metrics
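
    # Example of the formatting above, assuming self._decimal == 3
    # (hypothetical numbers):
    #
    #   {"fpr": 0.015, "fp": 3, "nn": 200, ...}  -> fpr becomes "1.500% (3/200)"
    #   {"precision": 0.987, ...}                -> precision becomes "0.987"
    #   {"apcer_pais": {"print": 0.1}, ...}      -> {"print": "10.000%"}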

    def _get_all_metrics(self, idx, input_scores, input_names):
        """ Compute all metrics for dev and eval scores"""
        for i, (score, name) in enumerate(zip(input_scores, input_names)):
            input_scores[i] = _normalize_input_scores(score, name)

        dev_file, dev_pos, dev_negs, dev_all_negs, dev_fta = input_scores[0]
        if self._eval:
            eval_file, eval_pos, eval_negs, eval_all_negs, eval_fta = input_scores[1]

        threshold = (
            self.get_thres(self._criterion, dev_pos, dev_negs, dev_all_negs, self._far)
            if self._thres is None
            else self._thres[idx]
        )

        title = self._legends[idx] if self._legends is not None else None
        if self._thres is None:
            far_str = ""
            if self._criterion == "far" and self._far is not None:
                far_str = str(self._far)
            click.echo(
                "[Min. criterion: %s %s] Threshold on Development set `%s`: %e"
                % (self._criterion.upper(), far_str, title or dev_file, threshold),
                file=self.log_file,
            )
        else:
            click.echo(
                "[Min. criterion: user provided] Threshold on "
                "Development set `%s`: %e" % (dev_file or title, threshold),
                file=self.log_file,
            )

        res = []
        res.append(
            self._strings(
                self._numbers(threshold, dev_pos, dev_negs, dev_all_negs, dev_fta)
            )
        )

        if self._eval:
            # compute statistics for the eval set using the a priori threshold
            # selected on the dev set
            res.append(
                self._strings(
                    self._numbers(
                        threshold, eval_pos, eval_negs, eval_all_negs, eval_fta
                    )
                )
            )
        else:
            res.append(None)

        return res

    def compute(self, idx, input_scores, input_names):
        """ Compute metrics for the given criteria"""
        title = self._legends[idx] if self._legends is not None else None
        all_metrics = self._get_all_metrics(idx, input_scores, input_names)
        headers = [" " or title, "Development"]
        if self._eval:
            headers.append("Evaluation")
        rows = []

        for name in self.names:
            if name == "apcer_pais":
                for k, v in all_metrics[0][name].items():
                    print_name = f"APCER ({k})"
                    rows += [[print_name, v]]
                    if self._eval:
                        rows[-1].append(all_metrics[1][name][k])
                continue
            print_name = name.upper()
            rows += [[print_name, all_metrics[0][name]]]
            if self._eval:
                rows[-1].append(all_metrics[1][name])

        click.echo(tabulate(rows, headers, self._tablefmt), file=self.log_file)
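
    # Rough shape of the emitted table (illustrative numbers only, one decimal
    # assumed; the exact layout depends on self._tablefmt and on `names`):
    #
    #                      Development    Evaluation
    #     APCER (print)    5.0%           6.0%
    #     APCER            5.0%           6.0%
    #     BPCER            1.0%           1.5%
    #     ACER             3.0%           3.8%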


class MultiMetrics(Metrics):
    """Compute metrics from score files"""

    def __init__(self, ctx, scores, evaluation, func_load, names):
        super(MultiMetrics, self).__init__(
            ctx, scores, evaluation, func_load, names=names
        )
        self.rows = []
        self.headers = None
        self.pais = None

    def _compute_headers(self, pais):
        names = list(self.names)
        if "apcer_pais" in names:
            idx = names.index("apcer_pais")
            names = (
                [n.upper() for n in names[:idx]]
                + self.pais
                + [n.upper() for n in names[idx + 1 :]]
            )
        self.headers = ["Methods"] + names
        if self._eval and "hter" in self.names:
            self.headers.insert(1, "HTER (dev)")

    def _strings(self, metrics):
        formatted_metrics = dict()
        for name in self.names:
            if name == "apcer_pais":
                for pai in self.pais:
                    mean = metrics[pai].mean()
                    std = metrics[pai].std()
                    mean = super()._strings({pai: mean})[pai]
                    std = super()._strings({pai: std})[pai]
                    formatted_metrics[pai] = f"{mean} ({std})"
            else:
                mean = metrics[name].mean()
                std = metrics[name].std()
                mean = super()._strings({name: mean})[name]
                std = super()._strings({name: std})[name]
                formatted_metrics[name] = f"{mean} ({std})"

        return formatted_metrics
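
    # Each cell becomes "mean (std)" over the protocols; e.g. APCER values of
    # [0.10, 0.20, 0.15] render as "15.0% (4.1%)" when self._decimal == 1.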

    def _structured_array(self, metrics):
        names = list(metrics[0].keys())
        if "apcer_pais" in names:
            idx = names.index("apcer_pais")
            pais = list(f"APCER ({pai})" for pai in metrics[0]["apcer_pais"].keys())
            names = names[:idx] + pais + names[idx + 1 :]
            self.pais = self.pais or pais
        formats = [float] * len(names)
        dtype = dict(names=names, formats=formats)
        array = []
        for each in metrics:
            array.append([])
            for k, v in each.items():
                if k == "apcer_pais":
                    array[-1].extend(list(v.values()))
                else:
                    array[-1].append(v)
        array = [tuple(a) for a in array]
        return np.array(array, dtype=dtype)
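
    # The structured array exposes one column per metric so that _strings can
    # take mean()/std() across protocols, e.g. (hypothetical PAI names):
    #
    #   dtype names: ("APCER (print)", "APCER (replay)", "apcer", "bpcer", ...)
    #   self._dev_metrics["bpcer"]  # -> one value per protocol / fold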

    def compute(self, idx, input_scores, input_names):
        """Computes the average of metrics over several protocols."""
        for i, (score, name) in enumerate(zip(input_scores, input_names)):
            input_scores[i] = _normalize_input_scores(score, name)

        step = 2 if self._eval else 1
        self._dev_metrics = []
        self._thresholds = []
        for scores in input_scores[::step]:
            name, pos, negs, all_negs, fta = scores
            threshold = (
                self.get_thres(self._criterion, pos, negs, all_negs, self._far)
                if self._thres is None
                else self._thres[idx]
            )
            self._thresholds.append(threshold)
            self._dev_metrics.append(self._numbers(threshold, pos, negs, all_negs, fta))
        self._dev_metrics = self._structured_array(self._dev_metrics)

        if self._eval:
            self._eval_metrics = []
            for i, scores in enumerate(input_scores[1::step]):
                name, pos, negs, all_negs, fta = scores
                threshold = self._thresholds[i]
                self._eval_metrics.append(
                    self._numbers(threshold, pos, negs, all_negs, fta)
                )
            self._eval_metrics = self._structured_array(self._eval_metrics)

        title = self._legends[idx] if self._legends is not None else name

        dev_metrics = self._strings(self._dev_metrics)

        if self._eval and "hter" in dev_metrics:
            self.rows.append([title, dev_metrics["hter"]])
        elif not self._eval:
            row = [title]
            for name in self.names:
                if name == "apcer_pais":
                    for pai in self.pais:
                        row += [dev_metrics[pai]]
                else:
                    row += [dev_metrics[name]]
            self.rows.append(row)
        else:
            self.rows.append([title])

        if self._eval:
            eval_metrics = self._strings(self._eval_metrics)
            row = []
            for name in self.names:
                if name == "apcer_pais":
                    for pai in self.pais:
                        row += [eval_metrics[pai]]
                else:
                    row += [eval_metrics[name]]

            self.rows[-1].extend(row)

        # compute header based on found PAI names
        if self.headers is None:
            self._compute_headers(self.pais)

    def end_process(self):
        click.echo(
            tabulate(self.rows, self.headers, self._tablefmt), file=self.log_file
        )
        super(MultiMetrics, self).end_process()
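
    # Overall flow, as a sketch: with evaluation enabled, input_scores alternate
    # dev/eval pairs (one pair per protocol or fold); a threshold is chosen on
    # each dev set and applied to the matching eval set, and the final table
    # reports mean (std) per metric, plus an "HTER (dev)" column when "hter" is
    # among the requested names.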


class Roc(bio_figure.Roc):
    """ROC for PAD"""

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Roc, self).__init__(ctx, scores, evaluation, func_load)
        self._x_label = ctx.meta.get("x_label") or "APCER"
        default_y_label = "1-BPCER" if self._semilogx else "BPCER"
        self._y_label = ctx.meta.get("y_label") or default_y_label


class Det(bio_figure.Det):
    def __init__(self, ctx, scores, evaluation, func_load):
        super(Det, self).__init__(ctx, scores, evaluation, func_load)
        self._x_label = ctx.meta.get("x_label") or "APCER (%)"
        self._y_label = ctx.meta.get("y_label") or "BPCER (%)"


class Hist(measure_figure.Hist):
    """ Histograms for PAD """

    def _setup_hist(self, neg, pos):
        self._title_base = "PAD"
        self._density_hist(pos[0], n=0, label="Bona-fide", color="C1")
        self._density_hist(
            neg[0],
            n=1,
            label="Presentation attack",
            alpha=0.4,
            color="C7",
            hatch="\\\\",
        )