Skip to content
Snippets Groups Projects

Separate semilogx and TPR options in ROC plots

Merged Amir MOHAMMADI requested to merge roc-y-label into master
1 file
+ 142
114
Compare changes
  • Side-by-side
  • Inline
+ 142
114
'''Plots and measures for bob.bio.base'''
"""Plots and measures for bob.bio.base"""
import math
import math
import click
import click
import matplotlib.pyplot as mpl
import matplotlib.pyplot as mpl
import bob.measure.script.figure as measure_figure
import bob.measure.script.figure as measure_figure
import bob.measure
import bob.measure
from bob.measure import (plot, utils)
from bob.measure import plot, utils
from tabulate import tabulate
from tabulate import tabulate
import logging
import logging
LOGGER = logging.getLogger("bob.bio.base")
LOGGER = logging.getLogger("bob.bio.base")
 
class Roc(measure_figure.Roc):
    """ROC plot for bob.bio.base with FMR/FNMR axis labels.

    Overrides the generic bob.measure ROC figure so the default axis
    labels use biometric terminology.  The y-label depends on whether
    the plot shows TPR (``1 - FNMR``) or FNMR directly.
    """

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Roc, self).__init__(ctx, scores, evaluation, func_load)
        # User-provided labels (via --x-label / --y-label) win over defaults.
        self._x_label = ctx.meta.get("x_label") or "FMR"
        # NOTE(review): relies on the base class setting ``self._tpr``
        # (the TPR option split out of semilogx) — confirm the installed
        # bob.measure version provides it.
        default_y_label = "1 - FNMR" if self._tpr else "FNMR"
        self._y_label = ctx.meta.get("y_label") or default_y_label
class Det(measure_figure.Det):
    """DET plot for bob.bio.base with FMR/FNMR percentage axis labels."""

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Det, self).__init__(ctx, scores, evaluation, func_load)
        # DET axes are conventionally shown in percent.
        self._x_label = ctx.meta.get("x_label") or "FMR (%)"
        self._y_label = ctx.meta.get("y_label") or "FNMR (%)"
class Cmc(measure_figure.PlotBase):
    """Handles the plotting of CMC curves."""

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Cmc, self).__init__(ctx, scores, evaluation, func_load)
        # Rank axis is logarithmic by default.
        self._semilogx = ctx.meta.get("semilogx", True)
        self._titles = self._titles or ["CMC dev.", "CMC eval."]
        self._x_label = self._x_label or "Rank"
        self._y_label = self._y_label or "Identification rate"
        # Highest rank seen across all plotted systems; updated in compute().
        self._max_R = 0

    def compute(self, idx, input_scores, input_names):
        """Plot CMC for dev and eval data using
        :py:func:`bob.measure.plot.cmc`."""
        mpl.figure(1)
        if self._eval:
            # Dev curve: solid line unless dev/eval are split across figures.
            linestyle = "-" if not self._split else self._linestyles[idx]
            LOGGER.info("CMC dev. curve using %s", input_names[0])
            rank = plot.cmc(
                input_scores[0],
                logx=self._semilogx,
                color=self._colors[idx],
                linestyle=linestyle,
                label=self._label("dev.", idx),
            )
            self._max_R = max(rank, self._max_R)
            # Eval curve: dashed on the shared figure, or its own figure
            # (with the per-system linestyle) when --split is given.
            linestyle = "--"
            if self._split:
                mpl.figure(2)
                linestyle = self._linestyles[idx]
            LOGGER.info("CMC eval. curve using %s", input_names[1])
            rank = plot.cmc(
                input_scores[1],
                logx=self._semilogx,
                color=self._colors[idx],
                linestyle=linestyle,
                label=self._label("eval.", idx),
            )
            self._max_R = max(rank, self._max_R)
        else:
            # No evaluation set: only the development curve is drawn.
            LOGGER.info("CMC dev. curve using %s", input_names[0])
            rank = plot.cmc(
                input_scores[0],
                logx=self._semilogx,
                color=self._colors[idx],
                linestyle=self._linestyles[idx],
                label=self._label("dev.", idx),
            )
            self._max_R = max(rank, self._max_R)
class Dir(measure_figure.PlotBase):
    """Handles the plotting of the DIR (detection & identification) curve."""

    def __init__(self, ctx, scores, evaluation, func_load):
        super(Dir, self).__init__(ctx, scores, evaluation, func_load)
        # False-alarm axis is logarithmic by default.
        self._semilogx = ctx.meta.get("semilogx", True)
        # DIR is computed at this identification rank (default: rank 1).
        self._rank = ctx.meta.get("rank", 1)
        self._titles = self._titles or ["DIR curve"] * 2
        self._x_label = self._x_label or "False Alarm Rate"
        self._y_label = self._y_label or "DIR"
def compute(self, idx, input_scores, input_names):
def compute(self, idx, input_scores, input_names):
''' Plot DIR for dev and eval data using
""" Plot DIR for dev and eval data using
:py:func:`bob.measure.plot.detection_identification_curve`'''
:py:func:`bob.measure.plot.detection_identification_curve`"""
mpl.figure(1)
mpl.figure(1)
if self._eval:
if self._eval:
linestyle = '-' if not self._split else self._linestyles[idx]
linestyle = "-" if not self._split else self._linestyles[idx]
LOGGER.info("DIR dev. curve using %s", input_names[0])
LOGGER.info("DIR dev. curve using %s", input_names[0])
plot.detection_identification_curve(
plot.detection_identification_curve(
input_scores[0], rank=self._rank, logx=self._semilogx,
input_scores[0],
color=self._colors[idx], linestyle=linestyle,
rank=self._rank,
label=self._label('dev', idx)
logx=self._semilogx,
 
color=self._colors[idx],
 
linestyle=linestyle,
 
label=self._label("dev", idx),
)
)
linestyle = '--'
linestyle = "--"
if self._split:
if self._split:
mpl.figure(2)
mpl.figure(2)
linestyle = self._linestyles[idx]
linestyle = self._linestyles[idx]
LOGGER.info("DIR eval. curve using %s", input_names[1])
LOGGER.info("DIR eval. curve using %s", input_names[1])
plot.detection_identification_curve(
plot.detection_identification_curve(
input_scores[1], rank=self._rank, logx=self._semilogx,
input_scores[1],
color=self._colors[idx], linestyle=linestyle,
rank=self._rank,
label=self._label('eval', idx)
logx=self._semilogx,
 
color=self._colors[idx],
 
linestyle=linestyle,
 
label=self._label("eval", idx),
)
)
else:
else:
LOGGER.info("DIR dev. curve using %s", input_names[0])
LOGGER.info("DIR dev. curve using %s", input_names[0])
plot.detection_identification_curve(
plot.detection_identification_curve(
input_scores[0], rank=self._rank, logx=self._semilogx,
input_scores[0],
color=self._colors[idx], linestyle=self._linestyles[idx],
rank=self._rank,
label=self._label('dev', idx)
logx=self._semilogx,
 
color=self._colors[idx],
 
linestyle=self._linestyles[idx],
 
label=self._label("dev", idx),
)
)
if self._min_dig is not None:
if self._min_dig is not None:
@@ -120,56 +135,66 @@ class Dir(measure_figure.PlotBase):
@@ -120,56 +135,66 @@ class Dir(measure_figure.PlotBase):
class Metrics(measure_figure.Metrics):
    """Compute metrics from score files."""

    def __init__(
        self,
        ctx,
        scores,
        evaluation,
        func_load,
        names=(
            "Failure to Acquire",
            "False Match Rate",
            "False Non Match Rate",
            "False Accept Rate",
            "False Reject Rate",
            "Half Total Error Rate",
        ),
    ):
        super(Metrics, self).__init__(ctx, scores, evaluation, func_load, names)

    def init_process(self):
        # For the recognition-rate criterion, make sure there is one
        # threshold slot per system (None means "use the default").
        if self._criterion == "rr":
            self._thres = (
                [None] * self.n_systems if self._thres is None else self._thres
            )
def compute(self, idx, input_scores, input_names):
def compute(self, idx, input_scores, input_names):
''' Compute metrics for the given criteria'''
""" Compute metrics for the given criteria"""
title = self._legends[idx] if self._legends is not None else None
title = self._legends[idx] if self._legends is not None else None
headers = ['' or title, 'Dev. %s' % input_names[0]]
headers = ["" or title, "Dev. %s" % input_names[0]]
if self._eval and input_scores[1] is not None:
if self._eval and input_scores[1] is not None:
headers.append('eval % s' % input_names[1])
headers.append("eval % s" % input_names[1])
if self._criterion == 'rr':
if self._criterion == "rr":
rr = bob.measure.recognition_rate(
rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])
input_scores[0], self._thres[idx])
dev_rr = "%.1f%%" % (100 * rr)
dev_rr = "%.1f%%" % (100 * rr)
raws = [['RR', dev_rr]]
raws = [["RR", dev_rr]]
if self._eval and input_scores[1] is not None:
if self._eval and input_scores[1] is not None:
rr = bob.measure.recognition_rate(
rr = bob.measure.recognition_rate(input_scores[1], self._thres[idx])
input_scores[1], self._thres[idx])
eval_rr = "%.1f%%" % (100 * rr)
eval_rr = "%.1f%%" % (100 * rr)
raws[0].append(eval_rr)
raws[0].append(eval_rr)
click.echo(
click.echo(tabulate(raws, headers, self._tablefmt), file=self.log_file)
tabulate(raws, headers, self._tablefmt), file=self.log_file
elif self._criterion == "mindcf":
 
if "cost" in self._ctx.meta:
 
cost = self._ctx.meta.get("cost", 0.99)
 
threshold = (
 
bob.measure.min_weighted_error_rate_threshold(
 
input_scores[0][0], input_scores[0][1], cost
 
)
 
if self._thres is None
 
else self._thres[idx]
)
)
elif self._criterion == 'mindcf':
if 'cost' in self._ctx.meta:
cost = self._ctx.meta.get('cost', 0.99)
threshold = bob.measure.min_weighted_error_rate_threshold(
input_scores[0][0], input_scores[0][1], cost
) if self._thres is None else self._thres[idx]
if self._thres is None:
if self._thres is None:
click.echo(
click.echo(
"[minDCF - Cost:%f] Threshold on Development set `%s`: %e"
"[minDCF - Cost:%f] Threshold on Development set `%s`: %e"
% (cost, input_names[0], threshold),
% (cost, input_names[0], threshold),
file=self.log_file
file=self.log_file,
)
)
else:
else:
click.echo(
click.echo(
"[minDCF] User defined Threshold: %e" % threshold,
"[minDCF] User defined Threshold: %e" % threshold,
file=self.log_file
file=self.log_file,
)
)
# apply threshold to development set
# apply threshold to development set
far, frr = bob.measure.farfrr(
far, frr = bob.measure.farfrr(
@@ -177,11 +202,12 @@ class Metrics(measure_figure.Metrics):
@@ -177,11 +202,12 @@ class Metrics(measure_figure.Metrics):
)
)
dev_far_str = "%.1f%%" % (100 * far)
dev_far_str = "%.1f%%" % (100 * far)
dev_frr_str = "%.1f%%" % (100 * frr)
dev_frr_str = "%.1f%%" % (100 * frr)
dev_mindcf_str = "%.1f%%" % (
dev_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.0)
(cost * far + (1 - cost) * frr) * 100.)
raws = [
raws = [['FAR', dev_far_str],
["FAR", dev_far_str],
['FRR', dev_frr_str],
["FRR", dev_frr_str],
['minDCF', dev_mindcf_str]]
["minDCF", dev_mindcf_str],
 
]
if self._eval and input_scores[1] is not None:
if self._eval and input_scores[1] is not None:
# apply threshold to development set
# apply threshold to development set
far, frr = bob.measure.farfrr(
far, frr = bob.measure.farfrr(
@@ -189,27 +215,23 @@ class Metrics(measure_figure.Metrics):
@@ -189,27 +215,23 @@ class Metrics(measure_figure.Metrics):
)
)
eval_far_str = "%.1f%%" % (100 * far)
eval_far_str = "%.1f%%" % (100 * far)
eval_frr_str = "%.1f%%" % (100 * frr)
eval_frr_str = "%.1f%%" % (100 * frr)
eval_mindcf_str = "%.1f%%" % (
eval_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.0)
(cost * far + (1 - cost) * frr) * 100.)
raws[0].append(eval_far_str)
raws[0].append(eval_far_str)
raws[1].append(eval_frr_str)
raws[1].append(eval_frr_str)
raws[2].append(eval_mindcf_str)
raws[2].append(eval_mindcf_str)
click.echo(
click.echo(tabulate(raws, headers, self._tablefmt), file=self.log_file)
tabulate(raws, headers, self._tablefmt), file=self.log_file
elif self._criterion == "cllr":
)
cllr = bob.measure.calibration.cllr(input_scores[0][0], input_scores[0][1])
elif self._criterion == 'cllr':
cllr = bob.measure.calibration.cllr(input_scores[0][0],
input_scores[0][1])
min_cllr = bob.measure.calibration.min_cllr(
min_cllr = bob.measure.calibration.min_cllr(
input_scores[0][0], input_scores[0][1]
input_scores[0][0], input_scores[0][1]
)
)
dev_cllr_str = "%.1f%%" % cllr
dev_cllr_str = "%.1f%%" % cllr
dev_min_cllr_str = "%.1f%%" % min_cllr
dev_min_cllr_str = "%.1f%%" % min_cllr
raws = [['Cllr', dev_cllr_str],
raws = [["Cllr", dev_cllr_str], ["minCllr", dev_min_cllr_str]]
['minCllr', dev_min_cllr_str]]
if self._eval and input_scores[1] is not None:
if self._eval and input_scores[1] is not None:
cllr = bob.measure.calibration.cllr(input_scores[1][0],
cllr = bob.measure.calibration.cllr(
input_scores[1][1])
input_scores[1][0], input_scores[1][1]
 
)
min_cllr = bob.measure.calibration.min_cllr(
min_cllr = bob.measure.calibration.min_cllr(
input_scores[1][0], input_scores[1][1]
input_scores[1][0], input_scores[1][1]
)
)
@@ -217,24 +239,24 @@ class Metrics(measure_figure.Metrics):
@@ -217,24 +239,24 @@ class Metrics(measure_figure.Metrics):
eval_min_cllr_str = "%.1f%%" % min_cllr
eval_min_cllr_str = "%.1f%%" % min_cllr
raws[0].append(eval_cllr_str)
raws[0].append(eval_cllr_str)
raws[1].append(eval_min_cllr_str)
raws[1].append(eval_min_cllr_str)
click.echo(
click.echo(tabulate(raws, headers, self._tablefmt), file=self.log_file)
tabulate(raws, headers, self._tablefmt), file=self.log_file
)
else:
else:
title = self._legends[idx] if self._legends is not None else None
title = self._legends[idx] if self._legends is not None else None
all_metrics = self._get_all_metrics(idx, input_scores, input_names)
all_metrics = self._get_all_metrics(idx, input_scores, input_names)
headers = [' ' or title, 'Development']
headers = [" " or title, "Development"]
rows = [[self.names[0], all_metrics[0][0]],
rows = [
[self.names[1], all_metrics[0][1]],
[self.names[0], all_metrics[0][0]],
[self.names[2], all_metrics[0][2]],
[self.names[1], all_metrics[0][1]],
[self.names[3], all_metrics[0][3]],
[self.names[2], all_metrics[0][2]],
[self.names[4], all_metrics[0][4]],
[self.names[3], all_metrics[0][3]],
[self.names[5], all_metrics[0][5]]]
[self.names[4], all_metrics[0][4]],
 
[self.names[5], all_metrics[0][5]],
 
]
if self._eval:
if self._eval:
# computes statistics for the eval set based on the threshold a
# computes statistics for the eval set based on the threshold a
# priori
# priori
headers.append('Evaluation')
headers.append("Evaluation")
rows[0].append(all_metrics[1][0])
rows[0].append(all_metrics[1][0])
rows[1].append(all_metrics[1][1])
rows[1].append(all_metrics[1][1])
rows[2].append(all_metrics[1][2])
rows[2].append(all_metrics[1][2])
@@ -246,25 +268,31 @@ class Metrics(measure_figure.Metrics):
@@ -246,25 +268,31 @@ class Metrics(measure_figure.Metrics):
class MultiMetrics(measure_figure.MultiMetrics):
    """Compute metrics from score files."""

    def __init__(self, ctx, scores, evaluation, func_load):
        # Same metric names as :class:`Metrics`, kept in sync.
        super(MultiMetrics, self).__init__(
            ctx,
            scores,
            evaluation,
            func_load,
            names=(
                "Failure to Acquire",
                "False Match Rate",
                "False Non Match Rate",
                "False Accept Rate",
                "False Reject Rate",
                "Half Total Error Rate",
            ),
        )
class Hist(measure_figure.Hist):
    """Histograms for biometric scores."""

    def _setup_hist(self, neg, pos):
        self._title_base = "Biometric scores"
        # Genuine (positive) scores first, then zero-effort impostors
        # (negative scores), each on its own histogram slot.
        self._density_hist(pos[0], n=0, label="Genuines", alpha=0.9, color="C2")
        self._density_hist(
            neg[0], n=1, label="Zero-effort impostors", alpha=0.8, color="C0"
        )
Loading