From 6ea7c229c717e110085ca6514688a98ac4b84323 Mon Sep 17 00:00:00 2001
From: Theophile GENTILHOMME <tgentilhomme@jurasix08.idiap.ch>
Date: Fri, 20 Apr 2018 14:31:16 +0200
Subject: [PATCH] Adapt to the more generic implementation of the bob.measure
 base class

---
 bob/bio/base/script/commands.py | 20 ++++-----
 bob/bio/base/script/figure.py   | 76 ++++++++++++++++-----------------
 2 files changed, 45 insertions(+), 51 deletions(-)

diff --git a/bob/bio/base/script/commands.py b/bob/bio/base/script/commands.py
index 1c8e7554..c6e01585 100644
--- a/bob/bio/base/script/commands.py
+++ b/bob/bio/base/script/commands.py
@@ -8,8 +8,6 @@ from bob.measure.script import common_options
 from bob.extension.scripts.click_helper import (verbosity_option,
                                                 open_file_mode_option)
 
-FUNC_SPLIT = lambda x: load.load_files(x, load.split)
-FUNC_CMC = lambda x: load.load_files(x, load.cmc)
 
 def rank_option(**kwargs):
     '''Get option for rank parameter'''
@@ -62,9 +60,9 @@ def metrics(ctx, scores, evaluation, **kargs):
         $ bob bio metrics {dev,eval}-scores1 {dev,eval}-scores2
     """
     if 'criter' in ctx.meta and ctx.meta['criter'] == 'rr':
-        process = bio_figure.Metrics(ctx, scores, evaluation, FUNC_CMC)
+        process = bio_figure.Metrics(ctx, scores, evaluation, load.cmc)
     else:
-        process = bio_figure.Metrics(ctx, scores, evaluation, FUNC_SPLIT)
+        process = bio_figure.Metrics(ctx, scores, evaluation, load.split)
     process.run()
 
 @click.command()
@@ -106,7 +104,7 @@ def roc(ctx, scores, evaluation, **kargs):
 
         $ bob bio roc -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Roc(ctx, scores, evaluation, FUNC_SPLIT)
+    process = bio_figure.Roc(ctx, scores, evaluation, load.split)
     process.run()
 
 @click.command()
@@ -146,11 +144,11 @@ def det(ctx, scores, evaluation, **kargs):
 
         $ bob bio det -o my_det.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Det(ctx, scores, evaluation, FUNC_SPLIT)
+    process = bio_figure.Det(ctx, scores, evaluation, load.split)
     process.run()
 
 @click.command()
-@common_options.scores_argument(eval_mandatory=True, nargs=-1)
+@common_options.scores_argument(min_arg=2, nargs=-1)
 @common_options.output_plot_file_option(default_out='epc.pdf')
 @common_options.titles_option()
 @common_options.points_curve_option()
@@ -175,7 +173,7 @@ def epc(ctx, scores, **kargs):
 
         $ bob bio epc -o my_epc.pdf dev-scores1 eval-scores1
     """
-    process = measure_figure.Epc(ctx, scores, True, FUNC_SPLIT)
+    process = measure_figure.Epc(ctx, scores, True, load.split)
     process.run()
 
 @click.command()
@@ -215,7 +213,7 @@ def cmc(ctx, scores, evaluation, **kargs):
 
         $ bob bio cmc -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Cmc(ctx, scores, evaluation, FUNC_CMC)
+    process = bio_figure.Cmc(ctx, scores, evaluation, load.cmc)
     process.run()
 
 @click.command()
@@ -264,7 +262,7 @@ def dic(ctx, scores, evaluation, **kargs):
 
         $ bob bio dic -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Dic(ctx, scores, evaluation, FUNC_CMC)
+    process = bio_figure.Dic(ctx, scores, evaluation, load.cmc)
     process.run()
 
 @click.command()
@@ -306,7 +304,7 @@ def hist(ctx, scores, evaluation, **kwargs):
 
         $ bob bio hist --criter --show-dev hter dev-scores1 eval-scores1
     """
-    process = bio_figure.Hist(ctx, scores, evaluation, FUNC_SPLIT)
+    process = bio_figure.Hist(ctx, scores, evaluation, load.split)
     process.run()
 
 @click.command()
diff --git a/bob/bio/base/script/figure.py b/bob/bio/base/script/figure.py
index d8611d4f..3dcd984f 100644
--- a/bob/bio/base/script/figure.py
+++ b/bob/bio/base/script/figure.py
@@ -34,17 +34,16 @@ class Cmc(measure_figure.PlotBase):
         self._y_label = self._y_label or 'Identification rate'
         self._max_R = 0
 
-    def compute(self, idx, dev_score, dev_file=None,
-                eval_score=None, eval_file=None):
+    def compute(self, idx, input_scores, input_names):
         ''' Plot CMC for dev and eval data using
         :py:func:`bob.measure.plot.cmc`'''
         mpl.figure(1)
         if self._eval:
             linestyle = '-' if not self._split else measure_figure.LINESTYLES[idx % 14]
             rank = plot.cmc(
-                dev_score, logx=self._semilogx,
+                input_scores[0], logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('development', dev_file, idx)
+                label=self._label('development', input_names[0], idx)
             )
             self._max_R = max(rank, self._max_R)
             linestyle = '--'
@@ -53,16 +52,16 @@ class Cmc(measure_figure.PlotBase):
                 linestyle = measure_figure.LINESTYLES[idx % 14]
 
             rank = plot.cmc(
-                eval_score, logx=self._semilogx,
+                input_scores[1], logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('eval', eval_file, idx)
+                label=self._label('eval', input_names[1], idx)
             )
             self._max_R = max(rank, self._max_R)
         else:
             rank = plot.cmc(
-                dev_score, logx=self._semilogx,
+                input_scores[0], logx=self._semilogx,
                 color=self._colors[idx], linestyle=measure_figure.LINESTYLES[idx % 14],
-                label=self._label('development', dev_file, idx)
+                label=self._label('development', input_names[0], idx)
             )
             self._max_R = max(rank, self._max_R)
 
@@ -77,17 +76,16 @@ class Dic(measure_figure.PlotBase):
         self._x_label = self._title or 'FAR'
         self._y_label = self._title or 'DIR'
 
-    def compute(self, idx, dev_score, dev_file=None,
-                eval_score=None, eval_file=None):
+    def compute(self, idx, input_scores, input_names):
         ''' Plot DIC for dev and eval data using
         :py:func:`bob.measure.plot.detection_identification_curve`'''
         mpl.figure(1)
         if self._eval:
             linestyle = '-' if not self._split else measure_figure.LINESTYLES[idx % 14]
             plot.detection_identification_curve(
-                dev_score, rank=self._rank, logx=self._semilogx,
+                input_scores[0], rank=self._rank, logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('development', dev_file, idx)
+                label=self._label('development', input_names[0], idx)
             )
             linestyle = '--'
             if self._split:
@@ -95,37 +93,36 @@ class Dic(measure_figure.PlotBase):
                 linestyle = measure_figure.LINESTYLES[idx % 14]
 
             plot.detection_identification_curve(
-                eval_score, rank=self._rank, logx=self._semilogx,
+                input_scores[1], rank=self._rank, logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('eval', eval_file, idx)
+                label=self._label('eval', input_names[1], idx)
             )
         else:
-            rank = plot.detection_identification_curve(
-                dev_score, rank=self._rank, logx=self._semilogx,
+            plot.detection_identification_curve(
+                input_scores[0], rank=self._rank, logx=self._semilogx,
                 color=self._colors[idx], linestyle=measure_figure.LINESTYLES[idx % 14],
-                label=self._label('development', dev_file, idx)
+                label=self._label('development', input_names[0], idx)
             )
 
 class Metrics(measure_figure.Metrics):
     ''' Compute metrics from score files'''
     def init_process(self):
         if self._criter == 'rr':
-            self._thres = [None] * self.n_sytem if self._thres is None else \
+            self._thres = [None] * self.n_systems if self._thres is None else \
                     self._thres
 
-    def compute(self, idx, dev_score, dev_file=None,
-                eval_score=None, eval_file=None):
+    def compute(self, idx, input_scores, input_names):
         ''' Compute metrics for the given criteria'''
         title = self._titles[idx] if self._titles is not None else None
-        headers = ['' or title, 'Development %s' % dev_file]
-        if self._eval and eval_score is not None:
-            headers.append('eval % s' % eval_file)
+        headers = ['' or title, 'Development %s' % input_names[0]]
+        if self._eval and input_scores[1] is not None:
+            headers.append('eval %s' % input_names[1])
         if self._criter == 'rr':
-            rr = bob.measure.recognition_rate(dev_score, self._thres[idx])
+            rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])
             dev_rr = "%.1f%%" % (100 * rr)
             raws = [['RR', dev_rr]]
-            if self._eval and eval_score is not None:
-                rr = bob.measure.recognition_rate(eval_score, self._thres[idx])
+            if self._eval and input_scores[1] is not None:
+                rr = bob.measure.recognition_rate(input_scores[1], self._thres[idx])
                 eval_rr = "%.1f%%" % (100 * rr)
                 raws[0].append(eval_rr)
             click.echo(
@@ -136,12 +133,12 @@ class Metrics(measure_figure.Metrics):
                 cost = 0.99 if 'cost' not in self._ctx.meta else\
                         self._ctx.meta['cost']
             threshold = bob.measure.min_weighted_error_rate_threshold(
-                dev_score[0], dev_score[1], cost
+                input_scores[0][0], input_scores[0][1], cost
             ) if self._thres is None else self._thres[idx]
             if self._thres is None:
                 click.echo(
                     "[minDCF - Cost:%f] Threshold on Development set `%s`: %e"\
-                    % (cost, dev_file, threshold),
+                    % (cost, input_names[0], threshold),
                     file=self.log_file
                 )
             else:
@@ -151,7 +148,7 @@ class Metrics(measure_figure.Metrics):
                 )
             # apply threshold to development set
             far, frr = bob.measure.farfrr(
-                dev_score[0], dev_score[1], threshold
+                input_scores[0][0], input_scores[0][1], threshold
             )
             dev_far_str = "%.1f%%" % (100 * far)
             dev_frr_str = "%.1f%%" % (100 * frr)
@@ -159,10 +156,10 @@ class Metrics(measure_figure.Metrics):
             raws = [['FAR', dev_far_str],
                     ['FRR', dev_frr_str],
                     ['minDCF', dev_mindcf_str]]
-            if self._eval and eval_score is not None:
+            if self._eval and input_scores[1] is not None:
                 # apply threshold to development set
                 far, frr = bob.measure.farfrr(
-                    eval_score[0], eval_score[1], threshold
+                    input_scores[1][0], input_scores[1][1], threshold
                 )
                 eval_far_str = "%.1f%%" % (100 * far)
                 eval_frr_str = "%.1f%%" % (100 * frr)
@@ -174,19 +171,20 @@ class Metrics(measure_figure.Metrics):
                 tabulate(raws, headers, self._tablefmt), file=self.log_file
             )
         elif self._criter == 'cllr':
-            cllr = bob.measure.calibration.cllr(dev_score[0], dev_score[1])
+            cllr = bob.measure.calibration.cllr(input_scores[0][0],
+                                                input_scores[0][1])
             min_cllr = bob.measure.calibration.min_cllr(
-                dev_score[0], dev_score[1]
+                input_scores[0][0], input_scores[0][1]
             )
             dev_cllr_str = "%.1f%%" % cllr
             dev_min_cllr_str = "%.1f%%" % min_cllr
             raws = [['Cllr', dev_cllr_str],
                     ['minCllr', dev_min_cllr_str]]
-            if self._eval and eval_score is not None:
-                cllr = bob.measure.calibration.cllr(eval_score[0],
-                                                    eval_score[1])
+            if self._eval and input_scores[1] is not None:
+                cllr = bob.measure.calibration.cllr(input_scores[1][0],
+                                                    input_scores[1][1])
                 min_cllr = bob.measure.calibration.min_cllr(
-                    eval_score[0], eval_score[1]
+                    input_scores[1][0], input_scores[1][1]
                 )
                 eval_cllr_str = "%.1f%%" % cllr
                 eval_min_cllr_str = "%.1f%%" % min_cllr
@@ -196,9 +194,7 @@ class Metrics(measure_figure.Metrics):
                     tabulate(raws, headers, self._tablefmt), file=self.log_file
                 )
         else:
-            super(Metrics, self).compute(
-                idx, dev_score, dev_file, eval_score, eval_file
-            )
+            super(Metrics, self).compute(idx, input_scores, input_names)
 
 class Hist(measure_figure.Hist):
     ''' Histograms for biometric scores '''
-- 
GitLab