diff --git a/bob/bio/base/script/commands.py b/bob/bio/base/script/commands.py
index f712823e41fe0de9fa3c92ef4212397a5815fa86..78bb24500f7ee3ee205699c960ea55f869715d34 100644
--- a/bob/bio/base/script/commands.py
+++ b/bob/bio/base/script/commands.py
@@ -71,14 +71,14 @@ def metrics(ctx, scores, evaluation, **kargs):
 @common_options.scores_argument(nargs=-1)
 @common_options.title_option()
 @common_options.legends_option()
-@common_options.legend_loc_option(dflt='lower-right')
+@common_options.legend_loc_option(dflt=None)
 @common_options.no_legend_option()
 @common_options.sep_dev_eval_option()
 @common_options.output_plot_file_option(default_out='roc.pdf')
 @common_options.eval_option()
 @common_options.points_curve_option()
 @common_options.semilogx_option(True)
-@common_options.axes_val_option(dflt='1e-4,1,1e-4,1')
+@common_options.axes_val_option()
 @common_options.x_rotation_option()
 @common_options.lines_at_option()
 @common_options.x_label_option()
diff --git a/bob/bio/base/script/figure.py b/bob/bio/base/script/figure.py
index ec95d88e34d24f33c18625f292c5eb2ba1b3dbe1..c41da2bdbd2754fe74bc40160988dc1bfc04d29f 100644
--- a/bob/bio/base/script/figure.py
+++ b/bob/bio/base/script/figure.py
@@ -8,28 +8,29 @@ import bob.measure
 from bob.measure import plot
 from tabulate import tabulate
 
+
 class Roc(measure_figure.Roc):
     def __init__(self, ctx, scores, evaluation, func_load):
         super(Roc, self).__init__(ctx, scores, evaluation, func_load)
-        self._x_label = 'False Match Rate' if 'x_label' not in ctx.meta  or \
-        ctx.meta['x_label'] is None else ctx.meta['x_label']
-        self._y_label = '1 - False Non Match Rate' if 'y_label' not in \
-        ctx.meta or ctx.meta['y_label'] is None else ctx.meta['y_label']
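+        # Default axis labels when none are given; the y-axis label depends
+        # on whether the ROC is drawn with a logarithmic x-axis (semilogx).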
+        self._x_label = ctx.meta.get('x_label') or 'False Match Rate'
+        default_y_label = '1 - False Non Match Rate' if self._semilogx \
+            else 'False Non Match Rate'
+        self._y_label = ctx.meta.get('y_label') or default_y_label
+
 
 class Det(measure_figure.Det):
     def __init__(self, ctx, scores, evaluation, func_load):
         super(Det, self).__init__(ctx, scores, evaluation, func_load)
-        self._x_label = 'False Match Rate' if 'x_label' not in ctx.meta or \
-        ctx.meta['x_label'] is None else ctx.meta['x_label']
-        self._y_label = 'False Non Match Rate' if 'y_label' not in ctx.meta or\
-        ctx.meta['y_label'] is None else ctx.meta['y_label']
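+        # Default DET axis labels, expressed in percent.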
+        self._x_label = ctx.meta.get('x_label') or 'False Match Rate (%)'
+        self._y_label = ctx.meta.get('y_label') or 'False Non Match Rate (%)'
+
 
 class Cmc(measure_figure.PlotBase):
     ''' Handles the plotting of Cmc '''
+
     def __init__(self, ctx, scores, evaluation, func_load):
         super(Cmc, self).__init__(ctx, scores, evaluation, func_load)
-        self._semilogx = True if 'semilogx' not in ctx.meta else\
-        ctx.meta['semilogx']
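+        # Logarithmic rank axis unless explicitly disabled.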
+        self._semilogx = ctx.meta.get('semilogx', True)
         self._title = self._title or 'CMC'
         self._x_label = self._x_label or 'Rank'
         self._y_label = self._y_label or 'Identification rate'
@@ -66,19 +67,18 @@ class Cmc(measure_figure.PlotBase):
             )
             self._max_R = max(rank, self._max_R)
 
+
 class Dir(measure_figure.PlotBase):
     ''' Handles the plotting of DIR curve'''
+
     def __init__(self, ctx, scores, evaluation, func_load):
         super(Dir, self).__init__(ctx, scores, evaluation, func_load)
-        self._semilogx = True if 'semilogx' not in ctx.meta else\
-                ctx.meta['semilogx']
-        self._rank = 1 if 'rank' not in ctx.meta else ctx.meta['rank']
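+        # Logarithmic FAR axis and rank 1 by default.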
+        self._semilogx = ctx.meta.get('semilogx', True)
+        self._rank = ctx.meta.get('rank', 1)
         self._title = self._title or 'DIR curve'
-        self._x_label = self._title or 'FAR'
-        self._y_label = self._title or 'DIR'
+        self._x_label = self._x_label or 'FAR'
+        self._y_label = self._y_label or 'DIR'
 
-
-
     def compute(self, idx, input_scores, input_names):
         ''' Plot DIR for dev and eval data using
         :py:func:`bob.measure.plot.detection_identification_curve`'''
@@ -110,12 +110,14 @@ class Dir(measure_figure.PlotBase):
         if self._min_dig is not None:
             mpl.xlim(xmin=math.pow(10, self._min_dig))
 
+
 class Metrics(measure_figure.Metrics):
     ''' Compute metrics from score files'''
+
     def init_process(self):
         if self._criterion == 'rr':
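+            # Keep one threshold entry (possibly None) per system.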
             self._thres = [None] * self.n_systems if self._thres is None else \
-                    self._thres
+                self._thres
 
     def compute(self, idx, input_scores, input_names):
         ''' Compute metrics for the given criteria'''
@@ -124,11 +126,13 @@ class Metrics(measure_figure.Metrics):
         if self._eval and input_scores[1] is not None:
             headers.append('eval % s' % input_names[1])
         if self._criterion == 'rr':
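+            # Recognition rate on the development set, as a percentage.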
-            rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])
+            rr = bob.measure.recognition_rate(
+                input_scores[0], self._thres[idx])
             dev_rr = "%.1f%%" % (100 * rr)
             raws = [['RR', dev_rr]]
             if self._eval and input_scores[1] is not None:
-                rr = bob.measure.recognition_rate(input_scores[1], self._thres[idx])
+                rr = bob.measure.recognition_rate(
+                    input_scores[1], self._thres[idx])
                 eval_rr = "%.1f%%" % (100 * rr)
                 raws[0].append(eval_rr)
             click.echo(
@@ -136,20 +140,19 @@ class Metrics(measure_figure.Metrics):
             )
         elif self._criterion == 'mindcf':
-            if 'cost' in self._ctx.meta:
-                cost = 0.99 if 'cost' not in self._ctx.meta else\
-                        self._ctx.meta['cost']
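+            # minDCF cost weight; defaults to 0.99 when not supplied.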
+            cost = self._ctx.meta.get('cost', 0.99)
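+            # Use the user-supplied threshold when given; otherwise compute
+            # the min. weighted error rate threshold on development scores.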
             threshold = bob.measure.min_weighted_error_rate_threshold(
                 input_scores[0][0], input_scores[0][1], cost
             ) if self._thres is None else self._thres[idx]
             if self._thres is None:
                 click.echo(
-                    "[minDCF - Cost:%f] Threshold on Development set `%s`: %e"\
+                    "[minDCF - Cost:%f] Threshold on Development set `%s`: %e"
                     % (cost, input_names[0], threshold),
                     file=self.log_file
                 )
             else:
                 click.echo(
-                    "[minDCF] User defined Threshold: %e" %  threshold,
+                    "[minDCF] User defined Threshold: %e" % threshold,
                     file=self.log_file
                 )
             # apply threshold to development set
@@ -158,7 +161,8 @@ class Metrics(measure_figure.Metrics):
             )
             dev_far_str = "%.1f%%" % (100 * far)
             dev_frr_str = "%.1f%%" % (100 * frr)
-            dev_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.)
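+            # minDCF = cost * FAR + (1 - cost) * FRR, shown as a percentage.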
+            dev_mindcf_str = "%.1f%%" % (
+                (cost * far + (1 - cost) * frr) * 100.)
             raws = [['FAR', dev_far_str],
                     ['FRR', dev_frr_str],
                     ['minDCF', dev_mindcf_str]]
@@ -169,7 +173,8 @@ class Metrics(measure_figure.Metrics):
                 )
                 eval_far_str = "%.1f%%" % (100 * far)
                 eval_frr_str = "%.1f%%" % (100 * frr)
-                eval_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.)
+                eval_mindcf_str = "%.1f%%" % (
+                    (cost * far + (1 - cost) * frr) * 100.)
                 raws[0].append(eval_far_str)
                 raws[1].append(eval_frr_str)
                 raws[2].append(eval_mindcf_str)
@@ -202,6 +207,7 @@ class Metrics(measure_figure.Metrics):
         else:
             super(Metrics, self).compute(idx, input_scores, input_names)
 
+
 class Hist(measure_figure.Hist):
     ''' Histograms for biometric scores '''