diff --git a/bob/bio/base/script/commands.py b/bob/bio/base/script/commands.py
index f97833e309d5e88048eec804ed77bee7b9176185..469c6d9ee81249dd703e54913e784ece723aaeef 100644
--- a/bob/bio/base/script/commands.py
+++ b/bob/bio/base/script/commands.py
@@ -14,21 +14,23 @@ FUNC_CMC = lambda x: load.load_files(x, load.cmc)
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.table_option()
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.open_file_mode_option()
 @common_options.output_plot_metric_option()
 @common_options.criterion_option(['eer', 'hter', 'far', 'mindcf', 'cllr', 'rr'])
 @common_options.cost_option()
 @common_options.thresholds_option()
 @common_options.far_option()
+@common_options.titles_option()
 @verbosity_option()
 @click.pass_context
-def metrics(ctx, scores, test, **kargs):
+def metrics(ctx, scores, evaluation, **kargs):
     """Prints a single output line that contains all info for a given
     criterion (eer or hter).
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case. Files must be 4- or 5- columns format, see
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
@@ -40,167 +42,145 @@ def metrics(ctx, scores, test, **kargs):
     Examples:
         $ bob bio metrics dev-scores
 
-        $ bob bio metrics --test -l results.txt dev-scores1 test-scores1
+        $ bob bio metrics --no-evaluation dev-scores1 dev-scores2
+
+        $ bob bio metrics -l results.txt dev-scores1 eval-scores1
 
-        $ bob bio metrics --test {dev,test}-scores1 {dev,test}-scores2
+        $ bob bio metrics {dev,eval}-scores1 {dev,eval}-scores2
     """
     if 'criter' in ctx.meta and ctx.meta['criter'] == 'rr':
-        process = bio_figure.Metrics(ctx, scores, test, FUNC_CMC)
+        process = bio_figure.Metrics(ctx, scores, evaluation, FUNC_CMC)
     else:
-        process = bio_figure.Metrics(ctx, scores, test, FUNC_SPLIT)
+        process = bio_figure.Metrics(ctx, scores, evaluation, FUNC_SPLIT)
     process.run()
 
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.titles_option()
-@common_options.sep_dev_test_option()
+@common_options.sep_dev_eval_option()
 @common_options.output_plot_file_option(default_out='roc.pdf')
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.points_curve_option()
 @common_options.semilogx_option(True)
 @common_options.axes_val_option(dflt=[1e-4, 1, 1e-4, 1])
 @common_options.axis_fontsize_option()
 @common_options.x_rotation_option()
 @common_options.fmr_line_at_option()
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
-def roc(ctx, scores, test, **kargs):
+def roc(ctx, scores, evaluation, **kargs):
     """Plot ROC (receiver operating characteristic) curve:
     The plot will represent the false match rate on the horizontal axis and the
     false non match rate on the vertical axis.  The values for the axis will be
     computed using :py:func:`bob.measure.roc`.
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case.Files must be 4- or 5- columns format, see
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
 
-
     Examples:
         $ bob bio roc dev-scores
 
-        $ bob bio roc --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
+        $ bob bio roc dev-scores1 eval-scores1 dev-scores2
+        eval-scores2
 
-        $ bob bio roc --test -o my_roc.pdf dev-scores1 test-scores1
+        $ bob bio roc -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = measure_figure.Roc(ctx, scores, test, FUNC_SPLIT)
+    process = measure_figure.Roc(ctx, scores, evaluation, FUNC_SPLIT)
     process.run()
 
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.output_plot_file_option(default_out='det.pdf')
 @common_options.titles_option()
-@common_options.sep_dev_test_option()
-@common_options.test_option()
+@common_options.sep_dev_eval_option()
+@common_options.eval_option()
 @common_options.axis_fontsize_option(dflt=6)
 @common_options.axes_val_option(dflt=[0.01, 95, 0.01, 95])
 @common_options.x_rotation_option(dflt=45)
 @common_options.points_curve_option()
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
-def det(ctx, scores, test, **kargs):
+def det(ctx, scores, evaluation, **kargs):
     """Plot DET (detection error trade-off) curve:
     modified ROC curve which plots error rates on both axes
     (false positives on the x-axis and false negatives on the y-axis)
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case. Files must be 4- or 5- columns format, see
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
 
-
     Examples:
         $ bob bio det dev-scores
 
-        $ bob bio det --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
+        $ bob bio det dev-scores1 eval-scores1 dev-scores2
+        eval-scores2
 
-        $ bob bio det --test -o my_det.pdf dev-scores1 test-scores1
+        $ bob bio det -o my_det.pdf dev-scores1 eval-scores1
     """
-    process = measure_figure.Det(ctx, scores, test, FUNC_SPLIT)
+    process = measure_figure.Det(ctx, scores, evaluation, FUNC_SPLIT)
     process.run()
 
 @click.command()
-@common_options.scores_argument(test_mandatory=True, nargs=-1)
+@common_options.scores_argument(eval_mandatory=True, nargs=-1)
 @common_options.output_plot_file_option(default_out='epc.pdf')
 @common_options.titles_option()
 @common_options.points_curve_option()
 @common_options.axis_fontsize_option()
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
 def epc(ctx, scores, **kargs):
     """Plot EPC (expected performance curve):
-    plots the error rate on the test set depending on a threshold selected
+    plots the error rate on the eval set depending on a threshold selected
     a-priori on the development set and accounts for varying relative cost
     in [0; 1] of FPR and FNR when calculating the threshold.
 
-    You need provide one or more development score and test file(s)
+    You need provide one or more development score and eval file(s)
     for each experiment. Files must be 4- or 5- columns format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
 
     Examples:
-        $ bob bio epc dev-scores test-scores
+        $ bob bio epc dev-scores eval-scores
 
-        $ bob bio epc -o my_epc.pdf dev-scores1 test-scores1
+        $ bob bio epc -o my_epc.pdf dev-scores1 eval-scores1
     """
     process = measure_figure.Epc(ctx, scores, True, FUNC_SPLIT)
     process.run()
 
-@click.command()
-@common_options.scores_argument(nargs=-1)
-@common_options.output_plot_file_option(default_out='hist.pdf')
-@common_options.test_option()
-@common_options.n_bins_option()
-@common_options.criterion_option()
-@common_options.axis_fontsize_option()
-@common_options.thresholds_option()
-@verbosity_option()
-@click.pass_context
-def hist(ctx, scores, test, **kwargs):
-    """ Plots histograms of positive and negatives along with threshold
-    criterion.
-
-    You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case.
-
-    Examples:
-        $ bob bio hist dev-scores
-
-        $ bob bio hist --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
-
-        $ bob bio hist --test --criter hter dev-scores1 test-scores1
-    """
-    process = measure_figure.Hist(ctx, scores, test, FUNC_SPLIT)
-    process.run()
-
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.titles_option()
-@common_options.sep_dev_test_option()
+@common_options.sep_dev_eval_option()
 @common_options.output_plot_file_option(default_out='cmc.pdf')
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.semilogx_option(True)
 @common_options.axes_val_option(dflt=None)
 @common_options.axis_fontsize_option()
 @common_options.x_rotation_option()
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
-def cmc(ctx, scores, test, **kargs):
+def cmc(ctx, scores, evaluation, **kargs):
     """Plot CMC (cumulative match characteristic curve):
-    graphical presentation of results of an identification task test,
+    graphical presentation of results of an identification task,
     plotting rank values on the x-axis and the probability of correct identification
     at or below that rank on the y-axis. The values for the axis will be
     computed using :py:func:`bob.measure.cmc`.
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case.Files must be 4- or 5- columns format, see
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
 
@@ -208,28 +188,29 @@ def cmc(ctx, scores, test, **kargs):
     Examples:
         $ bob bio cmc dev-scores
 
-        $ bob bio cmc --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
+        $ bob bio cmc dev-scores1 eval-scores1 dev-scores2
+        eval-scores2
 
-        $ bob bio cmc --test -o my_roc.pdf dev-scores1 test-scores1
+        $ bob bio cmc -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Cmc(ctx, scores, test, FUNC_CMC)
+    process = bio_figure.Cmc(ctx, scores, evaluation, FUNC_CMC)
     process.run()
 
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.titles_option()
-@common_options.sep_dev_test_option()
+@common_options.sep_dev_eval_option()
 @common_options.output_plot_file_option(default_out='cmc.pdf')
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.semilogx_option(True)
 @common_options.axes_val_option(dflt=None)
 @common_options.axis_fontsize_option()
 @common_options.x_rotation_option()
 @common_options.rank_option()
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
-def dic(ctx, scores, test, **kargs):
+def dic(ctx, scores, evaluation, **kargs):
     """Plots the Detection & Identification curve over the FAR
 
     This curve is designed to be used in an open set identification protocol, and
@@ -246,110 +227,99 @@ def dic(ctx, scores, test, **kargs):
     .. [LiJain2005] **Stan Li and Anil K. Jain**, *Handbook of Face Recognition*, Springer, 2005
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case.Files must be 4- or 5- columns format, see
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
     :py:func:`bob.bio.base.score.load.four_column` and
     :py:func:`bob.bio.base.score.load.five_column` for details.
 
-
     Examples:
         $ bob bio dic dev-scores
 
-        $ bob bio dic --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
+        $ bob bio dic dev-scores1 eval-scores1 dev-scores2
+        eval-scores2
 
-        $ bob bio dic --test -o my_roc.pdf dev-scores1 test-scores1
+        $ bob bio dic -o my_roc.pdf dev-scores1 eval-scores1
     """
-    process = bio_figure.Dic(ctx, scores, test, FUNC_CMC)
+    process = bio_figure.Dic(ctx, scores, evaluation, FUNC_CMC)
     process.run()
 
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.output_plot_file_option(default_out='hist.pdf')
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.n_bins_option()
 @common_options.criterion_option()
 @common_options.axis_fontsize_option()
 @common_options.thresholds_option()
+@common_options.const_layout_option()
+@common_options.show_dev_option()
+@common_options.print_filenames_option()
+@common_options.titles_option()
 @verbosity_option()
 @click.pass_context
-def hist(ctx, scores, test, **kwargs):
+def hist(ctx, scores, evaluation, **kwargs):
     """ Plots histograms of positive and negatives along with threshold
     criterion.
 
     You need provide one or more development score file(s) for each experiment.
-    You can also provide test files along with dev files but the flag `--test`
-    is required in that case.
+    You can also provide eval files along with dev files. If only dev-scores
+    are used, the flag `--no-evaluation` must be used. Files must be in 4- or
+    5-column format, see
+    :py:func:`bob.bio.base.score.load.four_column` and
+    :py:func:`bob.bio.base.score.load.five_column` for details.
+
+    By default, when eval-scores are given, only the eval-scores histograms
+    are displayed, with the threshold line computed from dev-scores. To
+    display the dev-scores distributions as well, use the ``--show-dev``
+    option.
 
     Examples:
         $ bob bio hist dev-scores
 
-        $ bob bio hist --test dev-scores1 test-scores1 dev-scores2
-        test-scores2
+        $ bob bio hist dev-scores1 eval-scores1 dev-scores2
+        eval-scores2
 
-        $ bob bio hist --test --criter hter dev-scores1 test-scores1
+        $ bob bio hist --criter hter --show-dev dev-scores1 eval-scores1
     """
-    process = measure_figure.Hist(ctx, scores, test, FUNC_SPLIT)
+    process = bio_figure.Hist(ctx, scores, evaluation, FUNC_SPLIT)
     process.run()
 
 @click.command()
 @common_options.scores_argument(nargs=-1)
 @common_options.titles_option()
-@common_options.sep_dev_test_option()
+@common_options.sep_dev_eval_option()
 @common_options.table_option()
-@common_options.test_option()
+@common_options.eval_option()
 @common_options.output_plot_metric_option()
 @common_options.output_plot_file_option(default_out='eval_plots.pdf')
 @common_options.points_curve_option()
 @common_options.fmr_line_at_option()
 @common_options.cost_option()
 @common_options.rank_option()
-@common_options.cmc_option()
-@common_options.bool_option(
-    'metrics', 'M', 'If set, computes table of threshold with EER, HTER (and '
-    'FAR, if ``--far-value`` provided.)'
-)
 @common_options.far_option()
-@common_options.bool_option(
-    'cllr', 'x', 'If given, Cllr and minCllr will be computed.'
-)
-@common_options.bool_option(
-    'mindcf', 'm', 'If given, minDCF will be computed.'
-)
-@common_options.bool_option(
-    'rr', 'r', 'If given, the Recognition Rate will be computed.'
-)
-@common_options.bool_option(
-    'hist', 'H', 'If given, score histograms will be generated.'
-)
-@common_options.bool_option(
-    'roc', 'R', 'If given, ROC will be generated.'
-)
-@common_options.bool_option(
-    'det', 'D', 'If given, DET will be generated.'
-)
-@common_options.bool_option(
-    'epc', 'E', 'If given, EPC will be generated.'
-)
-@common_options.bool_option(
-    'dic', 'O', 'If given, DIC will be generated.'
-)
+@common_options.const_layout_option()
 @verbosity_option()
 @click.pass_context
-def evaluate(ctx, scores, test, **kwargs):
+def evaluate(ctx, scores, evaluation, **kwargs):
     '''Evalutes score file, runs error analysis on score sets and plot curves.
 
     \b
     1. Computes the threshold using either EER, min. HTER or FAR value
        criteria on development set scores
-    2. Applies the above threshold on test set scores to compute the HTER, if a
-       test-score set is provided
-    3. Computes Cllr and minCllr, minDCF, and recognition rate (if cmc scores
-       provided)
+    2. Applies the above threshold on eval set scores to compute the HTER, if
+       an eval-score set is provided
+    3. Computes Cllr, minCllr, and minDCF
     3. Reports error metrics in the console or in a log file
-    4. Plots ROC, EPC, DET, score distributions, CMC (if cmc) and DIC (if cmc)
+    4. Plots ROC, EPC, DET, and score distribution
        curves to a multi-page PDF file
 
+    You need to provide one or more development score file(s) for each
+    experiment. You can also provide eval files along with dev files. If only
+    dev-scores are used, the flag `--no-evaluation` must be used. Files must
+    be in 4- or 5-column format, see
+    :py:func:`bob.bio.base.score.load.four_column` and
+    :py:func:`bob.bio.base.score.load.five_column` for details.
 
     You need to provide 2 score files for each biometric system in this order:
 
@@ -360,80 +330,60 @@ def evaluate(ctx, scores, test, **kwargs):
     Examples:
         $ bob bio evaluate dev-scores
 
-        $ bob bio evaluate -t -l metrics.txt -o my_plots.pdf dev-scores test-scores
+        $ bob bio evaluate -l metrics.txt -o my_plots.pdf dev-scores eval-scores
+
+        $ bob bio evaluate -o my_plots.pdf /path/to/syst-{1,2,3}/{dev,eval}-scores
     '''
-    log_str=''
+    log_str = ''
     if 'log' in ctx.meta and ctx.meta['log'] is not None:
         log_str = ' %s' % ctx.meta['log']
 
-    if ctx.meta['metrics']:
-        # first time erase if existing file
-        ctx.meta['open_mode'] = 'w'
-        click.echo("Computing metrics with EER%s..." % log_str)
-        ctx.meta['criter'] = 'eer'  # no criterion passed to evaluate
-        ctx.invoke(metrics, scores=scores, test=test)
-        # other times, appends the content
-        ctx.meta['open_mode'] = 'a'
-        click.echo("Computing metrics with HTER%s..." % log_str)
-        ctx.meta['criter'] = 'hter'  # no criterion passed in evaluate
-        ctx.invoke(metrics, scores=scores, test=test)
-        if 'far_value' in ctx.meta and ctx.meta['far_value'] is not None:
-            click.echo("Computing metrics with FAR=%f%s..." %\
-                       (ctx.meta['far_value'], log_str))
-            ctx.meta['criter'] = 'far'  # no criterio % n passed in evaluate
-            ctx.invoke(metrics, scores=scores, test=test)
-
-    if ctx.meta['mindcf']:
-        click.echo("Computing minDCF%s..." % log_str)
-        ctx.meta['criter'] = 'mindcf'  # no criterion passed in evaluate
-        ctx.invoke(metrics, scores=scores, test=test)
-
-    if ctx.meta['cllr']:
-        click.echo("Computing  Cllr and minCllr%s..." % log_str)
-        ctx.meta['criter'] = 'cllr'  # no criterion passed in evaluate
-        ctx.invoke(metrics, scores=scores, test=test)
-
-    if ctx.meta['rr']:
-        click.echo("Computing  recognition rate%s..." % log_str)
-        ctx.meta['criter'] = 'rr'  # no criterion passed in evaluate
-        ctx.invoke(metrics, scores=scores, test=test)
+    # first time erase if existing file
+    ctx.meta['open_mode'] = 'w'
+    click.echo("Computing metrics with EER%s..." % log_str)
+    ctx.meta['criter'] = 'eer'  # no criterion passed to evaluate
+    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
+    # other times, appends the content
+    ctx.meta['open_mode'] = 'a'
+    click.echo("Computing metrics with HTER%s..." % log_str)
+    ctx.meta['criter'] = 'hter'  # no criterion passed in evaluate
+    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
+    if 'far_value' in ctx.meta and ctx.meta['far_value'] is not None:
+        click.echo("Computing metrics with FAR=%f%s..." %\
+        (ctx.meta['far_value'], log_str))
+        ctx.meta['criter'] = 'far'  # no criterio % n passed in evaluate
+        ctx.invoke(metrics, scores=scores, evaluation=evaluation)
+
+    click.echo("Computing minDCF%s..." % log_str)
+    ctx.meta['criter'] = 'mindcf'  # no criterion passed in evaluate
+    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
+
+    click.echo("Computing  Cllr and minCllr%s..." % log_str)
+    ctx.meta['criter'] = 'cllr'  # no criterion passed in evaluate
+    ctx.invoke(metrics, scores=scores, evaluation=evaluation)
 
     # avoid closing pdf file before all figures are plotted
     ctx.meta['closef'] = False
 
-    if test:
-        click.echo("Starting evaluate with dev and test scores...")
+    if evaluation:
+        click.echo("Starting evaluate with dev and eval scores...")
     else:
         click.echo("Starting evaluate with dev scores only...")
 
-    if ctx.meta['roc']:
-        click.echo("Generating ROC in %s..." % ctx.meta['output'])
-        ctx.forward(roc) # use class defaults plot settings
+    click.echo("Generating ROC in %s..." % ctx.meta['output'])
+    ctx.forward(roc) # use class defaults plot settings
 
-    if ctx.meta['det']:
-        click.echo("Generating DET in %s..." % ctx.meta['output'])
-        ctx.forward(det) # use class defaults plot settings
+    click.echo("Generating DET in %s..." % ctx.meta['output'])
+    ctx.forward(det) # use class defaults plot settings
 
-    if test and ctx.meta['epc']:
+    if evaluation:
         click.echo("Generating EPC in %s..." % ctx.meta['output'])
         ctx.forward(epc) # use class defaults plot settings
 
-    if ctx.meta['cmc']:
-        click.echo("Generating CMC in %s..." % ctx.meta['output'])
-        ctx.forward(cmc) # use class defaults plot settings
-
-    if ctx.meta['dic']:
-        click.echo("Generating DIC in %s..." % ctx.meta['output'])
-        ctx.forward(dic) # use class defaults plot settings
-
     # the last one closes the file
-    if ctx.meta['hist']:
-        click.echo("Generating score histograms in %s..." % ctx.meta['output'])
-        ctx.meta['criter'] = 'hter'  # no criterion passed in evaluate
-        ctx.forward(hist)
     ctx.meta['closef'] = True
-    #just to make sure pdf is closed
-    if 'PdfPages' in ctx.meta:
-        ctx.meta['PdfPages'].close()
+    click.echo("Generating score histograms in %s..." % ctx.meta['output'])
+    ctx.meta['criter'] = 'hter'  # no criterion passed in evaluate
+    ctx.forward(hist)
 
     click.echo("Evaluate successfully completed!")
diff --git a/bob/bio/base/script/figure.py b/bob/bio/base/script/figure.py
index 30d5fdef0804f38a98ef5a562d8d658c168f5952..950586d31c187fe24f6638895b68fb68a336ba1b 100644
--- a/bob/bio/base/script/figure.py
+++ b/bob/bio/base/script/figure.py
@@ -16,8 +16,8 @@ class Cmc(measure_figure.PlotBase):
     _semilogx: :obj:`bool`
         If true (default), X-axis will be semilog10
     '''
-    def __init__(self, ctx, scores, test, func_load):
-        super(Cmc, self).__init__(ctx, scores, test, func_load)
+    def __init__(self, ctx, scores, evaluation, func_load):
+        super(Cmc, self).__init__(ctx, scores, evaluation, func_load)
         self._semilogx = True if 'semilogx' not in ctx.meta else\
         ctx.meta['semilogx']
         self._title = 'CMC'
@@ -26,11 +26,11 @@ class Cmc(measure_figure.PlotBase):
         self._max_R = 0
 
     def compute(self, idx, dev_score, dev_file=None,
-                test_score=None, test_file=None):
+                eval_score=None, eval_file=None):
         ''' Plot CMC for dev and eval data using
         :py:func:`bob.measure.plot.cmc`'''
         mpl.figure(1)
-        if self._test:
+        if self._eval:
             linestyle = '-' if not self._split else measure_figure.LINESTYLES[idx % 14]
             rank = plot.cmc(
                 dev_score, logx=self._semilogx,
@@ -44,9 +44,9 @@ class Cmc(measure_figure.PlotBase):
                 linestyle = measure_figure.LINESTYLES[idx % 14]
 
             rank = plot.cmc(
-                test_score, logx=self._semilogx,
+                eval_score, logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('test', test_file, idx)
+                label=self._label('eval', eval_file, idx)
             )
             self._max_R = max(rank, self._max_R)
         else:
@@ -74,8 +74,8 @@ class Dic(measure_figure.PlotBase):
     _rank: :obj:`int`
         Rank to be used to plot DIC (default: 1)
     '''
-    def __init__(self, ctx, scores, test, func_load):
-        super(Dic, self).__init__(ctx, scores, test, func_load)
+    def __init__(self, ctx, scores, evaluation, func_load):
+        super(Dic, self).__init__(ctx, scores, evaluation, func_load)
         self._semilogx = True if 'semilogx' not in ctx.meta else\
                 ctx.meta['semilogx']
         self._rank = 1 if 'rank' not in ctx.meta else ctx.meta['rank']
@@ -84,11 +84,11 @@ class Dic(measure_figure.PlotBase):
         self._y_label = 'DIR'
 
     def compute(self, idx, dev_score, dev_file=None,
-                test_score=None, test_file=None):
+                eval_score=None, eval_file=None):
         ''' Plot DIC for dev and eval data using
         :py:func:`bob.measure.plot.detection_identification_curve`'''
         mpl.figure(1)
-        if self._test:
+        if self._eval:
             linestyle = '-' if not self._split else measure_figure.LINESTYLES[idx % 14]
             plot.detection_identification_curve(
                 dev_score, rank=self._rank, logx=self._semilogx,
@@ -101,9 +101,9 @@ class Dic(measure_figure.PlotBase):
                 linestyle = measure_figure.LINESTYLES[idx % 14]
 
             plot.detection_identification_curve(
-                test_score, rank=self._rank, logx=self._semilogx,
+                eval_score, rank=self._rank, logx=self._semilogx,
                 color=self._colors[idx], linestyle=linestyle,
-                label=self._label('test', test_file, idx)
+                label=self._label('eval', eval_file, idx)
             )
         else:
             rank = plot.detection_identification_curve(
@@ -116,23 +116,24 @@ class Metrics(measure_figure.Metrics):
     ''' Compute metrics from score files'''
     def init_process(self):
         if self._criter == 'rr':
-            self._thres = [None] * len(self.dev_names) if self._thres is None else \
+            self._thres = [None] * self.n_sytem if self._thres is None else \
                     self._thres
 
     def compute(self, idx, dev_score, dev_file=None,
-                test_score=None, test_file=None):
+                eval_score=None, eval_file=None):
         ''' Compute metrics for the given criteria'''
-        headers = ['', 'Development %s' % dev_file]
-        if self._test and test_score is not None:
-            headers.append('Test % s' % test_file)
+        title = self._titles[idx] if self._titles is not None else None
+        headers = [title or '', 'Development %s' % dev_file]
+        if self._eval and eval_score is not None:
+            headers.append('Eval. %s' % eval_file)
         if self._criter == 'rr':
             rr = bob.measure.recognition_rate(dev_score, self._thres[idx])
-            dev_rr = "%.3f%%" % (100 * rr)
+            dev_rr = "%.1f%%" % (100 * rr)
             raws = [['RR', dev_rr]]
-            if self._test and test_score is not None:
-                rr = bob.measure.recognition_rate(test_score, self._thres[idx])
-                test_rr = "%.3f%%" % (100 * rr)
-                raws[0].append(test_rr)
+            if self._eval and eval_score is not None:
+                rr = bob.measure.recognition_rate(eval_score, self._thres[idx])
+                eval_rr = "%.1f%%" % (100 * rr)
+                raws[0].append(eval_rr)
             click.echo(
                 tabulate(raws, headers, self._tablefmt), file=self.log_file
             )
@@ -158,23 +159,23 @@ class Metrics(measure_figure.Metrics):
             far, frr = bob.measure.farfrr(
                 dev_score[0], dev_score[1], threshold
             )
-            dev_far_str = "%.3f%%" % (100 * far)
-            dev_frr_str = "%.3f%%" % (100 * frr)
-            dev_mindcf_str = "%.3f%%" % ((cost * far + (1 - cost) * frr) * 100.)
+            dev_far_str = "%.1f%%" % (100 * far)
+            dev_frr_str = "%.1f%%" % (100 * frr)
+            dev_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.)
             raws = [['FAR', dev_far_str],
                     ['FRR', dev_frr_str],
                     ['minDCF', dev_mindcf_str]]
-            if self._test and test_score is not None:
+            if self._eval and eval_score is not None:
                 # apply threshold to development set
                 far, frr = bob.measure.farfrr(
-                    test_score[0], test_score[1], threshold
+                    eval_score[0], eval_score[1], threshold
                 )
-                test_far_str = "%.3f%%" % (100 * far)
-                test_frr_str = "%.3f%%" % (100 * frr)
-                test_mindcf_str = "%.3f%%" % ((cost * far + (1 - cost) * frr) * 100.)
-                raws[0].append(test_far_str)
-                raws[1].append(test_frr_str)
-                raws[2].append(test_mindcf_str)
+                eval_far_str = "%.1f%%" % (100 * far)
+                eval_frr_str = "%.1f%%" % (100 * frr)
+                eval_mindcf_str = "%.1f%%" % ((cost * far + (1 - cost) * frr) * 100.)
+                raws[0].append(eval_far_str)
+                raws[1].append(eval_frr_str)
+                raws[2].append(eval_mindcf_str)
             click.echo(
                 tabulate(raws, headers, self._tablefmt), file=self.log_file
             )
@@ -183,24 +184,36 @@ class Metrics(measure_figure.Metrics):
             min_cllr = bob.measure.calibration.min_cllr(
                 dev_score[0], dev_score[1]
             )
-            dev_cllr_str = "%.3f%%" % cllr
-            dev_min_cllr_str = "%.3f%%" % min_cllr
+            dev_cllr_str = "%.1f%%" % cllr
+            dev_min_cllr_str = "%.1f%%" % min_cllr
             raws = [['Cllr', dev_cllr_str],
                     ['minCllr', dev_min_cllr_str]]
-            if self._test and test_score is not None:
-                cllr = bob.measure.calibration.cllr(test_score[0],
-                                                    test_score[1])
+            if self._eval and eval_score is not None:
+                cllr = bob.measure.calibration.cllr(eval_score[0],
+                                                    eval_score[1])
                 min_cllr = bob.measure.calibration.min_cllr(
-                    test_score[0], test_score[1]
+                    eval_score[0], eval_score[1]
                 )
-                test_cllr_str = "%.3f%%" % cllr
-                test_min_cllr_str = "%.3f%%" % min_cllr
-                raws[0].append(test_cllr_str)
-                raws[1].append(test_min_cllr_str)
+                eval_cllr_str = "%.1f%%" % cllr
+                eval_min_cllr_str = "%.1f%%" % min_cllr
+                raws[0].append(eval_cllr_str)
+                raws[1].append(eval_min_cllr_str)
                 click.echo(
                     tabulate(raws, headers, self._tablefmt), file=self.log_file
                 )
         else:
             super(Metrics, self).compute(
-                idx, dev_score, dev_file, test_score, test_file
+                idx, dev_score, dev_file, eval_score, eval_file
             )
+
+class Hist(measure_figure.Hist):
+    ''' Histograms for biometric scores '''
+
+    def _setup_hist(self, neg, pos):
+        self._title_base = 'Bio scores'
+        self._density_hist(
+            pos, label='Genuines', alpha=0.9, color='C2', **self._kwargs
+        )
+        self._density_hist(
+            neg, label='Zero-effort impostors', alpha=0.8, color='C0',
+            **self._kwargs
+        )
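
For reviewers who want to see what the new ``Hist._setup_hist`` renders without
running the full command, the plot is roughly equivalent to the standalone
matplotlib sketch below; this is an approximation with synthetic scores, since
the real class delegates binning, subplots and threshold lines to the
``bob.measure`` base class::

    # Rough standalone approximation of Hist._setup_hist (synthetic data).
    import numpy as np
    import matplotlib.pyplot as plt

    def plot_bio_hist(neg, pos, n_bins=30, threshold=None):
        '''Normalized genuine / zero-effort impostor score histograms.'''
        plt.hist(pos, bins=n_bins, density=True, alpha=0.9, color='C2',
                 label='Genuines')
        plt.hist(neg, bins=n_bins, density=True, alpha=0.8, color='C0',
                 label='Zero-effort impostors')
        if threshold is not None:
            plt.axvline(threshold, linestyle='--', color='C3', label='Threshold')
        plt.title('Bio scores')
        plt.legend()

    # synthetic example scores
    rng = np.random.RandomState(0)
    plot_bio_hist(rng.normal(-1., 1., 5000), rng.normal(2., 1., 500),
                  threshold=0.5)
    plt.show()
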
diff --git a/bob/bio/base/test/test_commands.py b/bob/bio/base/test/test_commands.py
index 5eb237bd1f200d6b4cdc6b4b4c2b24dd39808ec6..7c018f0a4a374d307cc188dcd82ccff0a4d8c19b 100644
--- a/bob/bio/base/test/test_commands.py
+++ b/bob/bio/base/test/test_commands.py
@@ -11,7 +11,7 @@ def test_metrics():
     dev1 = pkg_resources.resource_filename('bob.bio.base.test',
                                            'data/dev-4col.txt')
     runner = CliRunner()
-    result = runner.invoke(commands.metrics, [dev1])
+    result = runner.invoke(commands.metrics, ['--no-evaluation', dev1])
     with runner.isolated_filesystem():
         with open('tmp', 'w') as f:
             f.write(result.output)
@@ -24,7 +24,7 @@ def test_metrics():
                                             'data/test-5col.txt')
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['--test', dev1, test1, dev2, test2]
+            commands.metrics, [dev1, test1, dev2, test2]
         )
         with open('tmp', 'w') as f:
             f.write(result.output)
@@ -32,18 +32,19 @@ def test_metrics():
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp', '--test', dev1, test1, dev2, test2]
+            commands.metrics, ['-l', 'tmp', '-t', 'A,B',
+                               dev1, test1, dev2, test2]
         )
         assert result.exit_code == 0
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp', '--test', dev1, test2]
+            commands.metrics, ['-l', 'tmp', dev1, test2]
         )
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp', '-t', '-T', '0.1',
+            commands.metrics, ['-l', 'tmp', '-T', '0.1',
                                '--criter', 'mindcf', '--cost', 0.9,
                                dev1, test2]
         )
@@ -51,7 +52,7 @@ def test_metrics():
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp',
+            commands.metrics, ['--no-evaluation', '-l', 'tmp',
                                '--criter', 'mindcf', '--cost', 0.9,
                                dev1]
         )
@@ -59,27 +60,28 @@ def test_metrics():
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-t', '--criter', 'cllr', dev1, test2]
+            commands.metrics, ['--criter', 'cllr', dev1, test2]
         )
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp', '--criter', 'cllr', '--cost', 0.9,
-                               dev1]
+            commands.metrics, ['--no-evaluation', '-l', 'tmp', '--criter', 'cllr',
+                               '--cost', 0.9, dev1]
         )
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-t', '--criter', 'rr', '-T',
+            commands.metrics, ['--criter', 'rr', '-T',
                                '0.1', dev1, test2]
         )
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(
-            commands.metrics, ['-l', 'tmp', '--criter', 'rr', dev1, dev2]
+            commands.metrics, ['--no-evaluation', '-l', 'tmp', '--criter', 'rr',
+                               dev1, dev2]
         )
         assert result.exit_code == 0
 
@@ -89,7 +91,8 @@ def test_roc():
                                            'data/dev-4col.txt')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.roc, ['--output','test.pdf',dev1])
+        result = runner.invoke(commands.roc, ['--no-evaluation', '--output',
+                                              'test.pdf', dev1])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
@@ -100,7 +103,7 @@ def test_roc():
     test2 = pkg_resources.resource_filename('bob.bio.base.test',
                                             'data/test-5col.txt')
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.roc, ['--test', '--split', '--output',
+        result = runner.invoke(commands.roc, ['--split', '--output',
                                               'test.pdf',
                                               dev1, test1, dev2, test2])
         if result.output:
@@ -108,7 +111,7 @@ def test_roc():
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.roc, ['--test', '--output',
+        result = runner.invoke(commands.roc, ['--output',
                                               'test.pdf', '--titles', 'A,B', 
                                               dev1, test1, dev2, test2])
         if result.output:
@@ -121,7 +124,7 @@ def test_det():
                                            'data/dev-4col.txt')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.det, [dev1])
+        result = runner.invoke(commands.det, ['--no-evaluation', dev1])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
@@ -132,14 +135,14 @@ def test_det():
     test2 = pkg_resources.resource_filename('bob.bio.base.test',
                                             'data/test-5col.txt')
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.det, ['--test', '--split', '--output',
+        result = runner.invoke(commands.det, ['--split', '--output',
                                               'test.pdf', '--titles', 'A,B',
                                               dev1, test1, dev2, test2])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.det, ['--test', '--output',
+        result = runner.invoke(commands.det, ['--output',
                                               'test.pdf',
                                               dev1, test1, dev2, test2])
         if result.output:
@@ -180,23 +183,24 @@ def test_hist():
                                             'data/test-5col.txt')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.hist, [dev1])
+        result = runner.invoke(commands.hist, ['--no-evaluation', dev1])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(commands.hist, ['--criter', 'hter', '--output',
-                                               'HISTO.pdf', '-b', 30,
-                                               dev1, dev2])
+                                               'HISTO.pdf', '-b', 30,
+                                               '--no-evaluation', dev1, dev2])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
 
     with runner.isolated_filesystem():
         result = runner.invoke(commands.hist, ['--criter', 'eer', '--output',
-                                               'HISTO.pdf', '-b', 30, '-F',
-                                               3, dev1, test1, dev2, test2])
+                                               'HISTO.pdf', '-b', 30, '-F', 3,
+                                               '-t', 'A,B', dev1, test1, dev2,
+                                               test2])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
@@ -206,14 +210,14 @@ def test_cmc():
                                            'data/scores-cmc-5col.txt')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.cmc, [dev1])
+        result = runner.invoke(commands.cmc, ['--no-evaluation', dev1])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
     test1 = pkg_resources.resource_filename('bob.bio.base.test',
                                             'data/scores-cmc-4col.txt')
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.cmc, ['--output', 'test.pdf', '-t',
+        result = runner.invoke(commands.cmc, ['--output', 'test.pdf',
                                               '--titles', 'A,B', '-F', 3,
                                               dev1, test1, dev1, test1])
         if result.output:
@@ -225,14 +229,14 @@ def test_dic():
                                            'data/scores-nonorm-openset-dev')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.dic, [dev1, '--rank', 2])
+        result = runner.invoke(commands.dic, ['--no-evaluation', dev1, '--rank', 2])
         if result.output:
             click.echo(result.output)
         assert result.exit_code == 0
     test1 = pkg_resources.resource_filename('bob.bio.base.test',
                                             'data/scores-nonorm-openset-dev')
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.dic, ['--output', 'test.pdf', '-t',
+        result = runner.invoke(commands.dic, ['--output', 'test.pdf',
                                               '--titles', 'A,B', '-F', 3,
                                               dev1, test1, dev1, test1])
         if result.output:
@@ -251,54 +255,17 @@ def test_evaluate():
                                             'data/test-5col.txt')
     runner = CliRunner()
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.evaluate, ['-l', 'tmp', '-f', 0.03, '-M',
-                                                   '-x', '-m', dev1, dev2])
+        result = runner.invoke(commands.evaluate, ['-l', 'tmp', '-f', 0.03,
+                                                   '--no-evaluation', dev1, dev2])
         assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-f', 0.02, '-M',
-                                                   '-x', '-m', dev1, dev2])
+        result = runner.invoke(commands.evaluate, ['--no-evaluation', '-f', 0.02,
+                                                   dev1, dev2])
         assert result.exit_code == 0
 
-        result = runner.invoke(commands.evaluate, ['-l', 'tmp', '-f', 0.04, '-M',
-                                                   '-x', '-m', '-t', dev1, test1,
-                                                   dev2, test2])
-        assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-f', 0.01, '-M', '-t',
-                                                   '-x', '-m', dev1, test1, dev2,
-                                                   test2])
+        result = runner.invoke(commands.evaluate, ['-l', 'tmp', '-f', 0.04,
+                                                   dev1, test1, dev2, test2])
         assert result.exit_code == 0
-
-        result = runner.invoke(commands.evaluate, [dev1, dev2])
-        assert result.exit_code == 0
-
-        result = runner.invoke(commands.evaluate, ['-R', '-D', '-H', '-E',
-                                                   '-o', 'PLOTS.pdf', dev1, dev2])
+        result = runner.invoke(commands.evaluate, ['-f', 0.01,
+                                                   dev1, test1, dev2, test2])
         assert result.exit_code == 0
 
-        result = runner.invoke(commands.evaluate, ['-t', '-R', '-D', '-H', '-E',
-                                                   '-o', 'PLOTS.pdf',
-                                                   test1, dev1, test2, dev2])
-        assert result.exit_code == 0
-
-    cmc = pkg_resources.resource_filename('bob.bio.base.test',
-                                          'data/scores-cmc-4col.txt')
-    cmc2 = pkg_resources.resource_filename('bob.bio.base.test',
-                                           'data/scores-cmc-5col.txt')
-    with runner.isolated_filesystem():
-        result = runner.invoke(commands.evaluate, ['-r', cmc])
-        assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-r', '-t', cmc, cmc2])
-        assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-C', '-t', cmc, cmc2])
-        assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-C', cmc, cmc2])
-        assert result.exit_code == 0
-
-    cmc = pkg_resources.resource_filename('bob.bio.base.test',
-                                          'data/scores-cmc-4col-open-set.txt')
-    cmc2 = pkg_resources.resource_filename('bob.bio.base.test',
-                                          'data/scores-nonorm-openset-dev')
-    with runner.isolated_filesystem():
-        result = runner.invoke(commands.evaluate, ['-O', cmc])
-        assert result.exit_code == 0
-        result = runner.invoke(commands.evaluate, ['-O', '-t', cmc, cmc2])
-        assert result.exit_code == 0
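
The updated tests above all follow the same CliRunner pattern. As an
illustrative sketch only (not part of this patch), an extra check covering the
refactored ``evaluate`` command with an explicit PDF output could look like
this, reusing the dev/eval resource files already loaded in ``test_evaluate``::

    # Illustrative only; follows the existing CliRunner test pattern.
    import pkg_resources
    from click.testing import CliRunner
    from bob.bio.base.script import commands

    def test_evaluate_pdf_output():
        dev1 = pkg_resources.resource_filename('bob.bio.base.test',
                                               'data/dev-4col.txt')
        test1 = pkg_resources.resource_filename('bob.bio.base.test',
                                                'data/test-5col.txt')
        runner = CliRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(commands.evaluate,
                                   ['-o', 'PLOTS.pdf', dev1, test1])
            assert result.exit_code == 0, result.output
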
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 4d52687cbea4d63cf1e3855f9095f9f975cec249..335c88a31c87a11ecd9c7ada99fc73d9c440e833 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -93,6 +93,7 @@ Scoring
    bob.bio.base.tools.compute_scores
    bob.bio.base.tools.concatenate
    bob.bio.base.tools.calibrate
+   bob.bio.base.script.figure.Metrics
 
 Loading data
 ------------
@@ -108,6 +109,13 @@ Loading data
    bob.bio.base.score.load.split_five_column
    bob.bio.base.score.load.cmc_five_column
 
+Plotting
+--------
+.. autosummary::
+   bob.bio.base.script.figure.Cmc
+   bob.bio.base.script.figure.Dic
+   bob.bio.base.script.figure.Hist
+
 OpenBR conversions
 ------------------
 .. autosummary::
@@ -130,4 +138,7 @@ Details
 
 .. automodule:: bob.bio.base.score.load
 .. automodule:: bob.bio.base.score.openbr
+.. automodule:: bob.bio.base.script.figure
+.. automodule:: bob.bio.base.script.commands
+
 .. include:: links.rst