diff --git a/bob/measure/script/commands.py b/bob/measure/script/commands.py
index 61238c127306550c82e474812f8599edafaddc5a..d2c63333d27178615c51904709eeecf82e3b3e91 100644
--- a/bob/measure/script/commands.py
+++ b/bob/measure/script/commands.py
@@ -22,7 +22,7 @@ from bob.extension.scripts.click_helper import (verbosity_option,
 @click.pass_context
 def metrics(ctx, scores, evaluation, **kwargs):
     """Prints a table that contains FtA, FAR, FRR, FMR, FMNR, HTER for a given
-    threshold criterion (eer or hter).
+    threshold criterion (eer or min-hter).
 
     You need to provide one or more development score file(s) for each experiment.
     You can also provide evaluation files along with dev files. If only dev scores
@@ -189,7 +189,7 @@ def hist(ctx, scores, evaluation, **kwargs):
         $ bob measure hist dev-scores1 eval-scores1 dev-scores2
         eval-scores2
 
-        $ bob measure hist --criterion hter --show-dev dev-scores1 eval-scores1
+        $ bob measure hist --criterion min-hter --show-dev dev-scores1 eval-scores1
     """
     process = figure.Hist(ctx, scores, evaluation, load.split)
     process.run()
@@ -247,8 +247,8 @@ def evaluate(ctx, scores, evaluation, **kwargs):
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     # second time, appends the content
     ctx.meta['open_mode'] = 'a'
-    click.echo("Computing metrics with HTER...")
-    ctx.meta['criterion'] = 'hter'  # no criterion passed in evaluate
+    click.echo("Computing metrics with min-HTER...")
+    ctx.meta['criterion'] = 'min-hter'  # no criterion passed in evaluate
     ctx.invoke(metrics, scores=scores, evaluation=evaluation)
     if 'log' in ctx.meta:
         click.echo("[metrics] => %s" % ctx.meta['log'])
diff --git a/bob/measure/script/common_options.py b/bob/measure/script/common_options.py
index 764dbdb315108a42843d3c4caed37741c79fb58e..ccaa0f7627d8e5ce15f83b2cd2dd63dc1ddfe69f 100644
--- a/bob/measure/script/common_options.py
+++ b/bob/measure/script/common_options.py
@@ -273,7 +273,7 @@ def output_log_metric_option(**kwargs):
             callback=callback, **kwargs)(func)
     return custom_output_log_file_option
 
-def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
+def criterion_option(lcriteria=['eer', 'min-hter', 'far'], **kwargs):
     """Get option flag to tell which criteriom is used (default:eer)
 
     Parameters
@@ -284,7 +284,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
     def custom_criterion_option(func):
         def callback(ctx, param, value):
             list_accepted_crit = lcriteria if lcriteria is not None else \
-                    ['eer', 'hter', 'far']
+                    ['eer', 'min-hter', 'far']
             if value not in list_accepted_crit:
                 raise click.BadParameter('Incorrect value for `--criterion`. '
                                          'Must be one of [`%s`]' %
@@ -294,7 +294,7 @@ def criterion_option(lcriteria=['eer', 'hter', 'far'], **kwargs):
         return click.option(
             '-c', '--criterion', default='eer',
             help='Criterion to compute plots and '
-            'metrics: `eer` (default), `hter`',
+            'metrics: `eer`, `min-hter` or `far`',
             callback=callback, is_eager=True ,**kwargs)(func)
     return custom_criterion_option
 
diff --git a/bob/measure/test_script.py b/bob/measure/test_script.py
index 15311ba480317ecacd00a9dda6cca699928bf3b1..307afe028ef32215fba136b5bed9365f2ec6cf7d 100644
--- a/bob/measure/test_script.py
+++ b/bob/measure/test_script.py
@@ -130,7 +130,8 @@ def test_hist():
         assert result.exit_code == 0, (result.exit_code, result.output)
 
     with runner.isolated_filesystem():
-        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion', 'hter',
+        result = runner.invoke(commands.hist, ['--no-evaluation', '--criterion',
+                                               'min-hter',
                                                '--output', 'HISTO.pdf',  '-b', 
                                                '30,100', dev1, dev2])
         if result.output:
diff --git a/bob/measure/utils.py b/bob/measure/utils.py
index 15ff8a83f7306576b256525ff15ee129c37fdb52..64c5527e8bc44985828bdb3369b750b2c17e990b 100644
--- a/bob/measure/utils.py
+++ b/bob/measure/utils.py
@@ -90,7 +90,7 @@ def get_thres(criter, neg, pos, far=None):
     Parameters
     ----------
     criter :
-        Criterion (`eer` or `hter`)
+        Criterion (`eer`, `min-hter` or `far`)
     neg : :py:class:`numpy.ndarray`:
         array of negative scores
         pos : :py:class:`numpy.ndarray`::
@@ -104,7 +104,7 @@ def get_thres(criter, neg, pos, far=None):
     if criter == 'eer':
         from . import eer_threshold
         return eer_threshold(neg, pos)
-    elif criter == 'hter':
+    elif criter == 'min-hter':
         from . import min_hter_threshold
         return min_hter_threshold(neg, pos)
     elif criter == 'far':
diff --git a/doc/guide.rst b/doc/guide.rst
index 95a4614b24883a9b02b92e9d1facf124c68ac8cc..c27e661b46d930d5aaa4b0f352d54be691ca8179 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -588,7 +588,7 @@ Evaluate
 
 A convenient command ``evaluate`` is provided to generate multiple metrics and
 plots for a list of experiments. It generates two ``metrics`` outputs with ERR
-and HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
+and min-HTER criteria along with ``roc``, ``det``, ``epc``, ``hist`` plots for each
 experiment. For example:
 
 .. code-block:: sh
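
For reference, a minimal sketch of how the renamed criterion is exercised through
``bob.measure.utils.get_thres``; the synthetic score arrays below are placeholders
for illustration only and are not part of the patch:

.. code-block:: python

    import numpy
    from bob.measure.utils import get_thres

    # placeholder negative (impostor) and positive (genuine) score arrays
    neg = numpy.random.normal(-1.0, 1.0, size=1000)
    pos = numpy.random.normal(+1.0, 1.0, size=1000)

    # the minimum-HTER threshold is now requested with 'min-hter'
    # (it delegates to bob.measure.min_hter_threshold); 'eer' and 'far'
    # remain valid criterion values
    threshold = get_thres('min-hter', neg, pos)
    eer_thres = get_thres('eer', neg, pos)

On the command line the same criterion name is passed through ``--criterion``,
e.g. ``bob measure hist --criterion min-hter dev-scores1 eval-scores1``.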