diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b4d1d1ca583eb211cc4b0a5ab9b2243c715bb45e..49337196592599a11942ecd8fc24a4230c5a196d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,20 +2,20 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/timothycrosley/isort
-    rev: 5.10.1
+    rev: 5.12.0
     hooks:
       - id: isort
         args: [--settings-path, "pyproject.toml"]
   - repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 23.1.0
     hooks:
       - id: black
   - repo: https://github.com/pycqa/flake8
-    rev: 3.9.2
+    rev: 6.0.0
     hooks:
       - id: flake8
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.4.0
     hooks:
       - id: check-ast
       - id: check-case-conflict
diff --git a/conda/meta.yaml b/conda/meta.yaml
index cd42e1d060c3f6854592be089cffb5624c17a1f3..7834daced126c8c03a24c5d538dc264e7afca25d 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -23,7 +23,7 @@ requirements:
     - setuptools {{ setuptools }}
     - pip {{ pip }}
     - bob.io.base
-    - exposed
+    - clapp
     - numpy {{ numpy }}
     - scipy {{ scipy }}
     - h5py {{ h5py }}
@@ -34,7 +34,7 @@ requirements:
     - python
     - setuptools
     - bob.io.base
-    - exposed
+    - clapp
     - {{ pin_compatible('numpy') }}
     - {{ pin_compatible('scipy') }}
     - {{ pin_compatible('h5py') }}
diff --git a/pyproject.toml b/pyproject.toml
index a55fcbb70d54190226de3f9a09a8699b5226c922..46af727c0d25d3b3add9bdf48c321c41112b449b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@
         "setuptools",
         "numpy",
         "bob.io.base",
-        "exposed",
+        "clapp",
         "scipy",
         "h5py",
         "matplotlib",
diff --git a/src/bob/measure/_library.py b/src/bob/measure/_library.py
index eb7df8180e9c344dcae7c03e259c8ab8b3422e15..9477cc4cba68e83cf1682ec18ed0819270d5b45a 100644
--- a/src/bob/measure/_library.py
+++ b/src/bob/measure/_library.py
@@ -457,7 +457,6 @@ def rocch(negatives, positives):
     miss = 0
 
     for i in range(nbins):
-
         retval[0, i] = fa / Nn  # pfa
         retval[1, i] = miss / Nt  # pmiss
         left += int(width[i])
@@ -508,7 +507,6 @@ def rocch2eer(pmiss_pfa):
     epsilon = numpy.finfo(numpy.float64).eps
 
     for i in range(N - 1):
-
         # Define XY matrix
         XY[0, 0] = pmiss_pfa[0, i]  # pfa
         XY[1, 0] = pmiss_pfa[0, i + 1]  # pfa
@@ -666,7 +664,6 @@ def _minimizing_threshold(negatives, positives, criterion, cost=0.5):
 
     # continues until one of the two iterators reaches the end of the list
     while pos_it < max_pos and neg_it < max_neg:
-
         # compute predicate
         current_predicate = criterium(far, frr, cost)
 
@@ -1032,7 +1029,6 @@ def far_threshold(negatives, positives, far_value=0.001, is_sorted=False):
     current_threshold = 0.0
 
     while current_position < total_count:
-
         current_threshold = scores[current_position]
         # keep iterating if values are repeated
         while (
@@ -1225,7 +1221,6 @@ def frr_threshold(negatives, positives, frr_value=0.001, is_sorted=False):
     current_threshold = 0.0
 
     while current_position < total_count:
-
         current_threshold = scores[current_position]
         # keep iterating if values are repeated
         while (
@@ -1340,6 +1335,7 @@ _jit_min_weighted_error_rate_threshold = (
     min_weighted_error_rate_threshold.jit_func
 )
 
+
 # @jit([(numba.float64[:, :],)], nopython=True)
 def ppndf(p):
     """Returns the Deviate Scale equivalent of a false rejection/acceptance ratio
diff --git a/src/bob/measure/plot.py b/src/bob/measure/plot.py
index 1d6a8e614d8b8624fc27ee777583d87eb400b8ce..170d34485f9d55127719a438a04fbc9a480c4d19 100644
--- a/src/bob/measure/plot.py
+++ b/src/bob/measure/plot.py
@@ -62,7 +62,7 @@ def roc(
     min_far=-8,
     tpr=False,
     semilogx=False,
-    **kwargs
+    **kwargs,
 ):
     """Plots Receiver Operating Characteristic (ROC) curve.
 
@@ -268,7 +268,7 @@ def epc(
     test_negatives,
     test_positives,
     npoints=100,
-    **kwargs
+    **kwargs,
 ):
     """Plots Expected Performance Curve (EPC) as defined in the paper:
 
diff --git a/src/bob/measure/script/common_options.py b/src/bob/measure/script/common_options.py
index ddc856faa8192bafb7a253a770d465caf688b72e..a3405fa4280e2396b98003253832990d340798c7 100644
--- a/src/bob/measure/script/common_options.py
+++ b/src/bob/measure/script/common_options.py
@@ -7,8 +7,8 @@ import click
 import matplotlib.pyplot as plt
 import tabulate
 
+from clapp.click import verbosity_option
 from click.types import FLOAT, INT
-from exposed.click import verbosity_option
 from matplotlib.backends.backend_pdf import PdfPages
 
 LOGGER = logging.getLogger(__name__)
@@ -1018,7 +1018,7 @@ def metrics_command(
     criteria=("eer", "min-hter", "far"),
     far_name="FAR",
     check_criteria=True,
-    **kwarg
+    **kwarg,
 ):
     def custom_metrics_command(func):
         func.__doc__ = docstring
diff --git a/src/bob/measure/script/figure.py b/src/bob/measure/script/figure.py
index c0ac5605c662250bb7060a7c421a855d9a7f79ec..ee25b0ee27ac903f2398a73900a7f64503b42f9f 100644
--- a/src/bob/measure/script/figure.py
+++ b/src/bob/measure/script/figure.py
@@ -630,7 +630,7 @@ class PlotBase(MeasureBase):
         lines and close pdf if needed"""
         # draw vertical lines
         if self._far_at is not None:
-            for (line, line_trans) in zip(self._far_at, self._trans_far_val):
+            for line, line_trans in zip(self._far_at, self._trans_far_val):
                 mpl.figure(1)
                 mpl.plot(
                     [line_trans, line_trans],
diff --git a/src/bob/measure/script/gen.py b/src/bob/measure/script/gen.py
index df6581e127a58bd397f89eb89f47d19ef6ea8c50..fd62da49130ce90881c4cc882b5e781e8cf9bcf0 100644
--- a/src/bob/measure/script/gen.py
+++ b/src/bob/measure/script/gen.py
@@ -12,8 +12,8 @@ import click
 import numpy
 import numpy.random
 
+from clapp.click import verbosity_option
 from click.types import FLOAT
-from exposed.click import verbosity_option
 
 logger = logging.getLogger(__name__)
 
@@ -79,7 +79,6 @@ def write_scores_to_file(neg, pos, filename):
     os.makedirs(os.path.dirname(filename), exist_ok=True)
 
     with open(filename, "wt") as f:
-
         for i in pos:
             text = (
                 "1 %f\n" % i if numpy.random.normal(0, 1) > 0.01 else "1 nan\n"
@@ -97,8 +96,8 @@ def write_scores_to_file(neg, pos, filename):
 @click.argument("outdir")
 @click.option("--mean-neg", default=-1, type=FLOAT, show_default=True)
 @click.option("--mean-pos", default=1, type=FLOAT, show_default=True)
-@verbosity_option(logger)
-def gen(outdir, mean_neg, mean_pos, **kwargs):
+@verbosity_option(logger, expose_value=False)
+def gen(outdir, mean_neg, mean_pos):
     """Generate random scores.
 
     Generates random scores for negative and positive scores, whatever they
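
Passing expose_value=False tells click to run the verbosity callback without forwarding its value to the command, which is why the unused **kwargs catch-all can be dropped from gen()'s signature in the same change. A minimal sketch of the pattern, assuming a standalone script outside this package (the command name and argument are illustrative only):

    import logging

    import click

    from clapp.click import verbosity_option

    logger = logging.getLogger(__name__)


    @click.command()
    @click.argument("outdir")
    @verbosity_option(logger, expose_value=False)
    def demo(outdir):
        # the decorator only tunes the logger level from the verbosity
        # flag; no extra parameter reaches this callback because of
        # expose_value=False
        logger.info("would write scores into %s", outdir)


    if __name__ == "__main__":
        demo()
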
diff --git a/src/bob/measure/script/measure.py b/src/bob/measure/script/measure.py
index 02eadf3762805ca6d9845ca2813ea9da25eee694..56f98e59a2559e1713a31ae51ddc71d91fcb68f5 100644
--- a/src/bob/measure/script/measure.py
+++ b/src/bob/measure/script/measure.py
@@ -3,8 +3,8 @@
 import click
 import pkg_resources
 
+from clapp.click import AliasedGroup
 from click_plugins import with_plugins
-from exposed.click import AliasedGroup
 
 
 @with_plugins(pkg_resources.iter_entry_points("bob.measure.cli"))
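
The same relocation applies to AliasedGroup: only the import path changes, while the decorator stack around the entry-point group stays untouched. The group declaration is roughly the sketch below, reconstructed from the imports and the with_plugins line above rather than copied verbatim from measure.py:

    import click
    import pkg_resources

    from clapp.click import AliasedGroup
    from click_plugins import with_plugins


    @with_plugins(pkg_resources.iter_entry_points("bob.measure.cli"))
    @click.group(cls=AliasedGroup)
    def measure():
        # group collecting every command registered under the
        # bob.measure.cli entry point
        pass
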
diff --git a/tests/test_error.py b/tests/test_error.py
index 98b2767e6a6cf2d2993496460be54403ff90ed5f..f1655e3294fce52a22110acbd6e3125e1d21f6f1 100644
--- a/tests/test_error.py
+++ b/tests/test_error.py
@@ -66,7 +66,6 @@ def _save(f, data):
 
 
 def test_basic_ratios():
-
     # We test the basic functionality of FAR and FRR calculation. The first
     # example is separable, with a separation threshold of about 3.0
 
@@ -122,7 +121,6 @@ def test_basic_ratios():
 
 
 def test_for_uncomputable_thresholds():
-
     # in some cases, we cannot compute an FAR or FRR threshold, e.g., when we
     # have too little data or too many equal scores; in these cases, the methods
     # should return a threshold which supports a lower value.
@@ -177,7 +175,6 @@ def test_for_uncomputable_thresholds():
 
 
 def test_indexing():
-
     # This test verifies that the output of correctly_classified_positives() and
     # correctly_classified_negatives() makes sense.
     positives = _load("linsep-positives.hdf5")
@@ -203,7 +200,6 @@ def test_indexing():
 
 
 def test_obvious_thresholds():
-
     M = 10
     neg = numpy.arange(M, dtype=float)
     pos = numpy.arange(M, 2 * M, dtype=float)
@@ -226,7 +222,6 @@ def test_obvious_thresholds():
 
 
 def test_thresholding():
-
     # This example will demonstrate and check the use of eer_threshold() to
     # calculate the threshold that minimizes the EER.
 
@@ -289,7 +284,6 @@ def test_thresholding():
 
 
 def test_empty_raises():
-
     for func in (
         farfrr,
         precision_recall,
@@ -306,7 +300,6 @@ def test_empty_raises():
             func([], [], 0)
 
     for func in (eer_threshold, min_hter_threshold):
-
         with pytest.raises(RuntimeError):
             func(negatives=[], positives=[1.0])
 
@@ -318,7 +311,6 @@ def test_empty_raises():
 
 
 def test_plots():
-
     # This test set is not separable.
     positives = _load("nonsep-positives.hdf5")
     negatives = _load("nonsep-negatives.hdf5")
@@ -383,7 +375,6 @@ def test_plots():
 
 
 def no_test_rocch():
-
     # This example will demonstrate and check the use of eer_rocch_threshold() to
     # calculate the threshold that minimizes the EER on the ROC Convex Hull
 
@@ -422,7 +413,6 @@ def no_test_rocch():
 
 
 def test_cmc():
-
     # tests the CMC calculation
     # test data; should give match characteristics [1/2,1/4,1/3] and CMC
     # [1/3,2/3,1]
@@ -472,7 +462,6 @@ def test_cmc():
 
 
 def no_test_calibration():
-
     # Tests the cllr and min_cllr measures
     # This test set is separable.
     positives = _load("linsep-positives.hdf5")
@@ -549,7 +538,6 @@ def test_open_set_rates():
 
 
 def test_mindcf():
-
     # Tests outlier scores in negative set
     cost = 0.99
     negatives = [-3, -2, -1, -0.5, 4]
@@ -561,7 +549,6 @@ def test_mindcf():
 
 
 def test_roc_auc_score():
-
     positives = _load("nonsep-positives.hdf5")
     negatives = _load("nonsep-negatives.hdf5")
     auc = roc_auc_score(negatives, positives)