diff --git a/src/mednet/engine/evaluator.py b/src/mednet/engine/evaluator.py
index 829b9a6b04db10beb414bba5ed04aadb24b472b9..1add3edb8afe69902925d9847bb18988b51661fe 100644
--- a/src/mednet/engine/evaluator.py
+++ b/src/mednet/engine/evaluator.py
@@ -202,7 +202,7 @@ def run_binary(
         A list of predictions to consider for measurement
     threshold_a_priori
-        A threshold to use, evaluated *a priori*, if must report single values.
-        If this value is not provided, a *a posteriori* threshold is calculated
+        A threshold to use, evaluated *a priori*, if single values must be reported.
+        If this value is not provided, an *a posteriori* threshold is calculated
         on the input scores.  This is a biased estimator.
 
 
diff --git a/src/mednet/engine/saliency/evaluator.py b/src/mednet/engine/saliency/evaluator.py
index f8a9c04eb46cfc4b17003b2ccbe1873d6e987a4b..aae1accaae37899ad84dc59a03498ed2ecb8547e 100644
--- a/src/mednet/engine/saliency/evaluator.py
+++ b/src/mednet/engine/saliency/evaluator.py
@@ -16,9 +16,10 @@ def _reconcile_metrics(
     completeness: list,
     interpretability: list,
 ) -> list[tuple[str, int, float, float, float]]:
-    """Summarizes samples into a new table containing most important scores.
+    """Summarizes samples into a new table containing the most important
+    scores.
 
-    It returns a list containing a table with completeness and road scorse per
+    It returns a list containing a table with completeness and ROAD scores per
-    sample, for the selected dataset.  Only samples for which a completness and
-    interpretability scores are availble are returned in the reconciled list.
+    sample, for the selected dataset.  Only samples for which completeness and
+    interpretability scores are available are returned in the reconciled list.
 
@@ -198,8 +199,7 @@ def _extract_statistics(
     name
         The name of the variable being analysed
     index
-        Which of the indexes on the tuples containing in ``data`` that should
-        be extracted.
+        The index within each tuple in ``data`` that should be extracted.
     dataset
         The name of the dataset being analysed
     xlim
diff --git a/src/mednet/engine/saliency/interpretability.py b/src/mednet/engine/saliency/interpretability.py
index 89873d0e7125ef15716d062ef13413da1dc4bb6c..95e184f4f9aee8a2cbc12f1a989adae565a490a0 100644
--- a/src/mednet/engine/saliency/interpretability.py
+++ b/src/mednet/engine/saliency/interpretability.py
@@ -382,8 +382,8 @@ def run(
     target_label: int,
     datamodule: lightning.pytorch.LightningDataModule,
 ) -> dict[str, list[typing.Any]]:
-    """Applies visualization techniques on input CXR, outputs images with
-    overlaid heatmaps and csv files with measurements.
+    """Computes the proportional energy and average saliency focus for a given
+    target label in a datamodule.
 
     Parameters
-    ---------
+    ----------
@@ -399,7 +399,7 @@ def run(
 
     Returns
     -------
-        A dictionary where keys are dataset names in the provide datamodule,
+        A dictionary where keys are dataset names in the provided datamodule,
         and values are lists containing sample information alongside metrics
         calculated:
 
diff --git a/src/mednet/scripts/evaluate.py b/src/mednet/scripts/evaluate.py
index 2a5a85cb931fd1d28066493b4b37aeb752186235..ec8365b95a625b3be30e040d1c514ae78003170f 100644
--- a/src/mednet/scripts/evaluate.py
+++ b/src/mednet/scripts/evaluate.py
@@ -114,7 +114,7 @@ def evaluate(
             raise click.BadParameter(
                 f"""The value of --threshold=`{threshold}` does not match one
                 of the database split names ({', '.join(predict_data.keys())})
-                or can be converted to float. Check your input."""
+                or cannot be converted to a float. Check your input."""
             )
 
     results: dict[
diff --git a/src/mednet/scripts/saliency/completeness.py b/src/mednet/scripts/saliency/completeness.py
index 41b877d4decb63febb77fc2b20b8965887a4bb89..16d9c8df0e722c3fe54bbdc762cb7e0daa4ee737 100644
--- a/src/mednet/scripts/saliency/completeness.py
+++ b/src/mednet/scripts/saliency/completeness.py
@@ -21,7 +21,7 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
     cls=ConfigCommand,
     epilog="""Examples:
 
-1. Calculates the ROAD scores for an existing dataset configuration and stores them in .csv files:
+1. Calculates the ROAD scores for an existing dataset configuration and stores them in .json files:
 
    .. code:: sh