From 871b2ddc6faf4e53205be9cdbe73c3131c8185ef Mon Sep 17 00:00:00 2001
From: dcarron <daniel.carron@idiap.ch>
Date: Tue, 23 Jan 2024 16:56:57 +0100
Subject: [PATCH] [doc] Fix typos and clarify docstrings

---
 src/mednet/engine/evaluator.py                 |  4 ++--
 src/mednet/engine/saliency/evaluator.py        | 12 ++++++------
 src/mednet/engine/saliency/interpretability.py |  8 ++++----
 src/mednet/scripts/evaluate.py                 |  2 +-
 src/mednet/scripts/saliency/completeness.py    |  2 +-
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/mednet/engine/evaluator.py b/src/mednet/engine/evaluator.py
index 829b9a6b..1add3edb 100644
--- a/src/mednet/engine/evaluator.py
+++ b/src/mednet/engine/evaluator.py
@@ -202,7 +202,7 @@ def run_binary(
         A list of predictions to consider for measurement
     threshold_a_priori
-        A threshold to use, evaluated *a priori*, if must report single values.
-        If this value is not provided, a *a posteriori* threshold is calculated
+        A threshold to use, evaluated *a priori*, if one must report single values.
+        If this value is not provided, an *a posteriori* threshold is calculated
         on the input scores.  This is a biased estimator.
 
 
diff --git a/src/mednet/engine/saliency/evaluator.py b/src/mednet/engine/saliency/evaluator.py
index f8a9c04e..aae1acca 100644
--- a/src/mednet/engine/saliency/evaluator.py
+++ b/src/mednet/engine/saliency/evaluator.py
@@ -16,9 +16,10 @@ def _reconcile_metrics(
     completeness: list,
     interpretability: list,
 ) -> list[tuple[str, int, float, float, float]]:
-    """Summarizes samples into a new table containing most important scores.
+    """Summarizes samples into a new table containing the most important
+    scores.
 
-    It returns a list containing a table with completeness and road scorse per
+    It returns a list containing a table with completeness and ROAD scores per
-    sample, for the selected dataset.  Only samples for which a completness and
-    interpretability scores are availble are returned in the reconciled list.
+    sample, for the selected dataset.  Only samples for which completeness and
+    interpretability scores are available are returned in the reconciled list.
 
@@ -198,8 +199,7 @@ def _extract_statistics(
     name
         The name of the variable being analysed
     index
-        Which of the indexes on the tuples containing in ``data`` that should
-        be extracted.
+        The index of the value to extract from each tuple in ``data``.
     dataset
         The name of the dataset being analysed
     xlim
diff --git a/src/mednet/engine/saliency/interpretability.py b/src/mednet/engine/saliency/interpretability.py
index 89873d0e..95e184f4 100644
--- a/src/mednet/engine/saliency/interpretability.py
+++ b/src/mednet/engine/saliency/interpretability.py
@@ -382,8 +382,8 @@ def run(
     target_label: int,
     datamodule: lightning.pytorch.LightningDataModule,
 ) -> dict[str, list[typing.Any]]:
-    """Applies visualization techniques on input CXR, outputs images with
-    overlaid heatmaps and csv files with measurements.
+    """Computes the proportional energy and average saliency focus for a given
+    target label in a datamodule.
 
     Parameters
-    ---------
+    ----------
@@ -399,7 +399,7 @@ def run(
 
     Returns
     -------
-        A dictionary where keys are dataset names in the provide datamodule,
+        A dictionary where keys are dataset names in the provided datamodule,
         and values are lists containing sample information alongside metrics
         calculated:
 
diff --git a/src/mednet/scripts/evaluate.py b/src/mednet/scripts/evaluate.py
index 2a5a85cb..ec8365b9 100644
--- a/src/mednet/scripts/evaluate.py
+++ b/src/mednet/scripts/evaluate.py
@@ -114,7 +114,7 @@ def evaluate(
             raise click.BadParameter(
                 f"""The value of --threshold=`{threshold}` does not match one
                 of the database split names ({', '.join(predict_data.keys())})
-                or can be converted to float. Check your input."""
+                or cannot be converted to a float. Check your input."""
             )
 
     results: dict[
diff --git a/src/mednet/scripts/saliency/completeness.py b/src/mednet/scripts/saliency/completeness.py
index 41b877d4..16d9c8df 100644
--- a/src/mednet/scripts/saliency/completeness.py
+++ b/src/mednet/scripts/saliency/completeness.py
@@ -21,7 +21,7 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
     cls=ConfigCommand,
     epilog="""Examples:
 
-1. Calculates the ROAD scores for an existing dataset configuration and stores them in .csv files:
+1. Calculates the ROAD scores for an existing dataset configuration and stores them in .json files:
 
    .. code:: sh
 
-- 
GitLab