From 3ebea75ea45f6e47a52dfdd3c6ec54c682b93f1b Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Wed, 6 May 2020 13:53:17 +0200
Subject: [PATCH] [utils.measure] Fix docs

---
 bob/ip/binseg/utils/measure.py | 55 +++++++++++++++++-----------------
 doc/links.rst                  |  3 ++
 2 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/bob/ip/binseg/utils/measure.py b/bob/ip/binseg/utils/measure.py
index 881ac7c8..35bbd18f 100644
--- a/bob/ip/binseg/utils/measure.py
+++ b/bob/ip/binseg/utils/measure.py
@@ -33,52 +33,51 @@ def base_measures(tp, fp, tn, fn):
     Calculates a bunch of measures from true/false positive and negative counts
 
     This function can return standard machine learning measures from true and
-    false positive counts of positives and negatives.
-
-    For a thorough look into these and alternate names for the returned values,
-    please check Wikipedia's entry on `Precision and Recall`_.
+    false positive counts of positives and negatives.  For a thorough look into
+    these and alternate names for the returned values, please check Wikipedia's
+    entry on `Precision and Recall`_.
 
 
     Parameters
     ----------
 
-    tp : int
-        True positive count, AKA "hit"
+        tp : int
+            True positive count, AKA "hit"
 
-    fp : int
-        False positive count, AKA, "correct rejection"
+        fp : int
+            False positive count, AKA "false alarm", or "Type I error"
 
-    tn : int
-        True negative count, AKA "false alarm", or "Type I error"
+        tn : int
+            True negative count, AKA "correct rejection"
 
-    fn : int
-        False Negative count, AKA "miss", or "Type II error"
+        fn : int
+            False negative count, AKA "miss", or "Type II error"
 
 
     Returns
     -------
 
-    precision : float
-        P, AKA positive predictive value (PPV)
-        :math:`\frac{tp}{tp+fp}`
+        precision : float
+            P, AKA positive predictive value (PPV)
+            :math:`\frac{tp}{tp+fp}`
 
-    recall : float
-        R, AKA sensitivity, hit rate, or true positive rate (TPR)
-        :math:`\frac{tp}{p} = \frac{tp}{tp+fn}`
+        recall : float
+            R, AKA sensitivity, hit rate, or true positive rate (TPR)
+            :math:`\frac{tp}{p} = \frac{tp}{tp+fn}`
 
-    specificity : float
-        S, AKA selectivity or true negative rate (TNR).
-        :math:`\frac{tn}{n} = \frac{tn}{tn+fp}`
+        specificity : float
+            S, AKA selectivity or true negative rate (TNR).
+            :math:`\frac{tn}{n} = \frac{tn}{tn+fp}`
 
-    accuracy : float
-        A, :math:`\frac{tp + tn}{p + n} = \frac{tp + tn}{tp + fp + tn + fn}`
+        accuracy : float
+            A, :math:`\frac{tp + tn}{p + n} = \frac{tp + tn}{tp + fp + tn + fn}`
 
-    jaccard : float
-        J, :math:`\frac{tp}{tp+fp+fn}`, see `Jaccard Index`_
+        jaccard : float
+            J, :math:`\frac{tp}{tp+fp+fn}`, see `Jaccard Index`_
 
-    f1_score : float
-        F1, :math:`\frac{2 P R}{P + R} = \frac{2tp}{2tp + fp + fn}`, see
-        `F1-score`_
+        f1_score : float
+            F1, :math:`\frac{2 P R}{P + R} = \frac{2tp}{2tp + fp + fn}`, see
+            `F1-score`_
 
     """
 
diff --git a/doc/links.rst b/doc/links.rst
index ce2c72d6..5d0ffb98 100644
--- a/doc/links.rst
+++ b/doc/links.rst
@@ -9,6 +9,9 @@
 .. _pytorch: https://pytorch.org
 .. _tabulate: https://pypi.org/project/tabulate/
 .. _our paper: https://arxiv.org/abs/1909.03856
+.. _precision and recall: https://en.wikipedia.org/wiki/Precision_and_recall
+.. _f1-score: https://en.wikipedia.org/wiki/F1_score
+.. _jaccard index: https://en.wikipedia.org/wiki/Jaccard_index
 
 .. Raw data websites
 .. _drive: https://www.isi.uu.nl/Research/Databases/DRIVE/
-- 
GitLab
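
For reference, a minimal sketch of what the documented formulas amount to in
code.  The helper name (base_measures_sketch) and the zero-division guard are
illustrative assumptions; the actual body of base_measures in
bob/ip/binseg/utils/measure.py is not part of this patch and may differ.

def base_measures_sketch(tp, fp, tn, fn):
    """Computes precision, recall, specificity, accuracy, Jaccard and F1."""

    def _div(num, den):
        # assumed guard against empty denominators; the real code may differ
        return num / den if den else 0.0

    precision = _div(tp, tp + fp)                # PPV
    recall = _div(tp, tp + fn)                   # sensitivity / TPR
    specificity = _div(tn, tn + fp)              # selectivity / TNR
    accuracy = _div(tp + tn, tp + fp + tn + fn)
    jaccard = _div(tp, tp + fp + fn)             # Jaccard index
    f1_score = _div(2 * tp, 2 * tp + fp + fn)    # harmonic mean of P and R
    return precision, recall, specificity, accuracy, jaccard, f1_score

# example: 90 hits, 10 false alarms, 80 correct rejections, 20 misses
print(base_measures_sketch(90, 10, 80, 20))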