From 359b87034b7ea18ae53fc76384e12bf1e17d2c00 Mon Sep 17 00:00:00 2001
From: Tiago Freitas Pereira <tiagofrepereira@gmail.com>
Date: Fri, 6 Nov 2020 11:47:23 +0100
Subject: [PATCH] [sphinx] Cleaning up documentation

---
 doc/conf.py                |  4 +-
 doc/guide.rst              | 91 --------------------------------------
 doc/index.rst              |  4 --
 doc/nitpick-exceptions.txt |  3 ++
 doc/py_api.rst             |  4 --
 5 files changed, 6 insertions(+), 100 deletions(-)
 create mode 100644 doc/nitpick-exceptions.txt

diff --git a/doc/conf.py b/doc/conf.py
index 360615a..5d32da6 100755
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -234,7 +234,9 @@ from bob.extension.utils import link_documentation, load_requirements
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
     intersphinx_mapping = link_documentation(
-        additional_packages=['python', 'numpy'] + load_requirements(sphinx_requirements))
+        additional_packages=['python', 'numpy']
+        + load_requirements(sphinx_requirements)
+    )
 else:
     intersphinx_mapping = link_documentation()
 
diff --git a/doc/guide.rst b/doc/guide.rst
index 2ba0ab3..1ce2e6d 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -695,97 +695,6 @@ computed, which is defined in more formal way by:
   shutil.rmtree(temp_dir)
 
 
-Score Normalization
--------------------
-
-Score normalization aims to compensate for statistical variations in output
-scores caused by changing conditions across different enrollment and probe
-samples. This is achieved by scaling the distributions of system output scores
-so that a single, global threshold can be applied for authentication.
-
-Bob implements three score normalization strategies, presented in the next
-subsections.
-
-.. _znorm:
-
-Z-Norm
-======
-
-Given a score :math:`s_i`, Z-Norm (zero normalization) [Auckenthaler2000]_
-[Mariethoz2005]_ scales this value by the mean (:math:`\mu`) and standard
-deviation (:math:`\sigma`) of an impostor score distribution. This
-distribution can be computed beforehand, and the normalization is defined as
-follows:
-
-.. math::
-
-   zs_i = \frac{s_i - \mu}{\sigma}
-
-
-This scoring technique is implemented in :py:func:`bob.learn.em.znorm`. Below
-is an example of score normalization using :py:func:`bob.learn.em.znorm`.
-
-.. plot:: plot/plot_Znorm.py
-   :include-source: True
-
-.. note::
-
-   Observe how the scores were scaled in the plot above.
-
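-A minimal NumPy sketch of the same computation (the impostor scores below are
-illustrative values, not part of the library API):
-
-.. code-block:: python
-
-   import numpy
-
-   # Impostor scores used to estimate the normalization statistics
-   # (illustrative values).
-   impostor_scores = numpy.array([-3.2, -2.8, -3.5, -2.9, -3.1])
-
-   # Raw score of a probe against the claimed model.
-   s_i = -1.5
-
-   # Z-Norm: scale by the mean and standard deviation of the
-   # impostor score distribution.
-   zs_i = (s_i - impostor_scores.mean()) / impostor_scores.std()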
-
-.. _tnorm:
-
-T-Norm
-======
-
-T-Norm (test normalization) [Auckenthaler2000]_ [Mariethoz2005]_ operates in a
-probe-centric manner. Whereas Z-Norm estimates :math:`\mu` and :math:`\sigma`
-from a set of impostor models and their scores, T-Norm computes these
-statistics by scoring the current probe sample against a set of cohort models
-:math:`\Theta_{c}`. A cohort can be any semantically meaningful grouping for
-your recognition task, such as sex, ethnicity or age. The normalization is
-defined as follows:
-
-.. math::
-
-   ts_i = \frac{s_i - \mu}{\sigma}
-
-where :math:`s_i` is :math:`P(x_i | \Theta)` (the score of the probe against
-the claimed model),
-:math:`\mu = \frac{1}{N} \sum\limits_{j=1}^{N} P(x_i | \Theta_{c_j})`
-(:math:`\Theta_{c_1}, \dots, \Theta_{c_N}` being the models of the cohort) and
-:math:`\sigma` is the standard deviation computed over the same cohort scores.
-
-
-This scoring technique is implemented in :py:func:`bob.learn.em.tnorm`. Below
-is an example of score normalization using :py:func:`bob.learn.em.tnorm`.
-
-.. plot:: plot/plot_Tnorm.py
-   :include-source: True
-
-
-.. note::
-
-   T-Norm introduces extra computation during scoring, as each probe sample
-   needs to be compared to every cohort model in order to estimate :math:`\mu`
-   and :math:`\sigma`.
-
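-A minimal NumPy sketch of the probe-centric statistics (the cohort scores
-below are illustrative values, not part of the library API):
-
-.. code-block:: python
-
-   import numpy
-
-   # Scores of the current probe sample against each cohort model
-   # (illustrative values).
-   cohort_scores = numpy.array([-2.1, -2.6, -1.9, -2.4])
-
-   # Raw score of the same probe against the claimed model.
-   s_i = -0.8
-
-   # T-Norm: mu and sigma come from the probe's own cohort scores.
-   ts_i = (s_i - cohort_scores.mean()) / cohort_scores.std()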
-
-.. _ztnorm:
-
-ZT-Norm
-=======
-
-ZT-Norm [Auckenthaler2000]_ [Mariethoz2005]_ consists of applying
-:ref:`Z-Norm <znorm>` followed by :ref:`T-Norm <tnorm>`; it is implemented in
-:py:func:`bob.learn.em.ztnorm`.
-
-Below is an example of score normalization using
-:py:func:`bob.learn.em.ztnorm`.
-
-.. plot:: plot/plot_ZTnorm.py
-   :include-source: True
-
-.. note::
-
-   Observe how the scores were scaled in the plot above.
-
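-A simplified NumPy sketch of the chained normalization (the scores below are
-illustrative values; the full ZT-Norm z-normalizes each cohort score with the
-impostor statistics of its own model, which the library function computes
-properly):
-
-.. code-block:: python
-
-   import numpy
-
-   def znorm(scores, impostor_scores):
-       # Z-Norm: scale by impostor-score statistics.
-       return (scores - impostor_scores.mean()) / impostor_scores.std()
-
-   impostor_scores = numpy.array([-3.2, -2.8, -3.5, -2.9])  # vs. claimed model
-   cohort_scores = numpy.array([-2.1, -2.6, -1.9, -2.4])    # probe vs. cohort
-   s_i = -1.5
-
-   # Z-normalize the raw score and the cohort scores, then T-normalize.
-   zs_i = znorm(s_i, impostor_scores)
-   z_cohort = znorm(cohort_scores, impostor_scores)
-   zts_i = (zs_i - z_cohort.mean()) / z_cohort.std()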
 
 .. Place here your external references
 .. include:: links.rst
diff --git a/doc/index.rst b/doc/index.rst
index e465e09..ac15c60 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -49,10 +49,6 @@ References
 
 .. [Glembek2009] Glembek, Ondrej, et al. "Comparison of scoring methods used in speaker recognition with joint factor analysis." Acoustics, Speech and Signal Processing, 2009. ICASSP 2009. IEEE International Conference on. IEEE, 2009.
 
-.. [Auckenthaler2000] Auckenthaler, Roland, Michael Carey, and Harvey Lloyd-Thomas. "Score normalization for text-independent speaker verification systems." Digital Signal Processing 10.1 (2000): 42-54.
-
-.. [Mariethoz2005] Mariethoz, Johnny, and Samy Bengio. "A unified framework for score normalization techniques applied to text-independent speaker verification." IEEE signal processing letters 12.7 (2005): 532-535.
-
 
 Indices and tables
 ------------------
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
new file mode 100644
index 0000000..c8f6698
--- /dev/null
+++ b/doc/nitpick-exceptions.txt
@@ -0,0 +1,3 @@
+py:class bob.learn.em.GMMStats.n
+py:class bob.learn.em.GMMStats.sum_px
+py:class bob.learn.em.GMMStats.sum_pxx
\ No newline at end of file
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 0209c75..ba31cb5 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -49,12 +49,8 @@ Functions
 .. autosummary::
 
   bob.learn.em.linear_scoring
-  bob.learn.em.tnorm
   bob.learn.em.train
   bob.learn.em.train_jfa
-  bob.learn.em.znorm
-  bob.learn.em.ztnorm
-  bob.learn.em.ztnorm_same_value
 
 Detailed Information
 --------------------
-- 
GitLab