From e5415b70478cb02bd0c66c43ca761a0a3c17dbf1 Mon Sep 17 00:00:00 2001
From: Amir MOHAMMADI <amir.mohammadi@idiap.ch>
Date: Wed, 25 Nov 2020 18:34:39 +0100
Subject: [PATCH] [docs] make sure docs are green

---
 bob/pad/face/database/database.py      |   7 +-
 bob/pad/face/database/replay_mobile.py |  23 +---
 doc/api.rst                            |   5 -
 doc/baselines.rst                      |   2 +-
 doc/index.rst                          |   2 -
 doc/mc_autoencoder_pad.rst             | 176 -------------------------
 doc/other_pad_algorithms.rst           |   2 +-
 doc/pulse.rst                          |  68 ----------
 doc/references.rst                     |  12 +-
 doc/resources.rst                      |  16 ---
 10 files changed, 15 insertions(+), 298 deletions(-)
 delete mode 100644 doc/mc_autoencoder_pad.rst
 delete mode 100644 doc/pulse.rst

diff --git a/bob/pad/face/database/database.py b/bob/pad/face/database/database.py
index 54b7b4ef..b26f0201 100644
--- a/bob/pad/face/database/database.py
+++ b/bob/pad/face/database/database.py
@@ -44,14 +44,9 @@ class VideoPadFile(PadFile):
     ):
         """Loads the video file and returns in a `bob.bio.video.FrameContainer`.
 
-        Parameters
-        ----------
-        frame_selector : :any:`bob.bio.video.FrameSelector`, optional
-            Which frames to select.
-
         Returns
         -------
-        :any:`bob.bio.video.FrameContainer`
-            The loaded frames inside a frame container.
+        :any:`bob.bio.video.VideoAsArray`
+            The loaded video.
         """
         path = self.make_path(self.original_directory, self.original_extension)
diff --git a/bob/pad/face/database/replay_mobile.py b/bob/pad/face/database/replay_mobile.py
index 67f9331f..55ae293a 100644
--- a/bob/pad/face/database/replay_mobile.py
+++ b/bob/pad/face/database/replay_mobile.py
@@ -54,28 +54,17 @@ class ReplayMobilePadFile(VideoPadFile):
         """
-        Overridden version of the load method defined in the ``VideoPadFile``.
+        Overridden version of the load method defined in ``VideoPadFile``.
 
-        Parameters
-        ----------
-        directory : str
-            String containing the path to the Replay-Mobile database.
-
-        extension : str
-            Extension of the video files in the Replay-Mobile database.
-
-        frame_selector : :any:`bob.bio.video.FrameSelector`
-            The frame selector to use.
-
         Returns
         -------
-        video_data : :any:`bob.bio.video.FrameContainer`
-            Video data stored in the FrameContainer, see
-            ``bob.bio.video.utils.FrameContainer`` for further details.
+        video_data : :any:`bob.bio.video.VideoAsArray`
+            Video data.
         """
-        directory = directory or self.original_directory
+        # TODO(amir): Handle loading with VideoAsArray and with a transform
+        # (sketched below), as some video files need to be flipped.
         video_data_array = self.f.load(
-            directory=directory, extension=extension)
+            directory=self.original_directory, extension=self.original_extension)
 
-        return frame_selector(video_data_array)
+        return video_data_array
 
     @property
     def annotations(self):
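
Note: the TODO above refers to switching to lazy video loading. A minimal sketch of
what that could look like follows, assuming ``bob.bio.video.VideoAsArray`` accepts a
``transform`` callable (an assumption to verify against the installed version; the
``load_video`` helper and the flipping rule are illustrative, not this package's API):

.. code-block:: python

    import numpy as np
    from bob.bio.video import VideoAsArray

    def load_video(path, should_flip=False):
        # Flip frames vertically only for the files known to need it.
        transform = (lambda frames: np.flip(frames, axis=-2)) if should_flip else None
        return VideoAsArray(path, selection_style="all", transform=transform)
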
diff --git a/doc/api.rst b/doc/api.rst
index cdd79492..f5cb39a2 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -49,11 +49,6 @@ Feature Extractors
 .. automodule:: bob.pad.face.extractor
 
 
-Matching Algorithms
-------------------------------
-
-.. automodule:: bob.pad.base.algorithm
-
 
 Utilities
 ---------
diff --git a/doc/baselines.rst b/doc/baselines.rst
index c75a8573..cc32faaf 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -40,7 +40,7 @@ Usually it is a good idea to have at least verbose level 2 (i.e., calling
 
 
 Database setups and baselines are encoded using
-:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
+``bob.bio.base.configuration-files``, all stored inside the package root, in
 the directory ``bob/pad/face/config``. Documentation for each resource
 is available on the section :ref:`bob.pad.face.resources`.
 
diff --git a/doc/index.rst b/doc/index.rst
index 20747b6c..c9355ac8 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -22,8 +22,6 @@ Users Guide
    installation
    baselines
    other_pad_algorithms
-   pulse
-   mc_autoencoder_pad
    references
    resources
    api
diff --git a/doc/mc_autoencoder_pad.rst b/doc/mc_autoencoder_pad.rst
deleted file mode 100644
index 4c40ed68..00000000
--- a/doc/mc_autoencoder_pad.rst
+++ /dev/null
@@ -1,176 +0,0 @@
-
-
-.. _bob.pad.face.mc_autoencoder_pad:
-
-
-=============================================
- Multi-channel face PAD using autoencoders
-=============================================
-
-This section explains how to run a complete face PAD experiment using a multi-channel autoencoder-based face PAD system, as well as the training work-flow.
-
-The system discussed in this section is introduced in the following publication [NGM19]_. It is **strongly recommended** to check the publication for a better
-understanding of the described work-flow.
-
-.. warning::
-
-   Algorithms introduced in this section might still be in the process of being published. Therefore, publishing
-   results from this section without the permission of the owner of the package is not allowed.
-   If you are planning to use the results from this section, please contact the owner of the package first.
-   Please check the ``setup.py`` for contact information.
-
-
-Running face PAD Experiments
-------------------------------
-
-Please refer to the :ref:`bob.pad.face.baselines` section of the current documentation for more details on how to run the face PAD experiments and set up the databases.
-
-
-Training multi-channel autoencoder-based face PAD system.
-----------------------------------------------------------------
-
-As introduced in the paper [NGM19]_, the training of the system is composed of three main steps, which are summarized in the following table:
-
-+----------------------+----------------------+---------------------+
-| Train step           | Training data        | DB, classes used    |
-+----------------------+----------------------+---------------------+
-| Train N AEs          | RGB face regions     | CelebA, BF          |
-+----------------------+----------------------+---------------------+
-| Fine-tune N AEs      | MC face regions      | WMCA, BF            |
-+----------------------+----------------------+---------------------+
-| Train an MLP         | MC latent encodings  | WMCA, BF and PA     |
-+----------------------+----------------------+---------------------+
-
-In the above table, **BF** and **PA** stand for samples from the **bona-fide** and **presentation attack** classes.
-
-As one can conclude from the table, the CelebA and WMCA databases must be installed before the training can take place.
-See :ref:`bob.pad.face.baselines` for databases installation details.
-
-
-1. Train N AEs on RGB data from CelebA
-===========================================
-
-In [NGM19]_ N autoencoders are trained, one for each facial region. Here, for explanatory purposes, a system containing **one** autoencoder is considered, thus N=1.
-This autoencoder is first pre-trained using RGB images of the entire face, cropped from the CelebA database.
-
-To prepare the training data, one can use the following command:
-
-
-.. code-block:: sh
-
-    ./bin/spoof.py \                                        # spoof.py is used to run the preprocessor
-    celeb-a \                                               # run for CelebA database
-    lbp-svm \                                               # required by spoof.py, but unused
-    --skip-extractor-training --skip-extraction --skip-projector-training --skip-projection --skip-score-computation --allow-missing-files \    # execute only preprocessing step
-    --grid idiap \                                          # use grid, only for Idiap users, remove otherwise
-    --groups train \                                        # preprocess only training set of CelebA
-    --preprocessor rgb-face-detect-check-quality-128x128 \  # preprocessor entry point
-    --sub-directory <PATH_TO_STORE_THE_RESULTS>             # define your path here
-
-Running the above command aligns and crops the RGB facial images from the training set of the CelebA database. Additionally, a quality assessment is applied to each facial image.
-More specifically, an eye detection algorithm is applied to the face images, ensuring that the deviation of the eye coordinates from the expected positions is not significant.
-See [NGM19]_ for more details.
-
-Once the above script has completed, the data suitable for autoencoder training is located in the folder ``<PATH_TO_STORE_THE_RESULTS>/preprocessed/``. Now the autoencoder can be trained.
-The training procedure is explained in the **Convolutional autoencoder** section in the documentation of the ``bob.learn.pytorch`` package.
-
-.. note::
-
-  The functionality of ``bob.pad.face`` is used to compute the training data. Install and follow the documentation of ``bob.learn.pytorch`` to train the autoencoders. This functional decoupling avoids making
-  ``bob.pad.face`` depend on **PyTorch**.
-
-
-.. include:: links.rst
-
-
-2. Fine-tune N AEs on multi-channel data from WMCA (legacy name BATL) database
-=================================================================================
-
-Following the training procedure of [NGM19]_, the autoencoders are next fine-tuned on the multi-channel (**MC**) data from WMCA.
-In this example, the MC training data is a stack of gray-scale, NIR, and depth (BW-NIR-D) facial images.
-
-To prepare the training data, one can use the following command:
-
-
-.. code-block:: sh
-
-    ./bin/spoof.py \                                                    # spoof.py is used to run the preprocessor
-    batl-db-rgb-ir-d-grandtest \                                        # WMCA database instance allowing to load RGB-NIR-D channels
-    lbp-svm \                                                           # required by spoof.py, but unused
-    --skip-extractor-training --skip-extraction --skip-projector-training --skip-projection --skip-score-computation --allow-missing-files \    # execute only preprocessing step
-    --grid idiap \                                                      # use grid, only for Idiap users, remove otherwise
-    --preprocessor video-face-crop-align-bw-ir-d-channels-3x128x128 \   # preprocessor entry point
-    --sub-directory <PATH_TO_STORE_THE_RESULTS>                         # define your path here
-
-Once the above script has completed, the MC data suitable for autoencoder fine-tuning is located in the folder ``<PATH_TO_STORE_THE_RESULTS>/preprocessed/``.
-Now the autoencoder can be fine-tuned. Again, the fine-tuning procedure is explained in the **Convolutional autoencoder** section in the documentation of the ``bob.learn.pytorch`` package.
-
-
-3. Train an MLP using multi-channel autoencoder latent embeddings from WMCA
-=================================================================================
-
-Once the autoencoders are pre-trained and fine-tuned, the latent embeddings can be computed by passing the multi-channel (MC) BW-NIR-D images from the WMCA database through the encoder; see [NGM19]_ for more details. These latent embeddings (feature vectors) are next used to train an MLP classifying input MC samples as bona-fide or attack.
-
-The first step is to register an extractor computing the latent embeddings. To do so, a file defining an instance of the **MultiNetPatchExtractor** class must be created:
-
-.. code-block:: python
-
-    from bob.ip.pytorch_extractor import MultiNetPatchExtractor
-    from bob.bio.video.utils import FrameSelector
-    from bob.bio.video.extractor import Wrapper
-    from torchvision import transforms
-    from bob.learn.pytorch.architectures import ConvAutoencoder
-
-    # transform to be applied to input patches:
-    TRANSFORM = transforms.Compose([transforms.ToTensor(),
-                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
-                                    ])
-
-    # use latent embeddings in the feature extractor:
-    NETWORK_AE = ConvAutoencoder(return_latent_embedding=True)
-
-    # use a specific/unique model for each patch. Models pre-trained on CelebA and fine-tuned on BATL:
-    MODEL_FILE = ["SUBSTITUTE_THE_PATH_TO_PRETRAINED_AE_MODEL"]
-
-    PATCHES_NUM = [0]  # patches to be used in the feature extraction process
-    PATCH_RESHAPE_PARAMETERS = [3, 128, 128]  # reshape vectorized patches to these dimensions before passing them to the network
-
-    _image_extractor = MultiNetPatchExtractor(transform=TRANSFORM,
-                                              network=NETWORK_AE,
-                                              model_file=MODEL_FILE,
-                                              patches_num=PATCHES_NUM,
-                                              patch_reshape_parameters=PATCH_RESHAPE_PARAMETERS,
-                                              color_input_flag=True,
-                                              urls=None,
-                                              archive_extension='.tar.gz')
-
-    extractor = Wrapper(extractor=_image_extractor,
-                        frame_selector=FrameSelector(selection_style="all"))
-
-Suppose the above configuration file is located in the ``bob.pad.face`` package at ``bob/pad/face/config/extractor/multi_net_patch_extractor.py``. It can then be registered in ``setup.py`` by adding the following string to the list of extractors registered under ``bob.pad.extractor``:
-
-.. code-block:: python
-
-    'multi-net-patch-extractor = bob.pad.face.config.extractor.multi_net_patch_extractor:extractor',
-
-
-Once the extractor is registered, the latent embeddings (encoder outputs) can be computed using the following command:
-
-.. code-block:: sh
-
-    ./bin/spoof.py \                                                        # spoof.py is used to extract embeddings
-    batl-db-rgb-ir-d-grandtest \                                            # WMCA database instance allowing to load RGB-NIR-D channels
-    lbp-svm \                                                               # required by spoof.py, but unused
-    --preprocessor video-face-crop-align-bw-ir-d-channels-3x128x128-vect \  # entry point defining preprocessor
-    --extractor multi-net-patch-extractor \                                 # entry point defining extractor
-    --skip-projector-training --skip-projection --skip-score-computation --allow-missing-files \  # execute preprocessing and extraction only
-    --grid idiap \                                                          # use grid, for Idiap users only, remove otherwise
-    --sub-directory <PATH_TO_STORE_THE_RESULTS>                             # define your path here
-
-.. note::
-
-  Make sure the ``bob.learn.pytorch`` and ``bob.ip.pytorch_extractor`` packages are installed before running the above command.
-
-Once the above script has completed, the MC latent encodings to be used for MLP training are located in the folder ``<PATH_TO_STORE_THE_RESULTS>/extracted/``.
-Again, the training procedure is explained in the **MLP** section in the documentation of the ``bob.learn.pytorch`` package.
-
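Note: for reference, a sketch of how the entry point described in the deleted guide
would be registered in ``setup.py``, using standard setuptools syntax (the module
path is the hypothetical one used above):

.. code-block:: python

    from setuptools import setup

    setup(
        name="bob.pad.face",
        entry_points={
            "bob.pad.extractor": [
                "multi-net-patch-extractor = "
                "bob.pad.face.config.extractor.multi_net_patch_extractor:extractor",
            ],
        },
    )
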
diff --git a/doc/other_pad_algorithms.rst b/doc/other_pad_algorithms.rst
index 39e6b918..a32ca601 100644
--- a/doc/other_pad_algorithms.rst
+++ b/doc/other_pad_algorithms.rst
@@ -47,7 +47,7 @@ Usually it is a good idea to have at least verbose level 2 (i.e., calling
 
 
 Database setups and face PAD algorithms are encoded using
-:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
+``bob.bio.base.configuration-files``, all stored inside the package root, in
 the directory ``bob/pad/face/config``. Documentation for each resource
 is available on the section :ref:`bob.pad.face.resources`.
 
diff --git a/doc/pulse.rst b/doc/pulse.rst
deleted file mode 100644
index 1e9ddbe9..00000000
--- a/doc/pulse.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-
-.. _bob.pad.face.pulse:
-
-===============
-Pulse-based PAD
-===============
-
-In this section, we briefly describe our work on face
-presentation attack detection using the blood volume pulse,
-inferred from remote photoplethysmography.
-
-The basic idea here is to retrieve pulse signals from
-face video sequences, derive features from their frequency
-spectrum, and then learn a classifier to discriminate
-*bonafide* attempts from presentation attacks.
-
-For this purpose, we describe both :py:class:`bob.bio.base.preprocessor.Preprocessor` and
-:py:class:`bob.bio.base.extractor.Extractor` specifically dedicated to this task.
-
-Preprocessors: Pulse Extraction
--------------------------------
-
-Preprocessors basically extract pulse signals from face video
-sequences. They heavily rely on what has been done in ``bob.rppg.base``,
-so you may want to have a look at `its documentation <https://www.idiap.ch/software/bob/docs/bob/bob.rppg.base/master/index.html>`_.
-
-In this package, 4 preprocessors have been implemented:
-
-  1. :py:class:`bob.pad.face.preprocessor.LiPulseExtraction` described in [Li_ICPR_2016]_.
-  
-  2. :py:class:`bob.pad.face.preprocessor.Chrom` described in [CHROM]_.
-
-  3. :py:class:`bob.pad.face.preprocessor.SSR` described in [SSR]_.
-
-  4. :py:class:`bob.pad.face.preprocessor.PPGSecure` described in [NOWARA]_.
-
-
-Extractors: Features from Pulses
---------------------------------
-
-Extractors compute and retrieve features from the pulse signal. All
-implemented extractors act on the frequency spectrum of the pulse signal.
-
-In this package, 3 extractors have been implemented:
-
-  1. :py:class:`bob.pad.face.extractor.LiSpectralFeatures` described in [Li_ICPR_2016]_.
-  
-  2. :py:class:`bob.pad.face.extractor.PPGSecure` described in [NOWARA]_.
-
-  3. :py:class:`bob.pad.face.extractor.LTSS` described in [LTSS]_.
-
-
-
-References
-----------
-
-
-.. [Li_ICPR_2016] *X. Li, J, Komulainen, G. Zhao, P-C Yuen and M. Pietikäinen*
-  **Generalized face anti-spoofing by detecting pulse from face videos**,
-  Intl Conf on Pattern Recognition (ICPR), 2016
-
-.. [CHROM] *de Haan, G. & Jeanne, V*. **Robust Pulse Rate from Chrominance based rPPG**, IEEE Transactions on Biomedical Engineering, 2013. `pdf <http://www.es.ele.tue.nl/~dehaan/pdf/169_ChrominanceBasedPPG.pdf>`__
-
-.. [SSR] *Wang, W., Stuijk, S. and de Haan, G*. **A Novel Algorithm for Remote Photoplethysmography: Spatial Subspace Rotation**, IEEE Trans. on Biomedical Engineering, 2015
-
-.. [NOWARA] *E. M. Nowara, A. Sabharwal, A. Veeraraghavan*. **PPGSecure: Biometric Presentation Attack Detection Using Photopletysmograms**, IEEE International Conference on Automatic Face & Gesture Recognition, 2017
-
-.. [LTSS] *H .Muckenhirn, P. Korshunov, M. Magimai-Doss, S Marcel*. **Long-Term Spectral Statistics for Voice Presentation Attack Detection**, IEEE Trans. On Audio, Speech and Language Processing, 2017
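
Note: a minimal sketch of the pipeline the deleted page described, chaining a pulse
preprocessor with a spectral-feature extractor. The class names come from the page
above; the no-argument constructors and the commented call signatures are assumptions
to check against the actual classes:

.. code-block:: python

    from bob.pad.face.preprocessor import Chrom
    from bob.pad.face.extractor import LiSpectralFeatures

    preprocessor = Chrom()             # pulse signal from a face video (CHROM method)
    extractor = LiSpectralFeatures()   # frequency-domain features from the pulse

    # pulse = preprocessor(video, annotations=annotations)
    # features = extractor(pulse)
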
diff --git a/doc/references.rst b/doc/references.rst
index 6276842f..6db73d61 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -7,17 +7,17 @@ References
 .. [CAM12]  *I. Chingovska, A. Anjos, and S. Marcel*, **On the effectiveness of local binary patterns in face anti-spoofing**,
             in: Biometrics Special Interest Group (BIOSIG), 2012 BIOSIG - Proceedings of the International Conference of the, 2012, pp. 1-7.
 
-.. [WHJ15]  *Di Wen, Member, IEEE, Hu Han, Member, IEEE and Anil K. Jain, Fellow, IEEE*, **Face Spoof Detection with Image Distortion Analysis**,
-            in: IEEE Transactions on Information Forensics and Security, 2015.
+.. .. [WHJ15]  *Di Wen, Member, IEEE, Hu Han, Member, IEEE and Anil K. Jain, Fellow, IEEE*, **Face Spoof Detection with Image Distortion Analysis**,
+..             in: IEEE Transactions on Information Forensics and Security, 2015.
 
 .. [CBVM16] *A. Costa-Pazo, S. Bhattacharjee, E. Vazquez-Fernandez and S. Marcel*, **The Replay-Mobile Face Presentation-Attack Database**,
             in: Biometrics Special Interest Group (BIOSIG), 2016 BIOSIG - Proceedings of the International Conference of the, 2016, pp. 1-7.
 
-.. [AM11] *A. Anjos and S. Marcel*, **Counter-measures to photo attacks in face recognition: A public database and a baseline**,
-          in: 2011 International Joint Conference on Biometrics (IJCB), Washington, DC, 2011, pp. 1-7.
+.. .. [AM11] *A. Anjos and S. Marcel*, **Counter-measures to photo attacks in face recognition: A public database and a baseline**,
+..           in: 2011 International Joint Conference on Biometrics (IJCB), Washington, DC, 2011, pp. 1-7.
 
 .. [CDSR17] *C. Chen, A. Dantcheva, T. Swearingen, A. Ross*, **Spoofing Faces Using Makeup: An Investigative Study**,
             in: Proc. of 3rd IEEE International Conference on Identity, Security and Behavior Analysis (ISBA), (New Delhi, India), February 2017.
 
-.. [NGM19] *O. Nikisins, A. George, S. Marcel*, **Domain Adaptation in Multi-Channel Autoencoder based Features for Robust Face Anti-Spoofing**,
-            in: Submitted to: 2019 International Conference on Biometrics (ICB), 2019.
+.. .. [NGM19] *O. Nikisins, A. George, S. Marcel*, **Domain Adaptation in Multi-Channel Autoencoder based Features for Robust Face Anti-Spoofing**,
+..             in: Submitted to: 2019 International Conference on Biometrics (ICB), 2019.
diff --git a/doc/resources.rst b/doc/resources.rst
index dd06cc17..766b3cf3 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -71,27 +71,13 @@ The configuration files contain at least the following arguments of the ``spoof.
 LBP features of facial region + SVM for REPLAY-ATTACK
 ================================================================================
 
-.. automodule:: bob.pad.face.config.lbp_svm
-   :members:
-
 
 .. _bob.pad.face.resources.face_pad.qm_svm_replayattack:
 
 Image Quality Measures as features of facial region + SVM for REPLAY-ATTACK
 ================================================================================
 
-.. automodule:: bob.pad.face.config.qm_svm
-   :members:
-
-
 
-.. _bob.pad.face.resources.face_pad.qm_lr:
-
-Image Quality Measures as features of facial region + Logistic Regression
-============================================================================================================================
-
-.. automodule:: bob.pad.face.config.qm_lr
-   :members:
 
 
 .. _bob.pad.face.resources.face_pad.qm_one_class_gmm:
@@ -99,6 +85,4 @@ Image Quality Measures as features of facial region + Logistic Regression
 Image Quality Measures as features of facial region + GMM-based one-class classifier (anomaly detector)
 ============================================================================================================================
 
-.. automodule:: bob.pad.face.config.qm_one_class_gmm
-   :members:
 
-- 
GitLab