diff --git a/bob/pad/face/extractor/LTSS.py b/bob/pad/face/extractor/LTSS.py
index 3b3555f11a07774066f0d28ee6437cf90d717875..c1077a8888af2d87088d2d900b1ee5731f49decf 100644
--- a/bob/pad/face/extractor/LTSS.py
+++ b/bob/pad/face/extractor/LTSS.py
@@ -15,29 +15,23 @@ class LTSS(Extractor, object):
   
   The features are described in the following article:
   
-    @Article {
-    Author         = {Muckenhirn, Hannah and Korshunov, Pavel and
-                     Magimai-Doss, Mathew and Marcel, Sebastien }
-    Title          = {Long-Term Spectral Statistics for Voice Presentation
-                     Attack Detection},
-    Journal        = {IEEE/ACM Trans. Audio, Speech and Lang. Proc.},
-    Volume         = {25},
-    Number         = {11},
-    Pages          = {2098--2111},
-    year           = 2017
-    }
+  H. Muckenhirn, P. Korshunov, M. Magimai-Doss and S. Marcel,
+  "Long-Term Spectral Statistics for Voice Presentation Attack Detection",
+  IEEE/ACM Trans. Audio, Speech and Language Processing, vol. 25, no. 11, pp. 2098-2111, 2017.
 
   Attributes
   ----------
-  framerate: int
+  window_size : :obj:`int`
+    The size of the window where FFT is computed
+  framerate : :obj:`int`
     The sampling frequency of the signal (i.e the framerate ...) 
-  nfft: int
+  nfft : :obj:`int`
     Number of points to compute the FFT
-  debug: bool
+  debug : :obj:`bool`
     Plot stuff
-  concat: bool
+  concat : :obj:`bool`
     Flag if you would like to concatenate features from the 3 color channels
-  time: int
+  time : :obj:`int`
     The length of the signal to consider (in seconds)
   
   """
@@ -46,19 +40,19 @@ class LTSS(Extractor, object):
 
     Parameters
     ----------
-    window_size: int
+    window_size : :obj:`int`
       The size of the window where FFT is computed
-    framerate: int
+    framerate : :obj:`int`
       The sampling frequency of the signal (i.e the framerate ...) 
-    nfft: int
+    nfft : :obj:`int`
       Number of points to compute the FFT
-    concat: bool
-      Flag if you would like to concatenate features from the 3 color channels
-    debug: bool
+    debug : :obj:`bool`
       Plot stuff
-    time: int
+    concat : :obj:`bool`
+      Flag if you would like to concatenate features from the 3 color channels
+    time : :obj:`int`
       The length of the signal to consider (in seconds)
-
+    
     """
     super(LTSS, self).__init__()
     self.framerate = framerate
@@ -73,12 +67,12 @@ class LTSS(Extractor, object):
 
     Parameters
     ----------
-    signal: numpy.ndarray
+    signal : :py:class:`numpy.ndarray`
       The signal
 
     Returns
     -------
-    ltss: numpy.ndarray
+    :py:class:`numpy.ndarray`
       The spectral statistics of the signal.
 
     """
diff --git a/bob/pad/face/extractor/LiFeatures.py b/bob/pad/face/extractor/LiFeatures.py
index 9fbd4c48d196c6675eaf9c1471f0ab2eef772896..0e25a0b04bf3cc78ba3bb34ab58b25c7a88cec8c 100644
--- a/bob/pad/face/extractor/LiFeatures.py
+++ b/bob/pad/face/extractor/LiFeatures.py
@@ -13,43 +13,34 @@ class LiFeatures(Extractor, object):
   """Compute features from pulse signals in the three color channels.
 
   The features are described in the following article:
-  
-    @InProceedings{li-icpr-2016,
-    Author         = {Li, X. and Komulainen, J. and Zhao, G. and Yuen, P-C.
-                     and Pietik\"ainen, M.},
-    Title          = {Generalized {F}ace {A}nti-spoofing by {D}etecting
-                     {P}ulse {F}rom {F}ace {V}ideos},
-    BookTitle      = {Intl {C}onf. on {P}attern {R}ecognition ({ICPR})},
-    Pages          = {4244-4249},
-    year           = 2016
-    }
-
+ 
+  X. Li, J. Komulainen, G. Zhao, P.-C. Yuen and M. Pietikainen,
+  "Generalized Face Anti-spoofing by Detecting Pulse From Face Videos",
+  Intl Conf. on Pattern Recognition (ICPR), pp. 4244-4249, 2016.
 
   Attributes
   ----------
-  framerate: int
+  framerate : :obj:`int`
     The sampling frequency of the signal (i.e the framerate ...) 
-  nfft: int
+  nfft : :obj:`int`
     Number of points to compute the FFT
-  debug: bool
+  debug : :obj:`bool`
     Plot stuff
   
   """
-
   def __init__(self, framerate=25, nfft=512, debug=False, **kwargs):
     """Init function
 
     Parameters
     ----------
-    framerate: int
+    framerate : :obj:`int`
       The sampling frequency of the signal (i.e the framerate ...) 
-    nfft: int
+    nfft : :obj:`int`
       Number of points to compute the FFT
-    debug: bool
+    debug : :obj:`bool`
       Plot stuff
-    
+  
     """
-
     super(LiFeatures, self).__init__()
     self.framerate = framerate
     self.nfft = nfft
@@ -61,13 +52,13 @@ class LiFeatures(Extractor, object):
 
     Parameters
     ----------
-    signal: numpy.ndarray 
+    signal : :py:class:`numpy.ndarray` 
       The signal
 
     Returns
-    -------
-    feature: numpy.ndarray 
-     the computed features 
+    -------
+    :py:class:`numpy.ndarray`
+      The computed features
 
     """
     # sanity check
diff --git a/bob/pad/face/extractor/PPGSecure.py b/bob/pad/face/extractor/PPGSecure.py
index bf4a8ddd0114014dd362a6af67d8b6612554a378..1865ec69967f4fed4cff73289d153f3258317d57 100644
--- a/bob/pad/face/extractor/PPGSecure.py
+++ b/bob/pad/face/extractor/PPGSecure.py
@@ -15,27 +15,17 @@ class PPGSecure(Extractor, object):
   The feature are extracted according to what is described in 
   the following article:
 
-    @InProceedings{nowara-afgr-2017,
-      Author         = {E. M. Nowara and A. Sabharwal and A. Veeraraghavan},
-      Title          = {P{PGS}ecure: {B}iometric {P}resentation {A}ttack
-                       {D}etection {U}sing {P}hotopletysmograms},
-      BookTitle      = {I{EEE} {I}ntl {C}onf on {A}utomatic {F}ace and
-                       {G}esture {R}ecognition ({AFGR})},
-      Volume         = {},
-      Number         = {},
-      Pages          = {56-62},
-      issn           = {},
-      seq-number     = {69},
-      year           = 2017
-    }
+  E. M. Nowara, A. Sabharwal and A. Veeraraghavan,
+  "PPGSecure: Biometric Presentation Attack Detection Using Photoplethysmograms",
+  IEEE Intl Conf. on Automatic Face and Gesture Recognition (AFGR), pp. 56-62, 2017.
 
   Attributes
   ----------
-  framerate: int
+  framerate : :obj:`int`
     The sampling frequency of the signal (i.e the framerate ...) 
-  nfft: int
+  nfft : :obj:`int`
     Number of points to compute the FFT
-  debug: bool
+  debug : :obj:`bool`
     Plot stuff
   
   """
@@ -44,11 +34,11 @@ class PPGSecure(Extractor, object):
 
     Parameters
     ----------
-    framerate: int
+    framerate : :obj:`int`
       The sampling frequency of the signal (i.e the framerate ...) 
-    nfft: int
+    nfft : :obj:`int`
       Number of points to compute the FFT
-    debug: bool
+    debug : :obj:`bool`
       Plot stuff
     
     """
@@ -63,12 +53,12 @@ class PPGSecure(Extractor, object):
 
     Parameters
     ----------
-    signal: numpy.ndarray 
+    signal : :py:class:`numpy.ndarray` 
       The signal
 
     Returns
     -------
-    fft: numpy.ndarray 
+    :py:class:`numpy.ndarray` 
      the computed FFT features 
     
     """
diff --git a/bob/pad/face/preprocessor/Chrom.py b/bob/pad/face/preprocessor/Chrom.py
index ec210f96cf47f5f2a41fb98ae0e8b814fc1fedc3..987492ea9c1d5423c4403aa7e6bc06f48def87be 100644
--- a/bob/pad/face/preprocessor/Chrom.py
+++ b/bob/pad/face/preprocessor/Chrom.py
@@ -25,26 +25,26 @@ class Chrom(Preprocessor, object):
   
   The pulse is extracted according to the CHROM algorithm.
 
-  See the documentation of :py:mod:`bob.rppg.base`
+  See the documentation of `bob.rppg.base`
   
   Attributes
   ----------
-  skin_threshold: float
+  skin_threshold : :obj:`float`
     The threshold for skin color probability
-  skin_init: bool
+  skin_init : :obj:`bool`
     If you want to re-initailize the skin color distribution at each frame
-  framerate: int
+  framerate : :obj:`int`
     The framerate of the video sequence.
-  bp_order: int
+  bp_order : :obj:`int`
     The order of the bandpass filter
-  window_size: int
+  window_size : :obj:`int`
     The size of the window in the overlap-add procedure.
-  motion: float          
+  motion : :obj:`float`
     The percentage of frames you want to select where the 
     signal is "stable". 0 mean all the sequence.
-  debug: boolean          
+  debug : :obj:`bool`          
     Plot some stuff 
-  skin_filter: :py:class:`bob.ip.skincolorfilter.SkinColorFilter` 
+  skin_filter : :py:class:`bob.ip.skincolorfilter.SkinColorFilter` 
     The skin color filter 
 
   """
@@ -54,24 +54,23 @@ class Chrom(Preprocessor, object):
 
     Parameters
     ----------
-    skin_threshold: float
+    skin_threshold : :obj:`float`
       The threshold for skin color probability
-    skin_init: bool
+    skin_init : :obj:`bool`
       If you want to re-initailize the skin color distribution at each frame
-    framerate: int
+    framerate : :obj:`int`
       The framerate of the video sequence.
-    bp_order: int
+    bp_order : :obj:`int`
       The order of the bandpass filter
-    window_size: int
+    window_size : :obj:`int`
       The size of the window in the overlap-add procedure.
-    motion: float          
+    motion : :obj:`float`
       The percentage of frames you want to select where the 
       signal is "stable". 0 mean all the sequence.
-    debug: boolean          
+    debug : :obj:`bool`          
       Plot some stuff 
     
     """
-
     super(Chrom, self).__init__()
     self.skin_threshold = skin_threshold
     self.skin_init = skin_init
@@ -87,14 +86,14 @@ class Chrom(Preprocessor, object):
 
     Parameters
     ----------
-    frames: :py:class:`bob.bio.video.utils.FrameContainer`
+    frames : :py:class:`bob.bio.video.utils.FrameContainer`
       video data 
-    annotations: :py:class:`dict`
+    annotations : :py:class:`dict`
       the face bounding box, as follows: ``{'topleft': (row, col), 'bottomright': (row, col)}``
 
     Returns
     -------
-    pulse: numpy.ndarray 
+    :py:class:`numpy.ndarray`
       The pulse signal
     
     """
@@ -239,4 +238,3 @@ class Chrom(Preprocessor, object):
       pyplot.show()
 
     return pulse
-
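
Reviewer note: "the CHROM algorithm" may be opaque to readers who do not know de Haan and Jeanne's method, so here is a minimal sketch of the core chrominance projection, assuming a (n_frames, 3) array of per-frame mean RGB values over the detected skin pixels. The skin-color filtering, band-pass filtering and overlap-add windowing that the class performs are deliberately left out; as documented above, the class itself is called as `pulse = chrom(frames, annotations)` with annotations of the form {'topleft': (row, col), 'bottomright': (row, col)}.

    import numpy

    def chrom_projection_sketch(rgb):
        # Hypothetical helper, not the package API. rgb: (n_frames, 3) array of
        # per-frame mean skin-pixel color values.
        norm = rgb / rgb.mean(axis=0)                          # normalize each channel by its temporal mean
        x = 3.0 * norm[:, 0] - 2.0 * norm[:, 1]                # chrominance signal X
        y = 1.5 * norm[:, 0] + norm[:, 1] - 1.5 * norm[:, 2]   # chrominance signal Y
        alpha = numpy.std(x) / numpy.std(y)                    # tuning factor from the CHROM paper
        return x - alpha * y                                   # pulse estimate
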
diff --git a/bob/pad/face/preprocessor/Li.py b/bob/pad/face/preprocessor/Li.py
index 143497cc5bc679d0b4b696f3b25c4435d50d3a84..5f54a1bb5e14b7a690262bb5a4751b29028d6518 100644
--- a/bob/pad/face/preprocessor/Li.py
+++ b/bob/pad/face/preprocessor/Li.py
@@ -22,7 +22,7 @@ class Li(Preprocessor):
   
   The pulse is extracted according to Li's CVPR 14 algorithm.
 
-  See the documentation of :py:mod:`bob.rppg.base`
+  See the documentation of `bob.rppg.base`
 
   Note that this is a simplified version of the original 
   pulse extraction algorithms (mask detection in each 
@@ -31,17 +31,17 @@ class Li(Preprocessor):
 
   Attributes
   ----------
-  indent: int
+  indent : :obj:`int`
     Indent (in percent of the face width) to apply to keypoints to get the mask.
-  lamda_: int
+  lamda_ : :obj:`int`
     the lamba value of the detrend filter
-  window: int
+  window : :obj:`int`
     The size of the window of the average filter 
-  framerate: int
+  framerate : :obj:`int`
     The framerate of the video sequence.
-  bp_order: int
+  bp_order : :obj:`int`
     The order of the bandpass filter
-  debug: bool
+  debug : :obj:`bool`
     Plot some stuff 
   
   """
@@ -51,19 +51,18 @@ class Li(Preprocessor):
 
     Parameters
     ----------
-    indent: int
+    indent : :obj:`int`
       Indent (in percent of the face width) to apply to keypoints to get the mask.
-    lamda_: int
+    lamda_ : :obj:`int`
       the lamba value of the detrend filter
-    window: int
+    window : :obj:`int`
       The size of the window of the average filter 
-    framerate: int
+    framerate : :obj:`int`
       The framerate of the video sequence.
-    bp_order: int
+    bp_order : :obj:`int`
       The order of the bandpass filter
-    debug: bool
+    debug : :obj:`bool`
       Plot some stuff 
-    
 
     """
     super(Li, self).__init__(**kwargs)
@@ -86,7 +85,7 @@ class Li(Preprocessor):
 
     Returns
     -------
-    pulse: numpy.ndarray 
+    :py:class:`numpy.ndarray`
       The pulse signal, in each color channel (RGB)  
     
     """
diff --git a/bob/pad/face/preprocessor/PPGSecure.py b/bob/pad/face/preprocessor/PPGSecure.py
index 2a09fa761a64187d4170aedd86d68c31817b0930..36b201d1554368f9911a27f96776d88a067015b0 100644
--- a/bob/pad/face/preprocessor/PPGSecure.py
+++ b/bob/pad/face/preprocessor/PPGSecure.py
@@ -22,41 +22,30 @@ class PPGSecure(Preprocessor):
   The pulse is extracted according to what is described in 
   the following article:
 
-    @InProceedings{nowara-afgr-2017,
-      Author         = {E. M. Nowara and A. Sabharwal and A. Veeraraghavan},
-      Title          = {P{PGS}ecure: {B}iometric {P}resentation {A}ttack
-                       {D}etection {U}sing {P}hotopletysmograms},
-      BookTitle      = {I{EEE} {I}ntl {C}onf on {A}utomatic {F}ace and
-                       {G}esture {R}ecognition ({AFGR})},
-      Volume         = {},
-      Number         = {},
-      Pages          = {56-62},
-      issn           = {},
-      seq-number     = {69},
-      year           = 2017
-    }
+  E. M. Nowara, A. Sabharwal and A. Veeraraghavan,
+  "PPGSecure: Biometric Presentation Attack Detection Using Photoplethysmograms",
+  IEEE Intl Conf. on Automatic Face and Gesture Recognition (AFGR), pp. 56-62, 2017.
 
   Attributes
   ----------
-  framerate: int
+  framerate : :obj:`int`
     The framerate of the video sequence.
-  bp_order: int
+  bp_order : :obj:`int` 
     The order of the bandpass filter
-  debug: bool
+  debug : :obj:`bool` 
     Plot some stuff 
   
   """
- 
   def __init__(self, framerate=25, bp_order=32, debug=False, **kwargs):
     """Init function
 
     Parameters
     ----------
-    framerate: int
+    framerate : :obj:`int`
       The framerate of the video sequence.
-    bp_order: int
+    bp_order : :obj:`int` 
       The order of the bandpass filter
-    debug: bool
+    debug : :obj:`bool` 
       Plot some stuff 
     
     """
@@ -83,7 +72,7 @@ class PPGSecure(Preprocessor):
 
     Returns
     -------
-    pulse: numpy.ndarray 
+    :py:class:`numpy.ndarray`
       The pulse signal, in each color channel (RGB)  
     
     """
diff --git a/bob/pad/face/preprocessor/SSR.py b/bob/pad/face/preprocessor/SSR.py
index edb3ed91adc59833fd3524f3cd9f5c713e537d8a..4c5f702acb4299d1a01cb73b05d3311aa2d51ac7 100644
--- a/bob/pad/face/preprocessor/SSR.py
+++ b/bob/pad/face/preprocessor/SSR.py
@@ -23,7 +23,7 @@ class SSR(Preprocessor, object):
   
   The pulse is extracted according to the SSR algorithm.
 
-  See the documentation of :py:mod:`bob.rppg.base`
+  See the documentation of `bob.rppg.base`
 
   Attributes
   ----------
@@ -35,7 +35,7 @@ class SSR(Preprocessor, object):
     The temporal stride. 
   debug: boolean          
     Plot some stuff 
-  skin_filter: :py:class:`bob.ip.skincolorfilter.SkinColorFilter` 
+  skin_filter: :py:class:`bob.ip.skincolorfilter.SkinColorFilter`
     The skin color filter 
 
   """