From 6751c7be93c9692ea1e893a8dade952694da96a3 Mon Sep 17 00:00:00 2001
From: Guillaume HEUSCH <guillaume.heusch@idiap.ch>
Date: Thu, 28 Jun 2018 10:48:05 +0200
Subject: [PATCH] [preprocessor] fixed stuff (mainly docstrings) in pulse
 preprocessors

---
 bob/pad/face/preprocessor/Chrom.py     | 84 +++++++++++++++----------
 bob/pad/face/preprocessor/Li.py        | 87 ++++++++++++++++----------
 bob/pad/face/preprocessor/PPGSecure.py | 81 ++++++++++++++----------
 bob/pad/face/preprocessor/SSR.py       | 66 +++++++++++--------
 4 files changed, 194 insertions(+), 124 deletions(-)

diff --git a/bob/pad/face/preprocessor/Chrom.py b/bob/pad/face/preprocessor/Chrom.py
index 3bd4b1ce..ec210f96 100644
--- a/bob/pad/face/preprocessor/Chrom.py
+++ b/bob/pad/face/preprocessor/Chrom.py
@@ -1,7 +1,10 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
 import numpy
 
-import logging
-logger = logging.getLogger("bob.pad.face")
+from bob.core.log import setup
+logger = setup("bob.pad.face")
 
 from bob.bio.base.preprocessor import Preprocessor
 
@@ -18,38 +21,58 @@ from bob.rppg.chrom.extract_utils import select_stable_frames
 
 
 class Chrom(Preprocessor, object):
-  """
-  This class extract the pulse signal from a video sequence.
+  """Extract pulse signal from a video sequence.
+  
   The pulse is extracted according to the CHROM algorithm.
 
-  **Parameters:**
-
+  See the documentation of :py:mod:`bob.rppg.base`
+  
+  Attributes
+  ----------
   skin_threshold: float
     The threshold for skin color probability
-
   skin_init: bool
     If you want to re-initailize the skin color distribution at each frame
-
   framerate: int
     The framerate of the video sequence.
-
   bp_order: int
     The order of the bandpass filter
-
   window_size: int
     The size of the window in the overlap-add procedure.
-
   motion: float          
     The percentage of frames you want to select where the 
     signal is "stable". 0 mean all the sequence.
-
   debug: boolean          
     Plot some stuff 
+  skin_filter: :py:class:`bob.ip.skincolorfilter.SkinColorFilter` 
+    The skin color filter 
+
   """
+  
   def __init__(self, skin_threshold=0.5, skin_init=False, framerate=25, bp_order=32, window_size=0, motion=0.0, debug=False, **kwargs):
+    """Init function
+
+    Parameters
+    ----------
+    skin_threshold: float
+      The threshold for skin color probability
+    skin_init: bool
+      If you want to re-initialize the skin color distribution at each frame
+    framerate: int
+      The framerate of the video sequence.
+    bp_order: int
+      The order of the bandpass filter
+    window_size: int
+      The size of the window in the overlap-add procedure.
+    motion: float          
+      The percentage of frames you want to select where the 
+      signal is "stable". 0 means the whole sequence.
+    debug: boolean          
+      Plot some stuff 
+    
+    """
 
     super(Chrom, self).__init__()
-
     self.skin_threshold = skin_threshold
     self.skin_init = skin_init
     self.framerate = framerate
@@ -57,35 +80,32 @@ class Chrom(Preprocessor, object):
     self.window_size = window_size
     self.motion = motion
     self.debug = debug
-
     self.skin_filter = bob.ip.skincolorfilter.SkinColorFilter()
 
   def __call__(self, frames, annotations):
-    """
-    Compute the pulse signal for the given frame sequence
-
-    **Parameters:**
-
-    frames: :pyclass: `bob.bio.video.utils.FrameContainer`
-      Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-      for further details.
+    """Computes the pulse signal for the given frame sequence
 
+    Parameters
+    ----------
+    frames: :py:class:`bob.bio.video.utils.FrameContainer`
+      video data 
     annotations: :py:class:`dict`
-      A dictionary containing annotations of the face bounding box.
-      Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
-
-    **Returns:**
+      the face bounding box, as follows: ``{'topleft': (row, col), 'bottomright': (row, col)}``
 
-      pulse: numpy.array of size nb_frames 
-        The pulse signal 
+    Returns
+    -------
+    pulse: numpy.ndarray 
+      The pulse signal
+    
     """
     video = frames.as_array()
     nb_frames = video.shape[0]
-    
+   
+    # the pulse
     chrom = numpy.zeros((nb_frames, 2), dtype='float64')
 
-    # build the bandpass filter one and for all
-    bandpass_filter = build_bandpass_filter(self.framerate, self.bp_order, False)
+    # build the bandpass filter 
+    bandpass_filter = build_bandpass_filter(self.framerate, self.bp_order, plot=False)
 
     counter = 0
     previous_bbox = None
@@ -105,7 +125,7 @@ class Chrom(Preprocessor, object):
         size = (bottomright[0]-topleft[0], bottomright[1]-topleft[1])
         bbox = bob.ip.facedetect.BoundingBox(topleft, size)
         face = crop_face(frame, bbox, bbox.size[1])
-      except (KeyError, ZeroDivisionError) as e:
+      except (KeyError, ZeroDivisionError, TypeError) as e:
         logger.warning("No annotations ... running face detection")
         try:
           bbox, quality = bob.ip.facedetect.detect_single_face(frame)
diff --git a/bob/pad/face/preprocessor/Li.py b/bob/pad/face/preprocessor/Li.py
index 29ba5785..143497cc 100644
--- a/bob/pad/face/preprocessor/Li.py
+++ b/bob/pad/face/preprocessor/Li.py
@@ -1,7 +1,10 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
 import numpy
 
-import logging
-logger = logging.getLogger("bob.pad.face")
+from bob.core.log import setup
+logger = setup("bob.pad.face")
 
 from bob.bio.base.preprocessor import Preprocessor
 
@@ -15,39 +18,55 @@ from bob.rppg.cvpr14.filter_utils import average
 
 
 class Li(Preprocessor):
-  """
-  This class extract the pulse signal from a video sequence.
+  """Extract pulse signal from a video sequence.
   
   The pulse is extracted according to Li's CVPR 14 algorithm.
+
+  See the documentation of :py:mod:`bob.rppg.base`
+
   Note that this is a simplified version of the original 
   pulse extraction algorithms (mask detection in each 
-  frame instead of tranking, no illumination correction,
+  frame instead of tracking, no illumination correction,
   no motion pruning)
 
-  **Parameters:**
-
+  Attributes
+  ----------
   indent: int
     Indent (in percent of the face width) to apply to keypoints to get the mask.
-
   lamda_: int
     the lamba value of the detrend filter
-
   window: int
     The size of the window of the average filter 
-
   framerate: int
     The framerate of the video sequence.
-
   bp_order: int
     The order of the bandpass filter
-
-  debug: boolean          
+  debug: bool
     Plot some stuff 
+  
   """
+  
   def __init__(self, indent = 10, lambda_ = 300, window = 3, framerate = 25, bp_order = 32, debug=False, **kwargs):
+    """Init function
+
+    Parameters
+    ----------
+    indent: int
+      Indent (in percent of the face width) to apply to keypoints to get the mask.
+    lambda_: int
+      the lambda value of the detrend filter
+    window: int
+      The size of the window of the average filter 
+    framerate: int
+      The framerate of the video sequence.
+    bp_order: int
+      The order of the bandpass filter
+    debug: bool
+      Plot some stuff 
+    
 
+    """
     super(Li, self).__init__(**kwargs)
-    
     self.indent = indent
     self.lambda_ = lambda_
     self.window = window
@@ -56,31 +75,28 @@ class Li(Preprocessor):
     self.debug = debug
 
   def __call__(self, frames, annotations=None):
-    """
-    Compute the pulse signal for the given frame sequence
-
-    **Parameters:**
-
-    frames: :pyclass: `bob.bio.video.utils.FrameContainer`
-      Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-      for further details.
+    """Computes the pulse signal for the given frame sequence
 
+    Parameters
+    ----------
+    frames: :py:class:`bob.bio.video.utils.FrameContainer`
+      video data 
     annotations: :py:class:`dict`
-      A dictionary containing annotations of the face bounding box.
-      Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+      the face bounding box, as follows: ``{'topleft': (row, col), 'bottomright': (row, col)}``
 
-    **Returns:**
-
-      pulse: numpy.array of size (nb_frame, 3)
-        The pulse signal in each color channel (RGB)  
+    Returns
+    -------
+    pulse: numpy.ndarray 
+      The pulse signal, in each color channel (RGB)  
+    
     """
     video = frames.as_array()
     nb_frames = video.shape[0]
 
-    # the meancolor of the face along the sequence
+    # the mean color of the face along the sequence
     face_color = numpy.zeros((nb_frames, 3), dtype='float64')
 
-    # build the bandpass filter one and for all
+    # build the bandpass filter
     bandpass_filter = build_bandpass_filter(self.framerate, self.bp_order, plot=False)
 
     # landmarks detection
@@ -100,6 +116,7 @@ class Li(Preprocessor):
       try:
         ldms = detector(frame)
       except TypeError:
+        logger.warning("Exception caught -> problems with landmarks")
         # looks like one video from replay mobile is upside down !
         rotated_shape = bob.ip.base.rotated_output_shape(frame, 180)
         frame_rotated = numpy.ndarray(rotated_shape, dtype=numpy.float64)
@@ -116,6 +133,11 @@ class Li(Preprocessor):
           face_color[i] = 0
           continue
         frame = frame_rotated
+      
+      # landmarks have not been detected: use the one from previous frame
+      if ldms is None:
+        ldms = previous_ldms
+        logger.warning("Frame {}: no landmarks detected, using the ones from previous frame".format(i))
 
       if self.debug:
         from matplotlib import pyplot
@@ -143,12 +165,13 @@ class Li(Preprocessor):
       pulse[:, i] = filtfilt(bandpass_filter, numpy.array([1]), averaged)
 
     if self.debug: 
+      colors = ['r', 'g', 'b']
       from matplotlib import pyplot
       for i in range(3):
         f, ax = pyplot.subplots(2, sharex=True)
-        ax[0].plot(range(face_color.shape[0]), face_color[:, i], 'g')
+        ax[0].plot(range(face_color.shape[0]), face_color[:, i], colors[i])
         ax[0].set_title('Original color signal')
-        ax[1].plot(range(face_color.shape[0]), pulse[:, i], 'g')
+        ax[1].plot(range(face_color.shape[0]), pulse[:, i], colors[i])
         ax[1].set_title('Pulse signal')
         pyplot.show()
 
diff --git a/bob/pad/face/preprocessor/PPGSecure.py b/bob/pad/face/preprocessor/PPGSecure.py
index 868b909e..847d08af 100644
--- a/bob/pad/face/preprocessor/PPGSecure.py
+++ b/bob/pad/face/preprocessor/PPGSecure.py
@@ -1,7 +1,10 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
 import numpy
 
-import logging
-logger = logging.getLogger("bob.pad.face")
+from bob.core.log import setup
+logger = setup("bob.pad.face")
 
 from bob.bio.base.preprocessor import Preprocessor
 
@@ -33,49 +36,56 @@ class PPGSecure(Preprocessor):
       year           = 2017
     }
 
-  **Parameters:**
-
+  Attributes
+  ----------
   framerate: int
     The framerate of the video sequence.
-
   bp_order: int
     The order of the bandpass filter
-
-  debug: boolean          
+  debug: bool
     Plot some stuff 
+  
   """
+ 
   def __init__(self, framerate=25, bp_order=32, debug=False, **kwargs):
-
-    super(PPGSecure, self).__init__(**kwargs)
+    """Init function
+
+    Parameters
+    ----------
+    framerate: int
+      The framerate of the video sequence.
+    bp_order: int
+      The order of the bandpass filter
+    debug: bool
+      Plot some stuff 
     
+    """
+    super(PPGSecure, self).__init__(**kwargs)
     self.framerate = framerate
     self.bp_order = bp_order
     self.debug = debug
     
-    # build the bandpass filter one and for all
+    # build the bandpass filter
     self.bandpass_filter = build_bandpass_filter(self.framerate, self.bp_order, min_freq=0.5, max_freq=5, plot=False)
     
-    # landmarks detection
+    # landmarks detector
     self.detector = bob.ip.dlib.DlibLandmarkExtraction()
 
   def __call__(self, frames, annotations):
-    """
-    Compute the pulse signal for the given frame sequence
-
-    **Parameters:**
-
-    frames: :pyclass: `bob.bio.video.utils.FrameContainer`
-      Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-      for further details.
+    """Compute the pulse signal for the given frame sequence
 
+    Parameters
+    ----------
+    frames: :py:class:`bob.bio.video.utils.FrameContainer`
+      video data 
     annotations: :py:class:`dict`
-      A dictionary containing annotations of the face bounding box.
-      Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+      the face bounding box, as follows: ``{'topleft': (row, col), 'bottomright': (row, col)}``
 
-    **Returns:**
-
-      pulses: numpy.array of size (5, nb_frame)
-        The pulse signals from different area of the image 
+    Returns
+    -------
+    pulses: numpy.ndarray 
+      The pulse signals extracted from the different face regions  
+    
     """
     video = frames.as_array()
     nb_frames = video.shape[0]
@@ -113,6 +123,11 @@ class PPGSecure(Preprocessor):
           continue
         frame = frame_rotated
 
+      # landmarks have not been detected: use the one from previous frame
+      if ldms is None:
+        ldms = previous_ldms
+        logger.warning("Frame {}: no landmarks detected, using the ones from previous frame".format(i))
+
       if self.debug:
         from matplotlib import pyplot
         display = numpy.copy(frame)
@@ -152,15 +167,17 @@ class PPGSecure(Preprocessor):
 
 
   def _get_masks(self, image, ldms):
-    """ get the 5 masks for rPPG signal extraction
-
-    **Parameters**
+    """Get the 5 masks for rPPG signal extraction
 
-    ldms: numpy.array
+    Parameters
+    ----------
+    ldms: numpy.ndarray
       The landmarks, as retrieved by bob.ip.dlib.DlibLandmarkExtraction()
 
-    **Returns**
-      masks: boolean
+    Returns
+    -------
+      masks: :py:obj:`list` of numpy.ndarray
+        A list containing the different mask as a boolean array
         
     """
     masks = []
@@ -220,5 +237,3 @@ class PPGSecure(Preprocessor):
     masks.append(get_mask(image, mask_points))
 
     return masks
-
-
diff --git a/bob/pad/face/preprocessor/SSR.py b/bob/pad/face/preprocessor/SSR.py
index bcd06e2a..edb3ed91 100644
--- a/bob/pad/face/preprocessor/SSR.py
+++ b/bob/pad/face/preprocessor/SSR.py
@@ -1,7 +1,10 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
 import numpy
 
-import logging
-logger = logging.getLogger("bob.pad.face")
+from bob.core.log import setup
+logger = setup("bob.pad.face")
 
 from bob.bio.base.preprocessor import Preprocessor
 
@@ -16,55 +19,65 @@ from bob.rppg.ssr.ssr_utils import build_P
 
 
 class SSR(Preprocessor, object):
-  """
-  This class extract the pulse signal from a video sequence.
+  """Extract pulse signal from a video sequence.
+  
   The pulse is extracted according to the SSR algorithm.
 
-  **Parameters:**
+  See the documentation of :py:mod:`bob.rppg.base`
 
+  Attributes
+  ----------
   skin_threshold: float
     The threshold for skin color probability
-
   skin_init: bool
     If you want to re-initailize the skin color distribution at each frame
-
   stride: int
     The temporal stride. 
-
   debug: boolean          
     Plot some stuff 
+  skin_filter: :py:class:`bob.ip.skincolorfilter.SkinColorFilter` 
+    The skin color filter 
 
   """
+  
   def __init__(self, skin_threshold=0.5, skin_init=False, stride=25, debug=False, **kwargs):
+    """Init function
+
+    Parameters
+    ----------
+    skin_threshold: float
+      The threshold for skin color probability
+    skin_init: bool
+      If you want to re-initialize the skin color distribution at each frame
+    stride: int
+      The temporal stride. 
+    debug: boolean          
+      Plot some stuff 
 
+    """
     super(SSR, self).__init__()
-
     self.skin_threshold = skin_threshold
     self.skin_init = skin_init
     self.stride = stride
     self.debug = debug
-
     self.skin_filter = bob.ip.skincolorfilter.SkinColorFilter()
 
 
   def __call__(self, frames, annotations):
-    """
-    Compute the pulse signal for the given frame sequence
-
-    **Parameters:**
-
-    frames: :pyclass: `bob.bio.video.utils.FrameContainer`
-      Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-      for further details.
+    """Computes the pulse signal for the given frame sequence
 
+    Parameters
+    ----------
+    frames: :py:class:`bob.bio.video.utils.FrameContainer`
+      video data 
     annotations: :py:class:`dict`
-      A dictionary containing annotations of the face bounding box.
-      Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+      the face bounding box, as follows: ``{'topleft': (row, col), 'bottomright': (row, col)}``
 
-    **Returns:**
-
-      pulse: numpy.array of size nb_frames 
-        The pulse signal 
+    Returns
+    -------
+    pulse: numpy.ndarray 
+      The pulse signal
+    
     """
     video = frames.as_array()
     nb_frames = video.shape[0]
@@ -88,7 +101,6 @@ class SSR(Preprocessor, object):
         from matplotlib import pyplot
         pyplot.imshow(numpy.rollaxis(numpy.rollaxis(frame, 2),2))
         pyplot.show()
-    
 
       # get the face
       try:
@@ -97,7 +109,7 @@ class SSR(Preprocessor, object):
         size = (bottomright[0]-topleft[0], bottomright[1]-topleft[1])
         bbox = bob.ip.facedetect.BoundingBox(topleft, size)
         face = crop_face(frame, bbox, bbox.size[1])
-      except (KeyError, ZeroDivisionError) as e:
+      except (KeyError, ZeroDivisionError, TypeError) as e:
         logger.warning("No annotations ... running face detection")
         try:
           bbox, quality = bob.ip.facedetect.detect_single_face(frame)
@@ -129,7 +141,7 @@ class SSR(Preprocessor, object):
         pyplot.imshow(numpy.rollaxis(numpy.rollaxis(skin_mask_image, 2),2))
         pyplot.show()
       
-      # nos skin pixels have ben detected ... using the previous ones
+      # no skin pixels have been detected ... using the previous ones
       if skin_pixels.shape[1] == 0:
         skin_pixels = previous_skin_pixels 
         logger.warn("No skin pixels detected, using the previous ones")
-- 
GitLab