diff --git a/doc/conf.py b/doc/conf.py
index dbe0f9feb3d9a5ed168ff8f8aaf4485f7aa239cd..ec09031d2525923a9b6fe549bf81f0710ae463d2 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -25,14 +25,35 @@
 extensions = [
     'sphinx.ext.intersphinx',
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
+    'matplotlib.sphinxext.plot_directive'
     ]
 
 import sphinx
 if sphinx.__version__ >= "1.4.1":
     extensions.append('sphinx.ext.imgmath')
+    imgmath_image_format = 'svg'
 else:
     extensions.append('sphinx.ext.pngmath')
 
+# Be picky about warnings
+nitpicky = True
+
+# Ignores stuff we cannot easily resolve in other projects' sphinx manuals
+nitpick_ignore = []
+
+# Allows the user to override warnings from a separate file
+if os.path.exists('nitpick-exceptions.txt'):
+    for line in open('nitpick-exceptions.txt'):
+        if line.strip() == "" or line.startswith("#"):
+            continue
+        dtype, target = line.split(None, 1)
+        target = target.strip()
+        try: # python 2.x
+            target = unicode(target)
+        except NameError:
+            pass
+        nitpick_ignore.append((dtype, target))
+
 # Always includes todos
 todo_include_todos = True
 
@@ -111,7 +132,7 @@ pygments_style = 'sphinx'
 
 # Some variables which are useful for generated material
 project_variable = project.replace('.', '_')
-short_description = u'Bob wrapper for menpo face keypoint detection package provided by Imperial College, London'
+short_description = u'Extraction of facial landmarks using Menpofit'
 owner = [u'Idiap Research Institute']
 
 
@@ -216,8 +237,13 @@ autodoc_default_flags = [
   ]
 
 # For inter-documentation mapping:
-from bob.extension.utils import link_documentation
-intersphinx_mapping = link_documentation()
+from bob.extension.utils import link_documentation, load_requirements
+sphinx_requirements = "extra-intersphinx.txt"
+if os.path.exists(sphinx_requirements):
+    intersphinx_mapping = link_documentation(additional_packages=['python', 'numpy'] + load_requirements(sphinx_requirements))
+else:
+    intersphinx_mapping = link_documentation()
+
 
 # We want to remove all private (i.e. _. or __.__) members
 # that are not in the list of accepted functions
diff --git a/doc/guide.rst b/doc/guide.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d45e751d07fe12ae8b98526e7d4ab7fd491ff75
--- /dev/null
+++ b/doc/guide.rst
@@ -0,0 +1,144 @@
+.. py:currentmodule:: bob.ip.facelandmarks
+
+.. testsetup:: *
+
+   from __future__ import print_function
+   import math
+   import bob.io.base
+   import bob.io.base.test_utils
+   import bob.io.image
+   import bob.io.video
+   import bob.ip.color
+   import bob.ip.facedetect
+   import bob.ip.facelandmarks
+
+   import pkg_resources
+   lena_file = bob.io.base.test_utils.datafile('lena.jpg', 'bob.ip.facelandmarks')
+   multi_file = bob.io.base.test_utils.datafile('multiple-faces.jpg', 'bob.ip.facelandmarks', 'data')
+   face_image = bob.io.base.load(lena_file)
+   multi_image = bob.io.base.load(multi_file)
+
+=============
+ User Guide
+=============
+
+This Bob package allows you to use the Menpofit_ package to detect facial landmarks.
+Given a gray-level image depicting a human face, this package can be used to extract a specific set of 68 landmarks,
+as defined in Menpofit. Please refer to the original Menpofit documentation_ for implementation details.
+Here, we show some examples of how to use the ``bob.ip.facelandmarks`` package.
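+
+The examples below assume that the following modules have been imported beforehand (this mirrors the hidden test setup used for the doctests in this guide; importing ``bob.io.image`` is what enables ``bob.io.base.load()`` to read image files such as ``lena.jpg``):
+
+.. code-block:: python
+
+   import bob.io.base
+   import bob.io.image      # registers image (e.g. JPEG) support for bob.io.base.load
+   import bob.ip.color
+   import bob.ip.facedetect
+   import bob.ip.facelandmarks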
+
+
+Landmark Detection on a Single Face
+-----------------------------------
+
+The simplest face-detection task is to detect a single face in an image.
+This task can be accomplished using the ``detect_landmarks()`` function in this package.
+The following code-example shows how to extract facial keypoints for a single face in a gray-level input image:
+
+.. doctest::
+
+   >>> face_image = bob.io.base.load('lena.jpg') # doctest: +SKIP
+   >>> gray_image = bob.ip.color.rgb_to_gray(face_image)
+   >>> key_points = bob.ip.facelandmarks.utils.detect_landmarks(gray_image, 1)
+   >>> print(key_points[0].landmarks.shape)
+   (68, 2)
+
+   >>> print(key_points[0].bounding_box.topleft)
+   (226, 237)
+
+   >>> print(key_points[0].bounding_box.bottomright)
+   (394, 376)
+
+This package also provides a handy function, ``draw_landmarks()``, for plotting the extracted facial landmarks on an image.
+
+.. doctest::
+
+   >>> bob.ip.facelandmarks.utils.draw_landmarks(gray_image, key_points)
+
+The result is shown in the image below.
+
+.. plot:: plot/single_face_lmks.py
+   :include-source: False
+
+
+The ``detect_landmarks()`` function has the following signature: ``detect_landmarks(gray_image, top=0, min_quality=0.0)``.
+
+ * ``gray_image``: a numpy-array containing the gray-level input image,
+ * ``top``: a positive integer (default=0), specifying the number of faces to be detected in the image,
+ * ``min_quality``: a positive floating-point number (default=0.0), specifying the minimum acceptable quality for the result of face-detection.
+
+The first parameter is mandatory, and should be a valid 2D numpy-array representing a gray-level image.
+The remaining two parameters are optional.
+In the example above, ``top`` is set to 1, so landmarks are extracted for only one face.
+
+The function ``detect_landmarks()`` first detects faces in the input image using ``bob.ip.facedetect``, and then uses the result of the face-detection step to locate the facial landmarks.
+
+If the ``min_quality`` parameter is specified, bounding-boxes with a quality-value lower than the specified value are ignored.
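+For instance, to keep only reasonably confident detections, a quality threshold can be passed explicitly (the value ``20.0`` below is only an illustrative choice, not a recommended setting):
+
+.. doctest::
+
+   >>> key_points = bob.ip.facelandmarks.utils.detect_landmarks(gray_image, top=1, min_quality=20.0) # doctest: +SKIP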
+
+The return value of ``detect_landmarks()`` is a list.
+When only one face is expected in the input, this list will contain only one element.
+Each element in the list is an object with three members, named as follows:
+
+ * ``bounding_box``: an object with two attributes, ``topleft`` and ``bottomright``, each of which is a tuple (row, col) giving the coordinates of the corresponding corner of the detected face bounding-box.
+ * ``quality``: a floating-point number between 0 and 100.0, giving a quality-estimate for the result of the face-detection step.
+ * ``landmarks``: a numpy-array of shape (68, 2).
+
+The first two members, ``bounding_box`` and ``quality``, come from ``bob.ip.facedetect``.
+The detected bounding-boxes are sorted in order of decreasing quality, and the top-N (where N is the value specified for the parameter ``top``) bounding-boxes are used, one by one, in the landmark-detection step.
+
+For each detected face, each row in ``landmarks`` represents one of the 68 facial landmarks, and gives the coordinates (row, col) of that landmark.
+As described in the Menpofit documentation, the facial landmarks are listed in a specific order in the array:
+
+.. code-block:: python
+
+   jaw_indices = [0, 17]
+   lbrow_indices = [17, 22]
+   rbrow_indices = [22, 27]
+   upper_nose_indices = [27, 31]
+   lower_nose_indices = [31, 36]
+   leye_indices = [36, 42]
+   reye_indices = [42, 48]
+   outer_mouth_indices = [48, 60]
+   inner_mouth_indices = [60, 67]
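+
+For illustration, the left-eye range above (``leye_indices = [36, 42]``) can be used to slice the corresponding rows out of the ``landmarks`` array, reusing the ``key_points`` obtained for the single-face example:
+
+.. doctest::
+
+   >>> left_eye = key_points[0].landmarks[36:42]
+   >>> print(left_eye.shape)
+   (6, 2)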
+
+
+If the bounding-box of the desired face is already available (via a preceding call to the function ``bob.ip.facedetect.detect_single_face()``), the function ``detect_landmarks_on_boundingbox(gray_image, bounding_box)`` may be used to determine the facial landmarks within this bounding-box.
+Note that the return-value of ``detect_landmarks_on_boundingbox()`` is a 2D numpy-array representing the coordinates of the 68 landmarks (and not an object as in the case of ``detect_landmarks()``).
+
+.. doctest::
+
+   >>> gray_image = bob.ip.color.rgb_to_gray(face_image)
+   >>> my_bounding_box, _ = bob.ip.facedetect.detect_single_face(gray_image)
+   >>> key_points = bob.ip.facelandmarks.detect_landmarks_on_boundingbox(gray_image, my_bounding_box)
+   >>> print(key_points.shape)
+   (68, 2)
+
+
+Landmark Detection on Multiple Faces
+------------------------------------
+
+To extract landmarks for multiple faces in the same image, use the ``top`` parameter when calling ``detect_landmarks()``.
+In the following example, the input image contains several faces; landmarks are extracted for the 5 faces with the highest face-detection quality.
+
+.. doctest::
+
+   >>> multi_image = bob.io.base.load('multiple-faces.jpg') # doctest: +SKIP
+   >>> gray_image = bob.ip.color.rgb_to_gray(multi_image)
+   >>> key_points = bob.ip.facelandmarks.utils.detect_landmarks(gray_image, top=5)
+   >>> for i in range(5):
+   ...   print(key_points[i].bounding_box.topleft)
+   (136, 2243)
+   (1480, 2226)
+   (1574, 2959)
+   (853, 913)
+   (107, 3016)
+
+
+.. _Menpofit: http://www.menpo.org/menpofit/
+
+.. _documentation: https://menpofit.readthedocs.io/en/stable/
diff --git a/doc/index.rst b/doc/index.rst
index 78ede712901107b2caef2b7b6efeb308a2165770..034ebfbe4d62e16a8dc9fafcd24eb19261648589 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -2,16 +2,15 @@
 .. Andre Anjos <andre.anjos@idiap.ch>
 .. Tue 16 Feb 2016 15:42:29 CET
 
-====================================================
- Bob's Face Landmark Detection Routines using Menpo
-====================================================
+=======================================================
+ Bob's Routines for Face Landmark Detection using Menpo
+=======================================================
 
 .. todolist::
 
-This package includes a `Bob`_ interface to `menpo`_ allowing you to detect
-landmarks on faces from images and videos. The model was provided by
-Epameinondas Antonakos from the `iBug`_ laboratory at Imperial College, London,
-UK.
+This package includes a `Bob`_ interface to `menpo`_, allowing you to extract
+facial landmarks from images and videos. The pre-trained Menpo model was provided
+by Epameinondas Antonakos from the `iBug`_ laboratory at Imperial College, London, UK.
 
 
 Documentation
@@ -20,6 +19,7 @@ Documentation
 .. toctree::
    :maxdepth: 2
 
+   guide
    py_api
 
 
diff --git a/doc/plot/single_face_lmks.py b/doc/plot/single_face_lmks.py
new file mode 100644
index 0000000000000000000000000000000000000000..856181d5200e647b17dd3a4b8bc7c180c4485907
--- /dev/null
+++ b/doc/plot/single_face_lmks.py
@@ -0,0 +1,31 @@
+import os, sys
+import time
+import argparse
+import numpy as np
+
+import bob.io.base
+import bob.io.base.test_utils
+import bob.io.image
+import bob.io.video
+import bob.ip.color
+
+import bob.ip.facelandmarks as menpo
+
+from PIL import Image, ImageDraw
+from bob.ip.draw import box, cross, plus
+
+import matplotlib
+from matplotlib import pyplot
+import pkg_resources
+
+# 1. load image
+color_image = bob.io.base.load(bob.io.base.test_utils.datafile('lena.jpg', 'bob.ip.facelandmarks'))
+gray_image = bob.ip.color.rgb_to_gray(color_image)
+# 2. extract feature-points
+frameKeypoints = menpo.utils.detect_landmarks(gray_image, 1)
+landmarks = frameKeypoints[0].landmarks
+# 3. plot landmarks on image
+lmkImage = np.copy(gray_image)
+menpo.utils.draw_landmarks(lmkImage, frameKeypoints)
+pyplot.imshow(lmkImage.astype(np.uint8), cmap=matplotlib.cm.gray)
+pyplot.show()
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 292ae8122dbe3e6603b0036ccd8346e4facb585d..c0f568984a99338cb5f4c48da81cb6a300857a71 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -1,14 +1,21 @@
 .. vim: set fileencoding=utf-8 :
-.. Andre Anjos <andre.anjos@idiap.ch>
-.. Tue 16 Feb 2016 15:44:07 CET
+.. Manuel Guenther <manuel.guenther@idiap.ch>
+.. Wed 14 Jan 16:15:27 CET 2015
 
 ============
  Python API
 ============
 
-Classes
--------
+Functions
+---------
+
+.. autosummary::
+
+   bob.ip.facelandmarks.utils.detect_landmarks
 
-.. automodule:: bob.ip.facelandmarks
+Detailed Information
+--------------------
+
+.. automodule:: bob.ip.facelandmarks