Commit 01a77301 by Amir MOHAMMADI

Merge branch 'issue-3' into 'master'

Landmarks as a dictionary. Closes #3. See merge request !5.
parents f3855c6f 4835ca9d
Pipeline #16364 failed with stages
in 2 minutes 40 seconds
......@@ -15,12 +15,23 @@ from .FaceDetector import FaceDetector
class DlibLandmarkExtraction(object):
"""
Binds to the DLib landmark detection using the shape_predictor_68_face_landmarks,
This facial landmark detector is an implementation of [Kazemi2014]_
Parameters
----------
model: :py:class:`str`
Path to the dlib pretrained model, if **None**, the model will be downloaded.
bob_landmark_format: :py:class:`bool`
If **True**, `__call__` will return the landmarks with Bob dictionary keys ('leye', 'reye', `nose`, .....).
If **False**, `__call__` will return a list with the detected landmarks
"""
def __init__(self, model=None):
"""
"""
def __init__(self, model=None, bob_landmark_format=False):
default_model = os.path.join(DlibLandmarkExtraction.get_dlib_model_path(), "shape_predictor_68_face_landmarks.dat")
if model is None:
......@@ -34,6 +45,8 @@ class DlibLandmarkExtraction(object):
self.face_detector = FaceDetector()
self.predictor = dlib.shape_predictor(self.model)
self.bob_landmark_format = bob_landmark_format
def __call__(self, image, bb=None, xy_output=False):
......@@ -50,10 +63,27 @@ class DlibLandmarkExtraction(object):
raise ValueError("Face not found in the image.")
points = self.predictor(bob_to_dlib_image_convertion(image), bb)
if xy_output:
return list(map(lambda p: (p.x, p.y), points.parts()))
if self.bob_landmark_format:
points = list(map(lambda p: (p.y, p.x), points.parts()))
bob_landmarks = dict()
bob_landmarks['leye'] = ((points[37][0] + points[40][0])//2,
(points[37][1] + points[40][1])//2)
bob_landmarks['reye'] = ((points[43][0] + points[46][0])//2,
(points[43][1] + points[46][1])//2)
bob_landmarks['nose'] = (points[33][0], points[33][1])
bob_landmarks['mouthleft'] = (points[49][0], points[49][1])
bob_landmarks['mouthright'] = (points[55][0], points[55][1])
return bob_landmarks
else:
return list(map(lambda p: (p.y, p.x), points.parts()))
if xy_output:
return list(map(lambda p: (p.x, p.y), points.parts()))
else:
return list(map(lambda p: (p.y, p.x), points.parts()))
@staticmethod
def get_dlib_model_path():
......
......@@ -20,19 +20,17 @@ class FaceDetector(object):
"""
def __init__(self):
"""
"""
self.face_detector = dlib.get_frontal_face_detector()
def detect_all_faces(self, image):
"""
Find all face bounding boxes in an image.
**Parameters**
Parameters
----------
name: image
RGB image
image: 2D or 3D :py:class:`numpy.ndarray`
GRAY scaled or RGB image in the format (CxWxH)
"""
assert image is not None
......@@ -67,6 +65,15 @@ class FaceDetector(object):
return []
def detect_single_face(self, image):
"""
Detect the biggest detected face in an image
Parameters
----------
image: 2D or 3D :py:class:`numpy.ndarray`
GRAY scaled or RGB image in the format (CxWxH)
"""
faces = self.detect_all_faces(image)
if len(faces) > 1 and not all([not f for f in faces]):
......
......@@ -3,14 +3,10 @@
"""Test Units
"""
#==============================================================================
# Import what is needed here:
import numpy as np
from .. import FaceDetector
import pkg_resources
import bob.io.base
def test_face_detector():
......@@ -19,28 +15,39 @@ def test_face_detector():
"""
image = np.zeros((3, 100, 100))
result = FaceDetector().detect_single_face(image)
assert result is None
image = np.ones((3, 100, 100))
result = FaceDetector().detect_single_face(image)
assert result is None
# test on the actual image:
test_file = pkg_resources.resource_filename('bob.ip.dlib', 'data/test_image.hdf5')
f = bob.io.base.HDF5File(test_file) #read only
image = f.read('image') #reads integer
del f
result = FaceDetector().detect_single_face(image)
assert result[0].topleft == (0, 236)
assert result[0].bottomright == (84, 312)
def test_landmark():
    """
    Test the DLib landmark extraction in both of its output formats:
    the raw 68-point dlib list and the Bob-style landmark dictionary.
    """
    # Load the actual test image (``load`` returns image data, not a path).
    image = bob.io.base.load(pkg_resources.resource_filename('bob.ip.dlib', 'data/test_image.hdf5'))

    # Testing the raw dlib landmark format: a list of all 68 points.
    detector = bob.ip.dlib.DlibLandmarkExtraction(bob_landmark_format=False)
    points_dlib = detector(image)
    assert len(points_dlib) == 68

    # Testing the Bob landmark format: a dict keyed by named key points.
    detector = bob.ip.dlib.DlibLandmarkExtraction(bob_landmark_format=True)
    points_bob = detector(image)
    expected_keys = {'leye', 'reye', 'nose', 'mouthleft', 'mouthright'}
    # Membership of dict keys is a boolean check; ``all`` is the right tool
    # here (``np.allclose`` is meant for float tolerance comparisons).
    assert all(key in expected_keys for key in points_bob)
import numpy
from bob.ip.facedetect import BoundingBox
import dlib
import bob.io.image
def bob_to_dlib_image_convertion(bob_image, change_color=True):
......@@ -59,6 +60,21 @@ def bounding_box_2_rectangle(bb):
bb.bottomright[1], bb.bottomright[0])
def rectangle_2_bounding_box(rectangle):
    """
    Converts a :py:class:`dlib.rectangle` to a
    :py:class:`bob.ip.facedetect.BoundingBox`.

    dlib may report detections whose top-left corner lies outside the
    image (negative coordinates).  The corner is clamped to (0, 0) and,
    when a coordinate is clamped, the size is reduced by the same amount
    so the bottom-right corner stays where dlib detected it.

    Parameters
    ----------
    rectangle: :py:class:`dlib.rectangle`
        The dlib detection rectangle to convert.

    Returns
    -------
    :py:class:`bob.ip.facedetect.BoundingBox`
        The equivalent Bob bounding box as ``(topleft, size)``.
    """
    assert isinstance(rectangle, dlib.rectangle)

    # Clamp the top-left corner to non-negative image coordinates.
    top = max(0, rectangle.top())
    left = max(0, rectangle.left())

    # ``min(0, ...)`` is negative exactly when the corner was clamped, so
    # the size shrinks by the clamped amount and is unchanged otherwise.
    # (The previous ``min(top, height)`` clamped the height to the *top
    # coordinate*, collapsing any detection near the image border.)
    height = rectangle.height() + min(0, rectangle.top())
    width = rectangle.width() + min(0, rectangle.left())

    return BoundingBox((top, left), (height, width))
# gets sphinx autodoc done right - don't remove it
......
......@@ -65,9 +65,9 @@ The detection of landmarks can be done as the following:
>>> import bob.io.base
>>> import bob.io.base.test_utils
>>> dlib_color_image = bob.io.base.load(bob.io.base.test_utils.datafile('testimage.jpg', 'bob.ip.facedetect'))
>>> points = bob.ip.dlib.DlibLandmarkExtraction()(dlib_color_image)
>>> print (points[0])
(192, 77)
>>> points = bob.ip.dlib.DlibLandmarkExtraction(bob_landmark_format=True)(dlib_color_image)
>>> print (points['reye'])
(17, 293)
.. plot:: plot/plot_landmarks.py
:include-source: False
......
......@@ -6,42 +6,39 @@ import pkg_resources
import os
from matplotlib import pyplot
import bob.ip.draw
import bob.ip.facedetect
#print "###################################"
#print os.path.join(pkg_resources.resource_filename(__name__, 'data'), 'multiple-faces.jpg')
# detect multiple bob
bob_color_image = bob.io.base.load(bob.io.base.test_utils.datafile('testimage.jpg', 'bob.ip.facedetect'))
bob_bounding_box, _ = bob.ip.facedetect.detect_single_face(bob_color_image)
# detect multiple dlib
dlib_color_image = bob.io.base.load(bob.io.base.test_utils.datafile('testimage.jpg', 'bob.ip.facedetect'))
dlib_bounding_box, _ = bob.ip.dlib.FaceDetector().detect_single_face(dlib_color_image)
image = bob.io.base.load(bob.io.base.test_utils.datafile('testimage.jpg', 'bob.ip.facedetect'))
bob_image = bob.io.base.load(bob.io.base.test_utils.datafile('testimage.jpg', 'bob.ip.facedetect'))
bounding_box, _ = bob.ip.dlib.FaceDetector().detect_single_face(image)
# landmarks
detector = bob.ip.dlib.DlibLandmarkExtraction()
points_dlib = detector(dlib_color_image)
points_bob = detector(bob_color_image, bb=bob_bounding_box)
for p in points_bob:
bob.ip.draw.plus(bob_color_image, p, radius=10, color=(255, 0, 0))
points = detector(image)
bob_detector = bob.ip.dlib.DlibLandmarkExtraction(bob_landmark_format=True)
bob_points = bob_detector(bob_image)
for p in points_dlib:
bob.ip.draw.plus(dlib_color_image, p, radius=10, color=(255, 0, 0))
for p in points:
bob.ip.draw.plus(image, p, radius=10, color=(255, 0, 0))
for p in bob_points:
bob.ip.draw.plus(bob_image, bob_points[p], radius=10, color=(255, 0, 0))
# face detections
bob.ip.draw.box(bob_color_image, bob_bounding_box.topleft, bob_bounding_box.size, color=(255, 0, 0))
bob.ip.draw.box(dlib_color_image, dlib_bounding_box.topleft, dlib_bounding_box.size, color=(255, 0, 0))
bob.ip.draw.box(image, bounding_box.topleft, bounding_box.size, color=(255, 0, 0))
ax = pyplot.subplot(1, 2, 1)
ax.set_title("Dlib")
pyplot.imshow(bob.ip.dlib.utils.bob_to_dlib_image_convertion(dlib_color_image, change_color=False))
ax.set_title("Dlib landmarks")
pyplot.imshow(bob.ip.dlib.utils.bob_to_dlib_image_convertion(image, change_color=False))
pyplot.axis('off')
ax = pyplot.subplot(1, 2, 2)
ax.set_title("Bob")
pyplot.imshow(bob.ip.dlib.utils.bob_to_dlib_image_convertion(bob_color_image, change_color=False))
ax.set_title("Dlib landmarks for Bob")
pyplot.imshow(bob.ip.dlib.utils.bob_to_dlib_image_convertion(bob_image, change_color=False))
pyplot.axis('off')
......@@ -2,4 +2,4 @@
References
============
.. todo:: Provide the correct references for the algorithms defined in this package.
.. [Kazemi2014] Kazemi, Vahid, and Sullivan Josephine. "One millisecond face alignment with an ensemble of regression trees." 27th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, United States, 23 June 2014 through 28 June 2014. IEEE Computer Society, 2014.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment