Commit ed38bcbc authored by Olegs NIKISINS

Merge branch 'dev_branch' into 'master'

Added face detection functionality in the face cropping preprocessor

See merge request !21
parents 3ea172f0 653737e3
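
In short, this merge request threads a new ``face_detection_method`` parameter through the ``VideoFaceCrop`` preprocessor, so that face annotations can be computed locally (with either dlib or MTCNN) instead of being read from the database. A minimal usage sketch, assuming the class is importable as below (the exact import path is not shown in this diff; constructor arguments are abridged):

    from bob.pad.face.preprocessor import VideoFaceCrop  # assumed import path

    # Select the detection backend by name: "dlib" is a dependency of this
    # package, while "mtcnn" works only if bob.ip.mtcnn is installed.
    preprocessor = VideoFaceCrop(cropped_image_size = (64, 64),
                                 cropped_positions = {'topleft': (0, 0),
                                                      'bottomright': (64, 64)},
                                 detect_faces_flag = True,  # find annotations locally
                                 rgb_output_flag = True,
                                 face_detection_method = "dlib")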
@@ -5,22 +5,22 @@ from bob.bio.base.grid import Grid
 # Configuration to run on computation cluster:
 idiap = Grid(
-    training_queue='32G',
+    training_queue='8G-io-big',
     number_of_preprocessing_jobs=32,
     preprocessing_queue='4G-io-big',
     number_of_extraction_jobs=32,
-    extraction_queue='8G-io-big',
+    extraction_queue='4G-io-big',
     number_of_projection_jobs=32,
-    projection_queue='8G-io-big',
+    projection_queue='4G-io-big',
     number_of_enrollment_jobs=32,
-    enrollment_queue='8G-io-big',
+    enrollment_queue='4G-io-big',
     number_of_scoring_jobs=1,
-    scoring_queue='8G-io-big',
+    scoring_queue='4G-io-big',
 )
 
 # Configuration to run on user machines:
...
@@ -17,15 +17,50 @@ MIN_FACE_SIZE = 50
 USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
 RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
 DETECT_FACES_FLAG = True # find annotations locally replacing the database annotations
+FACE_DETECTION_METHOD = "dlib"
+
+rgb_face_detector_dlib = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                                       cropped_positions = CROPPED_POSITIONS,
+                                       fixed_positions = FIXED_POSITIONS,
+                                       mask_sigma = MASK_SIGMA,
+                                       mask_neighbors = MASK_NEIGHBORS,
+                                       mask_seed = None,
+                                       check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                       min_face_size = MIN_FACE_SIZE,
+                                       use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       detect_faces_flag = DETECT_FACES_FLAG,
+                                       face_detection_method = FACE_DETECTION_METHOD)
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5 # The number of neighbors to consider while extrapolating
+MASK_SEED = None # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True # Check the size of the face
+MIN_FACE_SIZE = 50
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
+DETECT_FACES_FLAG = True # find annotations locally replacing the database annotations
+FACE_DETECTION_METHOD = "mtcnn"
+
+rgb_face_detector_mtcnn = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                                        cropped_positions = CROPPED_POSITIONS,
+                                        fixed_positions = FIXED_POSITIONS,
+                                        mask_sigma = MASK_SIGMA,
+                                        mask_neighbors = MASK_NEIGHBORS,
+                                        mask_seed = None,
+                                        check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                        min_face_size = MIN_FACE_SIZE,
+                                        use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                                        rgb_output_flag = RGB_OUTPUT_FLAG,
+                                        detect_faces_flag = DETECT_FACES_FLAG,
+                                        face_detection_method = FACE_DETECTION_METHOD)
-preprocessor_rgb_face_detect = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                             cropped_positions = CROPPED_POSITIONS,
-                                             fixed_positions = FIXED_POSITIONS,
-                                             mask_sigma = MASK_SIGMA,
-                                             mask_neighbors = MASK_NEIGHBORS,
-                                             mask_seed = None,
-                                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                             min_face_size = MIN_FACE_SIZE,
-                                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                             rgb_output_flag = RGB_OUTPUT_FLAG,
-                                             detect_faces_flag = DETECT_FACES_FLAG)
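
The two instances above, ``rgb_face_detector_dlib`` and ``rgb_face_detector_mtcnn``, replace the old ``preprocessor_rgb_face_detect`` and are the objects referenced by the renamed entry points in setup.py at the end of this diff. Either one can also be imported directly from the config module, e.g.:

    # Module path taken from the entry points registered in setup.py below:
    from bob.pad.face.config.preprocessor.video_face_crop import rgb_face_detector_dlib

    # rgb_face_detector_dlib is a ready-made VideoFaceCrop instance configured
    # with face_detection_method = "dlib".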
@@ -84,6 +84,12 @@ class VideoFaceCrop(Preprocessor, object):
         cropping.
         Default: ``False``.
 
+    ``face_detection_method`` : :py:class:`str`
+        A package to be used for face detection. Options supported by this
+        package: "dlib" (dlib is a dependency of this package). If bob.ip.mtcnn
+        is installed in your system, you can use it as well (bob.ip.mtcnn is NOT
+        a dependency of this package).
+
     ``kwargs``
         Remaining keyword parameters passed to the Base constructor, such as ``color_channel`` or ``dtype``.
     """
@@ -101,6 +107,7 @@ class VideoFaceCrop(Preprocessor, object):
                  use_local_cropper_flag = False,
                  rgb_output_flag = False,
                  detect_faces_flag = False,
+                 face_detection_method = "dlib",
                  **kwargs):
 
         super(VideoFaceCrop, self).__init__(cropped_image_size = cropped_image_size,
@@ -114,6 +121,7 @@ class VideoFaceCrop(Preprocessor, object):
                                             use_local_cropper_flag = use_local_cropper_flag,
                                             rgb_output_flag = rgb_output_flag,
                                             detect_faces_flag = detect_faces_flag,
+                                            face_detection_method = face_detection_method,
                                             **kwargs)
 
         self.cropped_image_size = cropped_image_size
@@ -127,6 +135,7 @@ class VideoFaceCrop(Preprocessor, object):
         self.use_local_cropper_flag = use_local_cropper_flag
         self.rgb_output_flag = rgb_output_flag
         self.detect_faces_flag = detect_faces_flag
+        self.face_detection_method = face_detection_method
 
         # Save also the data stored in the kwargs:
         for (k, v) in kwargs.items():
@@ -276,7 +285,7 @@ class VideoFaceCrop(Preprocessor, object):
         if self.detect_faces_flag:
-            annotations = detect_faces_in_video(frames)
+            annotations = detect_faces_in_video(frames, self.face_detection_method)
 
         if len(frames) != len(annotations): # if some annotations are missing
@@ -307,7 +316,9 @@ class VideoFaceCrop(Preprocessor, object):
             name of the file.
         """
 
-        self.video_preprocessor.write_data(frames, file_name)
+        if frames: # save file if FrameContainer is not empty, otherwise do nothing
+            self.video_preprocessor.write_data(frames, file_name)
 
     #==========================================================================
...
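
The ``write_data`` change above guards against empty FrameContainers: when face detection fails on every frame of a video, the preprocessor produces an empty container, and writing it would leave behind a useless output file. A minimal sketch of the guard, using a plain list as a hypothetical stand-in for the real FrameContainer class:

    def write_data(frames, file_name):
        # An empty container is falsy, so nothing is written when face
        # detection found no faces in any frame.
        if frames:
            print("writing %d frames to %s" % (len(frames), file_name))

    write_data([], "output.hdf5")          # does nothing
    write_data(["frame1"], "output.hdf5")  # writes one frame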
@@ -167,6 +167,7 @@ def test_video_face_crop():
     USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
     RGB_OUTPUT_FLAG = True # Return RGB cropped face using local cropper
     DETECT_FACES_FLAG = True # find annotations locally replacing the database annotations
+    FACE_DETECTION_METHOD = "dlib"
 
     preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
                                  cropped_positions = CROPPED_POSITIONS,
@@ -178,7 +179,8 @@ def test_video_face_crop():
                                  min_face_size = MIN_FACE_SIZE,
                                  use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
                                  rgb_output_flag = RGB_OUTPUT_FLAG,
-                                 detect_faces_flag = DETECT_FACES_FLAG)
+                                 detect_faces_flag = DETECT_FACES_FLAG,
+                                 face_detection_method = FACE_DETECTION_METHOD)
 
     video, _ = convert_image_to_video_data(image, annotations, 3)
...
@@ -7,11 +7,11 @@ This file contains face detection utils.
 #==============================================================================
 # Import here:
 
-import bob.ip.dlib # for face detection functionality
+import importlib
 
 #==============================================================================
-def detect_face_in_image(image):
+def detect_face_in_image(image, method = "dlib"):
     """
     This function detects a face in the input image.
@@ -20,34 +20,45 @@ def detect_face_in_image(image):
     ``image`` : 3D :py:class:`numpy.ndarray`
         A color image to detect the face in.
 
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Options supported by this
+        package: "dlib" (dlib is a dependency of this package). If bob.ip.mtcnn
+        is installed in your system, you can use it as well (bob.ip.mtcnn is NOT
+        a dependency of this package).
+
     **Returns:**
 
     ``annotations`` : :py:class:`dict`
         A dictionary containing annotations of the face bounding box.
         Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``.
+        If no annotations are found, an empty dictionary is returned.
     """
 
-    bounding_box, _ = bob.ip.dlib.FaceDetector().detect_single_face(image)
-
-    annotations = {}
-
-    if bounding_box is not None:
-        annotations['topleft'] = bounding_box.topleft
-        annotations['bottomright'] = bounding_box.bottomright
-    else:
-        annotations['topleft'] = (0, 0)
-        annotations['bottomright'] = (0, 0)
+    try:
+        face_detection_module = importlib.import_module("bob.ip." + method)
+    except ImportError:
+        raise ImportError("No module named bob.ip." + method)
+
+    if not hasattr(face_detection_module, 'FaceDetector'):
+        raise AttributeError("bob.ip." + method + " module has no attribute FaceDetector")
+
+    data = face_detection_module.FaceDetector().detect_single_face(image)
+
+    annotations = {}
+
+    if ( data is not None ) and ( not all([x is None for x in data]) ):
+        bounding_box = data[0]
+        annotations['topleft'] = bounding_box.topleft
+        annotations['bottomright'] = bounding_box.bottomright
 
     return annotations
 #==============================================================================
-def detect_faces_in_video(frame_container):
+def detect_faces_in_video(frame_container, method = "dlib"):
     """
     This function detects a face in each frame of the input video.
@@ -56,6 +67,12 @@ def detect_faces_in_video(frame_container):
     ``frame_container`` : FrameContainer
         FrameContainer containing the frames data.
 
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Options supported by this
+        package: "dlib" (dlib is a dependency of this package). If bob.ip.mtcnn
+        is installed in your system, you can use it as well (bob.ip.mtcnn is NOT
+        a dependency of this package).
+
     **Returns:**
 
@@ -63,6 +80,7 @@ def detect_faces_in_video(frame_container):
         Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
         Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
         is the dictionary defining the coordinates of the face bounding box in frame N.
+        If no annotations are found, an empty dictionary is returned.
     """
 
     annotations = {}
@@ -71,9 +89,11 @@ def detect_faces_in_video(frame_container):
         image = frame[1]
 
-        frame_annotations = detect_face_in_image(image)
-        annotations[str(idx)] = frame_annotations
+        frame_annotations = detect_face_in_image(image, method)
+
+        if frame_annotations:
+            annotations[str(idx)] = frame_annotations
 
     return annotations
...
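
The ``importlib`` pattern introduced above selects the detection backend at runtime, which is what keeps bob.ip.mtcnn an optional (non-declared) dependency. The same idea, factored into a standalone helper for illustration (a sketch mirroring the committed code, not part of the commit itself):

    import importlib

    def load_face_detector_class(method = "dlib"):
        # Import bob.ip.<method> lazily, so only the chosen backend needs
        # to be installed.
        try:
            module = importlib.import_module("bob.ip." + method)
        except ImportError:
            raise ImportError("No module named bob.ip." + method)
        # Each supported backend is expected to expose a FaceDetector class.
        if not hasattr(module, "FaceDetector"):
            raise AttributeError("bob.ip." + method + " module has no attribute FaceDetector")
        return module.FaceDetector

    # detector = load_face_detector_class("mtcnn")()  # only if bob.ip.mtcnn is installed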
@@ -22,6 +22,7 @@ eggs = bob.pad.face
        bob.learn.activation
        bob.ip.flandmark
        bob.ip.facedetect
+       bob.ip.dlib
        bob.ip.qualitymeasure
        bob.learn.linear
        bob.db.base
@@ -56,6 +57,7 @@ develop = src/bob.extension
           src/bob.learn.activation
           src/bob.ip.flandmark
           src/bob.ip.facedetect
+          src/bob.ip.dlib
           src/bob.ip.qualitymeasure
           src/bob.learn.linear
           src/bob.db.base
@@ -92,6 +94,7 @@ bob.io.matlab = git git@gitlab.idiap.ch:bob/bob.io.matlab
 bob.learn.activation = git git@gitlab.idiap.ch:bob/bob.learn.activation
 bob.ip.flandmark = git git@gitlab.idiap.ch:bob/bob.ip.flandmark
 bob.ip.facedetect = git git@gitlab.idiap.ch:bob/bob.ip.facedetect
+bob.ip.dlib = git git@gitlab.idiap.ch:bob/bob.ip.dlib
 bob.ip.qualitymeasure = git git@gitlab.idiap.ch:bob/bob.ip.qualitymeasure
 bob.learn.linear = git git@gitlab.idiap.ch:bob/bob.learn.linear
 bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base
...
@@ -103,7 +103,8 @@ setup(
         # registered preprocessors:
         'bob.pad.preprocessor': [
             'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
-            'preprocessor-rgb-face-detect = bob.pad.face.config.preprocessor.video_face_crop:preprocessor_rgb_face_detect', # detect faces locally replacing database annotations
+            'rgb-face-detect-dlib = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_dlib', # detect faces locally replacing database annotations
+            'rgb-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_mtcnn', # detect faces locally replacing database annotations
         ],
 
         # registered extractors:
...
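
Once the package is installed, the renamed entry points can be discovered and loaded with standard setuptools machinery, for example:

    import pkg_resources

    # List every preprocessor registered under the 'bob.pad.preprocessor'
    # group, which now includes the two detectors added here:
    for entry_point in pkg_resources.iter_entry_points("bob.pad.preprocessor"):
        print(entry_point.name)

    # Loading an entry point imports the config module and returns the
    # VideoFaceCrop instance it defines:
    preprocessor = pkg_resources.load_entry_point(
        "bob.pad.face", "bob.pad.preprocessor", "rgb-face-detect-dlib")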