Commit 96323e57 authored by Olegs NIKISINS

Moved some class methods to functions, WIP

parent 956906a4
1 merge request: !57 Preprocessor refactoring, VideoFaceCrop+ImageFaceCrop replaced with FaceCropAlign+Wrapper
@@ -17,8 +17,144 @@ import bob.ip.color
import bob.ip.base

#==============================================================================
# Main body:

#==========================================================================
def normalize_image_size_in_grayscale(image, annotations, face_size, use_face_alignment):
    """
    This function crops the face in the input gray-scale image given
    annotations defining the face bounding box and the eye positions.
    The size of the face is normalized to the pre-defined dimensions.
    Two normalization options are available and are selected by the
    ``use_face_alignment`` flag, see below.

    **Parameters:**

    ``image`` : 2D :py:class:`numpy.ndarray`
        Gray-scale input image.

    ``annotations`` : :py:class:`dict`
        A dictionary containing annotations of the face bounding box,
        eye locations and facial landmarks.
        Dictionary must be as follows:
        ``{'topleft': (row, col), 'bottomright': (row, col),
        'left_eye': (row, col), 'right_eye': (row, col)}``.

    ``face_size`` : :py:class:`int`
        The size of the face after normalization.

    ``use_face_alignment`` : :py:class:`bool`
        If ``False``, the re-sizing from the publication
        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
        is used. If ``True``, the facial image is both re-sized and aligned
        using the eye positions given in the annotations.

    **Returns:**

    ``normbbx`` : 2D :py:class:`numpy.ndarray`
        An image of the cropped face of the size (face_size, face_size).
    """

    if use_face_alignment:

        face_eyes_norm = bob.ip.base.FaceEyesNorm(
            eyes_distance=((face_size + 1) / 2.),
            crop_size=(face_size, face_size),
            eyes_center=(face_size / 4., (face_size - 0.5) / 2.))

        right_eye, left_eye = annotations['right_eye'], annotations['left_eye']

        normalized_image = face_eyes_norm(image, right_eye=right_eye, left_eye=left_eye)

        normbbx = normalized_image.astype('uint8')

    else:

        # plain bounding-box crop followed by re-scaling to (face_size, face_size)
        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
                         annotations['topleft'][1]:annotations['bottomright'][1]]

        tempbbx = np.ndarray((face_size, face_size), 'float64')
        normbbx = np.ndarray((face_size, face_size), 'uint8')
        bob.ip.base.scale(cutframe, tempbbx)  # normalization

        tempbbx_ = tempbbx + 0.5
        tempbbx_ = np.floor(tempbbx_)
        normbbx = np.cast['uint8'](tempbbx_)

    return normbbx
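

# Usage sketch for normalize_image_size_in_grayscale() (illustration only, not
# part of this commit): the random test image and the annotation coordinates
# below are invented, and ``np`` refers to the module-level numpy import used
# by the functions in this file.
def _example_normalize_image_size_in_grayscale():

    gray_image = (np.random.rand(128, 128) * 255).astype('uint8')

    annotations = {'topleft': (20, 30), 'bottomright': (100, 110),
                   'left_eye': (45, 85), 'right_eye': (45, 55)}

    # plain bounding-box crop re-scaled to 64x64:
    face = normalize_image_size_in_grayscale(
        gray_image, annotations, face_size=64, use_face_alignment=False)

    # eye-based alignment instead of plain cropping:
    face_aligned = normalize_image_size_in_grayscale(
        gray_image, annotations, face_size=64, use_face_alignment=True)

    return face, face_aligned
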
#==========================================================================
def normalize_image_size(image, annotations, face_size,
                         rgb_output_flag, use_face_alignment):
    """
    This function crops the face in the input image given annotations defining
    the face bounding box. The size of the face is normalized to the
    pre-defined dimensions. For RGB inputs it is possible to return either a
    color or a gray-scale output; this is controlled by ``rgb_output_flag``.
    Two normalization options are available and are selected by the
    ``use_face_alignment`` flag, see below.

    **Parameters:**

    ``image`` : 2D or 3D :py:class:`numpy.ndarray`
        Input image (RGB or gray-scale).

    ``annotations`` : :py:class:`dict`
        A dictionary containing annotations of the face bounding box,
        eye locations and facial landmarks.
        Dictionary must be as follows:
        ``{'topleft': (row, col), 'bottomright': (row, col),
        'left_eye': (row, col), 'right_eye': (row, col)}``.

    ``face_size`` : :py:class:`int`
        The size of the face after normalization.

    ``rgb_output_flag`` : :py:class:`bool`
        Return an RGB cropped face if ``True``, otherwise a gray-scale image
        is returned.

    ``use_face_alignment`` : :py:class:`bool`
        If ``False``, the facial image re-sizing from the publication
        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
        is used. If ``True``, the facial image is both re-sized and aligned
        using the eye positions given in the annotations.

    **Returns:**

    ``face`` : 2D or 3D :py:class:`numpy.ndarray`
        An image of the cropped face of the size (face_size, face_size),
        RGB 3D or gray-scale 2D.
    """

    if len(image.shape) == 3:

        if not rgb_output_flag:

            image = bob.ip.color.rgb_to_gray(image)

    if len(image.shape) == 2:

        image = [image]  # make gray-scale image an iterable

    result = []

    for image_channel in image:  # for all color channels in the input image

        cropped_face = normalize_image_size_in_grayscale(
            image_channel, annotations, face_size, use_face_alignment)

        result.append(cropped_face)

    face = np.stack(result, axis=0)

    face = np.squeeze(face)  # squeeze 1-st dimension for gray-scale images

    return face
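

# Usage sketch for normalize_image_size() (illustration only, not part of this
# commit): demonstrates the effect of ``rgb_output_flag`` on an invented RGB
# test image stored in bob's (channel, row, col) layout.
def _example_normalize_image_size():

    rgb_image = (np.random.rand(3, 128, 128) * 255).astype('uint8')

    annotations = {'topleft': (20, 30), 'bottomright': (100, 110),
                   'left_eye': (45, 85), 'right_eye': (45, 55)}

    # gray-scale 64x64 crop (the RGB input is converted internally):
    gray_face = normalize_image_size(rgb_image, annotations, face_size=64,
                                     rgb_output_flag=False,
                                     use_face_alignment=False)

    # color crop of shape (3, 64, 64):
    rgb_face = normalize_image_size(rgb_image, annotations, face_size=64,
                                    rgb_output_flag=True,
                                    use_face_alignment=False)

    return gray_face, rgb_face
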
class ImageFaceCrop(Preprocessor):

@@ -50,151 +186,103 @@ class ImageFaceCrop(Preprocessor):

        self.face_size = face_size
        self.rgb_output_flag = rgb_output_flag

    #==========================================================================
    # [removed: the in-class ``normalize_image_size_in_grayscale()`` method,
    #  now available as the module-level function of the same name above]

    def __call__(self, image, annotations):
        """
        Call the ``normalize_image_size()`` method of this class.

        **Parameters:**

        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
            Input image (RGB or gray-scale).

        ``annotations`` : :py:class:`dict`
            A dictionary containing annotations of the face bounding box.
            Dictionary must be as follows:
            ``{'topleft': (row, col), 'bottomright': (row, col)}``.

        **Returns:**

        ``norm_face_image`` : 2D or 3D :py:class:`numpy.ndarray`
            An image of the cropped face of the size (self.face_size, self.face_size),
            RGB 3D or gray-scale 2D.
        """

        norm_face_image = self.normalize_image_size(
            image, annotations, self.face_size, self.rgb_output_flag,
            self.use_face_alignment)

        return norm_face_image
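
    # Usage sketch for this preprocessor (illustration only, not part of this
    # commit). The constructor arguments are inferred from the attributes
    # referenced in __call__ (face_size, rgb_output_flag, use_face_alignment)
    # and may not match the final signature; the image and annotations are
    # invented.
    #
    #     cropper = ImageFaceCrop(face_size=64, rgb_output_flag=False,
    #                             use_face_alignment=False)
    #
    #     image = (np.random.rand(3, 128, 128) * 255).astype('uint8')
    #     annotations = {'topleft': (20, 30), 'bottomright': (100, 110)}
    #
    #     norm_face = cropper(image, annotations)  # 64x64 gray-scale face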
    #==========================================================================
    # [removed: the in-class ``normalize_image_size()`` method and the old
    #  ``__call__`` wrapping it, both superseded by the module-level
    #  ``normalize_image_size()`` function above]

    # (The ``__call__`` below operates on video data and belongs to the
    #  video-level preprocessor further down in the file; its class
    #  declaration is outside this hunk.)

    def __call__(self, frames, annotations):
        """
        Crop the face in the input video frames given annotations for each frame.

        **Parameters:**

        ``frames`` : FrameContainer
            Video data stored in the FrameContainer,
            see ``bob.bio.video.utils.FrameContainer`` for further details.

        ``annotations`` : :py:class:`dict`
            A dictionary containing the annotations for each frame in the video.
            Dictionary structure:
            ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``,
            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
            defines the coordinates of the face bounding box in frame N.

        **Returns:**

        ``preprocessed_video`` : FrameContainer
            Cropped faces stored in the FrameContainer.
        """

        if self.detect_faces_flag:

            if self.max_image_size:  # max_image_size = 1920

                if np.max(frames[0][1].shape) > self.max_image_size:

                    return bob.bio.video.FrameContainer()

            try:
                annotations = detect_face_landmarks_in_video(
                    frames, self.face_detection_method)  # TODO: new dicts
            except:
                return bob.bio.video.FrameContainer()

        if len(frames) != len(annotations):  # if some annotations are missing

            # select only annotated frames:
            frames, annotations = self.select_annotated_frames(
                frames, annotations)

        preprocessed_video = self.video_preprocessor(
            frames=frames, annotations=annotations)

        if self.check_face_size_flag:

            preprocessed_video = self.check_face_size(
                preprocessed_video, annotations, self.min_face_size)

        return preprocessed_video
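
    # Sketch of the per-frame annotations structure expected by the video
    # __call__ above (illustration only, not part of this commit; the
    # preprocessor instance and the FrameContainer construction are assumed
    # and not shown here):
    #
    #     annotations = {
    #         '1': {'topleft': (10, 20), 'bottomright': (110, 120)},
    #         '2': {'topleft': (12, 22), 'bottomright': (112, 122)},
    #     }
    #
    #     preprocessed_video = video_face_crop(frames=frame_container,
    #                                          annotations=annotations)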