bob / bob.pad.face / Commits / fbc8312a

Commit fbc8312a authored Jan 18, 2018 by Anjith GEORGE

Alignment added,WIP
parent 5000efc5

Pipeline #15484 passed with stages in 11 minutes and 59 seconds

Showing 2 changed files with 48 additions and 24 deletions (+48, -24)
bob/pad/face/preprocessor/ImageFaceCrop.py   +27  -14
bob/pad/face/preprocessor/VideoFaceCrop.py   +21  -10
bob/pad/face/preprocessor/ImageFaceCrop.py

...
@@ -42,16 +42,17 @@ class ImageFaceCrop(Preprocessor):
     """

     #==========================================================================
-    def __init__(self, face_size, rgb_output_flag=False):
+    def __init__(self, face_size, rgb_output_flag=False, use_face_alignment=False):

-        Preprocessor.__init__(self,
-                              face_size=face_size,
-                              rgb_output_flag=rgb_output_flag)
+        Preprocessor.__init__(self,
+                              face_size=face_size,
+                              rgb_output_flag=rgb_output_flag,
+                              use_face_alignment=use_face_alignment)

         self.face_size = face_size
         self.rgb_output_flag = rgb_output_flag
+        self.use_face_alignment = use_face_alignment

     #==========================================================================
-    def normalize_image_size_in_grayscale(self, image, annotations, face_size):
+    def normalize_image_size_in_grayscale(self, image, annotations, face_size, use_face_alignment):
         """
         This function crops the face in the input Gray-scale image given annotations
         defining the face bounding box. The size of the face is also normalized to the
...
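With this hunk the cropper gains a third constructor argument. A minimal sketch of how the updated class would be instantiated (the face size of 64 and the variable names are illustrative, not taken from the package's shipped configuration):

    from bob.pad.face.preprocessor.ImageFaceCrop import ImageFaceCrop

    # Plain bounding-box crop, as before (alignment is off by default):
    cropper = ImageFaceCrop(face_size=64, rgb_output_flag=False)

    # Eye-based alignment enabled through the flag added in this commit:
    aligned_cropper = ImageFaceCrop(face_size=64,
                                    rgb_output_flag=False,
                                    use_face_alignment=True)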
@@ -78,21 +79,33 @@ class ImageFaceCrop(Preprocessor):
             An image of the cropped face of the size (self.face_size, self.face_size).
         """

-        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
-                         annotations['topleft'][1]:annotations['bottomright'][1]]
-
-        tempbbx = np.ndarray((face_size, face_size), 'float64')
-        normbbx = np.ndarray((face_size, face_size), 'uint8')
-        bob.ip.base.scale(cutframe, tempbbx)  # normalization
-        tempbbx_ = tempbbx + 0.5
-        tempbbx_ = np.floor(tempbbx_)
-        normbbx = np.cast['uint8'](tempbbx_)
+        if not use_face_alignment:
+            cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
+                             annotations['topleft'][1]:annotations['bottomright'][1]]
+
+            tempbbx = np.ndarray((face_size, face_size), 'float64')
+            normbbx = np.ndarray((face_size, face_size), 'uint8')
+            bob.ip.base.scale(cutframe, tempbbx)  # normalization
+            tempbbx_ = tempbbx + 0.5
+            tempbbx_ = np.floor(tempbbx_)
+            normbbx = np.cast['uint8'](tempbbx_)
+
+        else:
+            face_eyes_norm = bob.ip.base.FaceEyesNorm(eyes_distance=32.5,
+                                                      crop_size=(face_size, face_size),
+                                                      eyes_center=(16, 31.75))  # Add more params,
+
+            right_eye, left_eye = annotations['right_eye'], annotations['left_eye']
+
+            normalized_image = face_eyes_norm(image, right_eye=right_eye, left_eye=left_eye)
+
+            normbbx = normalized_image.astype('uint8')

         return normbbx

     #==========================================================================
-    def normalize_image_size(self, image, annotations, face_size, rgb_output_flag):
+    def normalize_image_size(self, image, annotations, face_size, rgb_output_flag, use_face_alignment):
         """
         This function crops the face in the input image given annotations defining
         the face bounding box. The size of the face is also normalized to the
...
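The new branch delegates rotation, scaling and cropping to bob.ip.base.FaceEyesNorm, driven only by the two eye centres. A minimal sketch of that geometric normalization in isolation, using the same parameters the commit configures; the input image and the eye coordinates are stand-ins for illustration (in the preprocessor they come from the annotations dictionary):

    import numpy as np
    import bob.ip.base

    face_size = 64  # stands in for the face_size argument of the method

    # Same normalizer as in the commit: inter-eye distance of 32.5 pixels, eyes
    # centred at row 16 / column 31.75 of the (face_size, face_size) output.
    face_eyes_norm = bob.ip.base.FaceEyesNorm(eyes_distance=32.5,
                                              crop_size=(face_size, face_size),
                                              eyes_center=(16, 31.75))

    image = np.random.rand(480, 640) * 255          # stand-in for a gray-scale frame
    right_eye, left_eye = (220, 280), (215, 350)    # hypothetical (y, x) eye centres

    # Rotate, scale and crop so that the eyes land on the configured positions.
    normalized_image = face_eyes_norm(image, right_eye=right_eye, left_eye=left_eye)
    normbbx = normalized_image.astype('uint8')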
@@ -140,7 +153,7 @@ class ImageFaceCrop(Preprocessor):

         for image_channel in image:  # for all color channels in the input image

-            cropped_face = self.normalize_image_size_in_grayscale(image_channel, annotations, face_size)
+            cropped_face = self.normalize_image_size_in_grayscale(image_channel, annotations, face_size, use_face_alignment)

             result.append(cropped_face)
...
@@ -172,6 +185,6 @@ class ImageFaceCrop(Preprocessor):
         """

-        norm_face_image = self.normalize_image_size(image, annotations, self.face_size, self.rgb_output_flag)
+        norm_face_image = self.normalize_image_size(image, annotations, self.face_size, self.rgb_output_flag, self.use_face_alignment)

         return norm_face_image
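Taken together, the aligned path now needs eye landmarks in addition to the bounding box. A hedged sketch of a call to the preprocessor with an annotations dictionary carrying the keys the two branches read ('topleft'/'bottomright' for the plain crop, 'right_eye'/'left_eye' for the aligned one); all coordinate values are invented for illustration:

    import numpy as np
    from bob.pad.face.preprocessor.ImageFaceCrop import ImageFaceCrop

    cropper = ImageFaceCrop(face_size=64, rgb_output_flag=False, use_face_alignment=True)

    image = (np.random.rand(480, 640) * 255).astype('uint8')  # stand-in gray-scale frame

    annotations = {'topleft': (150, 220),       # bounding-box corners, (y, x)
                   'bottomright': (330, 420),
                   'right_eye': (220, 280),     # eye centres used by the aligned branch
                   'left_eye': (215, 350)}

    norm_face_image = cropper(image, annotations)  # (64, 64) aligned face crop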
bob/pad/face/preprocessor/VideoFaceCrop.py

 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-

 """
-Created on Fri May 12 14:14:23 2017
+Created on Thu Jan 18 12:03:12 2018

-@author: Olegs Nikisins
+@author: Anjith George
 """

 #==============================================================================
 # Import what is needed here:
...
@@ -18,7 +18,9 @@ import numpy as np

 from bob.pad.face.preprocessor.ImageFaceCrop import ImageFaceCrop

-from ..utils.face_detection_utils import detect_faces_in_video
+#TODO: Only one is needed
+from ..utils.face_detection_utils import detect_faces_in_video, detect_face_landmarks_in_video

 #==============================================================================
 # Main body:
...
@@ -91,6 +93,11 @@ class VideoFaceCrop(Preprocessor, object):
         is installed in your system you can use it as-well (bob.ip.mtcnn is NOT
         a dependency of this package).

+    ``use_face_alignment`` : :py:class:`bool`
+        If set to ``True`` the face will be aligned using the facial landmarks
+        detected locally. Works only when ``detect_faces_flag == True`` and
+        ``use_local_cropper_flag == True``.
+        Default: ``False``.

     ``kwargs``
         Remaining keyword parameters passed to the Base constructor, such as ``color_channel`` or ``dtype``.
     """
...
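The new docstring entry says alignment only takes effect when local cropping and in-package face detection are both enabled. A hedged configuration sketch of that combination; the cropped_image_size value is illustrative, and the cropped_positions argument with its placeholder value is an assumption (it does not appear in the hunks above and should not be consumed by the local-cropper path):

    from bob.pad.face.preprocessor.VideoFaceCrop import VideoFaceCrop

    preprocessor = VideoFaceCrop(cropped_image_size=(64, 64),
                                 cropped_positions={'topleft': (0, 0),        # assumed placeholder,
                                                    'bottomright': (64, 64)}, # not used by ImageFaceCrop
                                 use_local_cropper_flag=True,   # route frames through ImageFaceCrop
                                 rgb_output_flag=False,
                                 detect_faces_flag=True,        # annotations computed inside the preprocessor
                                 face_detection_method="dlib",
                                 use_face_alignment=True)       # flag added in this commit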
@@ -105,10 +112,11 @@ class VideoFaceCrop(Preprocessor, object):
                  mask_seed=None,
                  check_face_size_flag=False,
                  min_face_size=50,
-                 use_local_cropper_flag=False,
-                 rgb_output_flag=False,
-                 detect_faces_flag=False,
+                 use_local_cropper_flag=False,  #TODO
+                 rgb_output_flag=False,  #TODO: Needs RGB output->config
+                 detect_faces_flag=False,  #TODO
                  face_detection_method="dlib",
+                 use_face_alignment=False,  #TODO: uses aligncrop if true
                  **kwargs):

         super(VideoFaceCrop, self).__init__(
...
@@ -124,6 +132,7 @@ class VideoFaceCrop(Preprocessor, object):
             rgb_output_flag=rgb_output_flag,
             detect_faces_flag=detect_faces_flag,
             face_detection_method=face_detection_method,
+            use_face_alignment=use_face_alignment,
             **kwargs)

         self.cropped_image_size = cropped_image_size
...
@@ -138,6 +147,7 @@ class VideoFaceCrop(Preprocessor, object):
         self.rgb_output_flag = rgb_output_flag
         self.detect_faces_flag = detect_faces_flag
         self.face_detection_method = face_detection_method
+        self.use_face_alignment = use_face_alignment

         # Save also the data stored in the kwargs:
         for (k, v) in kwargs.items():
...
@@ -145,9 +155,10 @@ class VideoFaceCrop(Preprocessor, object):

         if self.use_local_cropper_flag:

             preprocessor = ImageFaceCrop(face_size=self.cropped_image_size[0],
-                                         rgb_output_flag=self.rgb_output_flag)
+                                         rgb_output_flag=self.rgb_output_flag,
+                                         use_face_alignment=self.use_face_alignment)

         else:
...
@@ -303,8 +314,8 @@ class VideoFaceCrop(Preprocessor, object):

         if self.detect_faces_flag:

-            annotations = detect_faces_in_video(frames, self.face_detection_method)
+            annotations = detect_face_landmarks_in_video(frames, self.face_detection_method)  #TODO: new dicts

         if len(frames) != len(annotations):  # if some annotations are missing
...
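Switching from detect_faces_in_video to detect_face_landmarks_in_video means the per-frame annotations must now carry eye positions as well as the bounding box, because the aligned branch of ImageFaceCrop reads 'right_eye' and 'left_eye'. A hedged sketch of the structure this commit appears to expect; the frame-index string keys, the presence of exactly these four keys, and all coordinate values are assumptions for illustration:

    # One dictionary per frame with a detected face, keyed by frame index (string keys assumed).
    annotations = {
        "0": {"topleft": (150, 220),       # bounding box, (y, x)
              "bottomright": (330, 420),
              "right_eye": (220, 280),     # eye centres consumed by the aligned crop
              "left_eye": (215, 350)},
        "1": {"topleft": (152, 224),
              "bottomright": (332, 424),
              "right_eye": (222, 284),
              "left_eye": (217, 354)},
    }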