bob / bob.learn.pytorch

Commit c8ca46ef, authored Jan 30, 2019 by Guillaume HEUSCH

[datasets] added a wrapper to FaceCropAlign preprocessor from bob.pad.face

parent 508a93e1
Changes: 2 files
bob/learn/pytorch/datasets/__init__.py
```python
from .casia_webface import CasiaDataset
from .casia_webface import CasiaWebFaceDataset
from .fargo import FargoDataset
from .data_folder import DataFolder

# transforms
from .utils import FaceCropper
from .utils import FaceCropAlign
from .utils import RollChannels
from .utils import ToTensor
from .utils import Normalize
```
...
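These exports are plain callables that operate on dict-style samples, so they can be chained with ``torchvision.transforms.Compose``. The sketch below is a hypothetical illustration, not part of the commit: only ``FaceCropper``'s constructor is visible in this diff, and the ``'image'``/``'eyes'`` sample layout is assumed from the utils.py changes shown further down.

```python
# Hypothetical pipeline sketch (not from the commit). FaceCropper's signature
# comes from the utils.py diff below; the sample dict layout and the eye
# annotation format are assumptions.
import numpy
from torchvision import transforms
from bob.learn.pytorch.datasets import FaceCropper

pipeline = transforms.Compose([
    FaceCropper(cropped_height=128, cropped_width=128),
    # RollChannels, ToTensor, Normalize would typically follow here; their
    # constructors are not shown in this diff, so they are omitted.
])

sample = {
    'image': numpy.zeros((3, 160, 160), dtype='uint8'),  # bob-style CxHxW uint8 image (assumed layout)
    'eyes': {'reye': (60, 50), 'leye': (60, 110)},       # (y, x) eye centres (assumed format)
}
sample = pipeline(sample)  # sample['image'] is now the 128x128 cropped face
```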
bob/learn/pytorch/datasets/utils.py

...
```diff
@@ -9,22 +9,77 @@ class FaceCropper():
     """
     Class to crop a face, based on eyes position
     """
-    def __init__(self, cropped_height, cropped_width):
+    def __init__(self, cropped_height, cropped_width, color_channel='rgb'):
         # the face cropper
         from bob.bio.face.preprocessor import FaceCrop
         cropped_image_size = (cropped_height, cropped_width)
         right_eye_pos = (cropped_height // 5, cropped_width // 4 - 1)
         left_eye_pos = (cropped_height // 5, cropped_width // 4 * 3)
         cropped_positions = {'leye': left_eye_pos, 'reye': right_eye_pos}
+        self.color_channel = color_channel
         self.face_cropper = FaceCrop(cropped_image_size=cropped_image_size,
                                      cropped_positions=cropped_positions,
-                                     color_channel='rgb',
+                                     color_channel=color_channel,
                                      dtype='uint8')

     def __call__(self, sample):
         cropped = self.face_cropper(sample['image'], sample['eyes'])
         sample['image'] = cropped
+        if self.color_channel == 'gray':
+            sample['image'] = sample['image'][..., numpy.newaxis]
         return sample
```
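The ``color_channel`` option added here keeps the wrapper's output consistent for grayscale models: when ``'gray'`` is requested, a singleton channel axis is appended. A minimal usage sketch, assuming bob.bio.face and its dependencies are installed and that ``sample['eyes']`` uses the annotation-dict format expected by ``FaceCrop``:

```python
# Minimal sketch of the new color_channel='gray' behaviour (illustration only).
import numpy
from bob.learn.pytorch.datasets import FaceCropper

cropper = FaceCropper(cropped_height=128, cropped_width=128, color_channel='gray')

sample = {
    'image': numpy.random.randint(0, 256, (3, 160, 160), dtype='uint8'),  # CxHxW uint8 image (assumed layout)
    'eyes': {'reye': (60, 50), 'leye': (60, 110)},                        # (y, x) eye centres (assumed format)
}
sample = cropper(sample)
# With color_channel='gray' the wrapper appends numpy.newaxis, so
# sample['image'] should have shape (128, 128, 1).
```

The remainder of the hunk adds the new FaceCropAlign wrapper shown below.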
```python
class FaceCropAlign():
    """
    Wrapper for the FaceCropAlign preprocessor of bob.pad.face
    """
    def __init__(self, face_size,
                 rgb_output_flag=False,
                 use_face_alignment=True,
                 alignment_type='lightcnn',
                 face_detection_method='mtcnn',
                 ):
        """ Init function

        Parameters
        ----------
        face_size: :obj:`int`
            The size of the cropped face (square).
        rgb_output_flag: :py:class:`bool`
            Return an RGB cropped face if True, grayscale otherwise.
        use_face_alignment: :py:class:`bool`
            If set to True, the face will be aligned using facial landmarks detected locally.
            Works only when ``face_detection_method is not None``.
        alignment_type: :py:class:`str`
            Specifies the alignment type to use if ``use_face_alignment`` is set to ``True``.
            Two methods are currently implemented:
            ``default``, which aligns the face by making the eyes horizontal, and
            ``lightcnn``, which aligns the face such that the eye centers and mouth center are mapped
            to predefined positions. This option overrides the face size option, since the required
            output is always 128x128. This is suitable for use with the LightCNN model.
        face_detection_method: :py:class:`str`
            The package to be used for face detection and landmark detection.
            Options supported by this class: "dlib" and "mtcnn".
        """
        from bob.pad.face.preprocessor import FaceCropAlign
        self.face_cropper = FaceCropAlign(face_size,
                                          rgb_output_flag,
                                          use_face_alignment,
                                          alignment_type=alignment_type,
                                          face_detection_method=face_detection_method,
                                          )

    def __call__(self, sample):
        cropped = self.face_cropper(sample['image'])
        if cropped is None:
            print("Face not detected ...")
            cropped = numpy.zeros((128, 128))
        sample['image'] = cropped[..., numpy.newaxis]
        return sample
```
...
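A minimal usage sketch for the new wrapper, assuming bob.pad.face (and the MTCNN face detector it relies on) is installed; the parameter values simply mirror the defaults above:

```python
# Illustration only: exercises the wrapper added in this commit.
import numpy
from bob.learn.pytorch.datasets import FaceCropAlign

crop_align = FaceCropAlign(face_size=128,
                           rgb_output_flag=False,
                           use_face_alignment=True,
                           alignment_type='lightcnn',
                           face_detection_method='mtcnn')

sample = {'image': numpy.random.randint(0, 256, (3, 160, 160), dtype='uint8')}
sample = crop_align(sample)
# No face will be found in this synthetic image, so the wrapper falls back to
# a 128x128 array of zeros; either way the output carries a trailing channel
# axis, giving sample['image'] a shape of (128, 128, 1).
```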