bob.bio.face: commit bb5b1b70

Integrated MultiFaceCrop into the baselines

Authored Dec 21, 2020 by Laurent COLBOIS
Parent commit: 6771bcd8
Pipeline #46757 passed in 37 minutes and 55 seconds
Showing 2 changed files with 96 additions and 75 deletions:

  bob/bio/face/config/baseline/helpers.py   +77  -66
  bob/bio/face/helpers.py                   +19  -9
bob/bio/face/config/baseline/helpers.py
@@ -17,65 +17,71 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):

    cropped_image_size : tuple
        A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.

    annotation_type : str or list of str
        Type of annotations. Possible values are: `bounding-box`, `eyes-center`,
        `left-profile`, `right-profile` and None, or a combination of those as a list.

    Returns
    -------

    cropped_positions:
        The dictionary of cropped positions that will be fed to the FaceCropper,
        or a list of such dictionaries if ``annotation_type`` is a list.
    """
    # A list of annotation types is handled by recursing on each element,
    # producing one cropped-positions dictionary per type.
    if isinstance(annotation_type, list):
        return [
            embedding_transformer_default_cropping(cropped_image_size, item)
            for item in annotation_type
        ]

    else:
        CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size

        if annotation_type == "bounding-box":
            TOP_LEFT_POS = (0, 0)
            BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
            cropped_positions = {
                "topleft": TOP_LEFT_POS,
                "bottomright": BOTTOM_RIGHT_POS,
            }

        elif annotation_type == "eyes-center":
            RIGHT_EYE_POS = (
                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
                round(1 / 3 * CROPPED_IMAGE_WIDTH),
            )
            LEFT_EYE_POS = (
                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
                round(2 / 3 * CROPPED_IMAGE_WIDTH),
            )
            cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}

        elif annotation_type == "left-profile":
            EYE_POS = (
                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
                round(3 / 8 * CROPPED_IMAGE_WIDTH),
            )
            MOUTH_POS = (
                round(5 / 7 * CROPPED_IMAGE_HEIGHT),
                round(3 / 8 * CROPPED_IMAGE_WIDTH),
            )
            cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}

        elif annotation_type == "right-profile":
            EYE_POS = (
                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
                round(5 / 8 * CROPPED_IMAGE_WIDTH),
            )
            MOUTH_POS = (
                round(5 / 7 * CROPPED_IMAGE_HEIGHT),
                round(5 / 8 * CROPPED_IMAGE_WIDTH),
            )
            cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}

        else:
            cropped_positions = None

        return cropped_positions
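A brief usage sketch of the new list dispatch; the (160, 160) crop size is illustrative, and the coordinates follow from the round() ratios above:

    positions = embedding_transformer_default_cropping(
        (160, 160), ["eyes-center", "bounding-box"]
    )
    # positions[0] -> {"leye": (46, 107), "reye": (46, 53)}
    # positions[1] -> {"topleft": (0, 0), "bottomright": (160, 160)}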
def legacy_default_cropping(cropped_image_size, annotation_type):
    ...
@@ -91,45 +97,50 @@ def legacy_default_cropping(cropped_image_size, annotation_type):

    annotation_type : str
        Type of annotations. Possible values are: `bounding-box`, `eyes-center`,
        `left-profile`, `right-profile` and None, or a combination of those as a list.

    Returns
    -------

    cropped_positions:
        The dictionary of cropped positions that will be fed to the FaceCropper,
        or a list of such dictionaries if ``annotation_type`` is a list.
    """
    # Same list dispatch as in embedding_transformer_default_cropping.
    if isinstance(annotation_type, list):
        return [
            legacy_default_cropping(cropped_image_size, item)
            for item in annotation_type
        ]

    else:
        CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size

        if annotation_type == "bounding-box":
            TOP_LEFT_POS = (0, 0)
            BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
            cropped_positions = {
                "topleft": TOP_LEFT_POS,
                "bottomright": BOTTOM_RIGHT_POS,
            }

        elif annotation_type == "eyes-center":
            RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
            LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
            cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}

        elif annotation_type == "left-profile":
            # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
            EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
            MOUTH_POS = (
                CROPPED_IMAGE_HEIGHT // 3 * 2,
                CROPPED_IMAGE_WIDTH // 7 * 3 - 2,
            )
            cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}

        elif annotation_type == "right-profile":
            # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
            EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
            MOUTH_POS = (
                CROPPED_IMAGE_HEIGHT // 3 * 2,
                CROPPED_IMAGE_WIDTH // 7 * 4 + 2,
            )
            cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}

        else:
            cropped_positions = None

        return cropped_positions
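The same dispatch works for the legacy integer-arithmetic positions. A sketch with an illustrative (80, 64) crop size, following the // expressions above:

    positions = legacy_default_cropping((80, 64), "eyes-center")
    # 80 // 5 == 16; 64 // 4 - 1 == 15; 64 // 4 * 3 == 48
    # -> {"leye": (16, 48), "reye": (16, 15)}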
def embedding_transformer(
    ...

@@ -156,7 +167,7 @@ def embedding_transformer(
    )

    transform_extra_arguments = (
        None
        if (cropped_positions is None or fixed_positions is not None)
        else (("annotations", "annotations"),)
    )

    transformer = make_pipeline(
    ...
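In isolation, the new gating reads as follows; the helper name below is illustrative, not part of the codebase:

    def forwards_annotations(cropped_positions, fixed_positions):
        # Mirrors the condition above: the ("annotations", "annotations") pair
        # is wired in only when there are positions to align to and no fixed
        # positions overriding per-sample annotations.
        return cropped_positions is not None and fixed_positions is None

    assert forwards_annotations({"leye": (46, 107)}, None)
    assert not forwards_annotations({"leye": (46, 107)}, {"leye": (46, 107)})
    assert not forwards_annotations(None, None)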
bob/bio/face/helpers.py
from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
def face_crop_solver(
    ...

@@ -17,11 +17,21 @@ def face_crop_solver(
        return Scale(cropped_image_size)
    else:
        # Detects the face and crops it without eye detection
        if isinstance(cropped_positions, list):
            return MultiFaceCrop(
                cropped_image_size=cropped_image_size,
                cropped_positions_list=cropped_positions,
                fixed_positions_list=fixed_positions,
                color_channel=color_channel,
                dtype=dtype,
                annotation=annotator,
            )
        else:
            return FaceCrop(
                cropped_image_size=cropped_image_size,
                cropped_positions=cropped_positions,
                color_channel=color_channel,
                fixed_positions=fixed_positions,
                dtype=dtype,
                annotator=annotator,
            )
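A hypothetical call sketch (sizes and position values are illustrative, and the keyword arguments not shown are assumed to keep their defaults): a list of position dicts now routes to MultiFaceCrop, while a single dict keeps the original FaceCrop path.

    eyes = {"leye": (32, 75), "reye": (32, 37)}
    bbox = {"topleft": (0, 0), "bottomright": (112, 112)}

    multi_cropper = face_crop_solver(
        cropped_image_size=(112, 112),
        cropped_positions=[eyes, bbox],  # list -> MultiFaceCrop
        fixed_positions=None,
    )
    single_cropper = face_crop_solver(
        cropped_image_size=(112, 112),
        cropped_positions=eyes,          # dict -> FaceCrop
        fixed_positions=None,
    )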