Commit 6771bcd8 (bob / bob.bio.face)
authored Dec 21, 2020 by Laurent COLBOIS
Added a test and description of the MultiFaceCrop
parent 10ca24c0
Pipeline #46753 passed with stage in 37 minutes and 32 seconds

Showing 5 changed files with 94 additions and 11 deletions (+94 -11)
bob/bio/face/preprocessor/FaceCrop.py        +41  -10
bob/bio/face/preprocessor/__init__.py         +1   -1
bob/bio/face/test/data/cropped_bbox.hdf5      +0   -0
bob/bio/face/test/data/testimage_bbox.pos     +2   -0
bob/bio/face/test/test_preprocessors.py      +50   -0
bob/bio/face/preprocessor/FaceCrop.py
View file @ 6771bcd8

...
@@ -373,12 +373,26 @@ class FaceCrop(Base):
         self._init_non_pickables()
 
 
-class MultiFaceCrop(TransformerMixin, BaseEstimator):
+class MultiFaceCrop(Base):
+    """Wraps around FaceCrop to enable a dynamic cropper that can handle several annotation types.
+
+    Initialization and usage are similar to the FaceCrop, but the main difference here is that one specifies
+    a *list* of cropped_positions, and optionally a *list* of associated fixed positions.
+
+    For each set of cropped_positions in the list, a new FaceCrop is instantiated that handles this
+    exact set of annotations.
+
+    When calling the *transform* method, the MultiFaceCrop matches each sample to its associated cropper
+    based on the received annotations, then performs the cropping of each subset, and finally gathers the results.
+
+    In case of ambiguity (when no cropper is a match for the received annotations, or when several croppers
+    match the received annotations), a ValueError is raised.
+    """
+
     def __init__(
         self,
         cropped_image_size,
         cropped_positions_list,
-        fixed_positions=None,
+        fixed_positions_list=None,
         mask_sigma=None,
         mask_neighbors=5,
         mask_seed=None,
...
@@ -388,9 +402,14 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
     ):
         assert isinstance(cropped_positions_list, list)
 
+        if fixed_positions_list is None:
+            fixed_positions_list = [None] * len(cropped_positions_list)
+        assert isinstance(fixed_positions_list, list)
         self.croppers = {}
 
-        for cropped_positions in cropped_positions_list:
+        for cropped_positions, fixed_positions in zip(
+            cropped_positions_list, fixed_positions_list
+        ):
             assert len(cropped_positions) == 2
             self.croppers[tuple(cropped_positions)] = FaceCrop(
                 cropped_image_size,
...
@@ -408,26 +427,38 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
         subsets = {k: {"X": [], "annotations": []} for k in self.croppers.keys()}
 
         def assign(X_elem, annotations_elem):
+            # Assign a single sample to its matching cropper
+            # Compare the received annotations keys to the cropped_positions keys of each cropper
             valid_keys = [
                 k
                 for k in self.croppers.keys()
                 if set(k).issubset(set(annotations_elem.keys()))
             ]
-            assert (
-                len(valid_keys) == 1
-            ), "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
-                len(valid_keys)
-            )
-            subsets[valid_keys[0]]["X"].append(X_elem)
-            subsets[valid_keys[0]]["annotations"].append(annotations_elem)
 
+            # Ensure exactly one cropper is a match
+            if len(valid_keys) != 1:
+                raise ValueError(
+                    "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
+                        len(valid_keys)
+                    )
+                )
+            else:
+                # Assign the sample to this particular cropper
+                cropper_key = valid_keys[0]
+                subsets[cropper_key]["X"].append(X_elem)
+                subsets[cropper_key]["annotations"].append(annotations_elem)
 
+        # Assign each sample to its matching cropper
         for X_elem, annotations_elem in zip(X, annotations):
             assign(X_elem, annotations_elem)
 
+        # Call each FaceCrop on its sample subset
         transformed_subsets = {
             k: self.croppers[k].transform(**subsets[k]) for k in subsets.keys()
         }
 
+        # Gather the results
         return [item for sublist in transformed_subsets.values() for item in sublist]
 
     def fit(self, X, y=None):
...
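To make the docstring above concrete, here is a minimal usage sketch. The crop size, landmark positions, and dummy images below are illustrative assumptions, not values taken from the repository's configuration; the real end-to-end check is the test added further down in this commit.

import numpy

from bob.bio.face.preprocessor import MultiFaceCrop

# Illustrative values only: the crop size and landmark positions are
# assumptions for this sketch, not the package's actual configuration.
cropper = MultiFaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions_list=[
        {"reye": (16, 15), "leye": (16, 48)},          # eye-landmark cropper
        {"topleft": (0, 0), "bottomright": (80, 64)},  # bounding-box cropper
    ],
)

# Two gray-level dummy images; each sample is routed to the cropper whose
# cropped_positions keys match its annotation keys, so both annotation types
# can be mixed within a single batch.
images = [numpy.zeros((400, 400)), numpy.zeros((400, 400))]
annotations = [
    {"reye": (118, 150), "leye": (120, 210)},
    {"topleft": (85, 130), "bottomright": (270, 330)},
]
cropped = cropper.transform(images, annotations)

# Annotations matching no cropper, or more than one, raise a ValueError.

Internally, one FaceCrop is instantiated per entry of cropped_positions_list and keyed by its annotation names, which is why each sample's annotation keys must select exactly one cropper.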
bob/bio/face/preprocessor/__init__.py
View file @ 6771bcd8

 from .Base import Base
-from .FaceCrop import FaceCrop
+from .FaceCrop import FaceCrop, MultiFaceCrop
 from .TanTriggs import TanTriggs
 from .INormLBP import INormLBP
...
bob/bio/face/test/data/cropped_bbox.hdf5
new file (0 → 100644)
View file @ 6771bcd8

File added

bob/bio/face/test/data/testimage_bbox.pos
new file (0 → 100644)
View file @ 6771bcd8

+topleft 85 130
+bottomright 270 330
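The new annotation file follows the same named layout as the existing testimage.pos: one annotation per line, given as a name followed by its (y, x) coordinates. Read through bob.db.base.read_annotation_file(..., "named"), as done in the test below, it should yield a dictionary along these lines (a sketch; the exact numeric types depend on the reader):

# Approximate parsed content of testimage_bbox.pos (illustration only).
bbox_annotation = {
    "topleft": (85, 130),       # (y, x) of the bounding box's top-left corner
    "bottomright": (270, 330),  # (y, x) of the bounding box's bottom-right corner
}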
bob/bio/face/test/test_preprocessors.py
View file @ 6771bcd8

...
@@ -151,6 +151,56 @@ def test_face_crop():
     # reset the configuration, so that later tests don't get screwed.
     cropper.color_channel = "gray"
 
 
+def test_multi_face_crop():
+    # read input
+    image = _image()
+    eye_annotation, bbox_annotation = [
+        bob.db.base.read_annotation_file(
+            pkg_resources.resource_filename(
+                "bob.bio.face.test", "data/" + filename + ".pos"
+            ),
+            "named",
+        )
+        for filename in ["testimage", "testimage_bbox"]
+    ]
+
+    # define the preprocessor
+    cropper = bob.bio.face.preprocessor.MultiFaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        cropped_positions_list=[
+            {'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
+            {'topleft': (0, 0), 'bottomright': (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)},
+        ],
+    )
+
+    # execute face cropper
+    eye_reference, bbox_reference = [
+        pkg_resources.resource_filename(
+            "bob.bio.face.test", "data/" + filename + ".hdf5"
+        )
+        for filename in ["cropped", "cropped_bbox"]
+    ]
+    eye_cropped, bbox_cropped = cropper.transform(
+        [image, image], [eye_annotation, bbox_annotation]
+    )
+
+    # Compare the cropped results to the reference
+    _compare(eye_cropped, eye_reference)
+    _compare(bbox_cropped, bbox_reference)
+
+    # test a ValueError is raised if the annotations don't match any cropper
+    try:
+        annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching no cropper"
+    except ValueError:
+        pass
+
+    # test a ValueError is raised if the annotations match several croppers
+    try:
+        annot = {**eye_annotation, **bbox_annotation}
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching several croppers"
+    except ValueError:
+        pass
+
+
 def test_tan_triggs():
     # read input
...
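To exercise only the new test locally, one option is to point a test runner at it directly; the snippet below assumes pytest is installed in the environment (the project's CI may use a different runner).

# Run only the new MultiFaceCrop test (assumes pytest is available).
import pytest

pytest.main(["-q", "bob/bio/face/test/test_preprocessors.py::test_multi_face_crop"])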