Commit 6771bcd8 authored by Laurent COLBOIS

Added a test and description of the MultiFaceCrop

parent 10ca24c0
Pipeline #46753 passed in 37 minutes and 32 seconds
@@ -373,12 +373,26 @@ class FaceCrop(Base):
        self._init_non_pickables()


-class MultiFaceCrop(TransformerMixin, BaseEstimator):
+class MultiFaceCrop(Base):
+    """Wraps around FaceCrop to enable a dynamic cropper that can handle several annotation types.
+    Initialization and usage are similar to FaceCrop, the main difference being that one specifies
+    a *list* of cropped_positions, and optionally a *list* of associated fixed positions.
+    For each set of cropped_positions in the list, a new FaceCrop is instantiated to handle that
+    exact set of annotations.
+    When the *transform* method is called, the MultiFaceCrop matches each sample to its associated
+    cropper based on the received annotations, then performs the cropping of each subset, and
+    finally gathers the results.
+    In case of ambiguity (no cropper matches the received annotations, or several croppers match
+    them), a ValueError is raised.
+    """
+
    def __init__(
        self,
        cropped_image_size,
        cropped_positions_list,
-        fixed_positions=None,
+        fixed_positions_list=None,
        mask_sigma=None,
        mask_neighbors=5,
        mask_seed=None,
@@ -388,9 +402,14 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
    ):

        assert isinstance(cropped_positions_list, list)
+        if fixed_positions_list is None:
+            fixed_positions_list = [None] * len(cropped_positions_list)
+        assert isinstance(fixed_positions_list, list)
+
        self.croppers = {}
-        for cropped_positions in cropped_positions_list:
+        for cropped_positions, fixed_positions in zip(
+            cropped_positions_list, fixed_positions_list
+        ):
            assert len(cropped_positions) == 2
            self.croppers[tuple(cropped_positions)] = FaceCrop(
                cropped_image_size,
@@ -408,26 +427,38 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
        subsets = {k: {"X": [], "annotations": []} for k in self.croppers.keys()}

        def assign(X_elem, annotations_elem):
            # Assign a single sample to its matching cropper
            # Compare the received annotations keys to the cropped_positions keys of each cropper
            valid_keys = [
                k
                for k in self.croppers.keys()
                if set(k).issubset(set(annotations_elem.keys()))
            ]
-            assert (
-                len(valid_keys) == 1
-            ), "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
-                len(valid_keys)
-            )
-            subsets[valid_keys[0]]["X"].append(X_elem)
-            subsets[valid_keys[0]]["annotations"].append(annotations_elem)
+
+            # Ensure exactly one cropper is a match
+            if len(valid_keys) != 1:
+                raise ValueError(
+                    "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
+                        len(valid_keys)
+                    )
+                )
+            else:
+                # Assign the sample to this particular cropper
+                cropper_key = valid_keys[0]
+                subsets[cropper_key]["X"].append(X_elem)
+                subsets[cropper_key]["annotations"].append(annotations_elem)

        # Assign each sample to its matching cropper
        for X_elem, annotations_elem in zip(X, annotations):
            assign(X_elem, annotations_elem)

        # Call each FaceCrop on its sample subset
        transformed_subsets = {
            k: self.croppers[k].transform(**subsets[k]) for k in subsets.keys()
        }

        # Gather the results
        return [item for sublist in transformed_subsets.values() for item in sublist]

    def fit(self, X, y=None):
......
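For context, here is a minimal usage sketch of the new class. The image sizes and all annotation coordinates below are made up for illustration; only the call pattern follows the docstring and the test added further down.

import numpy

from bob.bio.face.preprocessor import MultiFaceCrop

# Two dummy gray-scale images standing in for real face images
images = [numpy.random.rand(400, 400), numpy.random.rand(400, 400)]

# One cropper keyed on eye annotations, one keyed on bounding-box annotations
cropper = MultiFaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions_list=[
        {"leye": (16, 48), "reye": (16, 15)},
        {"topleft": (0, 0), "bottomright": (80, 64)},
    ],
)

# Each sample is routed to the unique cropper whose annotation keys are a
# subset of that sample's annotation keys; the results are then gathered
cropped = cropper.transform(
    images,
    [
        {"leye": (120, 160), "reye": (120, 100)},
        {"topleft": (85, 130), "bottomright": (270, 330)},
    ],
)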
from .Base import Base
-from .FaceCrop import FaceCrop
+from .FaceCrop import FaceCrop, MultiFaceCrop
from .TanTriggs import TanTriggs
from .INormLBP import INormLBP
......
+topleft 85 130
+bottomright 270 330
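These two lines are presumably the new data/testimage_bbox.pos annotation file referenced by the test below, written in bob's "named" format (one "<name> <y> <x>" entry per line). As a sketch, assuming that file path, it would be parsed like this:

import bob.db.base

# Parse the "named" annotation format into a dict of name -> (y, x)
annotations = bob.db.base.read_annotation_file("data/testimage_bbox.pos", "named")
# -> roughly {"topleft": (85, 130), "bottomright": (270, 330)}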
@@ -151,6 +151,56 @@ def test_face_crop():
    # reset the configuration, so that later tests don't get screwed.
    cropper.color_channel = "gray"


+def test_multi_face_crop():
+    # read input
+    image = _image()
+    eye_annotation, bbox_annotation = [
+        bob.db.base.read_annotation_file(
+            pkg_resources.resource_filename(
+                "bob.bio.face.test", "data/" + filename + ".pos"
+            ),
+            "named",
+        )
+        for filename in ["testimage", "testimage_bbox"]
+    ]
+
+    # define the preprocessor
+    cropper = bob.bio.face.preprocessor.MultiFaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        cropped_positions_list=[
+            {'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
+            {'topleft': (0, 0), 'bottomright': (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)},
+        ],
+    )
+
+    # execute face cropper
+    eye_reference, bbox_reference = [
+        pkg_resources.resource_filename(
+            "bob.bio.face.test", "data/" + filename + ".hdf5"
+        )
+        for filename in ["cropped", "cropped_bbox"]
+    ]
+    eye_cropped, bbox_cropped = cropper.transform(
+        [image, image], [eye_annotation, bbox_annotation]
+    )
+
+    # Compare the cropped results to the references
+    _compare(eye_cropped, eye_reference)
+    _compare(bbox_cropped, bbox_reference)
+
+    # test that a ValueError is raised if the annotations don't match any cropper
+    try:
+        annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching no cropper"
+    except ValueError:
+        pass
+
+    # test that a ValueError is raised if the annotations match several croppers
+    try:
+        annot = {**eye_annotation, **bbox_annotation}
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching several croppers"
+    except ValueError:
+        pass
+
+
def test_tan_triggs():
    # read input
......
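The selection rule that both error cases in the test exercise boils down to a set-inclusion check over annotation keys. A standalone sketch of just that logic, with cropper names and annotations invented for illustration:

# Sketch of the cropper-selection rule from MultiFaceCrop.transform:
# a cropper matches when its annotation keys are a subset of the sample's keys.
croppers = {
    ("leye", "reye"): "eye cropper",
    ("topleft", "bottomright"): "bbox cropper",
}

def select_cropper(annotations):
    valid_keys = [k for k in croppers if set(k).issubset(annotations.keys())]
    if len(valid_keys) != 1:
        raise ValueError(
            "Cropper selection from the annotations is ambiguous "
            "({} valid croppers)".format(len(valid_keys))
        )
    return croppers[valid_keys[0]]

print(select_cropper({"leye": (120, 160), "reye": (120, 100)}))  # -> eye cropper

try:
    select_cropper({"landmark_A": (60, 60), "landmark_B": (120, 120)})
except ValueError as e:
    print(e)  # -> ... ambiguous (0 valid croppers)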