Commit bf6eb731 authored by Tiago de Freitas Pereira

Merge branch 'multifacecrop' into 'master'

MultiFaceCrop

See merge request !92
parents 83f4fde0 9517c050
Pipeline #46781 failed with stages in 57 minutes and 50 seconds
@@ -4,7 +4,8 @@ from bob.bio.base.wrappers import wrap_sample_preprocessor
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np
import logging
logger = logging.getLogger(__name__)
def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
"""
@@ -17,16 +18,20 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
cropped_image_size : tuple
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
annotation_type: str or list of str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
'right-profile' and None
'right-profile' and None, or a combination of those as a list
Returns
-------
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper.
The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
``annotation_type`` is a list
"""
if isinstance(annotation_type, list):
return [embedding_transformer_default_cropping(cropped_image_size, item) for item in annotation_type]
CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
if annotation_type == "bounding-box":
@@ -73,6 +78,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
else:
logger.warning(f"Annotation type {annotation_type} is not supported. Input images will be fully scaled.")
cropped_positions = None
return cropped_positions
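A minimal usage sketch of the new list dispatch, assuming the function above is in scope; the crop size and annotation types are illustrative only, not taken from this merge request. legacy_default_cropping below follows the same recursive pattern.

# Illustrative only: a list of annotation types yields one cropped_positions
# dict per entry, in the same order as the input list.
positions = embedding_transformer_default_cropping(
    (112, 112), ["eyes-center", "bounding-box"]
)
assert isinstance(positions, list) and len(positions) == 2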
@@ -91,14 +97,18 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
'right-profile' and None
'right-profile' and None, or a combination of those as a list
Returns
-------
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper.
The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
``annotation_type`` is a list
"""
if isinstance(annotation_type, list):
return [legacy_default_cropping(cropped_image_size, item) for item in annotation_type]
CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
if annotation_type == "bounding-box":
@@ -127,6 +137,7 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
else:
logger.warning(f"Annotation type {annotation_type} is not supported. Input images will be fully scaled.")
cropped_positions = None
return cropped_positions
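The same recursion applies here; a short sketch with illustrative arguments, showing that unsupported entries in the list fall back to None just as in the scalar case above.

# Illustrative only: the second entry is not a supported annotation type, so a
# warning is logged and its cropped positions come back as None.
positions = legacy_default_cropping((160, 160), ["eyes-center", "unknown-type"])
assert positions[1] is None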
@@ -156,7 +167,7 @@ def embedding_transformer(
)
transform_extra_arguments = (
None if cropped_positions is None else (("annotations", "annotations"),)
None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)
)
transformer = make_pipeline(
......
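For context on the tightened condition above, a hedged sketch of how transform_extra_arguments is consumed when the cropper is wrapped as a sample transformer; the wrap call follows the usual bob.pipelines convention, and the positions below are illustrative, not part of this merge request.

from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver

# Illustrative setup: one eyes-based cropper, no fixed positions.
cropped_positions = {"leye": (55, 72), "reye": (55, 40)}
fixed_positions = None
face_cropper = face_crop_solver(
    cropped_image_size=(112, 112),
    cropped_positions=cropped_positions,
    fixed_positions=fixed_positions,
)

# With fixed_positions given, per-sample annotations are unnecessary, so nothing
# is forwarded from the samples; otherwise each sample's ``annotations``
# attribute is passed as the ``annotations`` argument of transform().
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)
wrapped_cropper = wrap(
    ["sample"], face_cropper, transform_extra_arguments=transform_extra_arguments
)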
from bob.bio.face.preprocessor import FaceCrop, Scale
from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
def face_crop_solver(
@@ -17,11 +17,21 @@ def face_crop_solver(
return Scale(cropped_image_size)
else:
# Detects the face and crops it without eye detection
return FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel=color_channel,
fixed_positions=fixed_positions,
dtype=dtype,
annotator=annotator,
)
if isinstance(cropped_positions, list):
return MultiFaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions_list=cropped_positions,
fixed_positions_list=fixed_positions,
color_channel=color_channel,
dtype=dtype,
annotator=annotator,
)
else:
return FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel=color_channel,
fixed_positions=fixed_positions,
dtype=dtype,
annotator=annotator,
)
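A hedged usage sketch of this dispatch, using only the keyword names visible in this hunk; the crop size and positions are illustrative.

from bob.bio.face.helpers import face_crop_solver

# A single positions dict still yields a plain FaceCrop ...
eyes_cropper = face_crop_solver(
    cropped_image_size=(112, 112),
    cropped_positions={"leye": (55, 72), "reye": (55, 40)},
)

# ... while a list of positions dicts yields a MultiFaceCrop that routes each
# sample to the matching internal FaceCrop at transform time.
multi_cropper = face_crop_solver(
    cropped_image_size=(112, 112),
    cropped_positions=[
        {"leye": (55, 72), "reye": (55, 40)},
        {"topleft": (0, 0), "bottomright": (112, 112)},
    ],
)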
@@ -373,12 +373,26 @@ class FaceCrop(Base):
self._init_non_pickables()
class MultiFaceCrop(TransformerMixin, BaseEstimator):
class MultiFaceCrop(Base):
""" Wraps around FaceCrop to enable a dynamical cropper that can handle several annotation types.
Initialization and usage is similar to the FaceCrop, but the main difference here is that one specifies
a *list* of cropped_positions, and optionally a *list* of associated fixed positions.
For each set of cropped_positions in the list, a new FaceCrop will be instanciated that handles this
exact set of annotations.
When calling the *transform* method, the MultiFaceCrop matches each sample to its associated cropper
based on the received annotation, then performs the cropping of each subset, and finally gathers the results.
In case of ambiguity (when no cropper is a match for the received annotations, or when several croppers
match the received annotations), raises a ValueError.
"""
def __init__(
self,
cropped_image_size,
cropped_positions_list,
fixed_positions=None,
fixed_positions_list=None,
mask_sigma=None,
mask_neighbors=5,
mask_seed=None,
@@ -388,9 +402,14 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
):
assert isinstance(cropped_positions_list, list)
if fixed_positions_list is None:
fixed_positions_list = [None] * len(cropped_positions_list)
assert isinstance(fixed_positions_list, list)
self.croppers = {}
for cropped_positions in cropped_positions_list:
for cropped_positions, fixed_positions in zip(
cropped_positions_list, fixed_positions_list
):
assert len(cropped_positions) == 2
self.croppers[tuple(cropped_positions)] = FaceCrop(
cropped_image_size,
@@ -408,26 +427,38 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
subsets = {k: {"X": [], "annotations": []} for k in self.croppers.keys()}
def assign(X_elem, annotations_elem):
# Assign a single sample to its matching cropper
# Compare the received annotations keys to the cropped_positions keys of each cropper
valid_keys = [
k
for k in self.croppers.keys()
if set(k).issubset(set(annotations_elem.keys()))
]
assert (
len(valid_keys) == 1
), "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
len(valid_keys)
)
subsets[valid_keys[0]]["X"].append(X_elem)
subsets[valid_keys[0]]["annotations"].append(annotations_elem)
# Ensure exactly one cropper is a match
if len(valid_keys) != 1:
raise ValueError(
"Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
len(valid_keys)
)
)
else:
# Assign the sample to this particular cropper
cropper_key = valid_keys[0]
subsets[cropper_key]["X"].append(X_elem)
subsets[cropper_key]["annotations"].append(annotations_elem)
# Assign each sample to its matching cropper
for X_elem, annotations_elem in zip(X, annotations):
assign(X_elem, annotations_elem)
# Call each FaceCrop on its sample subset
transformed_subsets = {
k: self.croppers[k].transform(**subsets[k]) for k in subsets.keys()
}
# Gather the results
return [item for sublist in transformed_subsets.values() for item in sublist]
def fit(self, X, y=None):
......
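To make the matching rule concrete, a small sketch of the subset test performed in assign(); the annotation keys and coordinates are illustrative, and the string values stand in for the real FaceCrop instances built in __init__.

# Illustrative only: croppers are indexed by the tuple of annotation keys they
# expect, and a sample is routed to the cropper whose keys are a subset of the
# sample's annotation keys; anything other than exactly one match is an error.
croppers = {
    ("leye", "reye"): "eyes-center cropper",
    ("topleft", "bottomright"): "bounding-box cropper",
}
annotations = {"leye": (120, 210), "reye": (118, 150)}
valid_keys = [k for k in croppers if set(k).issubset(annotations.keys())]
assert valid_keys == [("leye", "reye")]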
from .Base import Base
from .FaceCrop import FaceCrop
from .FaceCrop import FaceCrop, MultiFaceCrop
from .TanTriggs import TanTriggs
from .INormLBP import INormLBP
......
topleft 85 130
bottomright 270 330
@@ -151,6 +151,56 @@ def test_face_crop():
# reset the configuration, so that later tests don't get screwed.
cropper.color_channel = "gray"
def test_multi_face_crop():
# read input
image = _image()
eye_annotation, bbox_annotation = [
bob.db.base.read_annotation_file(
pkg_resources.resource_filename("bob.bio.face.test", "data/" + filename + ".pos"),
"named"
)
for filename in ["testimage", "testimage_bbox"]
]
# define the preprocessor
cropper = bob.bio.face.preprocessor.MultiFaceCrop(
cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
cropped_positions_list=[
{'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
{'topleft': (0, 0), 'bottomright': (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)}
]
)
# execute face cropper
eye_reference, bbox_reference = [
pkg_resources.resource_filename(
"bob.bio.face.test", "data/" + filename + ".hdf5"
)
for filename in ["cropped", "cropped_bbox"]
]
eye_cropped, bbox_cropped = cropper.transform([image, image], [eye_annotation, bbox_annotation])
# Compare the cropped results to the reference
_compare(eye_cropped, eye_reference)
_compare(bbox_cropped, bbox_reference)
# test a ValueError is raised if the annotations don't match any cropper
try:
annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
cropper.transform([image], [annot])
assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching no cropper"
except ValueError:
pass
# test a ValueError is raised if the annotations match several croppers
try:
annot = {**eye_annotation, **bbox_annotation}
cropper.transform([image], [annot])
assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching several croppers"
except ValueError:
pass
def test_tan_triggs():
# read input
......