Commit bb5b1b70 authored by Laurent COLBOIS

Integrated MultiFaceCrop into the baselines

parent 6771bcd8
Pipeline #46757 passed in 37 minutes and 55 seconds
```diff
@@ -17,65 +17,71 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     cropped_image_size : tuple
         A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
 
-    annotation_type: str
+    annotation_type: str or list of str
         Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
-        'right-profile' and None
+        'right-profile' and None, or a combination of those as a list
 
     Returns
     -------
 
     cropped_positions:
-        The dictionary of cropped positions that will be fed to the FaceCropper.
+        The dictionary of cropped positions that will be fed to the FaceCropper, or a list of
+        such dictionaries if ``annotation_type`` is a list
     """
-    CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
-
-    if annotation_type == "bounding-box":
-        TOP_LEFT_POS = (0, 0)
-        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
-
-    elif annotation_type == "eyes-center":
-        RIGHT_EYE_POS = (
-            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(1 / 3 * CROPPED_IMAGE_WIDTH),
-        )
-        LEFT_EYE_POS = (
-            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(2 / 3 * CROPPED_IMAGE_WIDTH),
-        )
-        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
-
-    elif annotation_type == "left-profile":
-        EYE_POS = (
-            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(3 / 8 * CROPPED_IMAGE_WIDTH),
-        )
-        MOUTH_POS = (
-            round(5 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(3 / 8 * CROPPED_IMAGE_WIDTH),
-        )
-        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
-
-    elif annotation_type == "right-profile":
-        EYE_POS = (
-            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(5 / 8 * CROPPED_IMAGE_WIDTH),
-        )
-        MOUTH_POS = (
-            round(5 / 7 * CROPPED_IMAGE_HEIGHT),
-            round(5 / 8 * CROPPED_IMAGE_WIDTH),
-        )
-        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
-
+    if isinstance(annotation_type, list):
+        return [embedding_transformer_default_cropping(cropped_image_size, item) for item in annotation_type]
     else:
-        cropped_positions = None
+        CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
 
-    return cropped_positions
+        if annotation_type == "bounding-box":
+            TOP_LEFT_POS = (0, 0)
+            BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+            cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
+
+        elif annotation_type == "eyes-center":
+            RIGHT_EYE_POS = (
+                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(1 / 3 * CROPPED_IMAGE_WIDTH),
+            )
+            LEFT_EYE_POS = (
+                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(2 / 3 * CROPPED_IMAGE_WIDTH),
+            )
+            cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+
+        elif annotation_type == "left-profile":
+            EYE_POS = (
+                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(3 / 8 * CROPPED_IMAGE_WIDTH),
+            )
+            MOUTH_POS = (
+                round(5 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(3 / 8 * CROPPED_IMAGE_WIDTH),
+            )
+            cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
+
+        elif annotation_type == "right-profile":
+            EYE_POS = (
+                round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(5 / 8 * CROPPED_IMAGE_WIDTH),
+            )
+            MOUTH_POS = (
+                round(5 / 7 * CROPPED_IMAGE_HEIGHT),
+                round(5 / 8 * CROPPED_IMAGE_WIDTH),
+            )
+            cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
+
+        else:
+            cropped_positions = None
+
+        return cropped_positions
```
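With the recursion added at the top, callers keep a single entry point for both the frontal-only and the multi-pose baselines. A minimal sketch of the new behaviour, with the import path assumed (adjust it to wherever these helpers actually live in the package):

```python
# Sketch only: the module path below is an assumption, not confirmed by the diff.
from bob.bio.face.config.baseline.helpers import (
    embedding_transformer_default_cropping,
)

size = (112, 112)

# A single annotation type still returns one dictionary, as before:
print(embedding_transformer_default_cropping(size, "eyes-center"))
# -> {'leye': (32, 75), 'reye': (32, 37)}

# A list of annotation types now returns one dictionary per entry, in order,
# via the recursive call added at the top of the function:
positions = embedding_transformer_default_cropping(
    size, ["left-profile", "eyes-center", "right-profile"]
)
assert len(positions) == 3
assert positions[1] == {"leye": (32, 75), "reye": (32, 37)}
```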
```diff
@@ -91,45 +97,50 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
     annotation_type: str
         Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
-        'right-profile' and None
+        'right-profile' and None, or a combination of those as a list
 
     Returns
     -------
 
     cropped_positions:
-        The dictionary of cropped positions that will be fed to the FaceCropper.
+        The dictionary of cropped positions that will be fed to the FaceCropper, or a list of
+        such dictionaries if ``annotation_type`` is a list
     """
-    CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
-
-    if annotation_type == "bounding-box":
-        TOP_LEFT_POS = (0, 0)
-        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
-
-    elif annotation_type == "eyes-center":
-        RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
-        LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
-        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
-
-    elif annotation_type == "left-profile":
-        # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
-        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
-        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
-        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
-
-    elif annotation_type == "right-profile":
-        # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
-        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
-        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
-        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
-
+    if isinstance(annotation_type, list):
+        return [legacy_default_cropping(cropped_image_size, item) for item in annotation_type]
     else:
-        cropped_positions = None
+        CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
 
-    return cropped_positions
+        if annotation_type == "bounding-box":
+            TOP_LEFT_POS = (0, 0)
+            BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+            cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
+
+        elif annotation_type == "eyes-center":
+            RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
+            LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
+            cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+
+        elif annotation_type == "left-profile":
+            # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
+            EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+            MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+            cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
+
+        elif annotation_type == "right-profile":
+            # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
+            EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+            MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+            cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
+
+        else:
+            cropped_positions = None
+
+        return cropped_positions
```
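The legacy variant follows the same pattern, only with the integer-division geometry used by the older Bob croppers. A quick sketch, with the same caveat about the import path:

```python
# Sketch only: the module path is an assumption.
from bob.bio.face.config.baseline.helpers import legacy_default_cropping

# Single annotation type: one dictionary, e.g. for an 80x64 crop:
print(legacy_default_cropping((80, 64), "eyes-center"))
# -> {'leye': (16, 48), 'reye': (16, 15)}

# List of annotation types: one dictionary per entry, ready to be handed to
# the list-aware face_crop_solver shown further down.
print(legacy_default_cropping((80, 64), ["eyes-center", "left-profile"]))
```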
```diff
@@ -156,7 +167,7 @@ def embedding_transformer(
     )
 
     transform_extra_arguments = (
-        None if cropped_positions is None else (("annotations", "annotations"),)
+        None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)
     )
 
     transformer = make_pipeline(
```
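The new guard also covers the fixed-positions case: when the crop locations are fixed, the cropper no longer depends on per-sample annotations, so none are forwarded through the pipeline. A self-contained illustration of the expression (the helper name is hypothetical, introduced only for this sketch):

```python
# Hypothetical helper reproducing the expression from the diff.
def _extra_args(cropped_positions, fixed_positions):
    return (
        None
        if (cropped_positions is None or fixed_positions is not None)
        else (("annotations", "annotations"),)
    )

assert _extra_args(None, None) is None  # no cropping, no annotations needed
assert _extra_args({"leye": (32, 75)}, None) == (("annotations", "annotations"),)
assert _extra_args({"leye": (32, 75)}, {"leye": (32, 75)}) is None  # fixed crop
```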
```diff
-from bob.bio.face.preprocessor import FaceCrop, Scale
+from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale

@@ -17,11 +17,21 @@ def face_crop_solver(
         return Scale(cropped_image_size)
     else:
         # Detects the face and crops it without eye detection
-        return FaceCrop(
-            cropped_image_size=cropped_image_size,
-            cropped_positions=cropped_positions,
-            color_channel=color_channel,
-            fixed_positions=fixed_positions,
-            dtype=dtype,
-            annotator=annotator,
-        )
+        if isinstance(cropped_positions, list):
+            return MultiFaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions_list=cropped_positions,
+                fixed_positions_list=fixed_positions,
+                color_channel=color_channel,
+                dtype=dtype,
+                annotator=annotator,
+            )
+        else:
+            return FaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions=cropped_positions,
+                color_channel=color_channel,
+                fixed_positions=fixed_positions,
+                dtype=dtype,
+                annotator=annotator,
+            )
```
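Putting the pieces together, face_crop_solver now dispatches on the type of cropped_positions: a single dict builds the usual FaceCrop, while a list builds a MultiFaceCrop holding one set of positions per pose. A usage sketch, with the import path and the remaining default arguments assumed:

```python
# Sketch only: import path and untouched defaults are assumptions.
from bob.bio.face.helpers import face_crop_solver

# A plain dict still yields a FaceCrop:
cropper = face_crop_solver(
    cropped_image_size=(112, 112),
    cropped_positions={"leye": (32, 75), "reye": (32, 37)},
)

# A list of dicts (e.g. frontal plus both profiles) yields a MultiFaceCrop:
multi_cropper = face_crop_solver(
    cropped_image_size=(112, 112),
    cropped_positions=[
        {"leye": (32, 75), "reye": (32, 37)},   # eyes-center
        {"leye": (32, 42), "mouth": (80, 42)},  # left-profile
        {"reye": (32, 70), "mouth": (80, 70)},  # right-profile
    ],
)
```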