Commit e646eff7 authored by Tiago de Freitas Pereira

Merge branch 'crop' into 'master'

Changed 112x112 crop

See merge request !101
parents d4021c32 9ca2d984
Pipeline #47970 passed
@@ -5,8 +5,10 @@ from bob.pipelines import wrap
 from bob.bio.face.helpers import face_crop_solver
 import numpy as np
 import logging
 
 logger = logging.getLogger(__name__)
+
+
 def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     Computes the default cropped positions for the FaceCropper used with Facenet-like
@@ -30,7 +32,10 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     ``annotation_type`` is a list
     """
     if isinstance(annotation_type, list):
-        return [embedding_transformer_default_cropping(cropped_image_size, item) for item in annotation_type]
+        return [
+            embedding_transformer_default_cropping(cropped_image_size, item)
+            for item in annotation_type
+        ]
 
     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
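
Note: when ``annotation_type`` is a list, the function now recurses and returns one cropped-positions entry per annotation type. A minimal usage sketch, assuming the helpers are importable from bob.bio.face.config.baseline.helpers (the file path is not shown on this page) and that "eyes-center" and "bounding-box" are among the supported annotation types:

from bob.bio.face.config.baseline.helpers import (  # assumed module path
    embedding_transformer_default_cropping,
)

# Single annotation type: one dict of landmark -> (row, col) positions.
positions = embedding_transformer_default_cropping((112, 112), "eyes-center")

# List of annotation types: one result per item, computed recursively.
positions_list = embedding_transformer_default_cropping(
    (112, 112), ["eyes-center", "bounding-box"]
)
assert len(positions_list) == 2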
@@ -78,7 +83,9 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     else:
-        logger.warning(f"Annotation type {annotation_type} is not supported. Input images will be fully scaled.")
+        logger.warning(
+            f"Annotation type {annotation_type} is not supported. Input images will be fully scaled."
+        )
         cropped_positions = None
 
     return cropped_positions
@@ -107,7 +114,10 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
     ``annotation_type`` is a list
     """
     if isinstance(annotation_type, list):
-        return [legacy_default_cropping(cropped_image_size, item) for item in annotation_type]
+        return [
+            legacy_default_cropping(cropped_image_size, item)
+            for item in annotation_type
+        ]
 
     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
@@ -137,7 +147,9 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
     else:
-        logger.warning(f"Annotation type {annotation_type} is not supported. Input images will be fully scaled.")
+        logger.warning(
+            f"Annotation type {annotation_type} is not supported. Input images will be fully scaled."
+        )
         cropped_positions = None
 
     return cropped_positions
@@ -167,7 +179,9 @@ def embedding_transformer(
     )
 
     transform_extra_arguments = (
-        None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)
+        None
+        if (cropped_positions is None or fixed_positions is not None)
+        else (("annotations", "annotations"),)
     )
 
     transformer = make_pipeline(
@@ -221,7 +235,7 @@ def embedding_transformer_112x112(
     cropped_image_size = (112, 112)
     if annotation_type == "eyes-center":
         # Hard coding eye positions for backward consistency
-        cropped_positions = {"leye": (32, 77), "reye": (32, 34)}
+        cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
     else:
         # Will use default
         cropped_positions = embedding_transformer_default_cropping(
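
Note: this is the only behavioural change in the merge; everything else is code formatting. The hard-coded eye positions are (row, column) pairs, so both eyes move down from row 32 to row 49 and the inter-eye distance shrinks from 77 - 34 = 43 to 72 - 38 = 34 columns, leaving more margin around the face in the 112x112 crop. A quick plain-Python check of that geometry:

# (row, col) positions of the left and right eye in the 112x112 crop.
old = {"leye": (32, 77), "reye": (32, 34)}
new = {"leye": (49, 72), "reye": (49, 38)}

for name, pos in (("old", old), ("new", new)):
    row = pos["leye"][0]                    # both eyes share one row
    dist = pos["leye"][1] - pos["reye"][1]  # horizontal inter-eye distance
    print(name, "eye row:", row, "inter-eye distance:", dist)
# old eye row: 32 inter-eye distance: 43
# new eye row: 49 inter-eye distance: 34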
@@ -316,7 +330,9 @@ def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
     )
 
     transform_extra_arguments = (
-        None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)
+        None
+        if (cropped_positions is None or fixed_positions is not None)
+        else (("annotations", "annotations"),)
     )
 
     return face_cropper, transform_extra_arguments
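
Note: the ``transform_extra_arguments`` tuple built here (and in ``embedding_transformer`` above) is meant for bob.pipelines: it maps each sample's ``annotations`` attribute onto the ``annotations`` keyword of the cropper's ``transform``, and stays ``None`` when fixed positions make annotations unnecessary. A sketch of the expected wiring, assuming ``crop_80x64`` lives in the same helpers module as above:

from bob.pipelines import wrap
from bob.bio.face.config.baseline.helpers import crop_80x64  # assumed path

face_cropper, transform_extra_arguments = crop_80x64("eyes-center")

# Wrapping as a sample transformer forwards sample.annotations to the
# cropper's transform() via the ("annotations", "annotations") mapping.
sample_cropper = wrap(
    ["sample"],
    face_cropper,
    transform_extra_arguments=transform_extra_arguments,
)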