Commit bf618295 authored by Amir MOHAMMADI

Merge branch 'refactor-cropping' into 'master'

Refactor baseline config helpers

See merge request !119
parents ddee3e69 43eedd1f
1 merge request: !119 Refactor baseline config helpers
Pipeline #51146 passed
Showing changed files with 399 additions and 527 deletions
......@@ -4,6 +4,7 @@ from . import algorithm
from . import script
from . import database
from . import annotator
from . import utils
from . import test
......
from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
memory_demanding = False
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
ArcFaceInsightFace(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
color_channel="rgb",
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
return arcface_baseline(
embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
pipeline = load(annotation_type, fixed_positions)
......
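After the refactor, each baseline config file collapses to the same three-step pattern: look up the database settings, delegate pipeline assembly to a template helper, and expose the resulting `pipeline`. A minimal sketch of the refactored ArcFace-InsightFace baseline, assembled from the added lines above:

from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline

# Safe defaults are returned when no `database` variable was chained in
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None):
    # Cropping, embedding and the Distance algorithm all come from the template
    return arcface_baseline(
        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)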
......@@ -5,21 +5,17 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
VanillaBiometricsPipeline,
)
from bob.pipelines.transformers import SampleLinearize
from bob.bio.face.utils import lookup_config_from_database
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
import bob.ip.color
from sklearn.base import TransformerMixin, BaseEstimator
class ToGray(TransformerMixin, BaseEstimator):
def transform(self, X, annotations=None):
return [bob.ip.color.rgb_to_gray(data)[0:10,0:10] for data in X]
return [bob.ip.color.rgb_to_gray(data)[0:10, 0:10] for data in X]
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
......@@ -34,9 +30,7 @@ def load(annotation_type, fixed_positions=None):
transformer = make_pipeline(
wrap(
["sample"],
ToGray(),
transform_extra_arguments=transform_extra_arguments,
["sample"], ToGray(), transform_extra_arguments=transform_extra_arguments,
),
SampleLinearize(),
)
......
from bob.bio.face.embeddings.tf2_inception_resnet import (
FaceNetSanderberg_20170512_110547,
)
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return facenet_baseline(
embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
......
......@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
from bob.bio.face.utils import (
lookup_config_from_database,
legacy_default_cropping,
make_cropper,
)
import math
import numpy as np
import bob.bio.face
......@@ -17,20 +21,9 @@ import logging
logger = logging.getLogger(__name__)
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def get_cropper(annotation_type, fixed_positions=None):
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
return face_cropper, transform_extra_arguments
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def get_pipeline(face_cropper, transform_extra_arguments):
......@@ -80,9 +73,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
def load(annotation_type, fixed_positions=None):
####### SOLVING THE FACE CROPPER TO BE USED ##########
face_cropper, transform_extra_arguments = get_cropper(
annotation_type, fixed_positions
# Define cropped positions
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
# Cropping
face_cropper, transform_extra_arguments = make_cropper(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="gray",
annotator="mtcnn",
)
return get_pipeline(face_cropper, transform_extra_arguments)
......
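The legacy baselines (Gabor graph above, LBP histogram below) no longer call the removed crop_80x64 helper; each load() now rebuilds the same 80x64 gray-scale cropper from the new utilities, additionally passing annotator="mtcnn", presumably so faces can still be located when a sample carries no annotations. The replacement pattern in isolation, a sketch of the lines added above with annotation_type taken from the database lookup:

from bob.bio.face.utils import legacy_default_cropping, make_cropper

CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5  # 64
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

# Same eye/bounding-box defaults the old crop_80x64 helper computed internally
cropped_positions = legacy_default_cropping(cropped_image_size, "eyes-center")

face_cropper, transform_extra_arguments = make_cropper(
    cropped_image_size=cropped_image_size,
    cropped_positions=cropped_positions,
    fixed_positions=None,
    color_channel="gray",
    annotator="mtcnn",
)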
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv1_Casia_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv1_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return facenet_baseline(
embedding=InceptionResnetv1_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv1_MsCeleb_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv1_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return facenet_baseline(
embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv2_Casia_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv2_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return facenet_baseline(
embedding=InceptionResnetv2_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv2_MsCeleb_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv2_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return facenet_baseline(
embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
......@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
from bob.bio.face.utils import (
lookup_config_from_database,
legacy_default_cropping,
make_cropper,
)
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
......@@ -18,20 +22,27 @@ import logging
logger = logging.getLogger(__name__)
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
####### SOLVING THE FACE CROPPER TO BE USED ##########
def load(annotation_type, fixed_positions=None):
# Define cropped positions
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
face_cropper, transform_extra_arguments = make_cropper(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="gray",
annotator="mtcnn",
)
preprocessor = bob.bio.face.preprocessor.TanTriggs(
......
......@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
from bob.bio.face.utils import (
lookup_config_from_database,
legacy_default_cropping,
make_cropper,
)
import math
import numpy as np
import bob.bio.face
......@@ -13,20 +17,9 @@ import bob.math
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def get_cropper(annotation_type, fixed_positions=None):
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
return face_cropper, transform_extra_arguments
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def get_pipeline(face_cropper, transform_extra_arguments):
......@@ -70,10 +63,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
def load(annotation_type, fixed_positions=None):
# Define cropped positions
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
####### SOLVING THE FACE CROPPER TO BE USED ##########
face_cropper, transform_extra_arguments = get_cropper(
annotation_type, fixed_positions
# Cropping
face_cropper, transform_extra_arguments = make_cropper(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="gray",
annotator="mtcnn",
)
return get_pipeline(face_cropper, transform_extra_arguments)
......
from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
return arcface_baseline(
embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
pipeline = load(annotation_type, fixed_positions)
......
from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return arcface_baseline(
embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
return arcface_baseline(
embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.utils import (
dnn_default_cropping,
embedding_transformer,
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
def arcface_baseline(embedding, annotation_type, fixed_positions=None):
# DEFINE CROPPING
cropped_image_size = (112, 112)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {
"leye": (55, 81),
"reye": (55, 42),
}
else:
cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
transformer = embedding_transformer(
cropped_image_size=cropped_image_size,
embedding=embedding,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="rgb",
annotator="mtcnn",
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
def facenet_baseline(embedding, annotation_type, fixed_positions=None):
# DEFINE CROPPING
cropped_image_size = (160, 160)
cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
# ASSEMBLE TRANSFORMER
transformer = embedding_transformer(
cropped_image_size=cropped_image_size,
embedding=embedding,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="rgb",
annotator="mtcnn",
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
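Every CNN baseline in this merge request now delegates to one of the two templates above: arcface_baseline fixes a 112x112 RGB crop (with hard-coded eye positions for eyes-center annotations) and facenet_baseline a 160x160 RGB crop with proportional positions; both attach a Distance algorithm. A short usage sketch with embeddings that appear elsewhere in this diff:

from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
from bob.bio.face.embeddings.tf2_inception_resnet import (
    FaceNetSanderberg_20170512_110547,
)
from bob.bio.face.config.baseline.templates import arcface_baseline, facenet_baseline

arcface_pipeline = arcface_baseline(
    embedding=ArcFaceInsightFace(memory_demanding=False),
    annotation_type="eyes-center",
)
facenet_pipeline = facenet_baseline(
    embedding=FaceNetSanderberg_20170512_110547(memory_demanding=False),
    annotation_type="eyes-center",
)
# Both return a VanillaBiometricsPipeline; its .transformer attribute is the
# FaceCrop + embedding pipeline assembled by embedding_transformer().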
from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.config.baseline.helpers import (
embedding_transformer_default_cropping,
embedding_transformer,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
from sklearn.pipeline import make_pipeline
from bob.pipelines.wrappers import wrap
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
CROPPED_IMAGE_SIZE = (160, 160)
CROPPED_POSITIONS = embedding_transformer_default_cropping(
CROPPED_IMAGE_SIZE, annotation_type=annotation_type
)
extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
embedding = InceptionResnetv2(
checkpoint_path=extractor_path, memory_demanding=memory_demanding
)
transformer = embedding_transformer(
CROPPED_IMAGE_SIZE,
embedding,
annotation_type,
CROPPED_POSITIONS,
fixed_positions,
return facenet_baseline(
embedding=embedding,
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
def face_crop_solver(
cropped_image_size,
cropped_positions=None,
color_channel="rgb",
fixed_positions=None,
annotator=None,
dtype="uint8",
):
"""
Decide which face cropper to use.
"""
# If there are no cropped positions, just resize
if cropped_positions is None:
return Scale(cropped_image_size)
else:
# Detects the face and crops it without eye detection
if isinstance(cropped_positions, list):
return MultiFaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions_list=cropped_positions,
fixed_positions_list=fixed_positions,
color_channel=color_channel,
dtype=dtype,
annotation=annotator,
)
else:
return FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel=color_channel,
fixed_positions=fixed_positions,
dtype=dtype,
annotator=annotator,
)
......@@ -114,42 +114,12 @@ class FaceCrop(Base):
allow_upside_down_normalized_faces=False,
**kwargs,
):
# call base class constructor
Base.__init__(self, **kwargs)
if isinstance(cropped_image_size, int):
cropped_image_size = (cropped_image_size, cropped_image_size)
if isinstance(cropped_positions, str):
face_size = cropped_image_size[0]
if cropped_positions == "eyes-center":
eyes_distance = (face_size + 1) / 2.0
eyes_center = (face_size / 4.0, (face_size - 0.5) / 2.0)
right_eye = (eyes_center[0], eyes_center[1] - eyes_distance / 2)
left_eye = (eyes_center[0], eyes_center[1] + eyes_distance / 2)
cropped_positions = {"reye": right_eye, "leye": left_eye}
elif cropped_positions == "bounding-box":
cropped_positions = {
"topleft": (0, 0),
"bottomright": cropped_image_size,
}
else:
raise ValueError(
f"Got {cropped_positions} as cropped_positions "
"while only eyes and bbox strings are supported."
)
# call base class constructor
self.cropped_image_size = cropped_image_size
self.cropped_positions = cropped_positions
self.fixed_positions = fixed_positions
self.mask_sigma = mask_sigma
self.mask_neighbors = mask_neighbors
self.mask_seed = mask_seed
# check parameters
assert len(cropped_positions) == 2
if fixed_positions:
......
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.bio.base.wrappers import wrap_sample_preprocessor
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np
import logging
from .preprocessor import FaceCrop
from .preprocessor import MultiFaceCrop
from .preprocessor import Scale
from bob.pipelines import wrap
from sklearn.pipeline import make_pipeline
logger = logging.getLogger(__name__)
def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
def lookup_config_from_database(database):
"""
Computes the default cropped positions for the FaceCropper used with Facenet-like
Embedding extractors, proportionally to the target image size
Read configuration values that might be already defined in the database configuration
file.
"""
if database is not None:
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding
if hasattr(database, "memory_demanding")
else False
)
else:
annotation_type = None
fixed_positions = None
memory_demanding = False
return annotation_type, fixed_positions, memory_demanding
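lookup_config_from_database centralizes the `if "database" in locals()` boilerplate that each baseline config used to repeat. A small usage sketch; DummyDatabase is only an illustrative stand-in exposing the three attributes the helper reads:

from bob.bio.face.utils import lookup_config_from_database


class DummyDatabase:
    annotation_type = "eyes-center"
    fixed_positions = None
    memory_demanding = True


# With a database object, its settings are passed through unchanged
assert lookup_config_from_database(DummyDatabase()) == ("eyes-center", None, True)

# Without one (config loaded standalone), safe defaults come back
assert lookup_config_from_database(None) == (None, None, False)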
def dnn_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with Neural-Net based
extractors, proportionally to the target image size
Parameters
......@@ -21,7 +43,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str or list of str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, `left-profile`,
`right-profile` and None, or a combination of those as a list
Returns
......@@ -33,8 +55,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
"""
if isinstance(annotation_type, list):
return [
embedding_transformer_default_cropping(cropped_image_size, item)
for item in annotation_type
dnn_default_cropping(cropped_image_size, item) for item in annotation_type
]
CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
......@@ -93,7 +114,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
def legacy_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with legacy extractors,
proportionally to the target image size
......@@ -103,7 +124,7 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, `left-profile`,
`right-profile` and None, or a combination of those as a list
Returns
......@@ -155,27 +176,75 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
return cropped_positions
def embedding_transformer(
def pad_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used in PAD applications,
proportionally to the target image size
Parameters
----------
cropped_image_size : tuple
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None, or a combination of those as a list
Returns
-------
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
``annotation_type`` is a list
"""
if cropped_image_size[0] != cropped_image_size[1]:
logger.warning(
"PAD cropping is designed for a square cropped image size. Got : {}".format(
cropped_image_size
)
)
else:
face_size = cropped_image_size[0]
if annotation_type == "eyes-center":
eyes_distance = (face_size + 1) / 2.0
eyes_center = (face_size / 4.0, (face_size - 0.5) / 2.0)
right_eye = (eyes_center[0], eyes_center[1] - eyes_distance / 2)
left_eye = (eyes_center[0], eyes_center[1] + eyes_distance / 2)
cropped_positions = {"reye": right_eye, "leye": left_eye}
elif annotation_type == "bounding-box":
cropped_positions = {
"topleft": (0, 0),
"bottomright": cropped_image_size,
}
else:
logger.warning(
f"Annotation type {annotation_type} is not supported. Input images will be fully scaled."
)
cropped_positions = None
return cropped_positions
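A sketch of what the formulas above produce for the square case (the values follow directly from the eyes_distance and eyes_center expressions); pad_default_cropping is imported from bob.bio.face.utils alongside the other helpers in this file:

from bob.bio.face.utils import pad_default_cropping

eyes = pad_default_cropping((112, 112), "eyes-center")
# face_size = 112, eyes_distance = 56.5, eyes_center = (28.0, 55.75)
# -> {"reye": (28.0, 27.5), "leye": (28.0, 84.0)}

bbox = pad_default_cropping((112, 112), "bounding-box")
# -> {"topleft": (0, 0), "bottomright": (112, 112)}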
def make_cropper(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions=None,
color_channel="rgb",
annotator=None,
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to the requested `image_size`
Solves which FaceCropper to use and additionally returns the
transform_extra_arguments needed to wrap the cropper with a SampleWrapper.
"""
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
annotator=annotator,
color_channel=color_channel,
dtype="float64",
)
......@@ -185,159 +254,111 @@ def embedding_transformer(
else (("annotations", "annotations"),)
)
transformer = make_pipeline(
wrap(
["sample"],
face_cropper,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], embedding),
)
return transformer
return face_cropper, transform_extra_arguments
def embedding_transformer_160x160(
embedding, annotation_type, fixed_positions, color_channel="rgb"
def embedding_transformer(
cropped_image_size,
embedding,
cropped_positions,
fixed_positions=None,
color_channel="rgb",
annotator=None,
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to :math:`160 \times 160`
"""
cropped_positions = embedding_transformer_default_cropping(
(160, 160), annotation_type
)
This will resize images to the requested `cropped_image_size`
return embedding_transformer(
(160, 160),
embedding,
annotation_type,
cropped_positions,
fixed_positions,
"""
face_cropper, transform_extra_arguments = make_cropper(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel=color_channel,
annotator=annotator,
)
def embedding_transformer_112x112(
embedding, annotation_type, fixed_positions, color_channel="rgb"
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to :math:`112 \times 112`
"""
cropped_image_size = (112, 112)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {
"leye": (55, 81),
"reye": (55, 42),
}
else:
# Will use default
cropped_positions = embedding_transformer_default_cropping(
cropped_image_size, annotation_type
)
return embedding_transformer(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions,
color_channel=color_channel,
transformer = make_pipeline(
wrap(
["sample"],
face_cropper,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], embedding),
)
return transformer
def embedding_transformer_224x224(
embedding, annotation_type, fixed_positions, color_channel="rgb"
def face_crop_solver(
cropped_image_size,
cropped_positions=None,
color_channel="rgb",
fixed_positions=None,
annotator=None,
dtype="uint8",
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to :math:`224 \times 224`
Decide which face cropper to use.
"""
cropped_image_size = (224, 224)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
# If there are no cropped positions, just resize
if cropped_positions is None:
return Scale(cropped_image_size)
else:
# Will use default
cropped_positions = embedding_transformer_default_cropping(
cropped_image_size, annotation_type
)
return embedding_transformer(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions,
color_channel=color_channel,
)
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
# Detects the face and crops it without eye detection
if isinstance(cropped_positions, list):
return MultiFaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions_list=cropped_positions,
fixed_positions_list=fixed_positions,
color_channel=color_channel,
dtype=dtype,
annotator=annotator,
)
else:
return FaceCrop(
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
color_channel=color_channel,
fixed_positions=fixed_positions,
dtype=dtype,
annotator=annotator,
)
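face_crop_solver (moved here from bob.bio.face.helpers) is the single dispatch point make_cropper relies on; a sketch of its cases, assuming the defaults shown in its signature:

from bob.bio.face.utils import face_crop_solver

# No cropped_positions: images are only resized (Scale)
resizer = face_crop_solver((112, 112))

# One dict of landmarks: a single FaceCrop
eye_cropper = face_crop_solver(
    (112, 112), cropped_positions={"leye": (55, 81), "reye": (55, 42)}
)

# A list of such dicts would instead dispatch to MultiFaceCrop, trying each
# set of positions in turn (see the isinstance(cropped_positions, list) branch).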
def get_default_cropped_positions(mode, cropped_image_size, annotation_type):
"""
Crops a face to :math:`80 \times 64`
Computes the default cropped positions for the FaceCropper,
proportionally to the target image size
Parameters
----------
mode: str
Which default cropping to use. Available modes are: `legacy` (legacy baselines), `dnn`/`facenet`/`arcface`
(neural-net baselines), and `pad`.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None
fixed_positions: tuple
A tuple containing the annotations. This is used in case your input is already registered
with fixed positions (eyes or bounding box)
color_channel: str
cropped_image_size : tuple
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None, or a combination of those as a list
Returns
-------
face_cropper:
A face cropper to be used
transform_extra_arguments:
The parameters to the transformer
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
``annotation_type`` is a list
"""
color_channel = color_channel
dtype = np.float64
# Cropping
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
dtype=dtype,
)
transform_extra_arguments = (
None
if (cropped_positions is None or fixed_positions is not None)
else (("annotations", "annotations"),)
)
return face_cropper, transform_extra_arguments
if mode == "legacy":
return legacy_default_cropping(cropped_image_size, annotation_type)
elif mode in ["dnn", "facenet", "arcface"]:
return dnn_default_cropping(cropped_image_size, annotation_type)
elif mode == "pad":
return pad_default_cropping(cropped_image_size, annotation_type)
else:
raise ValueError("Unknown default cropping mode `{}`".format(mode))
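get_default_cropped_positions is the public entry point over the three cropping conventions above; a short usage sketch:

from bob.bio.face.utils import get_default_cropped_positions

# Legacy 80x64 crop, as used by the Gabor-graph and LBP baselines in this diff
legacy_pos = get_default_cropped_positions("legacy", (80, 64), "eyes-center")

# Proportional eye positions for a 160x160 FaceNet-style crop
dnn_pos = get_default_cropped_positions("facenet", (160, 160), "eyes-center")

# PAD convention for a square crop (see pad_default_cropping above)
pad_pos = get_default_cropped_positions("pad", (112, 112), "eyes-center")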
......@@ -26,7 +26,7 @@ extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
#'matplotlib.sphinxext.plot_directive'
'matplotlib.sphinxext.plot_directive'
]
# Be picky about warnings
......