From 3ba8d3cbc5bc5cd8907be7c9d770902492839805 Mon Sep 17 00:00:00 2001
From: Laurent COLBOIS <lcolbois@idiap.ch>
Date: Wed, 12 May 2021 12:17:07 +0200
Subject: [PATCH] Refactor DNN baselines
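
Replace the fixed-size helpers (embedding_transformer_112x112,
embedding_transformer_160x160) with explicit per-baseline setup: each
config now computes its crop size and cropped positions (either via
dnn_default_cropping or with hard-coded eye positions) and hands them
to the generic embedding_transformer helper, which in turn delegates
the FaceCropper construction to make_cropper.

A minimal sketch of the resulting pattern, assembled from the 112x112
ArcFace-style baselines in this patch (annotation_type, fixed_positions
and memory_demanding come from lookup_config_from_database(), as in the
configs; the embedding class varies per baseline):

    from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
    from bob.bio.face.config.baseline.helpers import (
        dnn_default_cropping,
        embedding_transformer,
    )

    cropped_image_size = (112, 112)
    if annotation_type == "eyes-center":
        # eye positions hard-coded to keep the previous crop geometry
        cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
    else:
        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)

    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
    )

The 160x160 baselines additionally pass annotator="mtcnn" to the
transformer.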

---
 .../config/baseline/arcface_insightface.py    |  23 +++-
 .../config/baseline/facenet_sanderberg.py     |  20 +++-
 bob/bio/face/config/baseline/helpers.py       | 105 +++---------------
 .../inception_resnetv1_casiawebface.py        |  21 +++-
 .../baseline/inception_resnetv1_msceleb.py    |  21 +++-
 .../inception_resnetv2_casiawebface.py        |  21 +++-
 .../baseline/inception_resnetv2_msceleb.py    |  21 +++-
 .../mobilenetv2_msceleb_arcface_2021.py       |  24 +++-
 .../baseline/resnet50_msceleb_arcface_2021.py |  25 ++++-
 .../baseline/resnet50_vgg2_arcface_2021.py    |  24 +++-
 .../config/baseline/tf2_inception_resnet.py   |  25 ++---
 11 files changed, 184 insertions(+), 146 deletions(-)

diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index b2de000f..db164c30 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        ArcFaceInsightFace(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel="rgb",
     )
 
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index 20e3651a..bdefacb8 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -2,9 +2,11 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
+
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -14,12 +16,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
-    transformer = embedding_transformer_160x160(
-        FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
+
     algorithm = Distance()
 
     return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 070ac4dd..0e0aefec 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -27,10 +27,10 @@ def lookup_config_from_database():
     return annotation_type, fixed_positions, memory_demanding
 
 
-def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
+def dnn_default_cropping(cropped_image_size, annotation_type):
     """
-    Computes the default cropped positions for the FaceCropper used with Facenet-like
-    Embedding extractors, proportionally to the target image size
+    Computes the default cropped positions for the FaceCropper used with neural-network-based
+    extractors, proportionally to the target image size
 
 
     Parameters
@@ -51,8 +51,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     if isinstance(annotation_type, list):
         return [
-            embedding_transformer_default_cropping(cropped_image_size, item)
-            for item in annotation_type
+            dnn_default_cropping(cropped_image_size, item) for item in annotation_type
         ]
 
     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
@@ -175,7 +174,6 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
 
 def make_cropper(
     cropped_image_size,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
@@ -183,12 +181,12 @@ def make_cropper(
 ):
 
     face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
         annotator=annotator,
+        color_channel=color_channel,
+        dtype="float64",
     )
 
     transform_extra_arguments = (
@@ -203,10 +201,10 @@ def make_cropper(
 def embedding_transformer(
     cropped_image_size,
     embedding,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
+    annotator=None,
 ):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
@@ -216,18 +214,12 @@ def embedding_transformer(
        This will resize images to the requested `image_size`
 
     """
-    face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
-    )
-
-    transform_extra_arguments = (
-        None
-        if (cropped_positions is None or fixed_positions is not None)
-        else (("annotations", "annotations"),)
+        color_channel=color_channel,
+        annotator=annotator,
     )
 
     transformer = make_pipeline(
@@ -242,66 +234,6 @@ def embedding_transformer(
     return transformer
 
 
-def embedding_transformer_160x160(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`160 \times 160`
-
-    """
-    cropped_positions = embedding_transformer_default_cropping(
-        (160, 160), annotation_type
-    )
-
-    return embedding_transformer(
-        (160, 160),
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-
-def embedding_transformer_112x112(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`112 \times 112`
-
-    """
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-
-    else:
-        # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
-
-    return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-
 def embedding_transformer_224x224(
     embedding, annotation_type, fixed_positions, color_channel="rgb"
 ):
@@ -319,15 +251,12 @@ def embedding_transformer_224x224(
         cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
     else:
         # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
     return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel=color_channel,
     )
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index 90bd0111..b532c815 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index c4597096..db94eddd 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index cb82dee8..c20d856c 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index fa502308..d44585eb 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index aae95a2a..2ca17ebe 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index f60903ac..c5056662 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,8 +1,10 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
+
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -12,10 +14,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index 0a6941d0..05eaa49b 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
index f71e0a78..619bcf82 100644
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ b/bob/bio/face/config/baseline/tf2_inception_resnet.py
@@ -2,9 +2,9 @@ from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_default_cropping,
-    embedding_transformer,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 
 from sklearn.pipeline import make_pipeline
@@ -18,23 +18,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    CROPPED_IMAGE_SIZE = (160, 160)
-    CROPPED_POSITIONS = embedding_transformer_default_cropping(
-        CROPPED_IMAGE_SIZE, annotation_type=annotation_type
-    )
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
-
     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )
-
+    # ASSEMBLE TRANSFORMER
     transformer = embedding_transformer(
-        CROPPED_IMAGE_SIZE,
-        embedding,
-        annotation_type,
-        CROPPED_POSITIONS,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
-- 
GitLab