Commit 456dab07 authored by Laurent COLBOIS's avatar Laurent COLBOIS

Merge branch 'fix-ijbc' of gitlab.idiap.ch:bob/bob.bio.face into fix-ijbc

parents 9c79dad2 24356f07
......@@ -58,6 +58,7 @@ from .Base import Base
from .bobipfacedetect import BobIpFacedetect
from .bobipflandmark import BobIpFlandmark
from .bobipmtcnn import BobIpMTCNN
from .bobiptinyface import BobIpTinyface
# gets sphinx autodoc done right - don't remove it
......@@ -84,6 +85,7 @@ __appropriate__(
BobIpFacedetect,
BobIpFlandmark,
BobIpMTCNN,
BobIpTinyface,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
__all__ = [_ for _ in dir() if not _.startswith('_')]
\ No newline at end of file
import bob.ip.facedetect.tinyface
from . import Base
class BobIpTinyface(Base):
    """Face annotator based on the tinyface detector from ``bob.ip.facedetect``."""

    def __init__(self, prob_thresh=0.5, **kwargs):
        super(BobIpTinyface, self).__init__(**kwargs)
        # Keep a ready-to-use detector instance; the probability threshold is
        # forwarded unchanged to the underlying tinyface detector.
        self.detector = bob.ip.facedetect.tinyface.TinyFacesDetector(
            prob_thresh=prob_thresh
        )

    @property
    def prob_thresh(self):
        """Detection probability threshold used by the underlying detector."""
        return self.detector.prob_thresh

    def annotate(self, image, **kwargs):
        """Annotates an image using tinyface

        Parameters
        ----------
        image : numpy.array
            An RGB image in Bob format.
        **kwargs
            Ignored.

        Returns
        -------
        dict
            Annotations with (topleft, bottomright) keys (or None).
        """
        detections = self.detector.detect(image)
        # The first entry is the first/largest face reported by the detector.
        return detections[0] if detections is not None else None
from bob.bio.face.annotator import BobIpTinyface

# Configuration entry point: a default tinyface annotator instance
# (prob_thresh=0.5) registered under this config's resource name.
annotator = BobIpTinyface()
\ No newline at end of file
from bob.bio.face.embeddings.pytorch import afffe_baseline
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the AFFFE vanilla-biometrics pipeline for the given annotations."""
    return afffe_baseline(annotation_type, fixed_positions, memory_demanding)


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.mxnet import arcface_insightFace_lresnet100
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the ArcFace InsightFace (LResNet100) vanilla-biometrics pipeline.

    Parameters
    ----------
    annotation_type : str
        Annotation type provided by the database (e.g. "eyes-center").
    fixed_positions : dict or None
        Fixed landmark positions, when the database provides them.
    memory_demanding : bool
        Forwarded to the embedding loader -- presumably toggles a
        memory-friendly loading mode; confirm against
        ``arcface_insightFace_lresnet100``.
    """
    return arcface_insightFace_lresnet100(
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
        memory_demanding=memory_demanding,
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import facenet_sanderberg_20170512_110547
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the FaceNet (Sanderberg, 20170512_110547) pipeline."""
    return facenet_sanderberg_20170512_110547(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import inception_resnet_v1_casia_centerloss_2018
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the InceptionResnetV1 CASIA center-loss (2018) pipeline."""
    return inception_resnet_v1_casia_centerloss_2018(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import (
    inception_resnet_v1_msceleb_centerloss_2018,
)
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the InceptionResnetV1 MS-Celeb center-loss (2018) pipeline."""
    return inception_resnet_v1_msceleb_centerloss_2018(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the InceptionResnetV2 CASIA center-loss (2018) pipeline."""
    return inception_resnet_v2_casia_centerloss_2018(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import (
    inception_resnet_v2_msceleb_centerloss_2018,
)
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the InceptionResnetV2 MS-Celeb center-loss (2018) pipeline."""
    return inception_resnet_v2_msceleb_centerloss_2018(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.pytorch import iresnet100
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the IResNet-100 vanilla-biometrics pipeline."""
    return iresnet100(annotation_type, fixed_positions, memory_demanding)


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.pytorch import iresnet34
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the IResNet-34 vanilla-biometrics pipeline."""
    return iresnet34(annotation_type, fixed_positions, memory_demanding)


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.pytorch import iresnet50
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=False):
    """Build the IResNet-50 vanilla-biometrics pipeline."""
    return iresnet50(annotation_type, fixed_positions, memory_demanding)


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import mobilenetv2_msceleb_arcface_2021
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the MobileNetV2 MS-Celeb ArcFace (2021) pipeline."""
    return mobilenetv2_msceleb_arcface_2021(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_2021
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the ResNet-50 MS-Celeb ArcFace (2021) pipeline."""
    return resnet50_msceleb_arcface_2021(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_20210521
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the ResNet-50 MS-Celeb ArcFace (2021-05-21) pipeline."""
    return resnet50_msceleb_arcface_20210521(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tensorflow import resnet50_vgg2_arcface_2021
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Build the ResNet-50 VGG2 ArcFace (2021) pipeline."""
    return resnet50_vgg2_arcface_2021(
        annotation_type, fixed_positions, memory_demanding
    )


pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.utils import (
dnn_default_cropping,
embedding_transformer,
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
def arcface_baseline(embedding, annotation_type, fixed_positions=None):
    """Assemble a vanilla-biometrics pipeline around an ArcFace embedding.

    Parameters
    ----------
    embedding
        The embedding transformer to wrap.
    annotation_type : str
        Annotation type provided by the database (e.g. "eyes-center").
    fixed_positions : dict or None
        Fixed landmark positions, when the database provides them.
    """
    image_size = (112, 112)
    if annotation_type == "eyes-center":
        # Hard coding eye positions for backward consistency
        positions = {"leye": (55, 81), "reye": (55, 42)}
    else:
        positions = dnn_default_cropping(image_size, annotation_type)

    # Distance-based scoring on top of the cropping + embedding transformer.
    return VanillaBiometricsPipeline(
        embedding_transformer(
            cropped_image_size=image_size,
            embedding=embedding,
            cropped_positions=positions,
            fixed_positions=fixed_positions,
            color_channel="rgb",
            annotator="mtcnn",
        ),
        Distance(),
    )
def facenet_baseline(embedding, annotation_type, fixed_positions=None):
    """Assemble a vanilla-biometrics pipeline around a FaceNet-style embedding.

    Parameters
    ----------
    embedding
        The embedding transformer to wrap.
    annotation_type : str
        Annotation type provided by the database (e.g. "eyes-center").
    fixed_positions : dict or None
        Fixed landmark positions, when the database provides them.
    """
    image_size = (160, 160)
    positions = dnn_default_cropping(image_size, annotation_type)

    # Distance-based scoring on top of the cropping + embedding transformer.
    pipeline_transformer = embedding_transformer(
        cropped_image_size=image_size,
        embedding=embedding,
        cropped_positions=positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator="mtcnn",
    )
    return VanillaBiometricsPipeline(pipeline_transformer, Distance())
from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Resolve annotation type, fixed landmark positions and memory mode from the
# database object, if one was set before this config file was loaded.
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None):
    """Build an InceptionResnetV2 pipeline from a user-configured checkpoint.

    NOTE: ``memory_demanding`` is read from the enclosing module scope,
    not passed as a parameter.
    """
    # Checkpoint path comes from the user's Bob configuration.
    extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
    embedding = InceptionResnetv2(
        checkpoint_path=extractor_path, memory_demanding=memory_demanding
    )
    return facenet_baseline(
        embedding=embedding,
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.opencv import vgg16_oxford_baseline
from bob.bio.face.utils import lookup_config_from_database

# Resolve annotation type and fixed landmark positions from the database
# object, if one was set before this config file was loaded.  The memory
# mode returned by the lookup is unused for this baseline.
annotation_type, fixed_positions, _ = lookup_config_from_database(
    locals().get("database")
)


def load(annotation_type, fixed_positions=None):
    """Build the VGG16 (Oxford) vanilla-biometrics pipeline."""
    return vgg16_oxford_baseline(annotation_type, fixed_positions)


pipeline = load(annotation_type, fixed_positions)
#!/usr/bin/env python
from bob.bio.face.database import IJBCDatabase
from bob.extension import rc

# Configuration entry point for the IARPA Janus Benchmark C (IJB-C) dataset;
# the raw-data location is taken from the user's Bob configuration.
database = IJBCDatabase()
......@@ -97,7 +97,7 @@ class CasiaAfricaDatabase(CSVDataset):
One of the database protocols. Options are "ID-V-All-Ep1", "ID-V-All-Ep2" and "ID-V-All-Ep3"
"""
def __init__(self, protocol):
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
# Downloading model if not exists
urls = CasiaAfricaDatabase.urls()
......@@ -107,9 +107,6 @@ class CasiaAfricaDatabase(CSVDataset):
file_hash="324bd69b581477d30606417be8e30d2a",
)
self.annotation_type = "eyes-center"
self.fixed_positions = None
directory = (
rc["bob.db.casia-africa.directory"]
if rc["bob.db.casia-africa.directory "]
......@@ -117,8 +114,9 @@ class CasiaAfricaDatabase(CSVDataset):
)
super().__init__(
filename,
protocol,
name="casia-africa",
dataset_protocol_path=filename,
protocol=protocol,
csv_to_sample_loader=make_pipeline(
CSVToSampleLoaderBiometrics(
data_loader=bob.io.base.load,
......@@ -128,6 +126,8 @@ class CasiaAfricaDatabase(CSVDataset):
),
EyesAnnotations(),
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
@staticmethod
......
......@@ -54,15 +54,13 @@ class CBSRNirVis2Database(CSVDataset):
One of the database protocols.
"""
def __init__(self, protocol):
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
# Downloading model if not exists
urls = CBSRNirVis2Database.urls()
filename = get_file(
"cbsr_nir_vis_2.tar.gz", urls, file_hash="116da4537c1099915cdc0f08feb651bd",
)
self.annotation_type = "eyes-center"
self.fixed_positions = None
directory = (
rc["bob.db.cbsr-nir-vis-2.directory"]
......@@ -81,8 +79,9 @@ class CBSRNirVis2Database(CSVDataset):
raise ValueError("File `{0}` not found".format(str(new_filename)))
super().__init__(
filename,
protocol,
name="cbsr-nir-vis2",
dataset_protocol_path=filename,
protocol=protocol,
csv_to_sample_loader=make_pipeline(
CSVToSampleLoaderBiometrics(
data_loader=load,
......@@ -91,6 +90,8 @@ class CBSRNirVis2Database(CSVDataset):
),
EyesAnnotations(),
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
@staticmethod
......
......@@ -20,7 +20,7 @@ class FRGCDatabase(CSVDataset):
Face Recognition Grand Test dataset
"""
def __init__(self, protocol):
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
# Downloading model if not exists
urls = FRGCDatabase.urls()
......@@ -28,12 +28,10 @@ class FRGCDatabase(CSVDataset):
"frgc.tar.gz", urls, file_hash="328d2c71ae19a41679defa9585b3140f"
)
self.annotation_type = "eyes-center"
self.fixed_positions = None
super().__init__(
filename,
protocol,
name="frgc",
dataset_protocol_path=filename,
protocol=protocol,
csv_to_sample_loader=make_pipeline(
CSVToSampleLoaderBiometrics(
data_loader=bob.io.base.load,
......@@ -44,6 +42,8 @@ class FRGCDatabase(CSVDataset):
),
EyesAnnotations(),
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
@staticmethod
......
......@@ -5,16 +5,21 @@ from bob.extension import rc
import os
import bob.io.image
from functools import partial
import uuid
from bob.pipelines.utils import hash_string
def _make_sample_from_template_row(row, image_directory):
# Appending this hash so that parallel writing is handled correctly,
# at the cost of having duplicate files
hashstr = str(uuid.uuid4())
return DelayedSample(
load=partial(
bob.io.image.load, path=os.path.join(image_directory, row["FILENAME"])
),
load=partial(bob.io.image.load, os.path.join(image_directory, row["FILENAME"])),
reference_id=str(row["TEMPLATE_ID"]),
subject_id=str(row["SUBJECT_ID"]),
key=os.path.splitext(row["FILENAME"])[0],
key=os.path.splitext(row["FILENAME"])[0] + "-" + hashstr,
annotations={
"topleft": (float(row["FACE_Y"]), float(row["FACE_X"])),
"bottomright": (
......@@ -27,25 +32,72 @@ def _make_sample_from_template_row(row, image_directory):
def _make_sample_set_from_template_group(template_group, image_directory):
    """Fold all rows of one template into a single SampleSet.

    Parameters
    ----------
    template_group : pandas.DataFrame
        Rows sharing one TEMPLATE_ID; each row becomes one delayed sample.
    image_directory : str
        Root directory where the raw images live.
    """
    samples = list(
        template_group.apply(
            _make_sample_from_template_row, axis=1, image_directory=image_directory
        )
    )
    # Every sample in the group carries the template's ids, so the first
    # sample is representative.  Samples expose ``reference_id`` (not the
    # old ``template_id`` attribute).
    return SampleSet(
        samples, reference_id=samples[0].reference_id, subject_id=samples[0].subject_id
    )
class IJBCDatabase(Database):
"""
This package contains the access API and descriptions for the IARPA Janus Benchmark C -- IJB-C database.
The actual raw data can be downloaded from the original web page: http://www.nist.gov/programs-projects/face-challenges (note that not everyone might be eligible for downloading the data).
Included in the database, there are list files defining verification as well as closed- and open-set identification protocols.
For verification, two different protocols are provided.
For the ``1:1`` protocol, gallery and probe templates are combined using several images and video frames for each subject.
Compared gallery and probe templates share the same gender and skin tone -- these have been matched to make the comparisons more realistic and difficult.
For closed-set identification, the gallery of the ``1:1`` protocol is used, while probes stem from either only images, mixed images and video frames, or plain videos.
For open-set identification, the same probes are evaluated, but the gallery is split into two parts, either of which is left out to provide unknown probe templates, i.e., probe templates with no matching subject in the gallery.
In any case, scores are computed between all (active) gallery templates and all probes.
The IJB-C dataset provides additional evaluation protocols for face detection and clustering, but these are (not yet) part of this interface.
.. warning::
To use this dataset protocol, you need to have the original files of the IJBC datasets.
Once you have it downloaded, please run the following command to set the path for Bob
.. code-block:: sh
bob config set bob.bio.face.ijbc.directory [IJBC PATH]
The code below allows you to fetch the gallery and probes of the "1:1" protocol.