Commit 0c5f4b21 authored by Anjith GEORGE

Added MCXFace dataset (from HQ-WMCA)

parent 3161603c
Pipeline #50121 passed with stage
in 26 minutes and 58 seconds
# Database resource configuration snippet (chain-loadable via the bob CLI):
# picks a default protocol and instantiates the MCXFace database resource.
# NOTE(review): the import's module path was stripped by the page scrape —
# it must name the package that exports MCXFaceDatabase; confirm against
# the original commit before running.
from import MCXFaceDatabase
# In case protocol is coming from chain loading
if "protocol" not in locals():
protocol = "COLOR-COLOR-split1"
database = MCXFaceDatabase(protocol=protocol)
......@@ -17,6 +17,8 @@ from .morph import MorphDatabase
from .casia_africa import CasiaAfricaDatabase
from .pola_thermal import PolaThermalDatabase
from .cbsr_nir_vis_2 import CBSRNirVis2Database
from .mcxface import MCXFaceDatabase
# gets sphinx autodoc done right - don't remove it
......@@ -52,5 +54,6 @@ __appropriate__(
# Export every public (non-underscore-prefixed) name from this module.
__all__ = [name for name in dir() if not name.startswith("_")]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Anjith George <>
MCXFace database implementation
from import CSVDataset
from import CSVToSampleLoaderBiometrics
from import EyesAnnotations
from bob.extension import rc
from import get_file
from sklearn.pipeline import make_pipeline
# NOTE(review): this block comes from a scraped commit-diff page — the
# docstring quotes, indentation, and several statements (collapsed by the
# diff viewer) are missing. The code is kept verbatim; only review comments
# and docstring wording were touched. Restore the block from the original
# commit before attempting to run or refactor it.
class MCXFaceDatabase(CSVDataset):
Collected at Idiap for the BATL ODIN project, the MCXFace is derived from the HQ-WMCA dataset.
The database implements several Face Recognition
protocols, i.e., between the same modalities as well as heterogeneous face recognition.
The database has only the train and dev splits due to the limited number of subjects.
A total of 51 subjects are present in the dataset collected across several sessions. 30 subjects are in
the train fold and 20 subjects are in the dev fold.
The dataset contains the following channels:
COLOR: From Basler Camera
DEPTH: From Intel D415
THERMAL: From Xenics Gobi
NIR: Is again from Basler NIR camera, we use wavelength 850nm
SWIR: From Xenics Bobcat we use only 1300nm wavelength.
All the channels are spatially and temporally registered, meaning one
can share the annotations provided between channels. The left and right
eye centers are provided as annotations.
The protocols are as follows: SOURCE_TARGET_split, where the SOURCE
is the modality used for enrollment, and TARGET is the modality used
as probes. We implement several combinations as the protocols. In addition,
we have normal recognition protocols where both source and target are the same
modalities. For each of these, we have also prepared 5 different splits by
randomly splitting the clients between train and dev sets. Subjects who have
only one session are always assigned to the training fold.
.. warning::
Use the command below to set the path of the real data::
$ bob config set [PATH-TO-MCXFACE-DATA]
protocol: str
One of the database protocols.
# NOTE(review): the constructor body below is truncated — the get_file(...)
# call and the ``directory = (`` expression are missing their closing parts,
# and the rc[""] configuration key was stripped by the scrape (presumably a
# bob rc key pointing at the dataset directory — confirm against the
# original commit).
def __init__(self, protocol):
# Downloading model if not exists
urls = MCXFaceDatabase.urls()
filename = get_file(
"mcxface.tar.gz", urls, file_hash="c4b73aa7cee7dc2b9bfc2b20d48db5b8",
self.annotation_type = "eyes-center"
self.fixed_positions = None
directory = (
if rc[""]
else ""
# Per-sample file loader; body elided by the diff viewer.
def load(path):
Images in this dataset are stored as 8-bit jpg
# Returns the available protocol names; the list is truncated here.
def protocols():
return ['COLOR-COLOR-split1',
# Returns download URL(s) for the protocol-definition archive; body elided.
def urls():
return [
......@@ -115,6 +115,7 @@ setup(
"casia-africa =",
"pola-thermal =",
"cbsr-nir-vis-2 =",
"mcxface =",
"": [
"facedetect =",
......@@ -186,6 +187,7 @@ setup(
"resnet50-msceleb-arcface-2021 =",
"resnet50-vgg2-arcface-2021 =",
"mobilenetv2-msceleb-arcface-2021 =",
"mcxface ="
"": [
"display-face-annotations =",
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment