Skip to content
Snippets Groups Projects
Commit d4fbeef1 authored by Amir MOHAMMADI's avatar Amir MOHAMMADI
Browse files

Merge branch 'dask-annotators' into 'master'

Adaptation of Annotator to Transformer

See merge request !73
parents b7d39a99 18461d15
No related branches found
No related tags found
1 merge request!73Adaptation of Annotator to Transformer
Pipeline #45883 passed
import bob.bio.base.annotator
import bob.bio.face.preprocessor # import for documentation
from bob.bio.base.annotator.FailSafe import translate_kwargs
class Base(bob.bio.base.annotator.Annotator):
......@@ -21,3 +21,22 @@ class Base(bob.bio.base.annotator.Annotator):
The extra arguments that may be passed.
"""
raise NotImplementedError()
def transform(self, samples, **kwargs):
    """Annotates a batch of images and returns one annotation dict per sample.

    All annotators should add at least the ``topleft`` and ``bottomright``
    coordinates. Some currently known annotation points such as ``reye``
    and ``leye`` are formalized in
    :any:`bob.bio.face.preprocessor.FaceCrop`.

    Parameters
    ----------
    samples : list
        A batch of samples. The image in each sample object should be a
        Bob format (#Channels, Height, Width) RGB image.
    **kwargs
        Extra arguments that may be passed. Each value is expanded to a
        per-sample value by ``translate_kwargs`` before dispatching.

    Returns
    -------
    list
        The result of ``self.annotate`` for each sample, in order.
    """
    # translate_kwargs turns batch-level kwargs into one kwargs dict per
    # sample so each can be forwarded to annotate() individually.
    kwargs = translate_kwargs(kwargs, len(samples))
    return [self.annotate(sample, **kw) for sample, kw in zip(samples, kwargs)]
......@@ -41,9 +41,6 @@ class BobIpFacedetect(Base):
eye_estimate=False,
**kwargs):
super(BobIpFacedetect, self).__init__(**kwargs)
self.sampler = bob.ip.facedetect.Sampler(
scale_factor=scale_base, lowest_scale=lowest_scale,
distance=distance)
if cascade is None:
self.cascade = bob.ip.facedetect.default_cascade()
else:
......@@ -51,6 +48,15 @@ class BobIpFacedetect(Base):
bob.io.base.HDF5File(cascade))
self.detection_overlap = detection_overlap
self.eye_estimate = eye_estimate
self.scale_base = scale_base
self.lowest_scale = lowest_scale
self.distance = distance
self.fit()
def fit(self, X=None, y=None, **kwargs):
    """Create the face-detection sampler from the stored parameters.

    Builds ``self.sampler_`` (trailing underscore follows the
    scikit-learn convention for fitted attributes) from ``scale_base``,
    ``lowest_scale`` and ``distance``. ``X`` and ``y`` are ignored; the
    "fit" only instantiates a stateless sampler.

    Parameters
    ----------
    X : None
        Ignored, present for scikit-learn API compatibility.
    y : None
        Ignored, present for scikit-learn API compatibility.
    **kwargs
        Ignored extra arguments.

    Returns
    -------
    self
        The fitted annotator, as required by the scikit-learn estimator
        contract so this object can be used inside pipelines.
    """
    self.sampler_ = bob.ip.facedetect.Sampler(
        scale_factor=self.scale_base,
        lowest_scale=self.lowest_scale,
        distance=self.distance,
    )
    # scikit-learn requires fit() to return the estimator itself.
    return self
def annotate(self, image, **kwargs):
"""Return topleft and bottomright and expected eye positions
......@@ -71,7 +77,7 @@ class BobIpFacedetect(Base):
if image.ndim == 3:
image = bob.ip.color.rgb_to_gray(image)
bbx, quality = bob.ip.facedetect.detect_single_face(
image, self.cascade, self.sampler, self.detection_overlap)
image, self.cascade, self.sampler_, self.detection_overlap)
landmarks = bounding_box_to_annotations(bbx)
landmarks['quality'] = quality
......
from . import Base
import bob.ip.color
import bob.ip.flandmark
class BobIpFlandmark(Base):
......@@ -21,6 +20,7 @@ class BobIpFlandmark(Base):
def __init__(self, **kwargs):
    """Create the Flandmark landmark localizer.

    Parameters
    ----------
    **kwargs
        Forwarded to the :any:`Base` annotator constructor.
    """
    super(BobIpFlandmark, self).__init__(**kwargs)
    # NOTE(review): this local import appears redundant with the
    # module-level ``import bob.ip.flandmark`` — presumably kept for
    # lazy loading; confirm before removing.
    import bob.ip.flandmark
    self.flandmark = bob.ip.flandmark.Flandmark()
def annotate(self, image, annotations, **kwargs):
......
......@@ -2,12 +2,25 @@ from . import Base
class BobIpMTCNN(Base):
    """Annotator using mtcnn in bob.ip.facedetect"""

    def __init__(self, min_size=40, factor=0.709, thresholds=(0.6, 0.7, 0.7), **kwargs):
        """Create the MTCNN-based annotator.

        Parameters
        ----------
        min_size : int
            Minimum face size (in pixels) to detect; forwarded to MTCNN.
        factor : float
            Image-pyramid scale factor; forwarded to MTCNN.
        thresholds : tuple
            Per-stage detection thresholds; forwarded to MTCNN.
        **kwargs
            Forwarded to the :any:`Base` annotator constructor.
        """
        super(BobIpMTCNN, self).__init__(**kwargs)
        # Imported lazily so that importing this module does not require
        # the optional bob.ip.facedetect MTCNN stack to be installed.
        from bob.ip.facedetect.mtcnn import MTCNN

        self.detector = MTCNN(min_size=min_size, factor=factor, thresholds=thresholds)

    # Read-only views that delegate to the underlying detector so the
    # construction parameters stay inspectable after __init__.
    @property
    def min_size(self):
        return self.detector.min_size

    @property
    def factor(self):
        return self.detector.factor

    @property
    def thresholds(self):
        return self.detector.thresholds
def annotate(self, image, **kwargs):
"""Annotates an image using mtcnn
......@@ -26,9 +39,6 @@ class BobIpMTCNN(Base):
mouthleft, mouthright, quality).
"""
# return the annotations for the first/largest face
annotations = self.detector.annotations(image)
if annotations:
......
from bob.bio.base.annotator import FailSafe
from bob.bio.face.annotator import BobIpFacedetect, BobIpFlandmark
# FLandmark requires the 'topleft' and 'bottomright' annotations
# (produced by BobIpFacedetect), so the face detector runs first.
# NOTE(review): FailSafe presumably tries the annotators in order until
# the required keys are produced — confirm against bob.bio.base docs.
annotator = FailSafe(
    [BobIpFacedetect(), BobIpFlandmark()],
    required_keys=('reye', 'leye'))
......@@ -2,47 +2,83 @@ import bob.io.base
import bob.io.base.test_utils
import bob.io.image
from bob.bio.face.annotator import (
BobIpFacedetect, BobIpFlandmark,
BobIpFacedetect,
BobIpFlandmark,
min_face_size_validator)
from bob.bio.base.annotator import FailSafe
from bob.bio.face.annotator import BobIpMTCNN
import numpy
from bob.bio.base.test.utils import is_library_available

# Shared test fixture: the sample image shipped with bob.ip.facedetect.
# (The scraped diff contained two copies of this load — old and new side
# of the hunk; only one is needed.)
face_image = bob.io.base.load(
    bob.io.base.test_utils.datafile(
        'testimage.jpg', 'bob.ip.facedetect'
    )
)
def _assert_mtcnn(annot):
"""
Verifies that the MTCNN annotations are correct for ``faceimage.jpg``
"""
assert type(annot) is dict, annot
assert [int(x) for x in annot['topleft']] == [68, 76], annot
assert [int(x) for x in annot['bottomright']] == [344, 274], annot
assert [int(x) for x in annot['reye']] == [180, 129], annot
assert [int(x) for x in annot['leye']] == [175, 220], annot
assert numpy.allclose(annot['quality'], 0.9998975), annot
def _assert_bob_ip_facedetect(annot):
assert annot['topleft'] == (110, 82), annot
assert annot['bottomright'] == (334, 268), annot
assert numpy.allclose(annot['quality'], 39.209601948013685), annot
@is_library_available("tensorflow")
def test_mtcnn_annotator():
    """
    The MTCNN annotator should return the correct annotations.
    """
    annotations = BobIpMTCNN()([face_image])
    _assert_mtcnn(annotations[0])
def test_bob_ip_facedetect():
    """The plain face detector returns the reference bounding box."""
    annotations = BobIpFacedetect()([face_image])
    _assert_bob_ip_facedetect(annotations[0])
def test_bob_ip_facedetect_eyes():
    """With eye estimation enabled, eye positions are also annotated."""
    annotations = BobIpFacedetect(eye_estimate=True)([face_image])
    result = annotations[0]
    _assert_bob_ip_facedetect(result)
    assert [int(x) for x in result['reye']] == [175, 128], result
    assert [int(x) for x in result['leye']] == [175, 221], result
def test_fail_safe():
    """FailSafe wrapping an eye-estimating detector yields eye annotations."""
    annotator = FailSafe(
        [BobIpFacedetect(eye_estimate=True)],
        required_keys=('reye', 'leye'),
    )
    result = annotator([face_image])[0]
    _assert_bob_ip_facedetect(result)
    assert [int(x) for x in result['reye']] == [175, 128], result
    assert [int(x) for x in result['leye']] == [175, 221], result
# Disabled: the ``notest_`` prefix keeps pytest from collecting this.
# It uses the pre-Transformer API (a single image instead of a batch).
def notest_bob_ip_facedetect():
    annot = BobIpFacedetect()(face_image)
    _assert_bob_ip_facedetect(annot)
# Disabled: the ``notest_`` prefix keeps pytest from collecting this.
# It uses the pre-Transformer API (a single image instead of a batch).
def notest_bob_ip_facedetect_eyes():
    annot = BobIpFacedetect(eye_estimate=True)(face_image)
    _assert_bob_ip_facedetect(annot)
    assert [int(x) for x in annot['reye']] == [175, 128], annot
    assert [int(x) for x in annot['leye']] == [175, 221], annot
def test_bob_ip_flandmark():
    """Flandmark refines the eye positions found by the face detector.

    The scraped diff fused the old (single-image) and new (batch) versions
    of this test, plus a stray old-side function header; this is the
    reconstructed new-side test, with the leftover debug ``print`` removed.
    """
    annotator = FailSafe(
        [BobIpFacedetect(), BobIpFlandmark()],
        required_keys=('reye', 'leye'),
    )
    batch = [face_image]
    annot = annotator(batch)
    _assert_bob_ip_facedetect(annot[0])
    assert [int(x) for x in annot[0]['reye']] == [183, 127], annot
    assert [int(x) for x in annot[0]['leye']] == [174, 223], annot
def test_min_face_size_validator():
valid = {
'topleft': (0, 0),
'bottomright': (32, 32),
......
......@@ -20,4 +20,5 @@ bob.ip.facedetect
bob.pipelines
matplotlib # for plotting
six
scikit-image
\ No newline at end of file
scikit-image
scikit-learn # for pipelines Transformers
......@@ -122,7 +122,6 @@ setup(
'replay-img-spoof = bob.bio.face.config.database.replay:replay_spoof',
'replaymobile-img-licit = bob.bio.face.config.database.replaymobile:replaymobile_licit',
'replaymobile-img-spoof = bob.bio.face.config.database.replaymobile:replaymobile_spoof',
'fargo = bob.bio.face.config.database.fargo:database',
],
......@@ -134,6 +133,10 @@ setup(
],
'bob.bio.transformer':[
'facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:transformer',
'facedetect = bob.bio.face.config.annotator.facedetect:transformer',
'flandmark = bob.bio.face.config.annotator.flandmark:annotator',
'mtcnn = bob.bio.face.config.annotator.mtcnn:transformer',
'facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:transformer',
'inception-resnetv1-casiawebface = bob.bio.face.config.baseline.inception_resnetv1_casiawebface:transformer',
'inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer',
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment