Commit a1398ccc authored by Yu LINGHU, committed by Tiago de Freitas Pereira

recommit all changes

parent dac06638
@@ -26,6 +26,8 @@ class BobIpTinyface(Base):
        Annotations with (topleft, bottomright) keys (or None).
        """
        annotations = self.tinyface.detect(image)
        if annotations is not None:
            return annotations[0]
        else:
......
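
For orientation, a minimal usage sketch of the None-safe behaviour introduced above; it assumes the annotator can be called directly (bob.bio annotators typically forward __call__ to the annotate() method shown in this hunk), and the dummy image and variable names are illustrative, not part of the commit:

import numpy
from bob.bio.face.annotator import BobIpTinyface

annotator = BobIpTinyface()
# Dummy Bob-style RGB image of shape (3, height, width); real use would pass a loaded face image.
image = numpy.random.randint(0, 255, (3, 480, 640), dtype="uint8")
annotations = annotator(image)
if annotations is None:
    # No face found: downstream steps such as FaceCrop(annotator=...) can fall back
    # to fixed positions or skip the sample instead of indexing into None.
    print("no face detected")
else:
    print(annotations["topleft"], annotations["bottomright"])
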
@@ -61,3 +61,4 @@ transformer = make_pipeline(
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.annotator import BobIpTinyface
from bob.bio.face.extractor import MxNetModel
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
annotator_transformer = BobIpTinyface()
preprocessor_transformer = FaceCrop(
    cropped_image_size=(112, 112),
    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
    color_channel="rgb",
    annotator=annotator_transformer,
)
extractor_transformer = MxNetModel()
algorithm = Distance(distance_function=scipy.spatial.distance.cosine, is_distance_function=True)
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer),
    wrap(["sample"], extractor_transformer),
)
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
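
As a usage sketch that is not part of the commit: once this configuration module is loaded, the assembled pipeline can be run directly in Python. The call below assumes the standard VanillaBiometricsPipeline calling convention and a hypothetical `database` object exposing the usual vanilla-biometrics accessors; names are illustrative:

# `database` is assumed to be a bob.bio database interface providing SampleSets.
scores = pipeline(
    database.background_model_samples(),  # not used by a Distance-only algorithm, but part of the interface
    database.references(),
    database.probes(),
)
# `scores` holds, for each probe, the comparison scores against the enrolled references.
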
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
from bob.bio.face.extractor import opencv_model
from bob.bio.face.extractor import OpenCVModel
from bob.bio.base.extractor import Extractor
from bob.bio.base.transformers import ExtractorTransformer
from bob.bio.base.algorithm import Distance
@@ -35,15 +35,6 @@ preprocessor_transformer = FaceCrop(
    fixed_positions=fixed_positions,
)
cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
# Preprocessor
preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions={"leye": (100, 140), "reye": (100, 95)},
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
@@ -52,7 +43,7 @@ transform_extra_arguments = (
# Extractor
extractor_transformer = opencv_model()
extractor_transformer = OpenCVModel()
# Algorithm
......
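
The transform_extra_arguments tuple computed in this file is what tells the sample wrapper to forward each sample's annotations to FaceCrop. A hedged sketch of how it is typically threaded through bob.pipelines.wrap when the pipeline is assembled, mirroring the pattern of the other configuration files in this commit (not code from the commit itself):

from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap

transformer = make_pipeline(
    # Forward sample.annotations as the `annotations` keyword of the preprocessor's
    # transform() only when eye positions must come from annotations (see the
    # condition computed above); otherwise nothing extra is forwarded.
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
)
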
@@ -23,8 +23,6 @@ else:
    fixed_positions = None
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
preprocessor_transformer = FaceCrop(
@@ -40,12 +38,6 @@ transform_extra_arguments = (
    else (("annotations", "annotations"),)
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)
extractor_transformer = PyTorchLoadedModel()
@@ -69,3 +61,4 @@ transformer = make_pipeline(
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
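
For intuition, the comparison step that the Distance algorithm performs on the extracted embeddings boils down to something like the conceptual sketch below; it is not the library's exact implementation, and the sign flip only reflects the common convention that higher scores mean a better match:

import scipy.spatial


def cosine_score(enrolled_embedding, probe_embedding):
    # Cosine distance is small for similar embeddings; negating it yields a
    # similarity-style score in which larger values indicate a likely match.
    return -scipy.spatial.distance.cosine(enrolled_embedding, probe_embedding)
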
@@ -24,8 +24,6 @@ else:
    fixed_positions = None
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
preprocessor_transformer = FaceCrop(
@@ -42,13 +40,6 @@ transform_extra_arguments = (
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)
model = InceptionResnetV1(pretrained="vggface2").eval()
extractor_transformer = PyTorchLibraryModel(model=model)
......
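
InceptionResnetV1 here presumably comes from the facenet-pytorch package (an assumption, since its import statement is outside the shown hunk). A standalone sketch of what that model produces when wrapped as an extractor:

import torch
from facenet_pytorch import InceptionResnetV1  # assumed origin of InceptionResnetV1

model = InceptionResnetV1(pretrained="vggface2").eval()
with torch.no_grad():
    # One dummy aligned RGB face crop; the network maps each image to a
    # 512-dimensional embedding, which the Distance algorithm then compares.
    embedding = model(torch.randn(1, 3, 160, 160))
print(embedding.shape)  # expected: torch.Size([1, 512])
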
@@ -234,7 +234,7 @@ In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``
and ``cropped_image_size=(160,160)``
as preprocessor, Inception Resnet v2 in [TFP18]_ as extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database, we obtain the following ROC plot:
.. figure:: img/tensorflow_pipe.png
.. figure:: img/tensorflow_lfw_pipe.png
   :figwidth: 75%
   :align: center
   :alt: Face recognition results on the LFW database.
......
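
A hedged sketch of the preprocessing side of the baseline described above; the eye coordinates are illustrative placeholders, and the Inception Resnet v2 extractor plus the distance-cosine algorithm would then be wrapped and assembled exactly as in the configuration files earlier in this commit:

from bob.bio.face.preprocessor import FaceCrop

# 160x160 RGB crops, matching the cropped_image_size quoted in the baseline
# description above. The eye positions below are illustrative only.
preprocessor_transformer = FaceCrop(
    cropped_image_size=(160, 160),
    cropped_positions={"leye": (80, 100), "reye": (80, 60)},
    color_channel="rgb",
)

# The extractor and the distance-cosine algorithm are then wrapped with
# wrap(["sample"], ...) and combined into a VanillaBiometricsPipeline as shown
# in the configuration files above.
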