Commit 038e214b authored by Xinyi ZHANG, committed by Tiago de Freitas Pereira
"new"

parent a0973f78
Merge request !112: Feature extractors
Showing 120 additions and 190 deletions
import bob.ip.facedetect


def bounding_box_to_annotations(bbx):
    """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.
...
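A quick illustration of the conversion (a sketch, not part of this commit; it assumes the usual `BoundingBox(topleft, size)` constructor from bob.ip.facedetect):

import bob.ip.facedetect

# Hypothetical detection: top-left at (row 10, col 20), 100 rows by 80 columns.
bbx = bob.ip.facedetect.BoundingBox(topleft=(10, 20), size=(100, 80))
print(bounding_box_to_annotations(bbx))
# expected keys: {'topleft': (10, 20), 'bottomright': (110, 100)}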
@@ -2,14 +2,14 @@ import bob.ip.facedetect.tinyface
from . import Base
import cv2 as cv


class BobIpTinyface(Base):
    """Annotator using tinyface in bob.ip.facedetect"""

    def __init__(self, **kwargs):
        super(BobIpTinyface, self).__init__(**kwargs)
        self.tinyface = bob.ip.facedetect.tinyface.TinyFacesDetector(prob_thresh=0.5)

    def annotate(self, image, **kwargs):
        """Annotates an image using tinyface
@@ -25,15 +25,11 @@ class BobIpTinyface(Base):
        dict
            Annotations with (topleft, bottomright) keys (or None).
        """
        annotations = self.tinyface.detect(image)

        if annotations is not None:
            r = annotations[0]
            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
        else:
            return None
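A minimal usage sketch (not from this commit), assuming a Bob-format RGB image with shape (channels, height, width):

import numpy as np

annotator = BobIpTinyface()
image = np.random.rand(3, 480, 640)  # stand-in for a real face image
annotations = annotator.annotate(image)
if annotations is not None:
    print(annotations["topleft"], annotations["bottomright"])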
@@ -23,25 +23,36 @@ else:
    fixed_positions = None

cropped_positions = {"leye": (49, 72), "reye": (49, 38)}

preprocessor_transformer = FaceCrop(
    cropped_image_size=(112, 112),
    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
    color_channel="rgb",
    fixed_positions=fixed_positions,
)

transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

extractor_transformer = mxnet_model()

algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)
@@ -50,5 +61,3 @@ transformer = make_pipeline(

# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
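A hedged smoke test of the transformer chain above; the `Sample` container is from bob.pipelines, and the image and eye positions are made-up stand-ins in Bob's (row, column) convention:

import numpy as np
from bob.pipelines import Sample

fake_image = np.random.rand(3, 160, 160)  # stand-in RGB image, (C, H, W)
sample = Sample(
    fake_image,
    key="subject_01/sample_01",
    annotations={"leye": (60, 110), "reye": (60, 50)},
)

features = transformer.transform([sample])  # crop, then embed
print(features[0].data.shape)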
@@ -26,27 +26,41 @@ else:
    fixed_positions = None

cropped_positions = {"leye": (98, 144), "reye": (98, 76)}

# Preprocessor
preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions={"leye": (98, 144), "reye": (98, 76)},
    color_channel="rgb",
    fixed_positions=fixed_positions,
)

transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = opencv_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

## Creation of the pipeline
# Chain the Transformers together
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)
...
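Why `transform_extra_arguments` is conditional: the sample wrapper forwards the named sample attribute into `transform()`, which is only needed when the cropper relies on per-sample annotations rather than `fixed_positions`. A hypothetical helper mirroring that behavior (names are illustrative, not from bob.pipelines):

def forward_annotations(samples, estimator):
    # Roughly what wrap(["sample"], estimator,
    # transform_extra_arguments=(("annotations", "annotations"),)) does:
    data = [s.data for s in samples]
    annotations = [s.annotations for s in samples]
    return estimator.transform(data, annotations=annotations)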
@@ -23,23 +23,36 @@ else:
    fixed_positions = None

cropped_positions = {"leye": (49, 72), "reye": (49, 38)}

preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
    color_channel="rgb",
    fixed_positions=fixed_positions,
)

transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

extractor_transformer = pytorch_loaded_model()

algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)
@@ -48,60 +61,3 @@ transformer = make_pipeline(

# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
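What the `Distance` algorithm boils down to: with `is_distance_function=True`, the score is the sign-flipped cosine distance between enrollment and probe embeddings, so larger scores mean better matches. A hedged sketch with random stand-in embeddings:

import numpy as np
import scipy.spatial.distance

enroll = np.random.rand(512)  # stand-in embedding of an enrolled identity
probe = np.random.rand(512)   # stand-in embedding of a probe image
score = -scipy.spatial.distance.cosine(enroll, probe)  # higher is better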
@@ -54,6 +54,7 @@ transformer = make_pipeline(

# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
@@ -111,3 +112,5 @@ transformer = pipeline.transformer
@@ -24,23 +24,38 @@ else:

# Preprocessor
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(160, 160),
    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
    color_channel="rgb",
    fixed_positions=fixed_positions,
)

transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = tf_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)
@@ -49,60 +64,3 @@ transformer = make_pipeline(

# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
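Config files like the four above are not executed directly; a runner imports the module and picks up the `pipeline` object. A hedged reminder of the typical command (option names taken from the bob.bio.base documentation of this era, not from this commit):

# bob bio pipelines vanilla-biometrics \
#     --database <database-config> \
#     --pipeline <this-config.py> \
#     --output <results-dir>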
@@ -9,7 +9,7 @@ from .opencv_caffe import opencv_model

# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is shortened.

    Parameters:
@@ -19,7 +19,9 @@ def __appropriate__(*args):
        <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """

    for obj in args:
        obj.__module__ = __name__


__appropriate__(
    DCTBlocks,
@@ -30,5 +32,5 @@ __appropriate__(
    pytorch_library_model,
    tf_model,
    opencv_model,
)

__all__ = [_ for _ in dir() if not _.startswith("_")]
@@ -12,9 +12,11 @@ import mxnet as mx
from mxnet import gluon
import warnings
from bob.extension import rc

mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]


class mxnet_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under MxNet Interfaces.
@@ -57,8 +59,10 @@ class mxnet_model(TransformerMixin, BaseEstimator):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            deserialized_net = gluon.nn.SymbolBlock.imports(
                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
            )

        self.model = deserialized_net

    def transform(self, X):
@@ -76,16 +80,15 @@ class mxnet_model(TransformerMixin, BaseEstimator):
        feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        X = check_array(X, allow_nd=True)
        X = mx.nd.array(X)

        return self.model(X).asnumpy()
    def __getstate__(self):
        # Handling unpicklable objects
...
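A hedged usage sketch of `mxnet_model`, assuming the rc keys above point at an exported Gluon symbol/parameters pair (e.g. set beforehand with `bob config set bob.extractor_model.mxnet /path/to/model-symbol.json`) and that the network expects 112x112 RGB crops in NCHW layout:

import numpy as np

extractor = mxnet_model()
batch = np.random.rand(1, 3, 112, 112).astype("float32")  # stand-in crops
embeddings = extractor.transform(batch)  # loads the network lazily
print(embeddings.shape)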
@@ -44,7 +44,6 @@ class opencv_model(TransformerMixin, BaseEstimator):
    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = None
@@ -64,7 +63,7 @@ class opencv_model(TransformerMixin, BaseEstimator):
    def _load_model(self):
        net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_directory)

        self.model = net
@@ -83,16 +82,15 @@ class opencv_model(TransformerMixin, BaseEstimator):
        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        img = np.array(X)
        self.model.setInput(img)

        return self.model.forward()
    def __getstate__(self):
        # Handling unpicklable objects
...
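A hedged usage sketch of `opencv_model`: the OpenCV DNN interface consumes NCHW float blobs, so a single crop is typically wrapped with `cv2.dnn.blobFromImage` first (the image here is a random stand-in):

import cv2
import numpy as np

extractor = opencv_model()
face = (np.random.rand(224, 224, 3) * 255).astype("uint8")  # stand-in crop
blob = cv2.dnn.blobFromImage(face)  # shape (1, 3, 224, 224)
embedding = extractor.transform(blob)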
@@ -16,6 +16,7 @@ import imp
pytorch_model_directory = rc["bob.extractor_model.pytorch"]
pytorch_weight_directory = rc["bob.extractor_weights.pytorch"]


class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under PyTorch Interface, especially for models and weights that need to be loaded by hand.
@@ -30,7 +31,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    **Parameters:**

    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
@@ -52,12 +53,12 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    def _load_model(self):
        MainModel = imp.load_source("MainModel", pytorch_model_directory)
        network = torch.load(pytorch_weight_directory)
        network.eval()

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        network.to(self.device)

        self.model = network
@@ -77,7 +78,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()
@@ -85,7 +86,6 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
        return self.model(X).detach().numpy()

    def __getstate__(self):
        # Handling unpicklable objects
@@ -95,14 +95,8 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}


class pytorch_library_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models registered in the PyTorch library.
@@ -153,7 +147,6 @@ class pytorch_library_model(TransformerMixin, BaseEstimator):
        return self.model(X).detach().numpy()

    def __getstate__(self):
        # Handling unpicklable objects
...
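The elided `__getstate__` bodies share one purpose: drop the live network handle so the estimator can pass through joblib/dask pickling and reload lazily on the next `transform()`. A sketch of that pattern (not the literal body from this commit):

    def __getstate__(self):
        # Copy the instance dict and drop the unpicklable network; it is
        # re-created by _load_model() on the next transform() call.
        state = self.__dict__.copy()
        state["model"] = None
        return state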
@@ -17,6 +17,7 @@ from tensorflow import keras
tf_model_directory = rc["bob.extractor_model.tf"]


class tf_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under TensorFlow Interface.
@@ -71,7 +72,7 @@ class tf_model(TransformerMixin, BaseEstimator):
        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self.load_model()
@@ -80,10 +81,8 @@ class tf_model(TransformerMixin, BaseEstimator):
        X = to_channels_last(X)
        predict = self.model.predict(X)

        return predict
    def __getstate__(self):
        # Handling unpicklable objects
...
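A hedged usage sketch of `tf_model`: `transform()` converts the batch to channels-last before calling Keras `predict`, so Bob-style NCHW input is accepted; the 160x160 size matches the FaceCrop configuration above (assumptions, not from this commit):

import numpy as np

extractor = tf_model()
batch = np.random.rand(1, 3, 160, 160).astype("float32")  # stand-in crops
embeddings = extractor.transform(batch)
print(embeddings.shape)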
@@ -22,4 +22,4 @@ mxnet
opencv-python
six
scikit-image
scikit-learn # for pipelines Transformers
\ No newline at end of file