From 038e214beb6e517a89c1cccbaefe7df1c147378b Mon Sep 17 00:00:00 2001
From: Xinyi ZHANG <xzhang@vws101.idiap.ch>
Date: Sun, 11 Apr 2021 22:02:15 +0200
Subject: [PATCH] =?UTF-8?q?=E2=80=9Dnew=E2=80=9D?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 bob/bio/face/annotator/__init__.py          |  1 -
 bob/bio/face/annotator/bobiptinyface.py     | 12 +--
 bob/bio/face/config/baseline/mxnet_pipe.py  | 31 ++++---
 bob/bio/face/config/baseline/opencv_pipe.py | 32 +++++--
 .../face/config/baseline/pytorch_pipe_v1.py | 84 +++++--------------
 bob/bio/face/config/baseline/tf_pipe.py     | 82 +++++------------
 bob/bio/face/extractor/__init__.py          | 10 ++-
 bob/bio/face/extractor/mxnet_resnet.py      | 13 +--
 bob/bio/face/extractor/opencv_caffe.py      | 10 +--
 bob/bio/face/extractor/pytorch_model.py     | 27 ++-----
 bob/bio/face/extractor/tf_model.py          |  5 +-
 requirements.txt                            |  2 +-
 12 files changed, 118 insertions(+), 191 deletions(-)

diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py
index 8e778f61..e06a34ae 100644
--- a/bob/bio/face/annotator/__init__.py
+++ b/bob/bio/face/annotator/__init__.py
@@ -1,6 +1,5 @@
 import bob.ip.facedetect
 
-
 def bounding_box_to_annotations(bbx):
     """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.
 
diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index 595f7849..d2790c82 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -2,14 +2,14 @@ import bob.ip.facedetect.tinyface
 from . import Base
 import cv2 as cv
 
+
 class BobIpTinyface(Base):
     """Annotator using tinyface in bob.ip.facedetect"""
-    
+
     def __init__(self, **kwargs):
         super(BobIpTinyface, self).__init__(**kwargs)
         self.tinyface = bob.ip.facedetect.tinyface.TinyFacesDetector(prob_thresh=0.5)
 
-
     def annotate(self, image, **kwargs):
         """Annotates an image using tinyface
 
@@ -25,15 +25,11 @@ class BobIpTinyface(Base):
         dict
             Annotations with (topleft, bottomright) keys (or None).
""" - - annotations = self.tinyface.detect(image) + annotations = self.tinyface.detect(image) if annotations is not None: r = annotations[0] - return {'topleft':(r[0],r[1]), 'bottomright':(r[2],r[3])} + return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])} else: return None - - - diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py index b458dc70..eeb06200 100644 --- a/bob/bio/face/config/baseline/mxnet_pipe.py +++ b/bob/bio/face/config/baseline/mxnet_pipe.py @@ -23,25 +23,36 @@ else: fixed_positions = None +cropped_positions = {"leye": (49, 72), "reye": (49, 38)} -cropped_positions={'leye':(49,72), 'reye':(49,38)} - -preprocessor_transformer = FaceCrop(cropped_image_size=(112,112), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions) - -transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)) - - +preprocessor_transformer = FaceCrop( + cropped_image_size=(112, 112), + cropped_positions={"leye": (49, 72), "reye": (49, 38)}, + color_channel="rgb", + fixed_positions=fixed_positions, +) +transform_extra_arguments = ( + None + if (cropped_positions is None or fixed_positions is not None) + else (("annotations", "annotations"),) +) extractor_transformer = mxnet_model() -algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True) +algorithm = Distance( + distance_function=scipy.spatial.distance.cosine, is_distance_function=True +) # Chain the Transformers together transformer = make_pipeline( - wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments), + wrap( + ["sample"], + preprocessor_transformer, + transform_extra_arguments=transform_extra_arguments, + ), wrap(["sample"], extractor_transformer) # Add more transformers here if needed ) @@ -50,5 +61,3 @@ transformer = make_pipeline( # Assemble the Vanilla Biometric pipeline and execute pipeline = VanillaBiometricsPipeline(transformer, algorithm) transformer = pipeline.transformer - - diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py index c7f8cd8c..aac2d3f1 100644 --- a/bob/bio/face/config/baseline/opencv_pipe.py +++ b/bob/bio/face/config/baseline/opencv_pipe.py @@ -26,27 +26,41 @@ else: fixed_positions = None +cropped_positions = {"leye": (98, 144), "reye": (98, 76)} +# Preprocessor +preprocessor_transformer = FaceCrop( + cropped_image_size=(224, 224), + cropped_positions={"leye": (98, 144), "reye": (98, 76)}, + color_channel="rgb", + fixed_positions=fixed_positions, +) -cropped_positions={"leye": (98, 144), "reye": (98, 76)} -#Preprocessor -preprocessor_transformer = FaceCrop(cropped_image_size=(224,224), cropped_positions={"leye": (98, 144), "reye": (98, 76)}, color_channel='rgb',fixed_positions=fixed_positions) - -transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),)) +transform_extra_arguments = ( + None + if (cropped_positions is None or fixed_positions is not None) + else (("annotations", "annotations"),) +) -#Extractor +# Extractor extractor_transformer = opencv_model() -#Algorithm -algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True) +# Algorithm +algorithm = Distance( + distance_function=scipy.spatial.distance.cosine, is_distance_function=True +) ## Creation of the pipeline # Chain the 
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index 9b03a70d..ec5c4d9d 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -23,23 +23,36 @@ else:
     fixed_positions = None
 
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
-
-preprocessor_transformer = FaceCrop(cropped_image_size=(224,224), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions)
-
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(224, 224),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
 
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
 extractor_transformer = pytorch_loaded_model()
 
 
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
 
@@ -48,60 +61,3 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
index 2e0a8ae2..22c24d95 100644
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ b/bob/bio/face/config/baseline/tf_pipe.py
@@ -24,23 +24,38 @@ else:
     fixed_positions = None
 
 
 # Preprocessor
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
-preprocessor_transformer = FaceCrop(cropped_image_size=(160,160), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions)
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(160, 160),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    
fixed_positions=fixed_positions,
+)
 
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
 
 # Extractor
 extractor_transformer = tf_model()
 
 # Algorithm
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
 
@@ -49,60 +64,3 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index a5484ea1..32b97163 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -9,7 +9,7 @@ from .opencv_caffe import opencv_model
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-    """Says object was actually declared here, and not in the import module.
+    """Says object was actually declared here, and not in the import module.
     Fixing sphinx warnings of not being able to find classes, when path is
     shortened.
 
     Parameters:
@@ -19,7 +19,9 @@ def __appropriate__(*args):
       <https://github.com/sphinx-doc/sphinx/issues/3048>`
 
     """
-    for obj in args: obj.__module__ = __name__
+    for obj in args:
+        obj.__module__ = __name__
+
 
 __appropriate__(
     DCTBlocks,
@@ -30,5 +32,5 @@ __appropriate__(
     pytorch_library_model,
     tf_model,
     opencv_model,
-    )
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+)
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/face/extractor/mxnet_resnet.py b/bob/bio/face/extractor/mxnet_resnet.py
index aab050bf..73ab9f22 100644
--- a/bob/bio/face/extractor/mxnet_resnet.py
+++ b/bob/bio/face/extractor/mxnet_resnet.py
@@ -12,9 +12,11 @@ import mxnet as mx
 from mxnet import gluon
 import warnings
 from bob.extension import rc
+
 mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
 mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
 
+
 class mxnet_model(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under MxNet Interfaces.
 
@@ -57,8 +59,10 @@ class mxnet_model(TransformerMixin, BaseEstimator):
 
         with warnings.catch_warnings():
             warnings.simplefilter("ignore")
-            deserialized_net = gluon.nn.SymbolBlock.imports(mxnet_resnet_directory, ['data'], mxnet_weight_directory, ctx=ctx)
-
+            deserialized_net = gluon.nn.SymbolBlock.imports(
+                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
+            )
+
         self.model = deserialized_net
 
     def transform(self, X):
@@ -76,16 +80,15 @@ class mxnet_model(TransformerMixin, BaseEstimator):
       feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
         The list of features extracted from the image.
""" - + if self.model is None: - self.load_model() + self._load_model() X = check_array(X, allow_nd=True) X = mx.nd.array(X) return self.model(X,).asnumpy() - def __getstate__(self): # Handling unpicklable objects diff --git a/bob/bio/face/extractor/opencv_caffe.py b/bob/bio/face/extractor/opencv_caffe.py index 647fc724..d816e042 100644 --- a/bob/bio/face/extractor/opencv_caffe.py +++ b/bob/bio/face/extractor/opencv_caffe.py @@ -44,7 +44,6 @@ class opencv_model(TransformerMixin, BaseEstimator): use_gpu: True or False. """ - def __init__(self, use_gpu=False, **kwargs): super().__init__(**kwargs) self.model = None @@ -64,7 +63,7 @@ class opencv_model(TransformerMixin, BaseEstimator): def _load_model(self): - net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt,opencv_model_directory) + net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_directory) self.model = net @@ -83,16 +82,15 @@ class opencv_model(TransformerMixin, BaseEstimator): feature : 2D or 3D :py:class:`numpy.ndarray` (floats) The list of features extracted from the image. """ - + if self.model is None: - self.load_model() + self._load_model() img = np.array(X) self.model.setInput(img) - - return self.model.forward() + return self.model.forward() def __getstate__(self): # Handling unpicklable objects diff --git a/bob/bio/face/extractor/pytorch_model.py b/bob/bio/face/extractor/pytorch_model.py index 52c4f172..96a57cb9 100644 --- a/bob/bio/face/extractor/pytorch_model.py +++ b/bob/bio/face/extractor/pytorch_model.py @@ -16,6 +16,7 @@ import imp pytorch_model_directory = rc["bob.extractor_model.pytorch"] pytorch_weight_directory = rc["bob.extractor_weights.pytorch"] + class pytorch_loaded_model(TransformerMixin, BaseEstimator): """Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand. @@ -30,7 +31,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): **Parameters:** use_gpu: True or False. - """ + """ def __init__(self, use_gpu=False, **kwargs): super().__init__(**kwargs) @@ -52,12 +53,12 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): def _load_model(self): - MainModel = imp.load_source('MainModel', pytorch_model_directory) + MainModel = imp.load_source("MainModel", pytorch_model_directory) network = torch.load(pytorch_weight_directory) network.eval() - - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - + + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + network.to(self.device) self.model = network @@ -77,7 +78,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): feature : 2D or 3D :py:class:`numpy.ndarray` (floats) The list of features extracted from the image. """ - + if self.model is None: self.load_model() @@ -85,7 +86,6 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): return self.model(X).detach().numpy() - def __getstate__(self): # Handling unpicklable objects @@ -95,14 +95,8 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): def _more_tags(self): return {"stateless": True, "requires_fit": False} - - - - - - - - + + class pytorch_library_model(TransformerMixin, BaseEstimator): """Extracts features using deep face recognition with registered model frames in the PyTorch Library. 
@@ -153,7 +147,6 @@ class pytorch_library_model(TransformerMixin, BaseEstimator):
 
         return self.model(X).detach().numpy()
 
-
     def __getstate__(self):
         # Handling unpicklable objects
 
diff --git a/bob/bio/face/extractor/tf_model.py b/bob/bio/face/extractor/tf_model.py
index 9e83ec38..00141078 100644
--- a/bob/bio/face/extractor/tf_model.py
+++ b/bob/bio/face/extractor/tf_model.py
@@ -17,6 +17,7 @@ from tensorflow import keras
 
 tf_model_directory = rc["bob.extractor_model.tf"]
 
+
 class tf_model(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under TensorFlow Interface.
 
@@ -71,7 +72,7 @@ class tf_model(TransformerMixin, BaseEstimator):
     feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
       The list of features extracted from the image.
     """
-    
+
         if self.model is None:
             self.load_model()
 
@@ -80,10 +81,8 @@ class tf_model(TransformerMixin, BaseEstimator):
 
         X = to_channels_last(X)
         predict = self.model.predict(X)
-
         return predict
 
-
     def __getstate__(self):
         # Handling unpicklable objects
 
diff --git a/requirements.txt b/requirements.txt
index 02e30ac5..bcd1763c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,4 +22,4 @@ mxnet
 opencv-python
 six
 scikit-image
-scikit-learn # for pipelines Tranformers
+scikit-learn # for pipelines Transformers
-- 
GitLab
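
A note on the extractor design above: mxnet_model, opencv_model, pytorch_loaded_model, and tf_model all follow the same scikit-learn transformer pattern. self.model stays None until the first transform() call triggers _load_model(), and __getstate__ drops the network so the estimator remains picklable for checkpointed or distributed runs. Below is a minimal, framework-free sketch of that pattern; DummyNet and lazy_extractor are hypothetical names, with DummyNet standing in for the real networks that the extractors read from the bob.extension.rc paths (bob.extractor_model.*, bob.extractor_weights.*).

# Minimal sketch of the lazy-loading extractor pattern used by the
# extractors in this patch. "DummyNet" is a hypothetical stand-in for a
# real deep model; only the structure (deferred loading, pickling
# support, stateless tags) mirrors the patch.
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array


class DummyNet:
    """Stand-in network: projects flattened images with a fixed matrix."""

    def __init__(self, in_shape=(112, 112, 3), n_features=128, seed=0):
        rng = np.random.default_rng(seed)
        self.weights = rng.standard_normal((int(np.prod(in_shape)), n_features))

    def __call__(self, batch):
        # batch: (N, H, W, C) -> features: (N, n_features)
        return batch.reshape(len(batch), -1) @ self.weights


class lazy_extractor(TransformerMixin, BaseEstimator):
    """Loads its model on first use and drops it when pickled."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = None  # heavy weights are NOT touched at config-load time

    def _load_model(self):
        # The real extractors read model/weight paths from bob.extension.rc here.
        self.model = DummyNet()

    def fit(self, X, y=None):
        return self  # nothing to fit; see _more_tags

    def transform(self, X):
        if self.model is None:
            self._load_model()
        X = check_array(X, allow_nd=True)
        return self.model(X)

    def __getstate__(self):
        # Handling unpicklable objects: the network is re-loaded lazily
        # after unpickling instead of being serialized with the estimator.
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}

Wired into a baseline the same way as the configs above, such a transformer would be chained after the FaceCrop preprocessor, e.g. make_pipeline(wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments), wrap(["sample"], lazy_extractor())).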