diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py
index 8e778f616503380d8d88c266d036e08d1355a96b..e06a34aec272550003d15ea93b8f3c15ff393213 100644
--- a/bob/bio/face/annotator/__init__.py
+++ b/bob/bio/face/annotator/__init__.py
@@ -1,6 +1,5 @@
 import bob.ip.facedetect
 
-
 def bounding_box_to_annotations(bbx):
     """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.
 
diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index 595f784986dd9aa2ad7444980c3aa863429ecfc5..d2790c826d61e4510cfa1e2a61d51166f9d1e270 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -2,14 +2,14 @@
 import bob.ip.facedetect.tinyface
 from . import Base
 import cv2 as cv
 
+
 class BobIpTinyface(Base):
     """Annotator using tinyface in bob.ip.facedetect"""
-
+
     def __init__(self, **kwargs):
         super(BobIpTinyface, self).__init__(**kwargs)
         self.tinyface = bob.ip.facedetect.tinyface.TinyFacesDetector(prob_thresh=0.5)
-
     def annotate(self, image, **kwargs):
         """Annotates an image using tinyface
 
@@ -25,15 +25,11 @@ class BobIpTinyface(Base):
 
         dict
             Annotations with (topleft, bottomright) keys (or None).
         """
-
-        annotations = self.tinyface.detect(image)
+        annotations = self.tinyface.detect(image)
 
         if annotations is not None:
             r = annotations[0]
-            return {'topleft':(r[0],r[1]), 'bottomright':(r[2],r[3])}
+            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
         else:
             return None
-
-
-
diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py
index b458dc70f8bd61c4d75cdee26e2227cc5737cc63..eeb062004bc0e55b3db402dfa8e39be42d9ed3cf 100644
--- a/bob/bio/face/config/baseline/mxnet_pipe.py
+++ b/bob/bio/face/config/baseline/mxnet_pipe.py
@@ -23,25 +23,36 @@ else:
     fixed_positions = None
 
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
-
-preprocessor_transformer = FaceCrop(cropped_image_size=(112,112), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions)
-
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
-
-
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(112, 112),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
 
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
 extractor_transformer = mxnet_model()
 
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
@@ -50,5 +61,3 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
-
-
diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index c7f8cd8cf005f81900f56ee354086065ede667e4..aac2d3f1c45559e35545fb66a5c02ca97aa76790 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -26,27 +26,41 @@ else:
     fixed_positions = None
 
+cropped_positions = {"leye": (98, 144), "reye": (98, 76)}
 
+# Preprocessor
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(224, 224),
+    cropped_positions={"leye": (98, 144), "reye": (98, 76)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
 
-cropped_positions={"leye": (98, 144), "reye": (98, 76)}
-#Preprocessor
-preprocessor_transformer = FaceCrop(cropped_image_size=(224,224), cropped_positions={"leye": (98, 144), "reye": (98, 76)}, color_channel='rgb',fixed_positions=fixed_positions)
-
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
-#Extractor
+# Extractor
 extractor_transformer = opencv_model()
 
-#Algorithm
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+# Algorithm
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 ## Creation of the pipeline
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index 9b03a70dd1934073e3541f423d649c4c492af138..ec5c4d9d619110d81c0c576088112aee10d6d282 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -23,23 +23,36 @@ else:
     fixed_positions = None
 
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
-
-preprocessor_transformer = FaceCrop(cropped_image_size=(224,224), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions)
-
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(224, 224),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
 
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
 extractor_transformer = pytorch_loaded_model()
 
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
@@ -48,60 +61,3 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
index 2e0a8ae2af52fcc85ac9e1a49e8bc3cf0cd2772a..22c24d95abcf88bf1acb9c9fe9f587fbd7abeccd 100644
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ b/bob/bio/face/config/baseline/tf_pipe.py
@@ -24,23 +24,38 @@ else:
 
 # Preprocessor
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
-preprocessor_transformer = FaceCrop(cropped_image_size=(160,160), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',fixed_positions=fixed_positions)
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(160, 160),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
 
-transform_extra_arguments = (None if (cropped_positions is None or fixed_positions is not None) else (("annotations", "annotations"),))
+transform_extra_arguments = (
+    None
+    if (cropped_positions is None or fixed_positions is not None)
+    else (("annotations", "annotations"),)
+)
 
 # Extractor
 extractor_transformer = tf_model()
 
 # Algorithm
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 # Chain the Transformers together
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
+    wrap(
+        ["sample"],
+        preprocessor_transformer,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
     wrap(["sample"], extractor_transformer)
     # Add more transformers here if needed
 )
@@ -49,60 +64,3 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index a5484ea167a1afedc102965674fe8ed2ae9f5cdb..32b97163775852dbca316f538672be6bccf2095d 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -9,7 +9,7 @@ from .opencv_caffe import opencv_model
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-    """Says object was actually declared here, and not in the import module.
+    """Says object was actually declared here, and not in the import module.
     Fixing sphinx warnings of not being able to find classes, when path is shortened.
 
     Parameters:
 
@@ -19,7 +19,9 @@ def __appropriate__(*args):
       <https://github.com/sphinx-doc/sphinx/issues/3048>`
     """
 
-    for obj in args: obj.__module__ = __name__
+    for obj in args:
+        obj.__module__ = __name__
+
 
 __appropriate__(
     DCTBlocks,
@@ -30,5 +32,5 @@ __appropriate__(
     pytorch_library_model,
     tf_model,
     opencv_model,
-    )
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+)
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/face/extractor/mxnet_resnet.py b/bob/bio/face/extractor/mxnet_resnet.py
index aab050bfadd851e015e8b19e736a52aa854807e9..73ab9f22a070ac198e2f8d7dfbcb6c23bb1f4221 100644
--- a/bob/bio/face/extractor/mxnet_resnet.py
+++ b/bob/bio/face/extractor/mxnet_resnet.py
@@ -12,9 +12,11 @@ import mxnet as mx
 from mxnet import gluon
 import warnings
 from bob.extension import rc
+
 mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
 mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
 
+
 class mxnet_model(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under MxNet Interfaces.
@@ -57,8 +59,10 @@ class mxnet_model(TransformerMixin, BaseEstimator):
 
         with warnings.catch_warnings():
             warnings.simplefilter("ignore")
-            deserialized_net = gluon.nn.SymbolBlock.imports(mxnet_resnet_directory, ['data'], mxnet_weight_directory, ctx=ctx)
-
+            deserialized_net = gluon.nn.SymbolBlock.imports(
+                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
+            )
+
         self.model = deserialized_net
 
     def transform(self, X):
@@ -76,16 +80,15 @@ class mxnet_model(TransformerMixin, BaseEstimator):
         feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
             The list of features extracted from the image.
         """
-
+
         if self.model is None:
-            self.load_model()
+            self._load_model()
 
         X = check_array(X, allow_nd=True)
         X = mx.nd.array(X)
 
         return self.model(X,).asnumpy()
-
 
     def __getstate__(self):
         # Handling unpicklable objects
diff --git a/bob/bio/face/extractor/opencv_caffe.py b/bob/bio/face/extractor/opencv_caffe.py
index 647fc724fe4e1087e488f89e24ed42de750552fa..d816e0421b514e8623972b5375df7036fe91c039 100644
--- a/bob/bio/face/extractor/opencv_caffe.py
+++ b/bob/bio/face/extractor/opencv_caffe.py
@@ -44,7 +44,6 @@ class opencv_model(TransformerMixin, BaseEstimator):
 
     use_gpu: True or False.
     """
-
     def __init__(self, use_gpu=False, **kwargs):
         super().__init__(**kwargs)
         self.model = None
@@ -64,7 +63,7 @@ class opencv_model(TransformerMixin, BaseEstimator):
 
     def _load_model(self):
 
-        net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt,opencv_model_directory)
+        net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_directory)
 
         self.model = net
 
@@ -83,16 +82,15 @@ class opencv_model(TransformerMixin, BaseEstimator):
         feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
             The list of features extracted from the image.
""" - + if self.model is None: - self.load_model() + self._load_model() img = np.array(X) self.model.setInput(img) - - return self.model.forward() + return self.model.forward() def __getstate__(self): # Handling unpicklable objects diff --git a/bob/bio/face/extractor/pytorch_model.py b/bob/bio/face/extractor/pytorch_model.py index 52c4f172d1c74f7018429261a072d8185bd69289..96a57cb9e1f45cd2a85a643146e69b3d873e787a 100644 --- a/bob/bio/face/extractor/pytorch_model.py +++ b/bob/bio/face/extractor/pytorch_model.py @@ -16,6 +16,7 @@ import imp pytorch_model_directory = rc["bob.extractor_model.pytorch"] pytorch_weight_directory = rc["bob.extractor_weights.pytorch"] + class pytorch_loaded_model(TransformerMixin, BaseEstimator): """Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand. @@ -30,7 +31,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): **Parameters:** use_gpu: True or False. - """ + """ def __init__(self, use_gpu=False, **kwargs): super().__init__(**kwargs) @@ -52,12 +53,12 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): def _load_model(self): - MainModel = imp.load_source('MainModel', pytorch_model_directory) + MainModel = imp.load_source("MainModel", pytorch_model_directory) network = torch.load(pytorch_weight_directory) network.eval() - - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - + + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + network.to(self.device) self.model = network @@ -77,7 +78,7 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): feature : 2D or 3D :py:class:`numpy.ndarray` (floats) The list of features extracted from the image. """ - + if self.model is None: self.load_model() @@ -85,7 +86,6 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): return self.model(X).detach().numpy() - def __getstate__(self): # Handling unpicklable objects @@ -95,14 +95,8 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator): def _more_tags(self): return {"stateless": True, "requires_fit": False} - - - - - - - - + + class pytorch_library_model(TransformerMixin, BaseEstimator): """Extracts features using deep face recognition with registered model frames in the PyTorch Library. @@ -153,7 +147,6 @@ class pytorch_library_model(TransformerMixin, BaseEstimator): return self.model(X).detach().numpy() - def __getstate__(self): # Handling unpicklable objects diff --git a/bob/bio/face/extractor/tf_model.py b/bob/bio/face/extractor/tf_model.py index 9e83ec389dafb05b1d9031c52338e235a9086ef8..001410786ae28278fea055c212bd9d5bd6a31972 100644 --- a/bob/bio/face/extractor/tf_model.py +++ b/bob/bio/face/extractor/tf_model.py @@ -17,6 +17,7 @@ from tensorflow import keras tf_model_directory = rc["bob.extractor_model.tf"] + class tf_model(TransformerMixin, BaseEstimator): """Extracts features using deep face recognition models under TensorFlow Interface. @@ -71,7 +72,7 @@ class tf_model(TransformerMixin, BaseEstimator): feature : 2D or 3D :py:class:`numpy.ndarray` (floats) The list of features extracted from the image. 
""" - + if self.model is None: self.load_model() @@ -80,10 +81,8 @@ class tf_model(TransformerMixin, BaseEstimator): X = to_channels_last(X) predict = self.model.predict(X) - return predict - def __getstate__(self): # Handling unpicklable objects diff --git a/requirements.txt b/requirements.txt index 02e30ac5109f4d32cb93c768ec7a78195f25de94..bcd1763c12212b5e0c1ea6a1bbe82674f26f7f28 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,4 @@ mxnet opencv-python six scikit-image -scikit-learn # for pipelines Tranformers +scikit-learn # for pipelines Tranformers \ No newline at end of file