Commit 1c13eb81 authored by Tiago de Freitas Pereira

Transformed the preprocessors to transformers

parent a965ac9d
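For context, a minimal usage sketch of the converted API (not part of this commit): the preprocessors now expose scikit-learn style fit/transform instead of __call__, with fit being a no-op. The eye keys, coordinates, and crop size below are illustrative assumptions.

import numpy
from bob.bio.face.preprocessor import Base, FaceCrop

# Stateless transformer: fit() is a no-op, transform() does the work.
to_gray = Base(color_channel="gray", dtype="float64")
rgb = numpy.random.randint(0, 255, (3, 112, 112), dtype="uint8")
gray = to_gray.fit([rgb]).transform(rgb)  # 2D float64 grayscale image

# FaceCrop crops a whole batch when `annotations` is a list of dicts.
cropper = FaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions={"reye": (16, 15), "leye": (16, 48)},
)
batch = numpy.stack([rgb, rgb])
annots = [{"reye": (40, 40), "leye": (40, 70)}] * 2
faces = cropper.transform(batch, annotations=annots)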
......@@ -2,11 +2,11 @@ import numpy
import bob.io.image
import bob.ip.color
from bob.bio.base.preprocessor import Preprocessor
from sklearn.base import TransformerMixin, BaseEstimator
class Base (Preprocessor):
"""Performs color space adaptations and data type corrections for the given
class Base(TransformerMixin, BaseEstimator):
"""Performs color space adaptations and data type corrections for the given
image.
**Parameters:**
......@@ -18,14 +18,18 @@ class Base (Preprocessor):
The specific color channel, which should be extracted from the image.
"""
def __init__(self, dtype=None, color_channel='gray', **kwargs):
Preprocessor.__init__(self, dtype=str(dtype),
color_channel=color_channel, **kwargs)
self.channel = color_channel
self.dtype = dtype
def __init__(self, dtype=None, color_channel="gray", **kwargs):
self.channel = color_channel
self.dtype = dtype
def color_channel(self, image):
"""color_channel(image) -> channel
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
def fit(self, X, y=None):
return self
def color_channel(self, image):
"""color_channel(image) -> channel
Returns the channel of the given image, which was selected in the
constructor. Currently, gray, red, green and blue channels are supported.
......@@ -40,31 +44,34 @@ class Base (Preprocessor):
channel : 2D or 3D :py:class:`numpy.ndarray`
The extracted color channel.
"""
if image.ndim == 2:
if self.channel == 'rgb':
return bob.ip.color.gray_to_rgb(image)
if self.channel != 'gray':
raise ValueError("There is no rule to extract a " +
self.channel + " image from a gray level image!")
return image
if self.channel == 'rgb':
return image
if self.channel == 'gray':
return bob.ip.color.rgb_to_gray(image)
if self.channel == 'red':
return image[0, :, :]
if self.channel == 'green':
return image[1, :, :]
if self.channel == 'blue':
return image[2, :, :]
raise ValueError(
"The image channel '%s' is not known or not yet implemented",
self.channel)
def data_type(self, image):
"""data_type(image) -> image
if image.ndim == 2:
if self.channel == "rgb":
return bob.ip.color.gray_to_rgb(image)
if self.channel != "gray":
raise ValueError(
"There is no rule to extract a "
+ self.channel
+ " image from a gray level image!"
)
return image
if self.channel == "rgb":
return image
if self.channel == "gray":
return bob.ip.color.rgb_to_gray(image)
if self.channel == "red":
return image[0, :, :]
if self.channel == "green":
return image[1, :, :]
if self.channel == "blue":
return image[2, :, :]
raise ValueError(
"The image channel '%s' is not known or not yet implemented" % self.channel
)
def data_type(self, image):
"""data_type(image) -> image
Converts the given image into the data type specified in the constructor of
this class. If no data type was specified, or the ``image`` is ``None``, no
......@@ -80,12 +87,12 @@ class Base (Preprocessor):
image : 2D or 3D :py:class:`numpy.ndarray`
The image converted to the desired data type, if any.
"""
if self.dtype is not None and image is not None:
image = image.astype(self.dtype)
return image
if self.dtype is not None and image is not None:
image = image.astype(self.dtype)
return image
def __call__(self, image, annotations=None):
"""__call__(image, annotations = None) -> image
def transform(self, image, annotations=None):
"""__call__(image, annotations = None) -> image
Extracts the desired color channel and converts to the desired data type.
......@@ -102,7 +109,7 @@ class Base (Preprocessor):
image : 2D :py:class:`numpy.ndarray`
The image converted to the desired color channel and type.
"""
assert isinstance(image, numpy.ndarray) and image.ndim in (2, 3)
# convert to grayscale
image = self.color_channel(image)
return self.data_type(image)
assert isinstance(image, numpy.ndarray) and image.ndim in (2, 3)
# convert to grayscale
image = self.color_channel(image)
return self.data_type(image)
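Because _more_tags() advertises requires_fit=False, calling transform() on a freshly constructed instance is expected to work without any fitting; a small sketch under that assumption:

import numpy
from bob.bio.face.preprocessor import Base

t = Base(color_channel="rgb")
assert t._more_tags()["requires_fit"] is False
rgb = t.transform(numpy.zeros((10, 10), dtype="uint8"))  # 2D gray -> (3, 10, 10) rgb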
......@@ -8,9 +8,9 @@ import numpy
import logging
from .Base import Base
from bob.bio.base.preprocessor import Preprocessor
logger = logging.getLogger("bob.bio.face")
from sklearn.utils import check_array
class FaceCrop(Base):
......@@ -122,15 +122,12 @@ class FaceCrop(Base):
Base.__init__(self, **kwargs)
# call base class constructor
Preprocessor.__init__(
self,
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
mask_sigma=mask_sigma,
mask_neighbors=mask_neighbors,
mask_seed=mask_seed,
)
self.cropped_image_size = cropped_image_size
self.cropped_positions = cropped_positions
self.fixed_positions = fixed_positions
self.mask_sigma = mask_sigma
self.mask_neighbors = mask_neighbors
self.mask_seed = mask_seed
# check parameters
assert len(cropped_positions) == 2
......@@ -282,61 +279,71 @@ class FaceCrop(Base):
# check if the required keys are available
return all(key in annotations for key in self.cropped_keys)
def __call__(self, image, annotations=None):
def transform(self, X, annotations=None):
"""Aligns the given image according to the given annotations.
First, the desired color channel is extracted from the given image.
Afterward, the face is cropped, according to the given ``annotations`` (or
to ``fixed_positions``, see :py:meth:`crop_face`). Finally, the resulting
face is converted to the desired data type.
First, the desired color channel is extracted from the given image.
Afterward, the face is cropped, according to the given ``annotations`` (or
to ``fixed_positions``, see :py:meth:`crop_face`). Finally, the resulting
face is converted to the desired data type.
Parameters
----------
image : 2D or 3D :py:class:`numpy.ndarray`
The face image to be processed.
annotations : dict or ``None``
The annotations that fit to the given image.
Parameters
----------
image : 2D or 3D :py:class:`numpy.ndarray`
The face image to be processed.
annotations : dict or ``None``
The annotations that fit to the given image.
Returns
-------
face : 2D :py:class:`numpy.ndarray`
The cropped face.
"""
# if annotations are missing and cannot do anything else return None.
if (
not self.is_annotations_valid(annotations)
and not self.fixed_positions
and self.annotator is None
):
logger.warn(
"Cannot crop face without valid annotations or "
"fixed_positions or an annotator. Returning None. "
"The annotations were: {}".format(annotations)
)
return None
# convert to the desired color channel
image = self.color_channel(image)
# annotate the image if annotations are missing
if (
not self.is_annotations_valid(annotations)
and not self.fixed_positions
and self.annotator is not None
):
annotations = self.annotator(image, annotations=annotations)
if not self.is_annotations_valid(annotations):
Returns
-------
face : 2D :py:class:`numpy.ndarray`
The cropped face.
"""
def _crop(image, annot):
# if annotations are missing and cannot do anything else return None.
if (
not self.is_annotations_valid(annot)
and not self.fixed_positions
and self.annotator is None
):
logger.warn(
"The annotator failed and the annotations are missing too"
". Returning None."
"Cannot crop face without valid annotations or "
"fixed_positions or an annotator. Returning None. "
"The annotations were: {}".format(annot)
)
return None
# crop face
image = self.crop_face(image, annotations)
# convert to the desired color channel
image = self.color_channel(image)
# annotate the image if annotations are missing
if (
not self.is_annotations_valid(annot)
and not self.fixed_positions
and self.annotator is not None
):
annot = self.annotator(image, annotations=annot)
if not self.is_annotations_valid(annot):
logger.warn(
"The annotator failed and the annot are missing too"
". Returning None."
)
return None
# crop face
return self.data_type(self.crop_face(image, annot))
X = check_array(X, allow_nd=True)
# convert data type
return self.data_type(image)
if isinstance(annotations, list):
cropped_images = []
for image, annot in zip(X, annotations):
cropped_images.append(_crop(image, annot))
return cropped_images
else:
return _crop(X, annotations)
def __getstate__(self):
d = dict(self.__dict__)
......
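A sketch of the fixed-positions path (coordinates are illustrative): when fixed_positions is given, transform() can be called on a single image with annotations=None and crop_face falls back to the fixed eye positions.

import numpy
from bob.bio.face.preprocessor import FaceCrop

fixed_cropper = FaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions={"reye": (16, 15), "leye": (16, 48)},
    fixed_positions={"reye": (40, 40), "leye": (40, 70)},
)
image = numpy.random.randint(0, 255, (3, 112, 112), dtype="uint8")
face = fixed_cropper.transform(image)  # no annotations needed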
......@@ -9,7 +9,7 @@ import numpy
from .Base import Base
from .utils import load_cropper_only
from bob.bio.base.preprocessor import Preprocessor
from sklearn.utils import check_array
import logging
......@@ -79,16 +79,13 @@ class FaceDetect(Base):
# call base class constructors
Base.__init__(self, **kwargs)
Preprocessor.__init__(
self,
face_cropper=face_cropper,
cascade=cascade,
use_flandmark=use_flandmark,
detection_overlap=detection_overlap,
distance=distance,
scale_base=scale_base,
lowest_scale=lowest_scale,
)
self.face_cropper = face_cropper
self.cascade = cascade
self.use_flandmark = use_flandmark
self.detection_overlap = detection_overlap
self.distance = distance
self.scale_base = scale_base
self.lowest_scale = lowest_scale
assert face_cropper is not None
......@@ -194,7 +191,7 @@ class FaceDetect(Base):
# apply face cropping
return self.cropper.crop_face(image, annotations)
def __call__(self, image, annotations=None):
def transform(self, X, annotations=None):
"""__call__(image, annotations = None) -> face
Aligns the given image according to the detected face bounding box or the detected facial features.
......@@ -216,14 +213,29 @@ class FaceDetect(Base):
face : 2D :py:class:`numpy.ndarray`
The cropped face.
"""
# convert to the desired color channel
image = self.color_channel(image)
def _crop(image, annotation):
# convert to the desired color channel
image = self.color_channel(image)
# detect face and crop it
image = self.crop_face(image)
# convert data type
return self.data_type(image)
# detect face and crop it
image = self.crop_face(image)
# convert data type
return self.data_type(image)
X = check_array(X, allow_nd=True)
cropped_images = []
if isinstance(annotations, list):
cropped_images = []
for image, annot in zip(X, annotations):
cropped_images.append(_crop(image, annot))
return cropped_images
else:
return _crop(X, annotations)
def __getstate__(self):
d = dict(self.__dict__)
......
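A usage sketch for the detector-based transformer; the registered cropper resource name and the image path are assumptions, not part of this commit.

import bob.io.base
from bob.bio.face.preprocessor import FaceDetect

detector = FaceDetect(face_cropper="face-crop-eyes", use_flandmark=True)
image = bob.io.base.load("face.png")  # color image, shape (3, H, W)
face = detector.transform(image)      # detect the face, then crop it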
......@@ -22,88 +22,87 @@ import bob.ip.base
import numpy
from .Base import Base
from .utils import load_cropper
from bob.bio.base.preprocessor import Preprocessor
class HistogramEqualization (Base):
"""Crops the face (if desired) and performs histogram equalization to photometrically enhance the image.
**Parameters:**
class HistogramEqualization(Base):
"""Crops the face (if desired) and performs histogram equalization to photometrically enhance the image.
face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
The face image cropper that should be applied to the image.
If ``None`` is selected, no face cropping is performed.
Otherwise, the face cropper might be specified as a registered resource, a configuration file, or an instance of a preprocessor.
Parameters:
-----------
.. note:: The given class needs to contain a ``crop_face`` method.
face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
The face image cropper that should be applied to the image.
If ``None`` is selected, no face cropping is performed.
Otherwise, the face cropper might be specified as a registered resource, a configuration file, or an instance of a preprocessor.
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
"""
.. note:: The given class needs to contain a ``crop_face`` method.
def __init__(
self,
face_cropper,
**kwargs
):
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
"""
Base.__init__(self, **kwargs)
def __init__(self, face_cropper, **kwargs):
# call base class constructor with its set of parameters
Preprocessor.__init__(
self,
face_cropper = face_cropper,
)
Base.__init__(self, **kwargs)
self.cropper = load_cropper(face_cropper)
self.face_cropper = face_cropper
self.cropper = load_cropper(face_cropper)
def equalize_histogram(self, image):
"""equalize_histogram(image) -> equalized
def equalize_histogram(self, image):
"""equalize_histogram(image) -> equalized
Performs the histogram equalization on the given image.
Performs the histogram equalization on the given image.
**Parameters:**
**Parameters:**
image : 2D :py:class:`numpy.ndarray`
The image to perform histogram equalization on.
The image will be transformed to type ``uint8`` before computing the histogram.
image : 2D :py:class:`numpy.ndarray`
The image to perform histogram equalization on.
The image will be transformed to type ``uint8`` before computing the histogram.
**Returns:**
**Returns:**
equalized : 2D :py:class:`numpy.ndarray` (float)
The photometrically enhanced image.
"""
heq = numpy.ndarray(image.shape)
bob.ip.base.histogram_equalization(numpy.round(image).astype(numpy.uint8), heq)
return heq
equalized : 2D :py:class:`numpy.ndarray` (float)
The photometrically enhanced image.
"""
heq = numpy.ndarray(image.shape)
bob.ip.base.histogram_equalization(numpy.round(image).astype(numpy.uint8), heq)
return heq
def transform(self, X, annotations=None):
"""
Aligns the given image according to the given annotations.
First, the desired color channel is extracted from the given image.
Afterward, the face is optionally cropped using the ``face_cropper`` specified in the constructor.
Then, the image is photometrically enhanced using histogram equalization.
Finally, the resulting face is converted to the desired data type.
def __call__(self, image, annotations = None):
"""__call__(image, annotations = None) -> face
**Parameters:**
Aligns the given image according to the given annotations.
X : 2D or 3D :py:class:`numpy.ndarray`
The face image to be processed.
First, the desired color channel is extracted from the given image.
Afterward, the face is optionally cropped using the ``face_cropper`` specified in the constructor.
Then, the image is photometrically enhanced using histogram equalization.
Finally, the resulting face is converted to the desired data type.
annotations : dict or ``None``
The annotations that fit to the given image.
Might be ``None``, when the ``face_cropper`` is ``None`` or of type :py:class:`FaceDetect`.
**Parameters:**
**Returns:**
image : 2D or 3D :py:class:`numpy.ndarray`
The face image to be processed.
face : 2D :py:class:`numpy.ndarray`
The cropped and photometrically enhanced face.
"""
annotations : dict or ``None``
The annotations that fit to the given image.
Might be ``None``, when the ``face_cropper`` is ``None`` or of type :py:class:`FaceDetect`.
def _crop(image, annotations):
image = self.color_channel(image)
if self.cropper is not None:
image = self.cropper.crop_face(image, annotations)
image = self.equalize_histogram(image)
return self.data_type(image)
**Returns:**
face : 2D :py:class:`numpy.ndarray`
The cropped and photometrically enhanced face.
"""
image = self.color_channel(image)
if self.cropper is not None:
image = self.cropper.crop_face(image, annotations)
image = self.equalize_histogram(image)
return self.data_type(image)
if isinstance(annotations, list):
cropped_images = []
for img, annot in zip(X, annotations):
cropped_images.append(_crop(img, annot))
return cropped_images
else:
return _crop(X, annotations)
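A usage sketch assuming already-cropped grayscale faces: with face_cropper=None no cropping is performed, so the per-image annotations can stay None; passing a list still selects the per-image loop.

import numpy
from bob.bio.face.preprocessor import HistogramEqualization

heq = HistogramEqualization(face_cropper=None, dtype="float64")
faces = [numpy.random.rand(80, 64) * 255 for _ in range(3)]
enhanced = heq.transform(faces, annotations=[None, None, None])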
......@@ -22,7 +22,6 @@ import bob.ip.base
import numpy
from .Base import Base
from .utils import load_cropper
from bob.bio.base.preprocessor import Preprocessor
class INormLBP(Base):
......@@ -65,19 +64,16 @@ class INormLBP(Base):
# call base class constructors
Base.__init__(self, **kwargs)
Preprocessor.__init__(
self,
face_cropper=face_cropper,
radius=radius,
is_circular=is_circular,
compare_to_average=compare_to_average,
elbp_type=elbp_type,
)
self.face_cropper = face_cropper
self.radius = radius
self.is_circular = is_circular
self.compare_to_average = compare_to_average
self.elbp_type = elbp_type
self.radius = radius
self.is_circular = is_circular
self.compare_to_average = compare_to_average
self.elbp_type = elbp_type
self.cropper = load_cropper(face_cropper)
self._init_non_pickables()
......@@ -95,7 +91,7 @@ class INormLBP(Base):
border_handling="wrap",
)
def __call__(self, image, annotations=None):
def transform(self, X, annotations=None):
"""__call__(image, annotations = None) -> face
Aligns the given image according to the given annotations.
......@@ -120,18 +116,24 @@ class INormLBP(Base):
The cropped and photometrically enhanced face.
"""
image = self.color_channel(image)
if self.cropper is not None:
def _crop(image, annotations):
image = self.color_channel(image)
if self.cropper is not None:
# TODO: IN DASK, SELF.CROPPER IS A FUNCTOOLS
# WE NEED TO THINK HOW TO PROPERLY APPROACH THIS ISSUE
if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
self.cropper = self.cropper()
if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
self.cropper = self.cropper()
image = self.cropper.crop_face(image, annotations)
image = self.lbp_extractor(image)
return self.data_type(image)
image = self.cropper.crop_face(image, annotations)
image = self.lbp_extractor(image)
return self.data_type(image)
if isinstance(annotations, list):
cropped_images = []
for img, annot in zip(X, annotations):
cropped_images.append(_crop(img, annot))
return cropped_images
else:
return _crop(X, annotations)
def __getstate__(self):
d = dict(self.__dict__)
......
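A matching sketch for the LBP-based normalizer (parameter values are illustrative); as above, face_cropper=None skips cropping and a list of annotations selects the per-image path.

import numpy
from bob.bio.face.preprocessor import INormLBP

inorm = INormLBP(face_cropper=None, radius=2, elbp_type="regular")
faces = [numpy.random.rand(80, 64) * 255 for _ in range(3)]
normalized = inorm.transform(faces, annotations=[None, None, None])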
......@@ -23,7 +23,6 @@ import numpy
import math
from .Base import Base
from .utils import load_cropper
from bob.bio.base.preprocessor import Preprocessor
class SelfQuotientImage(Base):
......@@ -50,7 +49,9 @@ class SelfQuotientImage(Base):
Base.__init__(self, **kwargs)
# call base class constructor with its set of parameters
Preprocessor.__init__(self, face_cropper=face_cropper, sigma=sigma)
self.face_cropper = face_cropper
self.sigma = sigma
self.cropper = load_cropper(face_cropper)
......@@ -65,7 +66,7 @@ class SelfQuotientImage(Base):
size_min=self.size, sigma=self.sigma
)
def __call__(self, image, annotations=None):
def transform(self, X, annotations=None):
"""__call__(image, annotations = None) -> face
Aligns the given image according to the given annotations.
......@@ -89,11 +90,21 @@ class SelfQuotientImage(Base):