Commit 1a3753af authored by Manuel Günther

Implemented loading of images using the new read_original_data interface

parent 902bc003
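For context, the interface change in a nutshell: a preprocessor's reader no longer receives a ready-made file name, it is handed the database file object together with the database's base directory and extension and resolves the physical path itself. The sketch below only illustrates that calling convention; the PassThrough class and the use of bob.io.base.load are stand-ins and not part of this commit, which loads images with pyvision instead.

import bob.bio.base
import bob.io.base

def read_image(biofile, directory, extension):
  # resolve the physical file from the database entry, then load it;
  # bob.io.base.load is only a stand-in for the pyvision reader used below
  return bob.io.base.load(biofile.make_path(directory, extension))

class PassThrough (bob.bio.base.preprocessor.Preprocessor):
  """Toy preprocessor, only showing how the reader callback is registered."""
  def __init__(self):
    bob.bio.base.preprocessor.Preprocessor.__init__(self, read_original_data=read_image)

  def __call__(self, image, annotations = None):
    # no actual preprocessing; return the loaded image unchanged
    return image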
@@ -6,6 +6,7 @@ import pyvision
import numpy
import bob.bio.base
import bob.bio.face
from .. import utils
class LDAIR (bob.bio.face.preprocessor.FaceCrop):
"""This class defines a wrapper for the `facerec2010.baseline.lda.LRLDA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.
@@ -24,7 +25,13 @@ class LDAIR (bob.bio.face.preprocessor.FaceCrop):
"""
def __init__(self, REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS, REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS, face_detector = None):
bob.bio.base.preprocessor.Preprocessor.__init__(self, REGION_ARGS=str(REGION_ARGS), REGION_KEYWORDS=str(REGION_KEYWORDS), face_detector=str(face_detector), read_original_data=self.read_original_data)
bob.bio.base.preprocessor.Preprocessor.__init__(
self,
read_original_data=utils.read_pyvision_image,
face_detector=str(face_detector),
REGION_ARGS=str(REGION_ARGS),
REGION_KEYWORDS=str(REGION_KEYWORDS),
)
self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
self.layers = len(REGION_ARGS)
self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
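A short usage note, not part of the diff: since the reader is now registered with the base class, constructing the wrapper with its defaults is enough for the framework to load images through utils.read_pyvision_image. The import path below is an assumption; only the class name appears in this diff.

from bob.bio.csu.preprocessor import LDAIR   # import path assumed

# defaults come from facerec2010 (CohortLDA_REGIONS / CohortLDA_KEYWORDS);
# no external face detector is configured
preprocessor = LDAIR()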
@@ -124,22 +131,3 @@ class LDAIR (bob.bio.face.preprocessor.FaceCrop):
return self.face_detector.crop_face(image, annotations)
return self.crop_face(image, annotations)
def read_original_data(self, image_file):
"""read_original_data(image_file) -> image
Reads the original images using functionality from pyvision.
**Parameters:**
image_file : str
The image file to be read, can contain a gray level or a color image.
**Returns:**
image : :py:class:`pyvision.Image`
The image read from file.
"""
# we use pyvision to read the images. Hence, we don't have to struggle with conversion here
return pyvision.Image(str(image_file))
@@ -7,6 +7,8 @@ import PIL
import numpy
import bob.bio.base
import bob.bio.face
from .. import utils
class LRPCA (bob.bio.face.preprocessor.FaceCrop):
"""This class defines a wrapper for the :py:class:`facerec2010.baseline.lda.LRPCA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.
@@ -22,7 +24,12 @@ class LRPCA (bob.bio.face.preprocessor.FaceCrop):
"""
def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING, face_detector = None):
bob.bio.base.preprocessor.Preprocessor.__init__(self, TUNING=str(TUNING), face_detector=str(face_detector), read_original_data=self.read_original_data)
bob.bio.base.preprocessor.Preprocessor.__init__(
self,
read_original_data=utils.read_pyvision_image,
face_detector=str(face_detector),
TUNING=str(TUNING)
)
self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
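The same applies here; a minimal construction sketch using the default GBU tuning (import path again assumed):

from bob.bio.csu.preprocessor import LRPCA   # import path assumed

# facerec2010's GBU_TUNING is used by default; images are read through the
# shared utils.read_pyvision_image registered above
preprocessor = LRPCA()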
@@ -111,23 +118,3 @@ class LRPCA (bob.bio.face.preprocessor.FaceCrop):
return self.face_detector.crop_face(image, annotations)
return self.crop_face(image, annotations)
def read_original_data(self, image_file):
"""read_original_data(image_file) -> image
Reads the original images using functionality from pyvision.
**Parameters:**
image_file : str
The image file to be read, can contain a gray level or a color image.
**Returns:**
image : :py:class:`pyvision.Image`
The image read from file.
"""
# we use pyvision to read the images. Hence, we don't have to struggle with conversion here
return pyvision.Image(str(image_file))
@@ -19,7 +19,8 @@ from bob.bio.face.test.test_preprocessors import _annotation, _compare
def _image(preprocessor):
return preprocessor.read_original_data(pkg_resources.resource_filename('bob.bio.face.test', 'data/testimage.jpg'))
f = bob.bio.base.database.BioFile(1, 'data/testimage', 2)
return preprocessor.read_original_data(f, pkg_resources.resource_filename('bob.bio.face', 'test'), ".jpg")
def test_lrpca():
......
@@ -3,6 +3,7 @@
import pickle
import bob.io.base
import pyvision
def load_pickle(file_like):
"""load_pickle(file_like) -> data
@@ -42,3 +43,28 @@ def save_pickle(data, file_like):
hdf5 = file_like if isinstance(file_like, bob.io.base.HDF5File) else bob.io.base.HDF5File(file_like, 'w')
hdf5.set("Data", pickle.dumps(data))
def read_pyvision_image(biofile, directory, extension):
"""read_pyvision_image(biofile, directory, extension) -> image
Reads the original image using functionality from pyvision.
**Parameters:**
``biofile`` : :py:class:`bob.bio.base.database.BioFile` or one of its derivatives
The file for which the original data should be read.
``directory`` : str
The base directory of the database.
``extension`` : str or ``None``
The extension of the original data.
May be ``None`` if the extension is already stored in the ``biofile``.
**Returns:**
image : :py:class:`pyvision.Image`
The image read from file.
"""
return pyvision.Image(biofile.make_path(directory, extension))
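A quick usage sketch of the new utility, mirroring the updated test above; the package name in the import is an assumption, the remaining names are taken from the diff.

import pkg_resources
import pyvision
import bob.bio.base
from bob.bio.csu import utils   # package name assumed

# the same database entry the updated test builds (arguments as in the test above)
f = bob.bio.base.database.BioFile(1, 'data/testimage', 2)
directory = pkg_resources.resource_filename('bob.bio.face', 'test')

# make_path joins the base directory, the file's relative path and the extension,
# so pyvision opens <bob.bio.face>/test/data/testimage.jpg
image = utils.read_pyvision_image(f, directory, ".jpg")
assert isinstance(image, pyvision.Image)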