FingerCrop.py 17.7 KB
Newer Older
Pedro TOME's avatar
Pedro TOME committed
1 2
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
3 4 5 6

import math
import numpy
from PIL import Image
Pedro TOME's avatar
Pedro TOME committed
7 8 9

import bob.io.base

10 11
from bob.bio.base.preprocessor import Preprocessor

Pedro TOME's avatar
Pedro TOME committed
12
from .. import utils
13

Pedro TOME's avatar
Pedro TOME committed
14 15

class FingerCrop (Preprocessor):
  """
  Extracts the mask heuristically and pre-processes fingervein images.

  Based on the implementation: E.C. Lee, H.C. Lee and K.R. Park. Finger vein
  recognition using minutia-based alignment and local binary pattern-based
  feature extraction. International Journal of Imaging Systems and
  Technology. Vol. 19, No. 3, pp. 175-178, September 2009.

  Finger orientation is based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li,
  Finger-vein authentication based on wide line detector and pattern
  normalization, Proceedings on 20th International Conference on Pattern
  Recognition (ICPR), 2010.

  The ``konomask`` option is based on the work of M. Kono, H. Ueki and S.
  Umemura. Near-infrared finger vein patterns for personal identification,
  Applied Optics, Vol. 41, Issue 35, pp. 7429-7436 (2002).

  In this implementation, the finger image is (in this order):

    1. The mask is extracted (if ``annotation`` is not chosen as a parameter to
       ``fingercontour``). Other mask extraction options correspond to
       heuristics developed by Lee et al. (2009) or Kono et al. (2002)
    2. The finger is normalized (made horizontal), via a least-squares
       normalization procedure concerning the center of the annotated area,
       width-wise. Before normalization, the image is padded to avoid losing
       pixels corresponding to veins during the rotation
    3. (optionally) Post processed with histogram-equalization to enhance vein
       information. Notice that only the area inside the mask is used for
       normalization. Areas outside of the mask (where the mask is ``False``)
       are set to black


  Parameters:

    mask_h (:py:obj:`int`, optional): Height of contour mask in pixels, must
      be an even number (used by the methods ``leemaskMod`` or
      ``leemaskMatlab``)

    mask_w (:py:obj:`int`, optional): Width of the contour mask in pixels
      (used by the methods ``leemaskMod`` or ``leemaskMatlab``)

    padding_width (:py:obj:`int`, optional): How much padding (in pixels) to
      add around the borders of the input image. We normally always keep this
      value on its default (5 pixels). This parameter is always used before
      normalizing the finger orientation.

    padding_constant (:py:obj:`int`, optional): What is the value of the pixels
      added to the padding. This number should be a value between 0 and 255.
      (From Pedro Tome: for UTFVP (high-quality samples), use 0. For the VERA
      Fingervein database (low-quality samples), use 51 (that corresponds to
      0.2 in a float image with values between 0 and 1). This parameter is
      always used before normalizing the finger orientation.

    fingercontour (:py:obj:`str`, optional): Select between the finger contour
      implementations: ``"leemaskMod"``, ``"leemaskMatlab"``, ``"konomask"``,
      ``"annotation"`` (use the mask provided together with the sample) or
      ``"none"`` (no contour extraction and no normalization - assumes the
      image is sufficiently clean). (From Pedro Tome: the option
      ``leemaskMatlab`` was just implemented for testing purposes so we could
      compare with MAT files generated from Matlab code of other authors. He
      only used it with the UTFVP database, using ``leemaskMod`` with that
      database yields slightly worse results.)

    postprocessing (:py:obj:`str`, optional): Select between ``HE`` (histogram
      equalization, as with :py:func:`skimage.exposure.equalize_hist`) or
      ``None`` (the default).
  """


  def __init__(self, mask_h = 4, mask_w = 40,
      padding_width = 5, padding_constant = 51,
      fingercontour = 'leemaskMod', postprocessing = None, **kwargs):

    # registers the configuration with the base class so it can be
    # saved/reported by the bob.bio.base machinery
    Preprocessor.__init__(self,
        mask_h = mask_h,
        mask_w = mask_w,
        padding_width = padding_width,
        padding_constant = padding_constant,
        fingercontour = fingercontour,
        postprocessing = postprocessing,
        **kwargs
        )

    self.mask_h = mask_h
    self.mask_w = mask_w

    self.fingercontour = fingercontour
    self.postprocessing = postprocessing

    self.padding_width = padding_width
    self.padding_constant = padding_constant


  def __konomask__(self, image, sigma):
    """
    Finger vein mask extractor.

    Based on the work of M. Kono, H. Ueki and S. Umemura. Near-infrared finger
    vein patterns for personal identification, Applied Optics, Vol. 41, Issue
    35, pp. 7429-7436 (2002).


    **Parameters:**

    image (numpy.ndarray): raw image to use for finding the mask, as 2D array
        of unsigned 8-bit integers

    sigma (float): standard deviation of the directional (derivative of
        Gaussian) kernel used to detect the finger edges


    **Returns:**

    numpy.ndarray: a 2D boolean array with the same shape of the input image
        representing the cropping mask. ``True`` values indicate where the
        finger is.
    """

    padded_image = numpy.pad(image, self.padding_width, 'constant',
        constant_values = self.padding_constant)

    img_h,img_w = padded_image.shape

    # Determine lower half starting point.  Integer arithmetic is required
    # here: these values are later used as slice boundaries (the original
    # Python-2 code produced floats under Python 3 and crashed).
    if img_h % 2 == 0:
      half_img_h = img_h//2 + 1
    else:
      half_img_h = int(numpy.ceil(img_h/2.0))

    # Construct filter kernel: y-derivative of a 2D Gaussian, so the filter
    # responds strongly (positively above, negatively below) at horizontal
    # dark-to-bright transitions such as the finger edges
    winsize = int(numpy.ceil(4*sigma))

    x = numpy.arange(-winsize, winsize+1)
    y = numpy.arange(-winsize, winsize+1)
    X, Y = numpy.meshgrid(x, y)

    hy = (-Y/(2*math.pi*sigma**4))*numpy.exp(-(X**2 + Y**2)/(2*sigma**2))

    # Filter the image with the directional kernel
    fy = utils.imfilter(padded_image, hy)

    # Upper part of filtered image: maxima mark the upper finger edge
    img_filt_up = fy[0:half_img_h,:]
    y_up = img_filt_up.argmax(axis=0)

    # Lower part of filtered image: minima mark the lower finger edge
    img_filt_lo = fy[half_img_h-1:,:]
    y_lo = img_filt_lo.argmin(axis=0)

    # Fill region between upper and lower edges
    # (``numpy.bool`` was removed in NumPy 1.24 - use the builtin ``bool``)
    finger_mask = numpy.zeros(padded_image.shape, dtype=bool)

    for i in range(0,img_w):
      finger_mask[y_up[i]:y_lo[i]+padded_image.shape[0]-half_img_h+2,i] = True

    if not self.padding_width:
      return finger_mask
    else:
      # crop away the artificial padding added above
      w = self.padding_width
      return finger_mask[w:-w,w:-w]


  def __leemaskMod__(self, image):
    """
    A method to calculate the finger mask.

    Based on the work of Finger vein recognition using minutia-based alignment
    and local binary pattern-based feature extraction, E.C. Lee, H.C. Lee and
    K.R. Park, International Journal of Imaging Systems and Technology, Volume
    19, Issue 3, September 2009, Pages 175--178, doi: 10.1002/ima.20193

    This code is a variant of the Matlab implementation by Bram Ton, available
    at:

    https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m

    In this variant from Pedro Tome, the technique of filtering the image with
    a horizontal filter is also applied on the vertical axis.


    **Parameters:**

    image (numpy.ndarray): raw image to use for finding the mask, as 2D array
        of unsigned 8-bit integers


    **Returns:**

    numpy.ndarray: a 2D boolean array with the same shape of the input image
        representing the cropping mask. ``True`` values indicate where the
        finger is.
    """

    padded_image = numpy.pad(image, self.padding_width, 'constant',
        constant_values = self.padding_constant)

    img_h,img_w = padded_image.shape

    # Determine half-way points (integers, so they can be slice boundaries)
    half_img_h = img_h//2
    half_img_w = img_w//2

    # Construct mask for filtering (up-bottom direction): +1 on the top half,
    # -1 on the bottom half, i.e. a block [1, -1] gradient filter
    mask = numpy.ones((self.mask_h, self.mask_w), dtype='float64')
    mask[self.mask_h//2:,:] = -1.0

    img_filt = utils.imfilter(padded_image, mask)

    # Upper part of filtered image: maxima mark the upper finger edge
    img_filt_up = img_filt[:half_img_h,:]
    y_up = img_filt_up.argmax(axis=0)

    # Lower part of filtered image: minima mark the lower finger edge
    img_filt_lo = img_filt[half_img_h:,:]
    y_lo = img_filt_lo.argmin(axis=0)

    # Same filtering, on the transposed kernel (left-right direction)
    img_filt = utils.imfilter(padded_image, mask.T)

    # Left part of filtered image
    img_filt_lf = img_filt[:,:half_img_w]
    y_lf = img_filt_lf.argmax(axis=1)

    # Right part of filtered image
    img_filt_rg = img_filt[:,half_img_w:]
    y_rg = img_filt_rg.argmin(axis=1)

    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')

    # Column-wise: mask is True from the upper edge down to the lower edge
    for i in range(0,y_up.size):
        finger_mask[y_up[i]:y_lo[i]+img_filt_lo.shape[0]+1,i] = True

    # Left region: everything left of the detected left edge is background
    for i in range(0,y_lf.size):
        finger_mask[i,0:y_lf[i]+1] = False

    # Right region has always the finger ending, crop the padding with the
    # median of the detected right edges
    finger_mask[:,int(numpy.median(y_rg)+img_filt_rg.shape[1]):] = False

    if not self.padding_width:
      return finger_mask
    else:
      # crop away the artificial padding added above
      w = self.padding_width
      return finger_mask[w:-w,w:-w]


  def __leemaskMatlab__(self, image):
    """
    A method to calculate the finger mask.

    Based on the work of Finger vein recognition using minutia-based alignment
    and local binary pattern-based feature extraction, E.C. Lee, H.C. Lee and
    K.R. Park, International Journal of Imaging Systems and Technology, Volume
    19, Issue 3, September 2009, Pages 175--178, doi: 10.1002/ima.20193

    This code is based on the Matlab implementation by Bram Ton, available at:

    https://nl.mathworks.com/matlabcentral/fileexchange/35752-finger-region-localisation/content/lee_region.m

    In this method, we calculate the mask of the finger independently for each
    column of the input image. Firstly, the image is convolved with a [1,-1]
    filter of size ``(self.mask_h, self.mask_w)``. Then, the upper and lower
    parts of the resulting filtered image are separated. The location of the
    maxima in the upper part is located. The same goes for the location of the
    minima in the lower part. The mask is then calculated, per column, by
    considering it starts in the point where the maxima is in the upper part
    and goes up to the point where the minima is detected on the lower part.


    **Parameters:**

    image (numpy.ndarray): raw image to use for finding the mask, as 2D array
        of unsigned 8-bit integers


    **Returns:**

    numpy.ndarray: a 2D boolean array with the same shape of the input image
        representing the cropping mask. ``True`` values indicate where the
        finger is.
    """

    padded_image = numpy.pad(image, self.padding_width, 'constant',
        constant_values = self.padding_constant)

    img_h,img_w = padded_image.shape

    # Determine lower half starting point
    half_img_h = img_h//2

    # Construct mask for filtering: +1 on the top half, -1 on the bottom half
    mask = numpy.ones((self.mask_h,self.mask_w), dtype='float64')
    mask[self.mask_h//2:,:] = -1.0

    img_filt = utils.imfilter(padded_image, mask)

    # Upper part of filtered image: maxima mark the upper finger edge
    img_filt_up = img_filt[:half_img_h,:]
    y_up = img_filt_up.argmax(axis=0)

    # Lower part of filtered image: minima mark the lower finger edge
    img_filt_lo = img_filt[half_img_h:,:]
    y_lo = img_filt_lo.argmin(axis=0)

    # Translation: for all columns of the input image, set to True all pixels
    # of the mask from index where the maxima occurred in the upper part until
    # the index where the minima occurred in the lower part.
    finger_mask = numpy.zeros(padded_image.shape, dtype='bool')
    for i in range(img_filt.shape[1]):
      finger_mask[y_up[i]:(y_lo[i]+img_filt_lo.shape[0]+1), i] = True

    if not self.padding_width:
      return finger_mask
    else:
      # crop away the artificial padding added above
      w = self.padding_width
      return finger_mask[w:-w,w:-w]


  def __huangnormalization__(self, image, mask):
    """
    Simple finger normalization.

    Based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li, Finger-vein
    authentication based on wide line detector and pattern normalization,
    Proceedings on 20th International Conference on Pattern Recognition (ICPR),
    2010.

    This implementation aligns the finger to the centre of the image using an
    affine transformation. Elliptic projection which is described in the
    referenced paper is not included.

    In order to define the affine transformation to be performed, the
    algorithm first calculates the center for each edge (column wise) and
    calculates the best linear fit parameters for a straight line passing
    through those points.


    **Parameters:**

    image (numpy.ndarray): raw image to normalize as 2D array of unsigned
        8-bit integers

    mask (numpy.ndarray): mask to normalize as 2D array of booleans


    **Returns:**

    numpy.ndarray: a 2D array with the same shape and data type of the input
        image representing the newly aligned image.

    numpy.ndarray: a 2D array with the same shape and data type of the input
        mask representing the newly aligned mask.
    """

    img_h, img_w = image.shape

    # Calculates the mask edges along the columns
    edges = numpy.zeros((2, mask.shape[1]), dtype=int)

    edges[0,:] = mask.argmax(axis=0) # get upper edges
    edges[1,:] = len(mask) - numpy.flipud(mask).argmax(axis=0) - 1 # lower edges

    bl = edges.mean(axis=0) #baseline: centre of the finger, per column
    x = numpy.arange(0, edges.shape[1])
    A = numpy.vstack([x, numpy.ones(len(x))]).T

    # Fit a straight line through the base line points
    # (rcond=None selects the future-proof default and silences the
    # FutureWarning raised by older-style calls)
    w = numpy.linalg.lstsq(A, bl, rcond=None)[0] # obtaining the parameters

    angle = -1*math.atan(w[0])  # Rotation
    tr = img_h/2 - w[1]         # Translation
    scale = 1.0                 # Scale

    # Affine transformation parameters (rotation about the origin plus a
    # vertical translation that centers the fitted baseline)
    sx=sy=scale
    cosine = math.cos(angle)
    sine = math.sin(angle)

    a = cosine/sx
    b = -sine/sy
    c = 0 #Translation in x

    d = sine/sx
    e = cosine/sy
    f = tr #Translation in y

    g = 0
    h = 0
    i = 1

    # numpy.matrix is deprecated - a regular 2D array behaves identically for
    # numpy.linalg.inv and for the scalar indexing used below
    T = numpy.array([[a,b,c],[d,e,f],[g,h,i]])
    Tinv = numpy.linalg.inv(T)
    # PIL expects the *inverse* transform as a flat 6-tuple (first two rows)
    Tinvtuple = (Tinv[0,0],Tinv[0,1], Tinv[0,2], Tinv[1,0],Tinv[1,1],Tinv[1,2])

    def _afftrans(img):
      '''Applies the affine transform on the resulting image'''

      t = Image.fromarray(img.astype('uint8'))
      w, h = t.size #pillow image is encoded w, h
      w += 2*self.padding_width
      h += 2*self.padding_width
      # NOTE(review): Pillow's Image.transform() fills areas outside the
      # source via the ``fillcolor`` keyword; ``fill`` is a different legacy
      # parameter, so the new border here may actually come out black.
      # Confirm against the Pillow version in use before changing it.
      t = t.transform(
          (w,h),
          Image.AFFINE,
          Tinvtuple,
          resample=Image.BICUBIC,
          fill=self.padding_constant)

      return numpy.array(t).astype(img.dtype)

    return _afftrans(image), _afftrans(mask)


  def __HE__(self, image, mask):
    """
    Applies histogram equalization on the input image inside the mask.

    In this implementation, only the pixels that lie inside the mask will be
    used to calculate the histogram equalization parameters. Because of this
    particularity, we don't use Bob's implementation for histogram equalization
    and have one based exclusively on scikit-image.


    **Parameters:**

    image (numpy.ndarray): raw image to be filtered, as 2D array of
          unsigned 8-bit integers

    mask (numpy.ndarray): mask of the same size of the image, but composed
          of boolean values indicating which values should be considered for
          the histogram equalization


    **Returns:**

    numpy.ndarray: the equalized image, rescaled to the range [0, 255].
        Note scikit-image returns floating-point values here, not unsigned
        8-bit integers.
    """
    # imported lazily so skimage is only required when postprocessing == 'HE'
    from skimage.exposure import equalize_hist
    from skimage.exposure import rescale_intensity

    retval = rescale_intensity(equalize_hist(image, mask=mask), out_range = (0, 255))

    # make the parts outside the mask totally black
    retval[~mask] = 0

    return retval


  def __call__(self, data, annotations=None):
    """Reads the input image or (image, mask) and prepares for fex.

    Parameters:

      data (numpy.ndarray, tuple): Either a :py:class:`numpy.ndarray`
        containing a gray-scaled image with dtype ``uint8`` or a 2-tuple
        containing both the gray-scaled image and a mask, with the same size of
        the image, with dtype ``bool`` containing the points which should be
        considered part of the finger


    Returns:

      numpy.ndarray: The image, preprocessed and normalized

      numpy.ndarray: A mask, of the same size of the image, indicating where
      the valid data for the object is.
    """

    if isinstance(data, numpy.ndarray):
      image = data
      mask = None
    else:
      image, mask = data

    ## Finger edges and contour extraction:
    if self.fingercontour == 'none': #image is sufficiently clean
      mask = numpy.ones(image.shape, dtype='bool')
    elif self.fingercontour == 'leemaskMatlab':
      mask = self.__leemaskMatlab__(image) #for UTFVP
    elif self.fingercontour == 'leemaskMod':
      mask = self.__leemaskMod__(image) #for VERA
    elif self.fingercontour == 'konomask':
      mask = self.__konomask__(image, sigma=5)
    elif self.fingercontour == 'annotation':
      if mask is None:
        raise RuntimeError("Cannot use fingercontour=annotation - the " \
            "current sample being processed does not provide a mask")
    else:
      raise RuntimeError("Please choose between leemaskMod, leemaskMatlab, " \
          "konomask, annotation or none for parameter 'fingercontour'. %s " \
          "is not valid" % self.fingercontour)

    ## Finger region normalization:
    if self.fingercontour == 'none': #don't normalize
      image_norm, mask_norm = image, mask
    else:
      image_norm, mask_norm = self.__huangnormalization__(image, mask)

    ## veins enhancement:
    if self.postprocessing == 'HE':
      image_norm = self.__HE__(image_norm, mask_norm)

    ## returns the normalized image and the finger mask
    return image_norm, mask_norm


  def write_data(self, data, filename):
    '''Overrides the default method implementation to handle our tuple

    Stores the (image, mask) pair under the 'image' and 'mask' keys of an
    HDF5 file.
    '''

    # NOTE(review): the handle is only flushed/closed when ``f`` is garbage
    # collected - confirm whether bob.io.base.HDF5File offers an explicit
    # close() in the deployed version and call it here if so.
    f = bob.io.base.HDF5File(filename, 'w')
    f.set('image', data[0])
    f.set('mask', data[1])


  def read_data(self, filename):
    '''Overrides the default method implementation to handle our tuple

    Returns the (image, mask) pair stored by :py:meth:`write_data`.
    '''

    f = bob.io.base.HDF5File(filename, 'r')
    return f.read('image'), f.read('mask')