Commit cc356cc3 authored by André Anjos's avatar André Anjos 💬

Merge branch 'simplifications' into 'master'

Simplifications

* Re-implements vein ROI processing and analysis based on Teo's original implementation
* Doc improvements
* Allow verafinger to output finger masks from annotations
* Simplify FingerCrop to a maximum

See merge request !18
parents 413b80be 8318f214
Pipeline #5237 passed with stages
in 13 minutes
......@@ -5,8 +5,8 @@
:target: http://pythonhosted.org/bob.bio.vein/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.png
:target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.bio.vein/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.bio.vein/badges/master/build.svg
:target: https://gitlab.idiap.ch/bob/bob.bio.vein/commits/master
.. image:: https://gitlab.idiap.ch/bob/bob.bio.vein/badges/v2.1.0/build.svg
:target: https://gitlab.idiap.ch/bob/bob.bio.vein/commits/v2.1.0
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/bob/bob.bio.vein
.. image:: http://img.shields.io/pypi/v/bob.bio.vein.png
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import bob.sp
import bob.ip.base
import numpy
import math
import scipy.signal
from bob.bio.base.algorithm import Algorithm
class MiuraMatch (Algorithm):
"""Finger vein matching: match ratio
"""Finger vein matching: match ratio via cross-correlation
The method is based on "cross-correlation" between a model and a probe image.
It convolves the binary image(s) representing the model with the binary image
representing the probe (rotated by 180 degrees), and evaluates how they
cross-correlate. If the model and probe are very similar, the output of the
correlation corresponds to a single scalar and approaches a maximum. The
value is then normalized by the sum of the pixels lit in both binary images.
Therefore, the output of this method is a floating-point number in the range
:math:`[0, 0.5]`. The higher, the better match.
In case model and probe represent images from the same vein structure, but
are misaligned, the output is not guaranteed to be accurate. To mitigate this
aspect, Miura et al. proposed to add a *small* cropping factor to the model
image, assuming not much information is available on the borders (``ch``, for
the vertical direction and ``cw``, for the horizontal direction). This allows
the convolution to yield searches for different areas in the probe image. The
maximum value is then taken from the resulting operation. The convolution
result is normalized by the pixels lit in both the cropped model image and
the matching pixels on the probe that yield the maximum on the resulting
convolution.
For this to work properly, input images are supposed to be binary in nature,
with zeros and ones.
Based on N. Miura, A. Nagasaka, and T. Miyatake. Feature extraction of finger
vein patterns based on repeated line tracking and its application to personal
identification. Machine Vision and Applications, Vol. 15, Num. 4, pp.
194--203, 2004
Parameters:
**Parameters:**
ch (:py:class:`int`, optional): Maximum search displacement in y-direction.
ch : :py:class:`int`
Optional : Maximum search displacement in y-direction. Different
default values based on the different features.
cw (:py:class:`int`, optional): Maximum search displacement in x-direction.
cw : :py:class:`int`
Optional : Maximum search displacement in x-direction. Different
default values based on the different features.
"""
def __init__(self,
......@@ -58,39 +73,21 @@ class MiuraMatch (Algorithm):
return numpy.array(enroll_features)
def convfft(self, t, a):
# Determine padding size in x and y dimension
size_t = numpy.array(t.shape)
size_a = numpy.array(a.shape)
outsize = size_t + size_a - 1
# Determine 2D cross correlation in Fourier domain
taux = numpy.zeros(outsize)
taux[0:size_t[0],0:size_t[1]] = t
Ft = bob.sp.fft(taux.astype(numpy.complex128))
aaux = numpy.zeros(outsize)
aaux[0:size_a[0],0:size_a[1]] = a
Fa = bob.sp.fft(aaux.astype(numpy.complex128))
def score(self, model, probe):
"""Computes the score between the probe and the model.
convta = numpy.real(bob.sp.ifft(Ft*Fa))
Parameters:
[w, h] = size_t-size_a+1
output = convta[size_a[0]-1:size_a[0]-1+w, size_a[1]-1:size_a[1]-1+h]
model (numpy.ndarray): The model of the user to test the probe against
return output
probe (numpy.ndarray): The probe to test
def score(self, model, probe):
"""
Computes the score of the probe and the model.
Returns:
**Parameters:**
score (float): Value between 0 and 0.5, larger value means a better match
score : :py:class:`float`
Value between 0 and 0.5, larger value is better match
"""
#print model.shape
#print probe.shape
I=probe.astype(numpy.float64)
......@@ -100,22 +97,36 @@ class MiuraMatch (Algorithm):
n_models = model.shape[0]
scores = []
for i in range(n_models):
R=model[i,:].astype(numpy.float64)
# iterate over all models for a given individual
for md in model:
# erode model by (ch, cw)
R = md.astype(numpy.float64)
h, w = R.shape
crop_R = R[self.ch:h-self.ch, self.cw:w-self.cw]
rotate_R = numpy.zeros((crop_R.shape[0], crop_R.shape[1]))
bob.ip.base.rotate(crop_R, rotate_R, 180)
#FFT for scoring!
#Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
Nm = self.convfft(I, rotate_R)
#Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
# correlates using scipy - fastest option available iff the self.ch and
# self.cw are height (>30). In this case, the number of components
# returned by the convolution is high and using an FFT-based method
# yields best results. Otherwise, you may try the other options below
# -> check our test_correlation() method on the test units for more
# details and benchmarks.
Nm = scipy.signal.fftconvolve(I, numpy.rot90(crop_R, k=2), 'valid')
# 2nd best: use convolve2d or correlate2d directly;
# Nm = scipy.signal.convolve2d(I, numpy.rot90(crop_R, k=2), 'valid')
# 3rd best: use correlate2d
# Nm = scipy.signal.correlate2d(I, crop_R, 'valid')
# figures out where the maximum is on the resulting matrix
t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
# this is our output
Nmm = Nm[t0,s0]
#Nmm = Nm.max()
#mi = numpy.argwhere(Nmm == Nm)
#t0, s0 = mi.flatten()[:2]
# normalizes the output by the number of pixels lit on the input
# matrices, taking into consideration the surface that produced the
# result (i.e., the eroded model and part of the probe)
scores.append(Nmm/(sum(sum(crop_R)) + sum(sum(I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw]))))
return numpy.mean(scores)
......@@ -17,12 +17,12 @@ You can download the raw data of the `UTFVP`_ database by following the link.
.. include:: links.rst
"""
from bob.bio.vein.database import UtfvpBioDatabase
from bob.bio.vein.database.utfvp import Database
utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
database = UtfvpBioDatabase(
database = Database(
original_directory = utfvp_directory,
original_extension = '.png',
)
......
......@@ -15,12 +15,12 @@ the link.
"""
from bob.bio.vein.database import VerafingerBioDatabase
from bob.bio.vein.database.verafinger import Database
verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
database = VerafingerBioDatabase(
database = Database(
original_directory = verafinger_directory,
original_extension = '.png',
)
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
from .database import VeinBioFile
from .verafinger import VerafingerBioDatabase
from .utfvp import UtfvpBioDatabase
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# Wed 20 July 14:43:22 CEST 2016
# Thu 03 Nov 2016 12:23:52 CET
"""Single sample API"""
"""
Verification API for bob.db.voxforge
"""
from bob.bio.base.database.file import BioFile
class VeinBioFile(BioFile):
def __init__(self, client_id, path, file_id):
"""
Initializes this File object with an File equivalent for
VoxForge database.
"""
super(VeinBioFile, self).__init__(client_id=client_id, path=path, file_id=file_id)
"""A "sample" object that is specific to vein recognition experiments
Parameters:
f (object): Low-level file (or sample) object that is kept inside
"""
def __init__(self, f):
super(VeinBioFile, self).__init__(
client_id=f.model_id,
path=f.path,
file_id=f.id,
)
# keep copy of original low-level database file object
self.f = f
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tue 27 Sep 2016 16:49:05 CEST
# Fri 04 Nov 2016 14:46:53 CET
from .database import VeinBioFile
from bob.bio.base.database import BioDatabase
from bob.bio.base.database import BioFile, BioDatabase
class UtfvpBioDatabase(BioDatabase):
class File(BioFile):
"""
Implements extra properties of vein files for the UTFVP Fingervein database
Parameters:
f (object): Low-level file (or sample) object that is kept inside
"""
def __init__(self, f):
super(File, self).__init__(client_id=f.client_id, path=f.path,
file_id=f.id)
self.__f = f
class Database(BioDatabase):
"""
Implements verification API for querying UTFVP Fingervein database.
"""
def __init__(self, **kwargs):
super(UtfvpBioDatabase, self).__init__(name='utfvp',
**kwargs)
super(Database, self).__init__(name='utfvp', **kwargs)
from bob.db.utfvp.query import Database as LowLevelDatabase
self.__db = LowLevelDatabase()
def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
protocol = protocol if protocol is not None else self.protocol
return self.__db.model_ids(groups=groups, protocol=protocol)
def objects(self, groups=None, protocol=None, purposes=None,
model_ids=None, **kwargs):
retval = self.__db.objects(groups=groups, protocol=protocol,
purposes=purposes, model_ids=model_ids, **kwargs)
return [VeinBioFile(client_id=f.client_id, path=f.path, file_id=f.id) for f in retval]
return [File(f) for f in retval]
......@@ -3,46 +3,85 @@
# Tue 27 Sep 2016 16:48:57 CEST
from .database import VeinBioFile
from bob.bio.base.database import BioDatabase
from bob.bio.base.database import BioFile, BioDatabase
class VerafingerBioDatabase(BioDatabase):
"""
Implements verification API for querying Vera Fingervein database.
"""
class File(BioFile):
"""
Implements extra properties of vein files for the Vera Fingervein database
def __init__(self, **kwargs):
super(VerafingerBioDatabase, self).__init__(name='verafinger',
**kwargs)
from bob.db.verafinger.query import Database as LowLevelDatabase
self.__db = LowLevelDatabase()
Parameters:
self.low_level_group_names = ('train', 'dev')
self.high_level_group_names = ('world', 'dev')
f (object): Low-level file (or sample) object that is kept inside
def groups(self):
"""
return self.convert_names_to_highlevel(self.__db.groups(),
self.low_level_group_names, self.high_level_group_names)
def __init__(self, f):
def client_id_from_model_id(self, model_id, group='dev'):
"""Required as ``model_id != client_id`` on this database"""
super(File, self).__init__(client_id=f.unique_finger_name, path=f.path,
file_id=f.id)
self.__f = f
return self.__db.finger_name_from_model_id(model_id)
def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
def mask(self):
"""Returns the binary mask from the ROI annotations available"""
groups = self.convert_names_to_lowlevel(groups,
self.low_level_group_names, self.high_level_group_names)
return self.__db.model_ids(groups=groups, protocol=protocol)
from ..preprocessor.utils import poly_to_mask
def objects(self, groups=None, protocol=None, purposes=None,
model_ids=None, **kwargs):
# The size of images in this database is (250, 665) pixels (h, w)
return poly_to_mask((250, 665), self.__f.roi())
groups = self.convert_names_to_lowlevel(groups,
self.low_level_group_names, self.high_level_group_names)
retval = self.__db.objects(groups=groups, protocol=protocol,
purposes=purposes, model_ids=model_ids, **kwargs)
return [VeinBioFile(client_id=f.model_id, path=f.path, file_id=f.id) for f in retval]
def load(self, *args, **kwargs):
"""(Overrides base method) Loads both image and mask"""
image = super(File, self).load(*args, **kwargs)
return image, self.mask()
class Database(BioDatabase):
    """
    Implements verification API for querying Vera Fingervein database.
    """

    def __init__(self, **kwargs):
        super(Database, self).__init__(name='verafinger', **kwargs)
        from bob.db.verafinger.query import Database as LowLevelDatabase
        self.__db = LowLevelDatabase()

        # name mapping between the low-level database groups and the
        # high-level (bob.bio.base) group names, kept in lock-step order
        self.low_level_group_names = ('train', 'dev')
        self.high_level_group_names = ('world', 'dev')

    def groups(self):
        # exposes the low-level groups under their high-level names
        return self.convert_names_to_highlevel(self.__db.groups(),
            self.low_level_group_names, self.high_level_group_names)

    def client_id_from_model_id(self, model_id, group='dev'):
        """Required as ``model_id != client_id`` on this database"""

        return self.__db.finger_name_from_model_id(model_id)

    def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
        lowlevel_groups = self.convert_names_to_lowlevel(groups,
            self.low_level_group_names, self.high_level_group_names)
        return self.__db.model_ids(groups=lowlevel_groups, protocol=protocol)

    def objects(self, groups=None, protocol=None, purposes=None,
                model_ids=None, **kwargs):
        lowlevel_groups = self.convert_names_to_lowlevel(groups,
            self.low_level_group_names, self.high_level_group_names)
        samples = self.__db.objects(groups=lowlevel_groups, protocol=protocol,
            purposes=purposes, model_ids=model_ids, **kwargs)
        # wraps each low-level sample into the high-level File adapter
        return [File(sample) for sample in samples]
......@@ -85,10 +85,11 @@ class RepeatedLineTracking (Extractor):
hWo = numpy.round(hW*math.sqrt(2)/2) # half width for oblique directions
# Omit unreachable borders
finger_mask[0:self.r+hW,:] = 0
finger_mask[finger_mask.shape[0]-(self.r+hW):,:] = 0
finger_mask[:,0:self.r+hW] = 0
finger_mask[:,finger_mask.shape[1]-(self.r+hW):] = 0
border = int(self.r+hW)
finger_mask[0:border,:] = 0
finger_mask[finger_mask.shape[0]-border:,:] = 0
finger_mask[:,0:border] = 0
finger_mask[:,finger_mask.shape[1]-border:] = 0
## Uniformly distributed starting points
aux = numpy.argwhere( (finger_mask > 0) == True )
......@@ -153,7 +154,7 @@ class RepeatedLineTracking (Extractor):
else:
# Left direction
xp = Nc[i,0] - self.r
Vdepths[i] = finger_image[yp + hW, xp] - 2*finger_image[yp,xp] + finger_image[yp - hW, xp]
Vdepths[i] = finger_image[int(yp + hW), int(xp)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp - hW), int(xp)]
elif (Nc[i,0] == xc):
# Vertical plane
xp = Nc[i,0]
......@@ -163,7 +164,7 @@ class RepeatedLineTracking (Extractor):
else:
# Up direction
yp = Nc[i,1] - self.r
Vdepths[i] = finger_image[yp, xp + hW] - 2*finger_image[yp,xp] + finger_image[yp, xp - hW]
Vdepths[i] = finger_image[int(yp), int(xp + hW)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp), int(xp - hW)]
## Oblique directions
if ( (Nc[i,0] > xc) and (Nc[i,1] < yc) ) or ( (Nc[i,0] < xc) and (Nc[i,1] > yc) ):
......@@ -176,7 +177,7 @@ class RepeatedLineTracking (Extractor):
# Bottom left
xp = Nc[i,0] - ro
yp = Nc[i,1] + ro
Vdepths[i] = finger_image[yp - hWo, xp - hWo] - 2*finger_image[yp,xp] + finger_image[yp + hWo, xp + hWo]
Vdepths[i] = finger_image[int(yp - hWo), int(xp - hWo)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp + hWo), int(xp + hWo)]
else:
# Diagonal, down \
if (Nc[i,0] < xc and Nc[i,1] < yc):
......@@ -187,7 +188,7 @@ class RepeatedLineTracking (Extractor):
# Bottom right
xp = Nc[i,0] + ro
yp = Nc[i,1] + ro
Vdepths[i] = finger_image[yp + hWo, xp - hWo] - 2*finger_image[yp,xp] + finger_image[yp - hWo, xp + hWo]
Vdepths[i] = finger_image[int(yp + hWo), int(xp - hWo)] - 2*finger_image[int(yp),int(xp)] + finger_image[int(yp - hWo), int(xp + hWo)]
# End search of candidates
index = numpy.argmax(Vdepths) #Determine best candidate
# Register tracking information
......
This diff is collapsed.
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""Utilities for preprocessing vein imagery"""
import numpy
def assert_points(area, points):
    """Checks all points fall within the determined shape region, inclusively

    This assertion function tests that every point given in ``points`` lies
    inside the limiting region provided in ``area``.

    Parameters:

      area (tuple): A tuple containing the size of the limiting area where the
        points should all be in.

      points (numpy.ndarray): A 2D numpy ndarray with any number of rows
        (points) and 2 columns (representing ``y`` and ``x`` coordinates
        respectively), or any type convertible to this format. This array
        contains the points that will be checked for conformity.


    Raises:

      AssertionError: In case one of the input points does not fall within the
        area defined.

    """

    max_y, max_x = area[0], area[1]
    for point in points:
        y, x = point[0], point[1]
        inside = (0 <= y < max_y) and (0 <= x < max_x)
        assert inside, \
            "Point (%d, %d) is not inside the region determined by area " \
            "(%d, %d)" % (y, x, max_y, max_x)
def fix_points(area, points):
    """Checks/fixes all points so they fall within the determined shape region

    Points which are lying outside the determined area will be brought into the
    area by moving the offending coordinate to the border of the said area.

    Parameters:

      area (tuple): A tuple containing the size of the limiting area where the
        points should all be in.

      points (numpy.ndarray): A 2D :py:class:`numpy.ndarray` with any number of
        rows (points) and 2 columns (representing ``y`` and ``x`` coordinates
        respectively), or any type convertible to this format. This array
        contains the points that will be checked/fixed for conformity. In case
        one of the points doesn't fall into the determined area, it is silently
        corrected so it does.


    Returns:

      numpy.ndarray: A **new** array of points with corrected coordinates

    """

    # numpy.array() already copies its input, so the caller's data is never
    # modified (the previous explicit ``.copy()`` was redundant)
    retval = numpy.array(points)

    # robustness: an empty point set would crash on the 2D indexing below;
    # return an empty (0, 2) array instead
    if retval.size == 0:
        return retval.reshape((0, 2))

    retval[retval < 0] = 0  # floor at 0 for both axes
    y, x = retval[:, 0], retval[:, 1]
    y[y >= area[0]] = area[0] - 1  # clamp y to the last valid row
    x[x >= area[1]] = area[1] - 1  # clamp x to the last valid column

    return retval
def poly_to_mask(shape, points):
    """Generates a binary mask from a set of 2D points

    Parameters:

      shape (tuple): A tuple containing the size of the output mask in height
        and width, for Bob compatibility ``(y, x)``.

      points (list): A list of tuples containing the polygon points that form a
        region on the target mask. A line connecting these points will be drawn
        and all the points in the mask that fall on or within the polygon line,
        will be set to ``True``. All other points will have a value of
        ``False``.


    Returns:

      numpy.ndarray: A 2D numpy ndarray with ``dtype=bool`` with the mask
        generated with the determined shape, using the points for the polygon.

    """
    from PIL import Image, ImageDraw

    # n.b.: PIL images are (x, y), while Bob shapes are represented in (y, x)!
    mask = Image.new('L', (shape[1], shape[0]))

    # converts whatever comes in into a list of (x, y) tuples for PIL, after
    # clamping out-of-range coordinates and swapping (y, x) -> (x, y)
    fixed = tuple(map(tuple, numpy.roll(fix_points(shape, points), 1, 1)))

    # draws polygon
    ImageDraw.Draw(mask).polygon(fixed, fill=255)

    # bugfix: ``numpy.bool`` was a deprecated alias of the builtin ``bool``
    # and was removed in NumPy 1.24 - use the builtin directly
    return numpy.array(mask, dtype=bool)
def mask_to_image(mask, dtype=numpy.uint8):
    """Converts a binary (boolean) mask into an integer or floating-point image

    This function converts a boolean binary mask into an image of the desired
    type by setting the points where ``False`` is set to 0 and points where
    ``True`` is set to the most adequate value taking into consideration the
    destination data type ``dtype``. Here are the supported types and their
    ranges:

    * unsigned integers (e.g. numpy.uint8 ... numpy.uint64): ``[0, max]`` of
      the respective type
    * floating point (e.g. numpy.float32, numpy.float64): ``[0, 1.0]`` (fixed)

    All other types are currently unsupported.


    Parameters:

      mask (numpy.ndarray): A 2D numpy ndarray with boolean data type,
        containing the mask that will be converted into an image.

      dtype (numpy.dtype): A valid numpy data-type from the list above for the
        resulting image


    Returns:

      numpy.ndarray: With the designated data type, containing the binary image
        formed from the mask.


    Raises:

      TypeError: If the type is not supported by this function

    """

    dtype = numpy.dtype(dtype)
    retval = mask.astype(dtype)

    # bugfix/portability: the previous explicit tuple mentioned
    # ``numpy.float128``, which raises AttributeError on platforms without
    # extended precision floats; checking the dtype *kind* covers all
    # unsigned-integer and floating-point widths uniformly
    if dtype.kind == 'u':
        # unsigned integers: map True (1) to the type's full dynamic range
        retval[retval == 1] = numpy.iinfo(dtype).max
    elif dtype.kind == 'f':
        pass  # floats: True/False already map to 1.0/0.0
    else:
        raise TypeError("Data type %s is unsupported" % dtype)

    return retval
def show_image(image):
    """Shows a single image

    Parameters:

      image (numpy.ndarray): A 2D numpy.ndarray composed of 8-bit unsigned
        integers containing the original image

    """
    from PIL import Image

    # hands the raw array to PIL, which opens the platform image viewer
    Image.fromarray(image).show()
def show_mask_over_image(image, mask, color='red'):
    """Plots the mask over the image of a finger, for debugging purposes

    Parameters:

      image (numpy.ndarray): A 2D numpy.ndarray composed of 8-bit unsigned
        integers containing the original image

      mask (numpy.ndarray): A 2D numpy.ndarray composed of boolean values
        containing the calculated mask

    """
    from PIL import Image

    # base finger image, promoted to RGBA so a colored layer can be pasted
    base = Image.fromarray(image).convert(mode='RGBA')

    # alpha layer built from the *inverted* mask, so areas outside the mask
    # receive the color wash (opacity 80/255)
    alpha = Image.fromarray((~mask).astype('uint8')*80)

    overlay = Image.new('RGBA', base.size, color=color)
    base.paste(overlay, mask=alpha)
    base.show()
def jaccard_index(a, b):
"""Calculates the intersection over union for two masks
This function calculates the Jaccard index:
.. math::
J(A,B) &= \\frac{|A \cap B|}{|A \\cup B|} \\\\
&= \\frac{|A \cap B|}{|A|+|B|-|A \\cup B|}
Parameters: