From 331df5842f0113fbc776f37db7030128f102bd75 Mon Sep 17 00:00:00 2001 From: Andre Anjos <andre.anjos@idiap.ch> Date: Fri, 8 Jul 2016 16:44:03 +0200 Subject: [PATCH] [extractors] Porting and cleanup --- bob/bio/vein/__init__.py | 26 +-- .../vein/extractors/LocalBinaryPatterns.py | 101 ++++++----- bob/bio/vein/extractors/MaximumCurvature.py | 163 +++++++++--------- .../extractors/NormalisedCrossCorrelation.py | 60 +++---- bob/bio/vein/extractors/PrincipalCurvature.py | 81 +++++---- .../vein/extractors/RepeatedLineTracking.py | 139 +++++++-------- bob/bio/vein/extractors/WideLineDetector.py | 89 +++++----- bob/bio/vein/extractors/__init__.py | 10 -- bob/bio/vein/utils.py | 14 +- 9 files changed, 320 insertions(+), 363 deletions(-) diff --git a/bob/bio/vein/__init__.py b/bob/bio/vein/__init__.py index a60a22b..48ebe31 100644 --- a/bob/bio/vein/__init__.py +++ b/bob/bio/vein/__init__.py @@ -1,33 +1,13 @@ #!/usr/bin/env python -# Pedro Tome <Pedro.Tome@idiap.ch> - -import configurations -import tools -import preprocessing -import features -import tests -import script -#import utils - +# vim: set fileencoding=utf-8 : def get_config(): """Returns a string containing the configuration information. """ - import pkg_resources - - packages = pkg_resources.require(__name__) - this = packages[0] - deps = packages[1:] - - retval = "%s: %s (%s)\n" % (this.key, this.version, this.location) - retval += " - python dependencies:\n" - for d in deps: retval += " - %s: %s (%s)\n" % (d.key, d.version, d.location) - - return retval.strip() + import bob.extension + return bob.extension.get_config(__name__) # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')] - - diff --git a/bob/bio/vein/extractors/LocalBinaryPatterns.py b/bob/bio/vein/extractors/LocalBinaryPatterns.py index dd6e0db..20c08eb 100644 --- a/bob/bio/vein/extractors/LocalBinaryPatterns.py +++ b/bob/bio/vein/extractors/LocalBinaryPatterns.py @@ -5,18 +5,14 @@ import bob.ip.base import bob.io.base import numpy -import math -#from math import pi -#from mumpy import sqrt -import scipy.signal -from facereclib.features.Extractor import Extractor -from .. import utils -#from facereclib.utils import histogram +from bob.bio.base.features.Extractor import Extractor + class LocalBinaryPatterns (Extractor): - - """LBP feature extractor, paramters fixed based on - L. Mirmohamadsadeghi and A. Drygajlo. Palm vein recognition uisng local texture patterns, IET Biometrics, pp. 1-9, 2013. + """LBP feature extractor + + Parameters fixed based on L. Mirmohamadsadeghi and A. Drygajlo. Palm vein + recognition uisng local texture patterns, IET Biometrics, pp. 1-9, 2013. 
""" def __init__( @@ -34,9 +30,8 @@ class LocalBinaryPatterns (Extractor): lbp_add_average = False, # histogram options sparse_histogram = False, - split_histogram = None - - ): + split_histogram = None, + ): # call base class constructor Extractor.__init__( @@ -53,15 +48,15 @@ class LocalBinaryPatterns (Extractor): lbp_compare_to_average = lbp_compare_to_average, lbp_add_average = lbp_add_average, sparse_histogram = sparse_histogram, - split_histogram = split_histogram - ) - + split_histogram = split_histogram, + ) + # block parameters self.m_block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size) self.m_block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap) if self.m_block_size[0] < self.m_block_overlap[0] or self.m_block_size[1] < self.m_block_overlap[1]: raise ValueError("The overlap is bigger than the block size. This won't work. Please check your setup!") - + self.m_lbp = bob.ip.base.LBP( neighbors = lbp_neighbor_count, radius = float(lbp_radius), @@ -70,76 +65,78 @@ class LocalBinaryPatterns (Extractor): add_average_bit = lbp_add_average, uniform = lbp_uniform, rotation_invariant = lbp_rotation_invariant, - border_handling = 'wrap' - ) + border_handling = 'wrap', + ) self.m_split = split_histogram self.m_sparse = sparse_histogram if self.m_sparse and self.m_split: - raise ValueError("Sparse histograms cannot be split! Check your setup!") + raise ValueError("Sparse histograms cannot be split! Check your setup.") def __fill__(self, lbphs_array, lbphs_blocks, j): """Copies the given array into the given blocks""" + # fill array in the desired shape - #For debugging - #import ipdb; ipdb.set_trace() + #For debugging + #import ipdb; ipdb.set_trace() for b in range(self.m_n_blocks): lbphs_array[b * self.m_n_bins : (b+1) * self.m_n_bins] = lbphs_blocks[b][:] - + def lbp_features(self, finger_image, mask): - """Computes and returns the LBP features for the given input fingervein image""" - - # For debugging - #import ipdb; ipdb.set_trace() - + """Computes and returns the LBP features for the given input fingervein + image""" + + # For debugging + #import ipdb; ipdb.set_trace() + finger_image = finger_image.astype(numpy.float64) finger_mask = numpy.zeros(mask.shape) - finger_mask[mask == True] = 1 - + finger_mask[mask == True] = 1 + # Mask the vein image with the finger region - finger_image = finger_image*finger_mask - + finger_image = finger_image*finger_mask + # Computes LBP histograms abs_blocks = bob.ip.base.lbphs(finger_image, self.m_lbp, self.m_block_size, self.m_block_overlap) # Converts to Blitz array (of different dimensionalities) self.m_n_bins = abs_blocks.shape[1] self.m_n_blocks = abs_blocks.shape[0] - + shape = self.m_n_bins * self.m_n_blocks - - # create new array + + # create new array lbphs_array = numpy.zeros(shape, 'float64') - - #For debugging - #import ipdb; ipdb.set_trace() - + + #For debugging + #import ipdb; ipdb.set_trace() + # fill the array with the absolute values of the Gabor wavelet transform self.__fill__(lbphs_array, abs_blocks, 0) - + # return the concatenated list of all histograms return lbphs_array - - - def __call__(self, image): + + + def __call__(self, image): """Reads the input image, extract the features based on LBP of the fingervein image, and writes the resulting template""" - #For debugging - + #For debugging + finger_image = image[0] #Normalized image with histogram equalization - finger_mask = image[1] - - return self.lbp_features(finger_image, 
finger_mask) - - + finger_mask = image[1] + + return self.lbp_features(finger_image, finger_mask) + + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') - return (image) \ No newline at end of file + return image diff --git a/bob/bio/vein/extractors/MaximumCurvature.py b/bob/bio/vein/extractors/MaximumCurvature.py index e3b1958..08d3b2c 100644 --- a/bob/bio/vein/extractors/MaximumCurvature.py +++ b/bob/bio/vein/extractors/MaximumCurvature.py @@ -2,25 +2,26 @@ # vim: set fileencoding=utf-8 : # Pedro Tome <Pedro.Tome@idiap.ch> +import math +import numpy + import bob.core import bob.io.base -import numpy -import math -#from math import pi -#from mumpy import sqrt -import scipy.signal -from facereclib.features.Extractor import Extractor +from bob.bio.base.features.Extractor import Extractor + from .. import utils + class MaximumCurvature (Extractor): - - """MiuraMax feature extractor based on - N. Miura, A. Nagasaka, and T. Miyatake, Extraction of Finger-Vein Pattern Using Maximum Curvature Points in Image Profiles. - Proceedings on IAPR conference on machine vision applications, 9 (2005), pp. 347--350 + """MiuraMax feature extractor + + Based on N. Miura, A. Nagasaka, and T. Miyatake, Extraction of Finger-Vein + Pattern Using Maximum Curvature Points in Image Profiles. Proceedings on IAPR + conference on machine vision applications, 9 (2005), pp. 347--350 """ - - + + def __init__( self, sigma = 5, #Sigma used for determining derivatives @@ -33,65 +34,66 @@ class MaximumCurvature (Extractor): sigma = sigma, gpu = gpu ) - + # block parameters self.sigma = sigma self.gpu = gpu - - - def maximum_curvature(self, image, mask): - """Computes and returns the Maximum Curvature features for the given input fingervein image""" - + + + def maximum_curvature(self, image, mask): + """Computes and returns the Maximum Curvature features for the given input + fingervein image""" + if image.dtype != numpy.uint8: image = bob.core.convert(image,numpy.uint8,(0,255),(0,1)) #No es necesario pasarlo a uint8, en matlab lo dejan en float64. Comprobar si varian los resultados en vera database y ajustar. 
finger_mask = numpy.zeros(mask.shape) - finger_mask[mask == True] = 1 - + finger_mask[mask == True] = 1 + winsize = numpy.ceil(4*self.sigma) - + x = numpy.arange(-winsize, winsize+1) y = numpy.arange(-winsize, winsize+1) X, Y = numpy.meshgrid(x, y) - + h = (1/(2*math.pi*self.sigma**2))*numpy.exp(-(X**2 + Y**2)/(2*self.sigma**2)) hx = (-X/(self.sigma**2))*h hxx = ((X**2 - self.sigma**2)/(self.sigma**4))*h hy = hx.T hyy = hxx.T hxy = ((X*Y)/(self.sigma**4))*h - + # Do the actual filtering - + fx = utils.imfilter(image, hx, self.gpu, conv=False) fxx = utils.imfilter(image, hxx, self.gpu, conv=False) fy = utils.imfilter(image, hy, self.gpu, conv=False) fyy = utils.imfilter(image, hyy, self.gpu, conv=False) fxy = utils.imfilter(image, hxy, self.gpu, conv=False) - + f1 = 0.5*numpy.sqrt(2)*(fx + fy) # \ # f2 = 0.5*numpy.sqrt(2)*(fx - fy) # / # f11 = 0.5*fxx + fxy + 0.5*fyy # \\ # f22 = 0.5*fxx - fxy + 0.5*fyy # // # - + img_h, img_w = image.shape #Image height and width - + # Calculate curvatures k = numpy.zeros((img_h, img_w, 4)) k[:,:,0] = (fxx/((1 + fx**2)**(3/2)))*finger_mask # hor # k[:,:,1] = (fyy/((1 + fy**2)**(3/2)))*finger_mask # ver # k[:,:,2] = (f11/((1 + f1**2)**(3/2)))*finger_mask # \ # k[:,:,3] = (f22/((1 + f2**2)**(3/2)))*finger_mask # / # - + # Scores Vt = numpy.zeros(image.shape) Wr = 0 - + # Horizontal direction bla = k[:,:,0] > 0 - for y in range(0,img_h): - for x in range(0,img_w): + for y in range(0,img_h): + for x in range(0,img_w): if (bla[y,x]): Wr = Wr + 1 if ( Wr > 0 and (x == (img_w-1) or not bla[y,x]) ): @@ -100,23 +102,23 @@ class MaximumCurvature (Extractor): pos_end = x else: pos_end = x - 1 - - pos_start = pos_end - Wr + 1 # Start pos of concave + + pos_start = pos_end - Wr + 1 # Start pos of concave if (pos_start == pos_end): I=numpy.argmax(k[y,pos_start,0]) - else: + else: I=numpy.argmax(k[y,pos_start:pos_end+1,0]) - + pos_max = pos_start + I Scr = k[y,pos_max,0]*Wr Vt[y,pos_max] = Vt[y,pos_max] + Scr - Wr = 0 - + Wr = 0 + # Vertical direction bla = k[:,:,1] > 0 - for x in range(0,img_w): - for y in range(0,img_h): + for x in range(0,img_w): + for y in range(0,img_h): if (bla[y,x]): Wr = Wr + 1 if ( Wr > 0 and (y == (img_h-1) or not bla[y,x]) ): @@ -124,20 +126,20 @@ class MaximumCurvature (Extractor): # Reached edge of image pos_end = y else: - pos_end = y - 1 - + pos_end = y - 1 + pos_start = pos_end - Wr + 1 # Start pos of concave if (pos_start == pos_end): I=numpy.argmax(k[pos_start,x,1]) else: I=numpy.argmax(k[pos_start:pos_end+1,x,1]) - - pos_max = pos_start + I + + pos_max = pos_start + I Scr = k[pos_max,x,1]*Wr - + Vt[pos_max,x] = Vt[pos_max,x] + Scr Wr = 0 - + # Direction: \ # bla = k[:,:,2] > 0 for start in range(0,img_w+img_h-1): @@ -149,11 +151,11 @@ class MaximumCurvature (Extractor): x = 0 y = start - img_w + 1 done = False - + while (not done): if(bla[y,x]): Wr = Wr + 1 - + if ( Wr > 0 and (y == img_h-1 or x == img_w-1 or not bla[y,x]) ): if (y == img_h-1 or x == img_w-1): # Reached edge of image @@ -162,10 +164,10 @@ class MaximumCurvature (Extractor): else: pos_x_end = x - 1 pos_y_end = y - 1 - + pos_x_start = pos_x_end - Wr + 1 pos_y_start = pos_y_end - Wr + 1 - + if (pos_y_start == pos_y_end and pos_x_start == pos_x_end): d = k[pos_y_start, pos_x_start, 2] elif (pos_y_start == pos_y_end): @@ -174,23 +176,23 @@ class MaximumCurvature (Extractor): d = numpy.diag(k[pos_y_start:pos_y_end+1, pos_x_start, 2]) else: d = numpy.diag(k[pos_y_start:pos_y_end+1, pos_x_start:pos_x_end+1, 2]) - - I = numpy.argmax(d) - - pos_x_max = pos_x_start + I - 
pos_y_max = pos_y_start + I - + + I = numpy.argmax(d) + + pos_x_max = pos_x_start + I + pos_y_max = pos_y_start + I + Scr = k[pos_y_max,pos_x_max,2]*Wr - + Vt[pos_y_max,pos_x_max] = Vt[pos_y_max,pos_x_max] + Scr Wr = 0 - + if((x == img_w-1) or (y == img_h-1)): done = True else: x = x + 1 y = y + 1 - + # Direction: / bla = k[:,:,3] > 0 for start in range(0,img_w+img_h-1): @@ -202,7 +204,7 @@ class MaximumCurvature (Extractor): x = 0 y = img_w+img_h-start-1 done = False - + while (not done): if(bla[y,x]): Wr = Wr + 1 @@ -214,10 +216,10 @@ class MaximumCurvature (Extractor): else: pos_x_end = x - 1 pos_y_end = y + 1 - + pos_x_start = pos_x_end - Wr + 1 pos_y_start = pos_y_end + Wr - 1 - + if (pos_y_start == pos_y_end and pos_x_start == pos_x_end): d = k[pos_y_end, pos_x_start, 3] elif (pos_y_start == pos_y_end): @@ -226,19 +228,19 @@ class MaximumCurvature (Extractor): d = numpy.diag(numpy.flipud(k[pos_y_end:pos_y_start+1, pos_x_start, 3])) else: d = numpy.diag(numpy.flipud(k[pos_y_end:pos_y_start+1, pos_x_start:pos_x_end+1, 3])) - - I = numpy.argmax(d) - pos_x_max = pos_x_start + I - pos_y_max = pos_y_start - I + + I = numpy.argmax(d) + pos_x_max = pos_x_start + I + pos_y_max = pos_y_start - I Scr = k[pos_y_max,pos_x_max,3]*Wr Vt[pos_y_max,pos_x_max] = Vt[pos_y_max,pos_x_max] + Scr Wr = 0 - + if((x == img_w-1) or (y == 0)): done = True else: x = x + 1 - y = y - 1 + y = y - 1 ## Connection of vein centres Cd = numpy.zeros((img_h, img_w, 4)) @@ -248,34 +250,31 @@ class MaximumCurvature (Extractor): Cd[y,x,1] = min(numpy.amax(Vt[y+1:y+3,x]), numpy.amax(Vt[y-2:y,x])) # Vert # Cd[y,x,2] = min(numpy.amax(Vt[y-2:y,x-2:x]), numpy.amax(Vt[y+1:y+3,x+1:x+3])) # \ # Cd[y,x,3] = min(numpy.amax(Vt[y+1:y+3,x-2:x]), numpy.amax(Vt[y-2:y,x+1:x+3])) # / # - + #Veins img_veins = numpy.amax(Cd,axis=2) - + # Binarise the vein image md = numpy.median(img_veins[img_veins>0]) img_veins_bin = img_veins > md return img_veins_bin.astype(numpy.float64) - - - def __call__(self, image): + + + def __call__(self, image): """Reads the input image, extract the features based on Maximum Curvature of the fingervein image, and writes the resulting template""" - + finger_image = image[0] #Normalized image with or without histogram equalization - finger_mask = image[1] - - return self.maximum_curvature(finger_image, finger_mask) - + finger_mask = image[1] + + return self.maximum_curvature(finger_image, finger_mask) + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') - return (image) - - - \ No newline at end of file + return image diff --git a/bob/bio/vein/extractors/NormalisedCrossCorrelation.py b/bob/bio/vein/extractors/NormalisedCrossCorrelation.py index 1bb7250..75a9415 100644 --- a/bob/bio/vein/extractors/NormalisedCrossCorrelation.py +++ b/bob/bio/vein/extractors/NormalisedCrossCorrelation.py @@ -2,48 +2,44 @@ # vim: set fileencoding=utf-8 : # Pedro Tome <Pedro.Tome@idiap.ch> -import bob.core +import numpy + import bob.io.base -import numpy -from facereclib.features.Extractor import Extractor +from bob.bio.base.features.Extractor import Extractor + class NormalisedCrossCorrelation (Extractor): - - """Normalised Cross-Correlation feature extractor based on - M. Kono, H. Ueki, and S.Umemura. Near-infrared finger vein patterns for personal - identification. Appl. Opt. 
41(35):7429-7436, 2002 + """Normalised Cross-Correlation feature extractor + + Based on M. Kono, H. Ueki, and S.Umemura. Near-infrared finger vein patterns + for personal identification. Appl. Opt. 41(35):7429-7436, 2002 """ - def __init__( - self, - - ): - - # call base class constructor - Extractor.__init__( - self, - - ) - - # block parameters - - def __call__(self, image, mask): - """Reads the input image, extract the features based on Normalised Cross-Correlation of the fingervein image, and writes the resulting template""" - + def __init__(self): + Extractor.__init__(self) + + + def __call__(self, image, mask): + """Reads the input image, extract the features based on Normalised + Cross-Correlation of the fingervein image, and writes the resulting + template""" + finger_image = image #Normalized image with histogram equalization - finger_mask = mask - - image_vein = finger_image*finger_mask - - #TODO - - return image_vein.astype(numpy.float64) - + finger_mask = mask + + image_vein = finger_image*finger_mask + + #TODO + + return image_vein.astype(numpy.float64) + + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') diff --git a/bob/bio/vein/extractors/PrincipalCurvature.py b/bob/bio/vein/extractors/PrincipalCurvature.py index 88de36c..5ea46c2 100644 --- a/bob/bio/vein/extractors/PrincipalCurvature.py +++ b/bob/bio/vein/extractors/PrincipalCurvature.py @@ -4,48 +4,46 @@ import bob.io.base import numpy -import math -#from math import pi -#from mumpy import sqrt -import scipy.signal -from facereclib.features.Extractor import Extractor -from .. import utils + + +from bob.bio.base.features.Extractor import Extractor class MaximumCurvature (Extractor): - - """MiuraMax feature extractor based on - J.H. Choi, W. Song, T. Kim, S.R. Lee and H.C. Kim, Finger vein extraction using gradient normalization and principal curvature. - Proceedings on Image Processing: Machine Vision Applications II, SPIE 7251, (2009) + """MiuraMax feature extractor + + Based on J.H. Choi, W. Song, T. Kim, S.R. Lee and H.C. Kim, Finger vein + extraction using gradient normalization and principal curvature. 
Proceedings + on Image Processing: Machine Vision Applications II, SPIE 7251, (2009) """ - + def __init__( self, sigma = 2, # Gaussian standard deviation applied - threshold = 1.3, # Percentage of maximum used for hard thresholding - gpu = False - - ): + threshold = 1.3, # Percentage of maximum used for hard thresholding + gpu = False, + ): # call base class constructor Extractor.__init__( self, sigma = sigma, threshold = threshold, - gpu = gpu - ) - + gpu = gpu, + ) + # block parameters self.sigma = sigma self.threshold = threshold self.gpu = gpu - - - def principal_curvature(self, image, mask): - """Computes and returns the Maximum Curvature features for the given input fingervein image""" - + + + def principal_curvature(self, image, mask): + """Computes and returns the Maximum Curvature features for the given input + fingervein image""" + finger_mask = numpy.zeros(mask.shape) - finger_mask[mask == True] = 1 - + finger_mask[mask == True] = 1 + sigma = numpy.sqrt(self.sigma**2/2) gx = ut_gauss(img,sigma,1,0) @@ -55,9 +53,9 @@ class MaximumCurvature (Extractor): # Apply threshold gamma = (self.threshold/100)*max(max(Gmag)) - + indices = find(Gmag < gamma) - + gx[indices] = 0 gy[indices] = 0 @@ -65,7 +63,7 @@ class MaximumCurvature (Extractor): Gmag( find(Gmag == 0) ) = 1 # Avoid dividing by zero gx = gx/Gmag gy = gy/Gmag - + hxx = ut_gauss(gx,sigma,1,0) hxy = ut_gauss(gx,sigma,0,1) hyy = ut_gauss(gy,sigma,0,1) @@ -80,31 +78,30 @@ class MaximumCurvature (Extractor): veins = veins*finger_mask - + # Binarise the vein image by otsu md = numpy.median(img_veins[img_veins>0]) img_veins_bin = img_veins > md return img_veins_bin.astype(numpy.float64) - - - def __call__(self, image): - """Reads the input image, extract the features based on Principal Curvature of the fingervein image, and writes the resulting template""" - + + + def __call__(self, image): + """Reads the input image, extract the features based on Principal Curvature + of the fingervein image, and writes the resulting template""" + finger_image = image[0] #Normalized image with or without histogram equalization - finger_mask = image[1] - - return self.principal_curvature(finger_image, finger_mask) - + finger_mask = image[1] + + return self.principal_curvature(finger_image, finger_mask) + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') return (image) - - - \ No newline at end of file diff --git a/bob/bio/vein/extractors/RepeatedLineTracking.py b/bob/bio/vein/extractors/RepeatedLineTracking.py index e559c8b..8dce5ec 100644 --- a/bob/bio/vein/extractors/RepeatedLineTracking.py +++ b/bob/bio/vein/extractors/RepeatedLineTracking.py @@ -2,23 +2,23 @@ # vim: set fileencoding=utf-8 : # Pedro Tome <Pedro.Tome@idiap.ch> +import numpy +import math + import bob.core import bob.io.base import bob.ip.base -import numpy -import math -#from math import pi -#from mumpy import sqrt -import scipy.signal -from facereclib.features.Extractor import Extractor -from .. import utils +from bob.bio.base.features.Extractor import Extractor + class RepeatedLineTracking (Extractor): - - """Repeated Line Tracking feature extractor based on - N. Miura, A. Nagasaka, and T. Miyatake. Feature extraction of finger vein patterns based on repeated line tracking and its application - to personal identification. Machine Vision and Applications, Vol. 15, Num. 4, pp. 
194--203, 2004 + """Repeated Line Tracking feature extractor + + Based on N. Miura, A. Nagasaka, and T. Miyatake. Feature extraction of finger + vein patterns based on repeated line tracking and its application to personal + identification. Machine Vision and Applications, Vol. 15, Num. 4, pp. + 194--203, 2004 """ def __init__( @@ -26,18 +26,18 @@ class RepeatedLineTracking (Extractor): iterations = 3000, # Maximum number of iterations r = 1, # Distance between tracking point and cross section of the profile profile_w = 21, # Width of profile (Error: profile_w must be odd) - rescale = True - ): - + rescale = True, + ): + # call base class constructor Extractor.__init__( self, iterations = iterations, r = r, profile_w = profile_w, - rescale = rescale - ) - + rescale = rescale, + ) + # block parameters self.iterations = iterations self.r = r @@ -46,23 +46,24 @@ class RepeatedLineTracking (Extractor): def repeated_line_tracking(self, finger_image, mask): - """Computes and returns the MiuraMax features for the given input fingervein image""" - + """Computes and returns the MiuraMax features for the given input + fingervein image""" + #Convert image to uint8 if finger_image.dtype != numpy.uint8: finger_image = bob.core.convert(finger_image,numpy.uint8,(0,255),(0,1)) finger_mask = numpy.zeros(mask.shape) - finger_mask[mask == True] = 1 - + finger_mask[mask == True] = 1 + # Rescale image if required if self.rescale == True: - scaling_factor = 0.6 + scaling_factor = 0.6 finger_image = bob.ip.base.scale(finger_image,scaling_factor) finger_mask = bob.ip.base.scale(finger_mask,scaling_factor) #To eliminate residuals from the scalation of the binary mask finger_mask = scipy.ndimage.binary_dilation(finger_mask, structure=numpy.ones((1,1))).astype(int) - + p_lr = 0.5 # Probability of goin left or right p_ud = 0.25 # Probability of going up or down @@ -72,24 +73,24 @@ class RepeatedLineTracking (Extractor): # Check if progile w is even if (self.profile_w.__mod__(2) == 0): print ('Error: profile_w must be odd') - + ro = numpy.round(self.r*math.sqrt(2)/2) # r for oblique directions hW = (self.profile_w-1)/2 # half width for horz. and vert. directions hWo = numpy.round(hW*math.sqrt(2)/2) # half width for oblique directions - + # Omit unreachable borders finger_mask[0:self.r+hW,:] = 0 finger_mask[finger_mask.shape[0]-(self.r+hW):,:] = 0 finger_mask[:,0:self.r+hW] = 0 finger_mask[:,finger_mask.shape[1]-(self.r+hW):] = 0 - + ## Uniformly distributed starting points aux = numpy.argwhere( (finger_mask > 0) == True ) indices = numpy.random.permutation(aux) indices = indices[0:self.iterations,:] # Limit to number of iterations - ## Iterate through all starting points - for it in range(0,self.iterations): + ## Iterate through all starting points + for it in range(0,self.iterations): yc = indices[it,0] # Current tracking point, y xc = indices[it,1] # Current tracking point, x @@ -98,17 +99,17 @@ class RepeatedLineTracking (Extractor): if (numpy.random.random_sample() >= 0.5): Dlr = -1 # Going left else: - Dlr = 1 # Going right - + Dlr = 1 # Going right + # Going up or down ? 
if (numpy.random.random_sample() >= 0.5): Dud = -1 # Going up else: - Dud = 1 # Going down - + Dud = 1 # Going down + # Initialize locus-positition table Tc Tc = numpy.zeros(finger_image.shape, numpy.bool) - + #Dlr = -1; Dud=-1; LET OP Vl = 1 while (Vl > 0): @@ -116,27 +117,27 @@ class RepeatedLineTracking (Extractor): Nr = numpy.zeros([3,3], numpy.bool) Rnd = numpy.random.random_sample() #Rnd = 0.8 LET OP - if (Rnd < p_lr): + if (Rnd < p_lr): # Going left or right Nr[:,1+Dlr] = True - elif (Rnd >= p_lr) and (Rnd < (p_lr + p_ud)): + elif (Rnd >= p_lr) and (Rnd < (p_lr + p_ud)): # Going up or down Nr[1+Dud,:] = True - else: + else: # Going any direction Nr = numpy.ones([3,3], numpy.bool) Nr[1,1] = False - #tmp = numpy.argwhere( (~Tc[yc-2:yc+1,xc-2:xc+1] & Nr & finger_mask[yc-2:yc+1,xc-2:xc+1].astype(numpy.bool)).T.reshape(-1) == True ) + #tmp = numpy.argwhere( (~Tc[yc-2:yc+1,xc-2:xc+1] & Nr & finger_mask[yc-2:yc+1,xc-2:xc+1].astype(numpy.bool)).T.reshape(-1) == True ) tmp = numpy.argwhere( (~Tc[yc-1:yc+2,xc-1:xc+2] & Nr & finger_mask[yc-1:yc+2,xc-1:xc+2].astype(numpy.bool)).T.reshape(-1) == True ) Nc = numpy.concatenate((xc + filtermask[tmp,0],yc + filtermask[tmp,1]),axis=1) if (Nc.size==0): Vl=-1 continue - + ## Detect dark line direction near current tracking point Vdepths = numpy.zeros((Nc.shape[0],1)) # Valley depths - for i in range(0,Nc.shape[0]): - ## Horizontal or vertical + for i in range(0,Nc.shape[0]): + ## Horizontal or vertical if (Nc[i,1] == yc): # Horizontal plane yp = Nc[i,1] @@ -157,7 +158,7 @@ class RepeatedLineTracking (Extractor): # Up direction yp = Nc[i,1] - self.r Vdepths[i] = finger_image[yp, xp + hW] - 2*finger_image[yp,xp] + finger_image[yp, xp - hW] - + ## Oblique directions if ( (Nc[i,0] > xc) and (Nc[i,1] < yc) ) or ( (Nc[i,0] < xc) and (Nc[i,1] > yc) ): # Diagonal, up / @@ -184,70 +185,70 @@ class RepeatedLineTracking (Extractor): # End search of candidates index = numpy.argmax(Vdepths) #Determine best candidate # Register tracking information - Tc[yc, xc] = True + Tc[yc, xc] = True # Increase value of tracking space Tr[yc, xc] = Tr[yc, xc] + 1 # Move tracking point xc = Nc[index, 0] yc = Nc[index, 1] - + img_veins = Tr - + # Binarise the vein image md = numpy.median(img_veins[img_veins>0]) img_veins_bin = img_veins > md img_veins_bin = scipy.ndimage.binary_closing(img_veins_bin, structure=numpy.ones((2,2))).astype(int) - #import ipdb; ipdb.set_trace() + #import ipdb; ipdb.set_trace() #img_veins_bin2 = scipy.ndimage.binary_closing(img_veins_bin, structure=numpy.ones((2,2))).astype(int) #from PIL import Image - + #Image.fromarray(bob.core.convert(img_veins_bin,numpy.uint8,(0,255),(0,1))).show() #skel = self.skeletonize(img_veins_bin2) #Image.fromarray(bob.core.convert(skel,numpy.uint8,(0,255),(0,1))).show() - return img_veins_bin.astype(numpy.float64) - + return img_veins_bin.astype(numpy.float64) + + def skeletonize(self, img): import scipy.ndimage.morphology as m - h1 = numpy.array([[0, 0, 0],[0, 1, 0],[1, 1, 1]]) - m1 = numpy.array([[1, 1, 1],[0, 0, 0],[0, 0, 0]]) - h2 = numpy.array([[0, 0, 0],[1, 1, 0],[0, 1, 0]]) - m2 = numpy.array([[0, 1, 1],[0, 0, 1],[0, 0, 0]]) - hit_list = [] + h1 = numpy.array([[0, 0, 0],[0, 1, 0],[1, 1, 1]]) + m1 = numpy.array([[1, 1, 1],[0, 0, 0],[0, 0, 0]]) + h2 = numpy.array([[0, 0, 0],[1, 1, 0],[0, 1, 0]]) + m2 = numpy.array([[0, 1, 1],[0, 0, 1],[0, 0, 0]]) + hit_list = [] miss_list = [] - for k in range(4): + for k in range(4): hit_list.append(numpy.rot90(h1, k)) hit_list.append(numpy.rot90(h2, k)) 
miss_list.append(numpy.rot90(m1, k)) - miss_list.append(numpy.rot90(m2, k)) + miss_list.append(numpy.rot90(m2, k)) img = img.copy() while True: last = img - for hit, miss in zip(hit_list, miss_list): - hm = m.binary_hit_or_miss(img, hit, miss) - img = numpy.logical_and(img, numpy.logical_not(hm)) - if numpy.all(img == last): + for hit, miss in zip(hit_list, miss_list): + hm = m.binary_hit_or_miss(img, hit, miss) + img = numpy.logical_and(img, numpy.logical_not(hm)) + if numpy.all(img == last): break return img - def __call__(self, image): - """Reads the input image, extract the features based on Maximum Curvature of the fingervein image, and writes the resulting template""" - + def __call__(self, image): + """Reads the input image, extract the features based on Maximum Curvature + of the fingervein image, and writes the resulting template""" + finger_image = image[0] #Normalized image with or without histogram equalization - finger_mask = image[1] - - return self.repeated_line_tracking(finger_image, finger_mask) - + finger_mask = image[1] + + return self.repeated_line_tracking(finger_image, finger_mask) + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') return (image) - - \ No newline at end of file diff --git a/bob/bio/vein/extractors/WideLineDetector.py b/bob/bio/vein/extractors/WideLineDetector.py index 26d6e06..505e891 100644 --- a/bob/bio/vein/extractors/WideLineDetector.py +++ b/bob/bio/vein/extractors/WideLineDetector.py @@ -2,24 +2,23 @@ # vim: set fileencoding=utf-8 : # Pedro Tome <Pedro.Tome@idiap.ch> -import bob.core import bob.io.base import bob.ip.base import numpy -import math -#from math import pi -#from mumpy import sqrt -import scipy.signal +import scipy import scipy.misc -from facereclib.features.Extractor import Extractor -from .. import utils + +from bob.bio.base.features.Extractor import Extractor + class WideLineDetector (Extractor): - - """Wide Line Detector feature extractor based on - B. Huang, Y. Dai, R. Li, D. Tang and W. Li. Finger-vein authentication based on wide line detector and pattern - normalization, Proceedings on 20th International Conference on Pattern Recognition (ICPR), 2010 + """Wide Line Detector feature extractor + + Based on B. Huang, Y. Dai, R. Li, D. Tang and W. Li. Finger-vein + authentication based on wide line detector and pattern normalization, + Proceedings on 20th International Conference on Pattern Recognition (ICPR), + 2010. 
""" def __init__( @@ -27,9 +26,8 @@ class WideLineDetector (Extractor): radius = 5, #Radius of the circular neighbourhood region threshold = 1, #Neigborhood threshold g = 41, #Sum of neigbourhood threshold - rescale = True - - ): + rescale = True, + ): # call base class constructor Extractor.__init__( @@ -37,9 +35,9 @@ class WideLineDetector (Extractor): radius = radius, threshold = threshold, g = g, - rescale = rescale - ) - + rescale = rescale, + ) + # block parameters self.radius = radius self.threshold = threshold @@ -48,13 +46,14 @@ class WideLineDetector (Extractor): def wide_line_detector(self, finger_image, mask): - """Computes and returns the Wide Line Detector features for the given input fingervein image""" - - finger_image = finger_image.astype(numpy.float64) + """Computes and returns the Wide Line Detector features for the given input + fingervein image""" + + finger_image = finger_image.astype(numpy.float64) finger_mask = numpy.zeros(mask.shape) - finger_mask[mask == True] = 1 - + finger_mask[mask == True] = 1 + # Rescale image if required if self.rescale == True: scaling_factor = 0.24 @@ -68,40 +67,42 @@ class WideLineDetector (Extractor): x = numpy.arange((-1)*self.radius, self.radius+1) y = numpy.arange((-1)*self.radius, self.radius+1) X, Y = numpy.meshgrid(x,y) - + N = X**2 + Y**2 <= self.radius**2 # Neighbourhood mask - - img_h, img_w = finger_image.shape #Image height and width - + + img_h, img_w = finger_image.shape #Image height and width + veins = numpy.zeros(finger_image.shape) - - for y in range(self.radius,img_h-self.radius): - for x in range(self.radius,img_w-self.radius): + + for y in range(self.radius,img_h-self.radius): + for x in range(self.radius,img_w-self.radius): s=((finger_image[y-self.radius:y+self.radius+1,x-self.radius:x+self.radius+1] - finger_image[y,x]) <= self.threshold) m = (s*N).sum() veins[y,x] = float(m <= self.g) - + # Mask the vein image with the finger region img_veins_bin = veins*finger_mask - + return img_veins_bin - - - def __call__(self, image): - """Reads the input image, extract the features based on Wide Line Detector of the fingervein image, and writes the resulting template""" - #For debugging - + + + def __call__(self, image): + """Reads the input image, extract the features based on Wide Line Detector + of the fingervein image, and writes the resulting template""" + #For debugging + finger_image = image[0] #Normalized image with histogram equalization - finger_mask = image[1] - - return self.wide_line_detector(finger_image, finger_mask) - - + finger_mask = image[1] + + return self.wide_line_detector(finger_image, finger_mask) + + def save_feature(self, feature, feature_file): f = bob.io.base.HDF5File(feature_file, 'w') f.set('feature', feature) - + + def read_feature(self, feature_file): f = bob.io.base.HDF5File(feature_file, 'r') image = f.read('feature') - return (image) \ No newline at end of file + return (image) diff --git a/bob/bio/vein/extractors/__init__.py b/bob/bio/vein/extractors/__init__.py index 596d0be..e69de29 100644 --- a/bob/bio/vein/extractors/__init__.py +++ b/bob/bio/vein/extractors/__init__.py @@ -1,10 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : - -"""Feature Extraction""" - -from NormalisedCrossCorrelation import NormalisedCrossCorrelation -from MaximumCurvature import MaximumCurvature -from RepeatedLineTracking import RepeatedLineTracking -from WideLineDetector import WideLineDetector -from LocalBinaryPatterns import LocalBinaryPatterns diff --git a/bob/bio/vein/utils.py 
b/bob/bio/vein/utils.py index 3c75c1a..7e84f45 100644 --- a/bob/bio/vein/utils.py +++ b/bob/bio/vein/utils.py @@ -1,9 +1,7 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Pedro Tome <Pedro.Tome@idiap.ch> -import numpy.random -import scipy.ndimage +import numpy import scipy.signal import bob.ip.base import bob.sp @@ -12,23 +10,21 @@ import bob.core def imfilter(a, b, gpu=False, conv=True): """imfilter function based on MATLAB implementation.""" + if (a.dtype == numpy.uint8): - a= bob.core.convert(a,numpy.float64,(0,1)) + a= bob.core.convert(a,numpy.float64,(0,1)) M, N = a.shape if conv == True: - b = bob.ip.base.rotate(b, 180) + b = bob.ip.base.rotate(b, 180) shape = numpy.array((0,0)) shape[0] = a.shape[0] + b.shape[0] - 1 shape[1] = a.shape[1] + b.shape[1] - 1 a_ext = numpy.ndarray(shape=shape, dtype=numpy.float64) bob.sp.extrapolate_nearest(a, a_ext) - + if gpu == True: import xbob.cusp return xbob.cusp.conv(a_ext, b) else: return scipy.signal.convolve2d(a_ext, b, 'valid') #return = self.convfft(a_ext, b) - - - -- GitLab
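
All of the ported extractors share one calling convention: __call__ receives the preprocessed sample as a pair (normalised finger image, finger mask) and returns the vein template as a float64 array, while save_feature/read_feature round-trip templates through bob.io.base.HDF5File. A minimal usage sketch follows, with synthetic placeholder data standing in for a real preprocessed sample; the array shape and file name are arbitrary, and the bob dependencies imported by the module must be installed.

import numpy
from bob.bio.vein.extractors.MaximumCurvature import MaximumCurvature

# Hypothetical preprocessed sample: a normalised finger image plus its mask.
image = numpy.random.rand(150, 400)
mask = numpy.ones((150, 400), dtype=bool)

extractor = MaximumCurvature(sigma=5, gpu=False)
template = extractor((image, mask))              # binary vein pattern, float64
extractor.save_feature(template, 'mc_template.hdf5')
again = extractor.read_feature('mc_template.hdf5')
assert numpy.allclose(template, again)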
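
LocalBinaryPatterns.lbp_features() masks the vein image with the finger region, computes LBP histograms over overlapping blocks via bob.ip.base.lbphs, and concatenates the per-block histograms into a single vector. Below is a rough standalone sketch of the same block-histogram idea using scikit-image's local_binary_pattern instead of bob.ip.base (an assumption, not the extractor's implementation); the radius, neighbour count, block size and overlap are placeholders, since the extractor's defaults fall outside this hunk.

import numpy
from skimage.feature import local_binary_pattern

def lbp_block_histograms(image, mask, radius=8, neighbors=16,
                         block_size=(59, 59), block_overlap=(15, 15)):
    """Mask the finger region, compute uniform LBP codes, histogram them over
    overlapping blocks and concatenate the histograms (placeholder parameters)."""
    image = image.astype(numpy.float64) * (mask > 0)
    codes = local_binary_pattern(image, P=neighbors, R=radius, method='uniform')
    n_bins = neighbors + 2                      # number of 'uniform' LBP labels
    step_y = block_size[0] - block_overlap[0]
    step_x = block_size[1] - block_overlap[1]
    hists = []
    for y in range(0, image.shape[0] - block_size[0] + 1, step_y):
        for x in range(0, image.shape[1] - block_size[1] + 1, step_x):
            block = codes[y:y + block_size[0], x:x + block_size[1]]
            h, _ = numpy.histogram(block, bins=n_bins, range=(0, n_bins))
            hists.append(h)
    return numpy.concatenate(hists).astype(numpy.float64)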
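
MaximumCurvature.maximum_curvature() first builds derivative-of-Gaussian kernels and computes four directional curvatures kappa = f'' / (1 + f'^2)^(3/2); the later profile-scoring (Vt) and vein-connection (Cd) stages then work on those curvatures. One detail worth noting: the hunk writes the exponent as (3/2), which evaluates to 1 under Python 2 integer division, whereas Miura et al.'s formula uses 1.5, as in the sketch below. The sketch restates only the kernel construction and the curvature step, with scipy.ndimage.correlate and replicate borders standing in for utils.imfilter, so boundary handling may differ slightly.

import math
import numpy
import scipy.ndimage

def directional_curvatures(image, mask, sigma=5):
    """Derivative-of-Gaussian filtering and kappa = f''/(1 + f'**2)**1.5 for
    the horizontal, vertical and both diagonal profile directions."""
    image = image.astype(numpy.float64)
    winsize = int(numpy.ceil(4 * sigma))
    X, Y = numpy.meshgrid(numpy.arange(-winsize, winsize + 1),
                          numpy.arange(-winsize, winsize + 1))

    h = numpy.exp(-(X**2 + Y**2) / (2.0 * sigma**2)) / (2.0 * math.pi * sigma**2)
    hx = (-X / sigma**2) * h                     # first derivative along x
    hxx = ((X**2 - sigma**2) / sigma**4) * h     # second derivative along x
    hxy = (X * Y / sigma**4) * h                 # mixed derivative

    flt = lambda kern: scipy.ndimage.correlate(image, kern, mode='nearest')
    fx, fy = flt(hx), flt(hx.T)
    fxx, fyy, fxy = flt(hxx), flt(hxx.T), flt(hxy)

    f1 = 0.5 * numpy.sqrt(2) * (fx + fy)         # profile along the \ diagonal
    f2 = 0.5 * numpy.sqrt(2) * (fx - fy)         # profile along the / diagonal
    f11 = 0.5 * fxx + fxy + 0.5 * fyy
    f22 = 0.5 * fxx - fxy + 0.5 * fyy

    m = (mask > 0).astype(numpy.float64)
    k = numpy.zeros(image.shape + (4,))
    k[..., 0] = (fxx / (1 + fx**2)**1.5) * m     # horizontal
    k[..., 1] = (fyy / (1 + fy**2)**1.5) * m     # vertical
    k[..., 2] = (f11 / (1 + f1**2)**1.5) * m     # diagonal \
    k[..., 3] = (f22 / (1 + f2**2)**1.5) * m     # diagonal /
    return k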
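
PrincipalCurvature.py as committed still carries MATLAB remnants (ut_gauss, find, max(max(Gmag)), and the statement Gmag( find(Gmag == 0) ) = 1, which is not valid Python), the class is still named MaximumCurvature, and img/img_veins are referenced without being defined, so the module will not import as is. The sketch below is one interpretation of the Choi et al. pipeline the hunk aims for, gradient normalization followed by the largest Hessian eigenvalue, assuming scipy.ndimage.gaussian_filter for the Gaussian derivatives. Only the sigma and threshold defaults (2 and 1.3) come from the hunk; everything else is an assumption, and the median binarisation is carried over from the other extractors (the hunk's comment mentions Otsu but its code uses the median).

import numpy
from scipy.ndimage import gaussian_filter

def principal_curvature(image, mask, sigma=2, threshold=1.3):
    """Gradient normalization, then the largest eigenvalue of the Hessian of
    the normalized gradient field (interpretation of Choi et al., 2009)."""
    image = image.astype(numpy.float64)
    s = numpy.sqrt(sigma**2 / 2.0)

    gx = gaussian_filter(image, s, order=(0, 1))   # d/dx
    gy = gaussian_filter(image, s, order=(1, 0))   # d/dy
    gmag = numpy.sqrt(gx**2 + gy**2)

    # Hard-threshold weak gradients, then normalise to a unit vector field
    gamma = (threshold / 100.0) * gmag.max()
    gx[gmag < gamma] = 0
    gy[gmag < gamma] = 0
    gmag[gmag == 0] = 1                            # avoid dividing by zero
    gx, gy = gx / gmag, gy / gmag

    # Hessian of the normalised gradient field
    hxx = gaussian_filter(gx, s, order=(0, 1))
    hxy = gaussian_filter(gx, s, order=(1, 0))
    hyy = gaussian_filter(gy, s, order=(1, 0))

    # Largest eigenvalue of [[hxx, hxy], [hxy, hyy]] at every pixel
    lam = 0.5 * (hxx + hyy + numpy.sqrt((hxx - hyy)**2 + 4 * hxy**2))
    veins = numpy.maximum(lam, 0) * (mask > 0)

    # Binarise with the median of the non-zero responses
    md = numpy.median(veins[veins > 0])
    return (veins > md).astype(numpy.float64)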
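
RepeatedLineTracking.repeated_line_tracking() seeds random walks inside the finger mask, biases the walk direction with p_lr/p_ud, moves each walk to the neighbour whose cross-sectional profile shows the deepest dark valley, and accumulates visits in the locus space Tr. Note that the method calls scipy.ndimage.binary_dilation and scipy.ndimage.binary_closing while the rewritten import block no longer imports scipy, so an "import scipy.ndimage" appears to be missing from the hunk. Below is a much-simplified sketch of the random-walk core only; it drops the probabilistic direction bias, the sqrt(2)/2 rescaling of r and of the profile half-width on oblique moves, the optional image rescaling, and the final morphological closing.

import numpy

def repeated_line_tracking(image, mask, iterations=3000, r=1, profile_w=21, seed=None):
    """Simplified Miura-style line tracking: random walks move towards the
    neighbour with the deepest valley in the perpendicular cross-section and
    every visited pixel increments the locus space Tr."""
    rng = numpy.random.RandomState(seed)
    image = image.astype(numpy.float64)
    h, w = image.shape
    hW = (profile_w - 1) // 2
    Tr = numpy.zeros_like(image)

    # Seeds uniformly distributed inside the mask, away from the borders
    inside = numpy.argwhere(mask > 0)
    inside = inside[(inside[:, 0] >= r + hW) & (inside[:, 0] < h - r - hW) &
                    (inside[:, 1] >= r + hW) & (inside[:, 1] < w - r - hW)]
    seeds = inside[rng.permutation(len(inside))[:iterations]]

    for yc, xc in seeds:
        Tc = numpy.zeros_like(image, dtype=bool)        # locus-position table
        while True:
            Tc[yc, xc] = True
            Tr[yc, xc] += 1
            cand, depth = [], []
            for dy in (-1, 0, 1):                        # unvisited 8-neighbours
                for dx in (-1, 0, 1):
                    if dy == dx == 0:
                        continue
                    y, x = yc + dy, xc + dx
                    if not (r + hW <= y < h - r - hW and r + hW <= x < w - r - hW):
                        continue
                    if Tc[y, x] or not mask[y, x]:
                        continue
                    # Valley depth of the cross-section perpendicular to the move
                    py, px = y + r * dy, x + r * dx
                    depth.append(image[py - hW * dx, px + hW * dy]
                                 - 2 * image[py, px]
                                 + image[py + hW * dx, px - hW * dy])
                    cand.append((y, x))
            if not cand:
                break                                    # dead end: next seed
            yc, xc = cand[int(numpy.argmax(depth))]

    md = numpy.median(Tr[Tr > 0])
    return (Tr > md).astype(numpy.float64)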
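
WideLineDetector.wide_line_detector() labels a pixel as vein when no more than g pixels of its circular neighbourhood lie within `threshold` grey levels of the centre pixel, i.e. when the centre sits inside a wide dark line. The sketch below restates that test with the hunk's defaults (radius 5, threshold 1, g 41) but skips the optional rescaling step.

import numpy

def wide_line_detector(image, mask, radius=5, threshold=1, g=41):
    """Huang et al. wide line detector over a circular neighbourhood."""
    image = image.astype(numpy.float64)
    coords = numpy.arange(-radius, radius + 1)
    X, Y = numpy.meshgrid(coords, coords)
    N = (X**2 + Y**2) <= radius**2               # circular neighbourhood mask

    h, w = image.shape
    veins = numpy.zeros_like(image)
    for y in range(radius, h - radius):
        for x in range(radius, w - radius):
            patch = image[y - radius:y + radius + 1, x - radius:x + radius + 1]
            s = (patch - image[y, x]) <= threshold
            veins[y, x] = float((s * N).sum() <= g)

    return veins * (mask > 0)                    # keep only the finger region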
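
utils.imfilter() emulates MATLAB-style filtering by optionally rotating the kernel by 180 degrees, extending the image with nearest-neighbour extrapolation (bob.sp.extrapolate_nearest) and running a 'valid' convolution. A plain scipy restatement follows, ignoring the xbob.cusp GPU branch; it assumes an odd-sized kernel and that the extrapolation centres the image symmetrically, which may not match bob.sp exactly.

import numpy
import scipy.signal

def imfilter_like(a, b, conv=True):
    """Replicate-pad the image so a 'valid' convolution returns the original
    shape; rotate the kernel by 180 degrees first when conv is True."""
    a = a.astype(numpy.float64)
    if conv:
        b = numpy.rot90(b, 2)                    # same as rotating by 180 degrees
    py, px = b.shape[0] // 2, b.shape[1] // 2    # assumes an odd-sized kernel
    a_ext = numpy.pad(a, ((py, py), (px, px)), mode='edge')
    return scipy.signal.convolve2d(a_ext, b, mode='valid')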