Commit 21816276 authored by André Anjos's avatar André Anjos 💬
Browse files

More clean-up and documentation

parent 39f46cf7
Pipeline #2877 canceled with stage
in 2 minutes and 14 seconds
......@@ -18,7 +18,6 @@ class HammingDistance (Algorithm):
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
ch = 8, # Maximum search displacement in y-direction
cw = 5, # Maximum search displacement in x-direction
gpu = False,
):
# call base class constructor
......@@ -34,7 +33,6 @@ class HammingDistance (Algorithm):
self.ch = ch
self.cw = cw
self.gpu = gpu
def enroll(self, enroll_features):
"""Enrolls the model by computing an average graph for each model"""
......@@ -54,11 +52,7 @@ class HammingDistance (Algorithm):
bob.ip.base.rotate(crop_R, rotate_R, 180)
#FFT for scoring!
#Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
if self.gpu == True:
import xbob.cusp
Nm = xbob.cusp.conv(I, rotate_R);
else:
Nm = scipy.signal.convolve2d(I, rotate_R, 'valid');
Nm = scipy.signal.convolve2d(I, rotate_R, 'valid');
t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
Nmm = Nm[t0,s0]
#Nmm = Nm.max()
......
......@@ -24,7 +24,6 @@ class MiuraMatch (Algorithm):
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
ch = 8, # Maximum search displacement in y-direction
cw = 5, # Maximum search displacement in x-direction
gpu = False,
):
# call base class constructor
......@@ -40,7 +39,6 @@ class MiuraMatch (Algorithm):
self.ch = ch
self.cw = cw
self.gpu = gpu
def enroll(self, enroll_features):
......@@ -95,13 +93,8 @@ class MiuraMatch (Algorithm):
bob.ip.base.rotate(crop_R, rotate_R, 180)
#FFT for scoring!
#Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
if self.gpu == True:
Nm = self.convfft(I, rotate_R)
#import xbob.cusp
#Nm = xbob.cusp.conv(I, rotate_R);
else:
Nm = self.convfft(I, rotate_R)
#Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
Nm = self.convfft(I, rotate_R)
#Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
Nmm = Nm[t0,s0]
......
......@@ -3,9 +3,6 @@
from ..algorithms import MiuraMatch
huangwl_tool = MiuraMatch(ch=18, cw=28)
huangwl_gpu_tool = MiuraMatch(ch=18, cw=28, gpu=True)
miuramax_tool = MiuraMatch(ch=80, cw=90)
miuramax_gpu_tool = MiuraMatch(ch=80, cw=90, gpu=True)
miurarlt_tool = MiuraMatch(ch=65, cw=55)
miurarlt_gpu_tool = MiuraMatch(ch=65, cw=55, gpu=True)
huangwl = MiuraMatch(ch=18, cw=28)
miuramax = MiuraMatch(ch=80, cw=90)
miurarlt = MiuraMatch(ch=65, cw=55)
......@@ -2,15 +2,4 @@
# vim: set fileencoding=utf-8 :
from ...extractors import MaximumCurvature
# Parameters
SIGMA_DERIVATES = 5 #Sigma used for determining derivatives
GPU_ACCELERATION = False
#Define feature extractor
feature_extractor = MaximumCurvature(
sigma = SIGMA_DERIVATES,
gpu = GPU_ACCELERATION,
)
feature_extractor = MaximumCurvature(sigma = 5)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

# Grid configuration: parallelizes the verification pipeline on an SGE-like
# cluster, with the scoring stage dispatched to the GPU queue.
# Fix: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; it must be a ``from ... import`` statement.
from bob.bio.base.grid import Grid

grid = Grid(
  training_queue = '8G',
  number_of_preprocessing_jobs = 32,
  preprocessing_queue = '4G-io-big',
  number_of_extraction_jobs = 32,
  extraction_queue = '4G-io-big',
  number_of_projection_jobs = 32,
  projection_queue = {},
  number_of_enrollment_jobs = 32,
  enrollment_queue = {},
  number_of_scoring_jobs = 32,
  # scoring is the only stage submitted to the GPU queue
  scoring_queue = {'queue': 'q_gpu'},
  )
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

# Grid configuration: parallelizes the verification pipeline on an SGE-like
# cluster; all heavy stages (including scoring) use the 4G io-big queue.
# Fix: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; it must be a ``from ... import`` statement.
from bob.bio.base.grid import Grid

grid = Grid(
  training_queue = '8G',
  number_of_preprocessing_jobs = 32,
  preprocessing_queue = '4G-io-big',
  number_of_extraction_jobs = 32,
  extraction_queue = '4G-io-big',
  number_of_projection_jobs = 32,
  projection_queue = {},
  number_of_enrollment_jobs = 32,
  enrollment_queue = {},
  number_of_scoring_jobs = 32,
  scoring_queue = '4G-io-big',
  )
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

# Grid configuration: high-throughput variant (1000+ jobs per stage) with
# default queues for preprocessing/extraction/projection and the GPU queue
# for scoring.
# Fix: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; it must be a ``from ... import`` statement.
from bob.bio.base.grid import Grid

grid = Grid(
  training_queue = '8G',
  number_of_preprocessing_jobs = 1000,
  preprocessing_queue = {},
  number_of_extraction_jobs = 1000,
  extraction_queue = {},
  number_of_projection_jobs = 1000,
  projection_queue = {},
  number_of_enrollment_jobs = 100,
  enrollment_queue = '2G',
  number_of_scoring_jobs = 1500,
  # scoring is the only stage submitted to the GPU queue
  scoring_queue = {'queue': 'q_gpu'},
  )
......@@ -10,20 +10,15 @@ CONTOUR_MASK_WIDTH = 40 # Width of the mask
PADDING_OFFSET = 5
PADDING_THRESHOLD = 0.2 #Threshold for padding black zones
PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'CircGabor' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION,
)
......@@ -14,16 +14,12 @@ PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'HE' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION
)
......@@ -15,8 +15,6 @@ PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'HFE' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
......@@ -26,5 +24,4 @@ preprocessor = FingerCrop(
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION,
)
......@@ -11,20 +11,15 @@ CONTOUR_MASK_WIDTH = 40 # Width of the mask
PADDING_OFFSET = 5
PADDING_THRESHOLD = 0.2 #Threshold for padding black zones
PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMod', leemaskMatlab', 'konomask'
POSTPROCESSING = None # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION
)
......@@ -18,25 +18,17 @@ class MaximumCurvature (Extractor):
Based on N. Miura, A. Nagasaka, and T. Miyatake, Extraction of Finger-Vein
Pattern Using Maximum Curvature Points in Image Profiles. Proceedings on IAPR
conference on machine vision applications, 9 (2005), pp. 347--350
"""
Parameters:
sigma (int, Optional): Sigma used for determining derivatives
def __init__(
self,
sigma = 5, #Sigma used for determining derivatives
gpu = False
):
"""
# call base class constructor
Extractor.__init__(
self,
sigma = sigma,
gpu = gpu
)
# block parameters
def __init__(self, sigma = 5):
Extractor.__init__(self, sigma = sigma)
self.sigma = sigma
self.gpu = gpu
def maximum_curvature(self, image, mask):
......@@ -65,11 +57,11 @@ class MaximumCurvature (Extractor):
# Do the actual filtering
fx = utils.imfilter(image, hx, self.gpu, conv=False)
fxx = utils.imfilter(image, hxx, self.gpu, conv=False)
fy = utils.imfilter(image, hy, self.gpu, conv=False)
fyy = utils.imfilter(image, hyy, self.gpu, conv=False)
fxy = utils.imfilter(image, hxy, self.gpu, conv=False)
fx = utils.imfilter(image, hx, conv=False)
fxx = utils.imfilter(image, hxx, conv=False)
fy = utils.imfilter(image, hy, conv=False)
fyy = utils.imfilter(image, hyy, conv=False)
fxy = utils.imfilter(image, hxy, conv=False)
f1 = 0.5*numpy.sqrt(2)*(fx + fy) # \ #
f2 = 0.5*numpy.sqrt(2)*(fx - fy) # / #
......@@ -263,7 +255,7 @@ class MaximumCurvature (Extractor):
def __call__(self, image):
"""Reads the input image, extract the features based on Maximum Curvature of the fingervein image, and writes the resulting template"""
finger_image = image[0] #Normalized image with or without histogram equalization
finger_image = image[0] #Normalized image with or without histogram equalization
finger_mask = image[1]
return self.maximum_curvature(finger_image, finger_mask)
......@@ -20,7 +20,6 @@ class PrincipalCurvature (Extractor):
self,
sigma = 2, # Gaussian standard deviation applied
threshold = 1.3, # Percentage of maximum used for hard thresholding
gpu = False,
):
# call base class constructor
......@@ -28,13 +27,11 @@ class PrincipalCurvature (Extractor):
self,
sigma = sigma,
threshold = threshold,
gpu = gpu,
)
# block parameters
self.sigma = sigma
self.threshold = threshold
self.gpu = gpu
def principal_curvature(self, image, mask):
......
......@@ -3,6 +3,7 @@ from .NormalisedCrossCorrelation import NormalisedCrossCorrelation
from .PrincipalCurvature import PrincipalCurvature
from .RepeatedLineTracking import RepeatedLineTracking
from .WideLineDetector import WideLineDetector
from .MaximumCurvature import MaximumCurvature
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -17,14 +17,41 @@ from .. import utils
class FingerCrop (Preprocessor):
"""Fingervein mask
"""Extracts the mask and pre-processes fingervein images
Based on the implementation: E.C. Lee, H.C. Lee and K.R. Park. Finger vein
recognition using minutia-based alignment and local binary pattern-based
feature extraction. International Journal of Imaging Systems and
Technology. Vol. 19, No. 3, pp. 175-178, September 2009.
Parameters:
mask_h (int, Optional): Height of contour mask in pixels
mask_w (int, Optional): Width of the contour mask in pixels
padding_offset (int, Optional):
padding_threshold (float, Optional):
fingercontour (str, Optional): Select between three finger contour
implementations: leemaskMod, leemaskMatlab or konomask. (From Pedro Tome:
the option ``leemaskMatlab`` was just implemented for testing purposes so
we could compare with MAT files generated from Matlab code of other
authors. He only used it with the UTFVP database, using ``leemaskMod``
with that database yields slight worse results.)
postprocessing (str, Optional): Select between ``HE`` (histogram
equalization, as with :py:func:`bob.ip.base.histogram_equalization`),
``HFE`` (high-frequency emphasis filter, with hard-coded parameters - see
implementation) or ``CircGabor`` (circular Gabor filter with band-width
1.12 octaves and standard deviation of 5 pixels (this is hard-coded). By
default, no postprocessing is applied on the image.
"""
def __init__(
self,
mask_h = 4, # Height of the mask
......@@ -33,56 +60,33 @@ class FingerCrop (Preprocessor):
padding_offset = 5, #Always the same
padding_threshold = 0.2, #0 for UTFVP database (high quality), 0.2 for VERA database (low quality)
preprocessing = None,
fingercontour = 'leemaskMatlab',
fingercontour = 'leemaskMod',
postprocessing = None,
gpu = False,
color_channel = 'gray', # the color channel to extract from colored images, if colored images are in the database
**kwargs # parameters to be written in the __str__ method
**kwargs
):
"""Parameters of the constructor of this preprocessor:
color_channel
In case of color images, which color channel should be used?
mask_h
Height of the cropped mask of a fingervein image
mask_w
Height of the cropped mask of a fingervein image
"""
# call base class constructor
Preprocessor.__init__(
self,
Preprocessor.__init__(self,
mask_h = mask_h,
mask_w = mask_w,
padding_offset = padding_offset,
padding_threshold = padding_threshold,
preprocessing = preprocessing,
fingercontour = fingercontour,
postprocessing = postprocessing,
gpu = gpu,
color_channel = color_channel,
**kwargs
)
)
self.mask_h = mask_h
self.mask_w = mask_w
self.preprocessing = preprocessing
self.fingercontour = fingercontour
self.postprocessing = postprocessing
self.padding_offset = padding_offset
self.padding_threshold = padding_threshold
self.gpu = gpu
self.color_channel = color_channel
def __konomask__(self, image, sigma):
......@@ -109,7 +113,7 @@ class FingerCrop (Preprocessor):
hy = (-Y/(2*math.pi*sigma**4))*numpy.exp(-(X**2 + Y**2)/(2*sigma**2))
# Filter the image with the directional kernel
fy = utils.imfilter(image, hy, self.gpu, conv=False)
fy = utils.imfilter(image, hy, conv=False)
# Upper part of filtred image
img_filt_up = fy[0:half_img_h,:]
......@@ -155,7 +159,7 @@ class FingerCrop (Preprocessor):
mask[0:self.mask_h/2,:] = -1
mask[self.mask_h/2:,:] = 1
img_filt = utils.imfilter(image, mask, self.gpu, conv=True)
img_filt = utils.imfilter(image, mask, conv=True)
# Upper part of filtred image
img_filt_up = img_filt[0:half_img_h-1,:]
......@@ -165,7 +169,7 @@ class FingerCrop (Preprocessor):
img_filt_lo = img_filt[half_img_h-1:,:]
y_lo = img_filt_lo.argmin(axis=0)
img_filt = utils.imfilter(image, mask.T, self.gpu, conv=True)
img_filt = utils.imfilter(image, mask.T, conv=True)
# Left part of filtered image
img_filt_lf = img_filt[:,0:half_img_w]
......@@ -215,7 +219,7 @@ class FingerCrop (Preprocessor):
mask[0:self.mask_h/2,:] = -1
mask[self.mask_h/2:,:] = 1
img_filt = utils.imfilter(image, mask, self.gpu, conv=True)
img_filt = utils.imfilter(image, mask, conv=True)
# Upper part of filtred image
img_filt_up = img_filt[0:numpy.floor(img_h/2),:]
......@@ -228,6 +232,7 @@ class FingerCrop (Preprocessor):
for i in range(0,y_up.size):
img_filt[y_up[i]:y_lo[i]+img_filt_lo.shape[0],i]=1
import ipdb; ipdb.set_trace()
finger_mask = numpy.ndarray(image.shape, numpy.bool)
finger_mask[:,:] = False
......@@ -321,30 +326,51 @@ class FingerCrop (Preprocessor):
return bob.core.convert(image_new,numpy.uint8,(0,255),(0,1))
def __CLAHE__(self, image):
  """Contrast-limited adaptive histogram equalization (CLAHE).

  Currently an unimplemented stub that ignores ``image`` and returns
  ``True``.  NOTE(review): kept the stub return value, but fixed the
  lowercase ``true`` (a NameError at runtime in Python) to ``True``.
  """
  # TODO: implement CLAHE
  return True
def __HE__(self, image):
"""Applies histogram equalization on the input image
def __HE__(self, image):
#Umbralization based on the pixels non zero
imageEnhance = numpy.zeros(image.shape)
imageEnhance = imageEnhance.astype(numpy.uint8)
Parameters:
image (numpy.ndarray): raw image to be filtered, as 2D array of
unsigned 8-bit integers
Returns:
bob.ip.base.histogram_equalization(image, imageEnhance)
numpy.ndarray: normalized image as a 2D array of unsigned 8-bit
integers
return imageEnhance
"""
#Umbralization based on the pixels non zero
retval = numpy.zeros(image.shape, dtype=numpy.uint8)
bob.ip.base.histogram_equalization(image, retval)
return retval
def __circularGabor__(self, image, bw, sigma):
""" CIRCULARGABOR Construct a circular gabor filter
"""Applies a circular gabor filter on the input image, with parameters
Parameters:
bw = bandwidth, (1.12 octave)
sigma = standard deviation, (5 pixels)
image (numpy.ndarray): raw image to be filtered, as 2D array of
unsigned 8-bit integers
bw (float): bandwidth (1.12 octave)
sigma (int): standard deviation (5 pixels)
Returns:
numpy.ndarray: normalized image as a 2D array of unsigned 8-bit
integers
"""
#Convert image to doubles
# Converts image to doubles
image_new = bob.core.convert(image,numpy.float64,(0,1),(0,255))
img_h, img_w = image_new.shape
......@@ -352,10 +378,9 @@ class FingerCrop (Preprocessor):
sz = numpy.fix(8*numpy.max([sigma,sigma]))
if numpy.mod(sz,2) == 0:
sz = sz+1
if numpy.mod(sz,2) == 0: sz = sz+1
#Construct filter kernel
#Constructs filter kernel
winsize = numpy.fix(sz/2)
x = numpy.arange(-winsize, winsize+1)
......@@ -369,26 +394,26 @@ class FingerCrop (Preprocessor):
# Without normalisation
#gaborfilter = numpy.exp(-0.5*(X**2/sigma**2+Y**2/sigma**2))*numpy.cos(2*math.pi*fc*numpy.sqrt(X**2+Y**2))
imageEnhance = utils.imfilter(image, gaborfilter, self.gpu, conv=False)
imageEnhance = utils.imfilter(image, gaborfilter, conv=False)
imageEnhance = numpy.abs(imageEnhance)
imageEnhance = bob.core.convert(imageEnhance,numpy.uint8,(0,255),(imageEnhance.min(),imageEnhance.max()))
return imageEnhance
return bob.core.convert(imageEnhance,numpy.uint8, (0,255),
(imageEnhance.min(),imageEnhance.max()))
def __HFE__(self,image):
""" High Frequency Enphasis Filtering (HFE)
""" High Frequency Emphasis Filtering (HFE)
"""
### Parameters
### Hard-coded parameters for the HFE filtering
D0 = 0.025
a = 0.6
b = 1.2
n = 2.0
#Convert image to doubles
image_new = bob.core.convert(image,numpy.float64,(0,1),(0,255))
# converts image to doubles
image_new = bob.core.convert(image,numpy.float64, (0,1), (0,255))
img_h, img_w = image_new.shape
# DFT
......@@ -399,123 +424,67 @@ class FingerCrop (Preprocessor):
col = numpy.arange(1,img_h+1)
y = (numpy.tile(col,(img_w,1)).T - (numpy.fix(img_h/2)+1))/img_h
#D is the distance from point (u,v) to the centre of the frequency rectangle.
# D is the distance from point (u,v) to the centre of the
# frequency rectangle.
radius = numpy.sqrt(x**2 + y**2)
f = a + b / (1.0 + (D0 / radius)**(2*n))
Ffreq = Ffreq * f
#Inverse DFT
imageEnhance = bob.sp.ifft(bob.sp.ifftshift(Ffreq))