Commit 21816276 authored by André Anjos's avatar André Anjos

More clean-up and documentation

parent 39f46cf7
Pipeline #2877 canceled with stage
in 2 minutes and 14 seconds
......@@ -18,7 +18,6 @@ class HammingDistance (Algorithm):
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
ch = 8, # Maximum search displacement in y-direction
cw = 5, # Maximum search displacement in x-direction
gpu = False,
):
# call base class constructor
......@@ -34,7 +33,6 @@ class HammingDistance (Algorithm):
self.ch = ch
self.cw = cw
self.gpu = gpu
def enroll(self, enroll_features):
"""Enrolls the model by computing an average graph for each model"""
......@@ -54,11 +52,7 @@ class HammingDistance (Algorithm):
bob.ip.base.rotate(crop_R, rotate_R, 180)
#FFT for scoring!
#Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
if self.gpu == True:
import xbob.cusp
Nm = xbob.cusp.conv(I, rotate_R);
else:
Nm = scipy.signal.convolve2d(I, rotate_R, 'valid');
Nm = scipy.signal.convolve2d(I, rotate_R, 'valid');
t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
Nmm = Nm[t0,s0]
#Nmm = Nm.max()
......
......@@ -24,7 +24,6 @@ class MiuraMatch (Algorithm):
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
ch = 8, # Maximum search displacement in y-direction
cw = 5, # Maximum search displacement in x-direction
gpu = False,
):
# call base class constructor
......@@ -40,7 +39,6 @@ class MiuraMatch (Algorithm):
self.ch = ch
self.cw = cw
self.gpu = gpu
def enroll(self, enroll_features):
......@@ -95,13 +93,8 @@ class MiuraMatch (Algorithm):
bob.ip.base.rotate(crop_R, rotate_R, 180)
#FFT for scoring!
#Nm=bob.sp.ifft(bob.sp.fft(I)*bob.sp.fft(rotate_R))
if self.gpu == True:
Nm = self.convfft(I, rotate_R)
#import xbob.cusp
#Nm = xbob.cusp.conv(I, rotate_R);
else:
Nm = self.convfft(I, rotate_R)
#Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
Nm = self.convfft(I, rotate_R)
#Nm2 = scipy.signal.convolve2d(I, rotate_R, 'valid')
t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)
Nmm = Nm[t0,s0]
......
......@@ -3,9 +3,6 @@
from ..algorithms import MiuraMatch
huangwl_tool = MiuraMatch(ch=18, cw=28)
huangwl_gpu_tool = MiuraMatch(ch=18, cw=28, gpu=True)
miuramax_tool = MiuraMatch(ch=80, cw=90)
miuramax_gpu_tool = MiuraMatch(ch=80, cw=90, gpu=True)
miurarlt_tool = MiuraMatch(ch=65, cw=55)
miurarlt_gpu_tool = MiuraMatch(ch=65, cw=55, gpu=True)
huangwl = MiuraMatch(ch=18, cw=28)
miuramax = MiuraMatch(ch=80, cw=90)
miurarlt = MiuraMatch(ch=65, cw=55)
......@@ -2,15 +2,4 @@
# vim: set fileencoding=utf-8 :
from ...extractors import MaximumCurvature
# Parameters
SIGMA_DERIVATES = 5 #Sigma used for determining derivatives
GPU_ACCELERATION = False
#Define feature extractor
feature_extractor = MaximumCurvature(
sigma = SIGMA_DERIVATES,
gpu = GPU_ACCELERATION,
)
feature_extractor = MaximumCurvature(sigma = 5)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

"""SGE grid configuration: GPU-accelerated scoring.

Preprocessing, extraction, projection and enrollment run as 32 parallel
jobs on I/O-heavy 4G queues (or the default queue); scoring is submitted
to the ``q_gpu`` queue.
"""

# Fixed: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; a ``from ... import ...`` statement is required.
from bob.bio.base.grid import Grid

grid = Grid(
    training_queue = '8G',

    number_of_preprocessing_jobs = 32,
    preprocessing_queue = '4G-io-big',

    number_of_extraction_jobs = 32,
    extraction_queue = '4G-io-big',

    number_of_projection_jobs = 32,
    projection_queue = {},

    number_of_enrollment_jobs = 32,
    enrollment_queue = {},

    number_of_scoring_jobs = 32,
    scoring_queue = {'queue': 'q_gpu'},
)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

"""SGE grid configuration: CPU-only, I/O-heavy queues.

All parallelizable steps run as 32 jobs; preprocessing, extraction and
scoring use the 4G io-big queues, while projection and enrollment use
the default queue.
"""

# Fixed: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; a ``from ... import ...`` statement is required.
from bob.bio.base.grid import Grid

grid = Grid(
    training_queue = '8G',

    number_of_preprocessing_jobs = 32,
    preprocessing_queue = '4G-io-big',

    number_of_extraction_jobs = 32,
    extraction_queue = '4G-io-big',

    number_of_projection_jobs = 32,
    projection_queue = {},

    number_of_enrollment_jobs = 32,
    enrollment_queue = {},

    number_of_scoring_jobs = 32,
    scoring_queue = '4G-io-big',
)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

"""SGE grid configuration: high job-count splitting with GPU scoring.

Preprocessing, extraction and projection are split into 1000 jobs on the
default queue, enrollment into 100 jobs on a 2G queue, and scoring into
1500 jobs on the ``q_gpu`` queue.
"""

# Fixed: the original line read ``import bob.bio.base.grid import Grid``,
# which is a SyntaxError; a ``from ... import ...`` statement is required.
from bob.bio.base.grid import Grid

grid = Grid(
    training_queue = '8G',

    number_of_preprocessing_jobs = 1000,
    preprocessing_queue = {},

    number_of_extraction_jobs = 1000,
    extraction_queue = {},

    number_of_projection_jobs = 1000,
    projection_queue = {},

    number_of_enrollment_jobs = 100,
    enrollment_queue = '2G',

    number_of_scoring_jobs = 1500,
    scoring_queue = {'queue': 'q_gpu'},
)
......@@ -10,20 +10,15 @@ CONTOUR_MASK_WIDTH = 40 # Width of the mask
PADDING_OFFSET = 5
PADDING_THRESHOLD = 0.2 #Threshold for padding black zones
PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'CircGabor' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION,
)
......@@ -14,16 +14,12 @@ PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'HE' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION
)
......@@ -15,8 +15,6 @@ PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMatlab', 'konomask'
POSTPROCESSING = 'HFE' # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
......@@ -26,5 +24,4 @@ preprocessor = FingerCrop(
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION,
)
......@@ -11,20 +11,15 @@ CONTOUR_MASK_WIDTH = 40 # Width of the mask
PADDING_OFFSET = 5
PADDING_THRESHOLD = 0.2 #Threshold for padding black zones
PREPROCESSING = None
FINGERCONTOUR = 'leemaskMod' # Options: 'leemaskMod', leemaskMatlab', 'konomask'
POSTPROCESSING = None # Options: None, 'HE', 'HFE', 'CircGabor'
GPU_ACCELERATION = False
# define the preprocessor
preprocessor = FingerCrop(
mask_h=CONTOUR_MASK_HEIGHT,
mask_w=CONTOUR_MASK_WIDTH,
padding_offset=PADDING_OFFSET,
padding_threshold=PADDING_THRESHOLD,
preprocessing=PREPROCESSING,
fingercontour=FINGERCONTOUR,
postprocessing=POSTPROCESSING,
gpu=GPU_ACCELERATION
)
......@@ -18,25 +18,17 @@ class MaximumCurvature (Extractor):
Based on N. Miura, A. Nagasaka, and T. Miyatake, Extraction of Finger-Vein
Pattern Using Maximum Curvature Points in Image Profiles. Proceedings on IAPR
conference on machine vision applications, 9 (2005), pp. 347--350
"""
Parameters:
sigma (int, Optional): Sigma used for determining derivatives
def __init__(
self,
sigma = 5, #Sigma used for determining derivatives
gpu = False
):
"""
# call base class constructor
Extractor.__init__(
self,
sigma = sigma,
gpu = gpu
)
# block parameters
def __init__(self, sigma = 5):
Extractor.__init__(self, sigma = sigma)
self.sigma = sigma
self.gpu = gpu
def maximum_curvature(self, image, mask):
......@@ -65,11 +57,11 @@ class MaximumCurvature (Extractor):
# Do the actual filtering
fx = utils.imfilter(image, hx, self.gpu, conv=False)
fxx = utils.imfilter(image, hxx, self.gpu, conv=False)
fy = utils.imfilter(image, hy, self.gpu, conv=False)
fyy = utils.imfilter(image, hyy, self.gpu, conv=False)
fxy = utils.imfilter(image, hxy, self.gpu, conv=False)
fx = utils.imfilter(image, hx, conv=False)
fxx = utils.imfilter(image, hxx, conv=False)
fy = utils.imfilter(image, hy, conv=False)
fyy = utils.imfilter(image, hyy, conv=False)
fxy = utils.imfilter(image, hxy, conv=False)
f1 = 0.5*numpy.sqrt(2)*(fx + fy) # \ #
f2 = 0.5*numpy.sqrt(2)*(fx - fy) # / #
......@@ -263,7 +255,7 @@ class MaximumCurvature (Extractor):
def __call__(self, image):
"""Reads the input image, extract the features based on Maximum Curvature of the fingervein image, and writes the resulting template"""
finger_image = image[0] #Normalized image with or without histogram equalization
finger_image = image[0] #Normalized image with or without histogram equalization
finger_mask = image[1]
return self.maximum_curvature(finger_image, finger_mask)
......@@ -20,7 +20,6 @@ class PrincipalCurvature (Extractor):
self,
sigma = 2, # Gaussian standard deviation applied
threshold = 1.3, # Percentage of maximum used for hard thresholding
gpu = False,
):
# call base class constructor
......@@ -28,13 +27,11 @@ class PrincipalCurvature (Extractor):
self,
sigma = sigma,
threshold = threshold,
gpu = gpu,
)
# block parameters
self.sigma = sigma
self.threshold = threshold
self.gpu = gpu
def principal_curvature(self, image, mask):
......
......@@ -3,6 +3,7 @@ from .NormalisedCrossCorrelation import NormalisedCrossCorrelation
from .PrincipalCurvature import PrincipalCurvature
from .RepeatedLineTracking import RepeatedLineTracking
from .WideLineDetector import WideLineDetector
from .MaximumCurvature import MaximumCurvature
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
This diff is collapsed.
......@@ -2,7 +2,14 @@
# vim: set fileencoding=utf-8 :
"""Test Units
"""Unit tests against references extracted from
Matlab code from Bram Ton available on the matlab central website:
https://www.mathworks.com/matlabcentral/fileexchange/35754-wide-line-detector
This code implements the detector described in [HDLTL10] (see the references in
the generated sphinx documentation)
"""
import os
......@@ -24,8 +31,6 @@ def F(parts):
def test_finger_crop():
#Test finger vein image preprocessors
input_filename = F(('preprocessors', '0019_3_1_120509-160517.png'))
output_img_filename = F(('preprocessors',
'0019_3_1_120509-160517_img_lee_huang.mat'))
......@@ -35,10 +40,9 @@ def test_finger_crop():
img = bob.io.base.load(input_filename)
from bob.bio.vein.preprocessors.FingerCrop import FingerCrop
FC = FingerCrop(4, 40, False, False)
#FC = FingerCrop(4, 40, False, 5, 0.2, False)
preprocess = FingerCrop(fingercontour='leemaskMatlab')
output_img, finger_mask_norm, finger_mask2, spoofingValue = FC(img)
output_img, finger_mask_norm = preprocess(img)
# Load Matlab reference
output_img_ref = bob.io.base.load(output_img_filename)
......@@ -143,11 +147,3 @@ def test_miura_match():
score_imp = MM.score(template_vein, probe_imp_vein)
assert numpy.isclose(score_imp, 0.172906739278421)
if False: #testing gpu enabled calculations
MM = MiuraMatch(ch=18, cw=28, gpu=True)
score_gen = MM.score(template_vein, probe_gen_vein)
assert numpy.isclose(score_gen, 0.382689335394127)
score_imp = MM.score(template_vein, probe_imp_vein)
assert numpy.isclose(score_imp, 0.172906739278421)
......@@ -8,23 +8,37 @@ import bob.sp
import bob.core
def imfilter(a, b, gpu=False, conv=True):
"""imfilter function based on MATLAB implementation."""
def imfilter(a, b, conv=True):
"""Applies a 2D filtering between images
if (a.dtype == numpy.uint8):
a= bob.core.convert(a,numpy.float64,(0,1))
M, N = a.shape
if conv == True:
This implementation was created to work exactly like the Matlab one.
Parameters:
a (numpy.ndarray): A 2-dimensional :py:class:`numpy.ndarray` which
represents the image to be filtered. The dtype of the array is supposed
to be 64-floats. You can also pass an 8-bit unsigned integer array,
loaded from a file (for example). In this case it will be scaled as
with :py:func:`bob.core.convert` and the range reset to ``[0.0, 1.0]``.
b (numpy.ndarray): A 64-bit float 2-dimensional :py:class:`numpy.ndarray`
which represents the filter to be applied to the image
conv (bool, Optional): If set, then rotates the filter ``b`` by 180 degrees
before applying it to the image ``a``, with
:py:func:`bob.ip.base.rotate`.
"""
if a.dtype == numpy.uint8:
a = bob.core.convert(a, numpy.float64, (0,1))
if conv:
b = bob.ip.base.rotate(b, 180)
shape = numpy.array((0,0))
shape[0] = a.shape[0] + b.shape[0] - 1
shape[1] = a.shape[1] + b.shape[1] - 1
shape = (a.shape[0] + b.shape[0] - 1, a.shape[1] + b.shape[1] - 1)
a_ext = numpy.ndarray(shape=shape, dtype=numpy.float64)
bob.sp.extrapolate_nearest(a, a_ext)
if gpu == True:
import xbob.cusp
return xbob.cusp.conv(a_ext, b)
else:
return scipy.signal.convolve2d(a_ext, b, 'valid')
#return = self.convfft(a_ext, b)
return scipy.signal.convolve2d(a_ext, b, 'valid')
......@@ -7,6 +7,7 @@ extensions = bob.buildout
eggs = bob.bio.vein
bob.bio.base
bob.db.base
bob.measure
bob.extension
gridtk
develop = src/bob.db.vera
......
......@@ -75,7 +75,8 @@ Usually it is a good idea to have at least verbose level 2 (i.e., calling
In the remainder of this section we introduce baseline experiments you can
readily run with this tool without further configuration.
readily run with this tool without further configuration. Baselines exemplified
in this guide were published in [TVM14]_.
Repeated Line-Tracking with Miura Matching
......@@ -84,24 +85,51 @@ Repeated Line-Tracking with Miura Matching
You can find the description of this method on the paper from Miura *et al.*
[MNM04]_.
To run the baseline on the `VERA fingervein`_ database, using the ``1vsAll``
protocol (1-fold cross-validation), do the following:
To run the baseline on the `VERA fingervein`_ database, using the ``nom``
protocol (called ``Full`` in [TVM14]_), do the following:
.. code-block:: sh
./bin/verify.py --database=vera --protocol=1vsAll --preprocessor=none --extractor=repeatedlinetracking --algorithm=match-rlt --sub-directory="vera-1vsall-mnm04" --verbose --verbose
$ ./bin/verify.py --database=vera --protocol=nom --preprocessor=none --extractor=repeatedlinetracking --algorithm=match-rlt --sub-directory="rlt" --verbose --verbose
This command line selects the following implementations for the toolchain:
.. tip::
If you have more processing cores on your local machine and don't want to
submit your job for SGE execution, you can run it in parallel by adding the
option ``--parallel=N``, where ``N`` is the number of parallel processes.
This command line selects and runs the following implementations for the
toolchain:
* Database: Use the base Bob API for the VERA database implementation,
protocol variant ``nom`` which corresponds to the ``Full`` evaluation
protocol described in [TVM14]_
* Preprocessor: Simple finger cropping, with no extra pre-processing and no
histogram equalization, as defined in [LLP09]_
* Feature extractor: Repeated line tracking, as explained in [MNM04]_
* Matching algorithm: "Miura" matching, as explained on the same paper
* Subdirectory: This is the subdirectory in which the scores and intermediate
results of this baseline will be stored.
* Database: Use the base Bob API for the VERA database implementation,
protocol variant ``1vsAll`` which corresponds to the 1-fold
cross-validation evaluation protocol described in [TVM14]_
* Preprocessor: Simple finger cropping, with no extra pre-processing
* Feature extractor: Repeated line tracking, as explained in [MNM04]_
* Matching algorithm: "Miura" matching, as explained on the same paper
As the tool runs, you'll see printouts that show how it advances through
preprocessing, feature extraction and matching.
preprocessing, feature extraction and matching. To complete the evaluation,
run the commands below, which will output the equal error rate (EER) and plot
the detection error trade-off (DET) curve with the performance:
.. code-block:: sh
$ ./bin/bob_eval_threshold.py --scores <path-to>/vera/rlt/nom/nonorm/scores-dev --criterium=eer
('Threshold:', 0.32023322499999995)
FAR : 24.318% (46866/192720)
FRR : 24.318% (107/440)
HTER: 24.318%
$ ./bin/evaluate.py --dev-files <path-to>/vera/rlt/nom/nonorm/scores-dev --det det.pdf -l "vera-nom-mnm04" -rr
The Recognition Rate of the development set of 'rlt' is 48.409%
To view the DET curve stored in
$ xdg-open det.pdf #to view the DET curve
Available Resources
......
......@@ -57,12 +57,9 @@ setup(
# registered fingervein recognition algorithms
'bob.bio.algorithm': [
'match-wld = bob.bio.vein.configurations.algorithms:huangwl_tool',
'match-wld-gpu = bob.bio.vein.configurations.algorithms:huangwl_gpu_tool',
'match-mc = bob.bio.vein.configurations.algorithms:miuramax_tool',
'match-mc-gpu = bob.bio.vein.configurations.algorithms:miuramax_gpu_tool',
'match-rlt = bob.bio.vein.configurations.algorithms:miurarlt_tool',
'match-rlt-gpu = bob.bio.vein.configurations.algorithms:miurarlt_gpu_tool',
'match-wld = bob.bio.vein.configurations.algorithms:huangwl',
'match-mc = bob.bio.vein.configurations.algorithms:miuramax',
'match-rlt = bob.bio.vein.configurations.algorithms:miurarlt',
#'match-lbp = bob.bio.face.configurations.algorithms.lgbphs:tool',
],
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment