Commit 37d62f41 authored by André Anjos's avatar André Anjos

Merge branch 'fv3d' into 'master'

3DFV and multiple fixes. See merge request !35
parents 249ba342 2bb9408f
Pipeline #13143 passed with stages
in 14 minutes 58 seconds
include README.rst bootstrap-buildout.py buildout.cfg COPYING
recursive-include doc *.py *.rst
recursive-include bob/bio/vein/tests *.png *.mat *.txt *.npy
recursive-include bob/bio/vein/tests *.png *.mat *.txt *.hdf5
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import numpy
import skimage.feature
from bob.bio.base.algorithm import Algorithm
class Correlate (Algorithm):
    """Correlate probe and model without cropping

    The method is based on "cross-correlation" between a model and a probe image.
    The difference between this and :py:class:`MiuraMatch` is that **no**
    cropping takes place on this implementation. We simply fill the excess
    boundary with zeros and extract the valid correlation region between the
    probe and the model using :py:func:`skimage.feature.match_template`.
    """

    def __init__(self):
        # call base class constructor; disable built-in fusion of multiple
        # model/probe scores -- we average over models ourselves in score()
        Algorithm.__init__(
            self,
            multiple_model_scoring = None,
            multiple_probe_scoring = None
            )

    def enroll(self, enroll_features):
        """Enrolls the model by stacking all enrollment features

        Parameters:

            enroll_features (list): A list of 2D ``numpy.ndarray`` images

        Returns:

            numpy.ndarray: The stacked enrollment images, one per entry
        """
        # return the generated model
        return numpy.array(enroll_features)

    def score(self, model, probe):
        """Computes the score between the probe and the model.

        Parameters:

            model (numpy.ndarray): The model of the user to test the probe
                against

            probe (numpy.ndarray): The probe to test

        Returns:

            float: The normalized cross-correlation peak, averaged over all
                model images -- a larger value means a better match
        """
        I = probe.astype(numpy.float64)

        if model.ndim == 2:
            # a single enrollment image: normalize to a 3D stack of models
            model = numpy.array([model])

        scores = []

        # iterate over all models for a given individual
        for md in model:
            R = md.astype(numpy.float64)
            Nm = skimage.feature.match_template(I, R)
            # the score for this model is the peak of the normalized
            # cross-correlation surface (equivalent to indexing at argmax)
            scores.append(Nm.max())

        return numpy.mean(scores)
......@@ -47,8 +47,8 @@ class MiuraMatch (Algorithm):
"""
def __init__(self,
ch = 8, # Maximum search displacement in y-direction
cw = 5, # Maximum search displacement in x-direction
ch = 80, # Maximum search displacement in y-direction
cw = 90, # Maximum search displacement in x-direction
):
# call base class constructor
......@@ -94,8 +94,6 @@ class MiuraMatch (Algorithm):
if len(model.shape) == 2:
model = numpy.array([model])
n_models = model.shape[0]
scores = []
# iterate over all models for a given individual
......@@ -103,7 +101,7 @@ class MiuraMatch (Algorithm):
# erode model by (ch, cw)
R = md.astype(numpy.float64)
h, w = R.shape
h, w = R.shape #same as I
crop_R = R[self.ch:h-self.ch, self.cw:w-self.cw]
# correlates using scipy - fastest option available iff the self.ch and
......@@ -127,6 +125,6 @@ class MiuraMatch (Algorithm):
# normalizes the output by the number of pixels lit on the input
# matrices, taking into consideration the surface that produced the
# result (i.e., the eroded model and part of the probe)
scores.append(Nmm/(sum(sum(crop_R)) + sum(sum(I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw]))))
scores.append(Nmm/(crop_R.sum() + I[t0:t0+h-2*self.ch, s0:s0+w-2*self.cw].sum()))
return numpy.mean(scores)
This diff is collapsed.
from .MiuraMatch import MiuraMatch
from .MiuraMatchRotationFast import MiuraMatchRotationFast
from .Correlate import Correlate
from .HammingDistance import HammingDistance
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not on the import module.

    Parameters:

        *args: An iterable of objects to modify

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    # re-home each object so Sphinx autodoc attributes it to this package
    for obj in args:
        obj.__module__ = __name__
# re-home the public classes on this package for Sphinx autodoc
__appropriate__(MiuraMatch, MiuraMatchRotationFast, Correlate, HammingDistance)

# export every non-private name defined above
__all__ = [name for name in dir() if not name.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""`3D Fingervein`_ is a database for biometric fingervein recognition
The `3D Fingervein`_ Database for finger vein recognition consists of 13614
images from 141 subjects collected in various acquisition campaigns.
You can download the raw data of the `3D Fingervein`_ database by following
the link.
"""
from ..database.fv3d import Database
_fv3d_directory = "[YOUR_FV3D_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
database = Database(
original_directory = _fv3d_directory,
original_extension = '.png',
)
"""The :py:class:`bob.bio.base.database.BioDatabase` derivative with fv3d
database settings
.. warning::
This class only provides a programmatic interface to load data in an orderly
manner, respecting usage protocols. It does **not** contain the raw
datafiles. You should procure those yourself.
Notice that ``original_directory`` is set to ``[YOUR_FV3D_DIRECTORY]``. You
must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this value
to the place where you actually installed the `3D Fingervein`_ Database, as
explained in the section :ref:`bob.bio.vein.baselines`.
"""
protocol = 'central'
"""The default protocol to use for tests
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``verify.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''Grid configurations for bob.bio.vein'''
import bob.bio.base

# every processing stage runs on the same number of parallel SGE slots
_SLOTS = 48

# every stage is submitted to the same queue configuration
_QUEUE = '4G-io-big'

grid = bob.bio.base.grid.Grid(
    number_of_preprocessing_jobs=_SLOTS,
    number_of_extraction_jobs=_SLOTS,
    number_of_projection_jobs=_SLOTS,
    number_of_enrollment_jobs=_SLOTS,
    number_of_scoring_jobs=_SLOTS,
    training_queue=_QUEUE,
    preprocessing_queue=_QUEUE,
    extraction_queue=_QUEUE,
    projection_queue=_QUEUE,
    enrollment_queue=_QUEUE,
    scoring_queue=_QUEUE,
)
'''Defines an SGE grid configuration for running at Idiap
This grid configuration will use 48 slots for each of the stages defined below.
The queue ``4G-io-big`` corresponds to the following settings:
* ``queue``: ``q1d`` (in this queue you have a maximum of 48 slots according
to: https://secure.idiap.ch/intranet/system/computing/ComputationGrid
* ``memfree``: ``4G`` (this is the minimum amount of memory you can take -
the lower, the more probable your job will be allocated faster)
* ``io_big``: SET (this flag must be set so your job runs downstairs and not
on people's workstations
Notice the queue names do not directly correspond SGE grid queue names. These
are names only known to :py:mod:`bob.bio.base.grid` and are translated
from there to settings which are finally passed to ``gridtk``.
To use this configuration file, just add it to your ``verify.py`` commandline.
For example::
$ verify.py <other-options> gridio4g48
'''
......@@ -20,13 +20,20 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""
from ..preprocessor import FingerCrop
preprocessor = FingerCrop()
from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
NoFilter, Preprocessor
preprocessor = Preprocessor(
crop=NoCrop(),
mask=TomesLeeMask(),
normalize=HuangNormalization(),
filter=NoFilter(),
)
"""Preprocessing using gray-level based finger cropping and no post-processing
"""
from ..extractor import MaximumCurvature
extractor = MaximumCurvature(sigma = 5)
extractor = MaximumCurvature()
"""Features are the output of the maximum curvature algorithm, as described on
[MNM05]_.
......@@ -36,7 +43,7 @@ Defaults taken from [TV13]_.
# Notice the values of ch and cw are different than those from the
# repeated-line tracking baseline.
from ..algorithm import MiuraMatch
algorithm = MiuraMatch(ch=80, cw=90)
algorithm = MiuraMatch()
"""Miura-matching algorithm with specific settings for search displacement
Defaults taken from [TV13]_.
......
......@@ -20,28 +20,21 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""
from ..preprocessor import FingerCrop
preprocessor = FingerCrop()
from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
NoFilter, Preprocessor
preprocessor = Preprocessor(
crop=NoCrop(),
mask=TomesLeeMask(),
normalize=HuangNormalization(),
filter=NoFilter(),
)
"""Preprocessing using gray-level based finger cropping and no post-processing
"""
from ..extractor import RepeatedLineTracking
# Maximum number of iterations
NUMBER_ITERATIONS = 3000
# Distance between tracking point and cross section of profile
DISTANCE_R = 1
# Width of profile
PROFILE_WIDTH = 21
extractor = RepeatedLineTracking(
iterations=NUMBER_ITERATIONS,
r=DISTANCE_R,
profile_w=PROFILE_WIDTH,
seed=0, #Sets numpy.random.seed() to this value
)
extractor = RepeatedLineTracking()
"""Features are the output of repeated-line tracking, as described on [MNM04]_.
Defaults taken from [TV13]_.
......
......@@ -19,11 +19,11 @@ You can download the raw data of the `UTFVP`_ database by following the link.
from ..database.utfvp import Database
utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
_utfvp_directory = "[YOUR_UTFVP_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
database = Database(
original_directory = utfvp_directory,
original_directory = _utfvp_directory,
original_extension = '.png',
)
"""The :py:class:`bob.bio.base.database.BioDatabase` derivative with UTFVP settings
......
......@@ -10,18 +10,16 @@ Occidentale in Sion, in Switzerland. The reference citation is [TVM14]_.
You can download the raw data of the `VERA Fingervein`_ database by following
the link.
.. include:: links.rst
"""
from ..database.verafinger import Database
verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
_verafinger_directory = "[YOUR_VERAFINGER_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
database = Database(
original_directory = verafinger_directory,
original_directory = _verafinger_directory,
original_extension = '.png',
)
"""The :py:class:`bob.bio.base.database.BioDatabase` derivative with Verafinger
......
......@@ -20,27 +20,21 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""
from ..preprocessor import FingerCrop
preprocessor = FingerCrop()
from ..preprocessor import NoCrop, TomesLeeMask, HuangNormalization, \
NoFilter, Preprocessor
preprocessor = Preprocessor(
crop=NoCrop(),
mask=TomesLeeMask(),
normalize=HuangNormalization(),
filter=NoFilter(),
)
"""Preprocessing using gray-level based finger cropping and no post-processing
"""
from ..extractor import WideLineDetector
# Radius of the circular neighbourhood region
RADIUS_NEIGHBOURHOOD_REGION = 5
NEIGHBOURHOOD_THRESHOLD = 1
#Sum of neigbourhood threshold
SUM_NEIGHBOURHOOD = 41
RESCALE = True
extractor = WideLineDetector(
radius=RADIUS_NEIGHBOURHOOD_REGION,
threshold=NEIGHBOURHOOD_THRESHOLD,
g=SUM_NEIGHBOURHOOD,
rescale=RESCALE
)
extractor = WideLineDetector()
"""Features are the output of the maximum curvature algorithm, as described on
[HDLTL10]_.
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''Database definitions for Vein Recognition'''
import numpy
class AnnotatedArray(numpy.ndarray):
    """Defines a numpy array subclass that can carry its own metadata

    Copied from: https://docs.scipy.org/doc/numpy-1.12.0/user/basics.subclassing.html#slightly-more-realistic-example-attribute-added-to-existing-array
    """

    def __new__(cls, input_array, metadata=None):
        # view-cast the input into our subclass, then attach the metadata
        # dictionary (an empty one when none is supplied)
        instance = numpy.asarray(input_array).view(cls)
        instance.metadata = dict() if metadata is None else metadata
        return instance

    def __array_finalize__(self, obj):
        # called on explicit construction (obj is None), view casting and
        # slicing -- propagate the source array's metadata when available
        if obj is None:
            return
        self.metadata = getattr(obj, 'metadata', dict())
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Fri 13 Jan 2017 14:46:06 CET
import numpy
from bob.bio.base.database import BioFile, BioDatabase
from . import AnnotatedArray
from ..preprocessor.utils import poly_to_mask
class File(BioFile):
    """
    Implements extra properties of vein files for the 3D Fingervein database

    Parameters:

        f (object): Low-level file (or sample) object that is kept inside
    """

    def __init__(self, f):
        super(File, self).__init__(client_id=f.finger.unique_name, path=f.path,
            file_id=f.id)
        # keep a handle to the low-level file so RoI annotations can be
        # queried lazily at load time
        self.__f = f

    def load(self, *args, **kwargs):
        """(Overrides base method) Loads both image and mask"""
        image = super(File, self).load(*args, **kwargs)
        # rotate the stored image 90 degrees clockwise before use
        image = numpy.rot90(image, -1)

        if not self.__f.has_roi():
            return image

        else:
            roi = self.__f.roi()

        # calculates the 90 degrees anti-clockwise rotated RoI points
        # NOTE(review): for an exact clockwise point rotation the new column
        # would be ``h - 1 - y`` rather than ``h - y`` -- possible off-by-one;
        # confirm against the annotation convention of the low-level database
        w, h = image.shape
        roi = [(x,h-y) for (y,x) in roi]

        return AnnotatedArray(image, metadata=dict(roi=roi))
class Database(BioDatabase):
    """
    Implements verification API for querying the 3D Fingervein database.
    """

    def __init__(self, **kwargs):
        super(Database, self).__init__(name='fv3d', **kwargs)
        from bob.db.fv3d.query import Database as LowLevelDatabase
        self.__db = LowLevelDatabase()

        # mapping between low-level (bob.db.fv3d) and high-level
        # (bob.bio.base) group names, index-aligned
        self.low_level_group_names = ('train', 'dev', 'eval')
        self.high_level_group_names = ('world', 'dev', 'eval')

    def groups(self):
        """Returns the high-level names of all groups on this database"""
        available = self.__db.groups()
        return self.convert_names_to_highlevel(available,
            self.low_level_group_names, self.high_level_group_names)

    def client_id_from_model_id(self, model_id, group='dev'):
        """Required as ``model_id != client_id`` on this database"""
        return self.__db.finger_name_from_model_id(model_id)

    def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
        """Returns the model identifiers for the given groups and protocol"""
        lowlevel = self.convert_names_to_lowlevel(groups,
            self.low_level_group_names, self.high_level_group_names)
        return self.__db.model_ids(groups=lowlevel, protocol=protocol)

    def objects(self, groups=None, protocol=None, purposes=None,
        model_ids=None, **kwargs):
        """Returns the selected samples, wrapped as high-level File objects"""
        lowlevel = self.convert_names_to_lowlevel(groups,
            self.low_level_group_names, self.high_level_group_names)
        samples = self.__db.objects(groups=lowlevel, protocol=protocol,
            purposes=purposes, model_ids=model_ids, **kwargs)
        return [File(f) for f in samples]

    def annotations(self, file):
        # this database carries no extra annotations at this level
        return None
......@@ -42,9 +42,14 @@ class Database(BioDatabase):
model_ids=None, **kwargs):
retval = self._db.objects(groups=groups, protocol=protocol,
purposes=purposes, model_ids=model_ids, **kwargs)
purposes=purposes, model_ids=model_ids, **kwargs)
return [File(f) for f in retval]
def annotations(self, file):
return None
def client_id_from_model_id(self, model_id, group='dev'):
"""Required as ``model_id != client_id`` on this database"""
return self._db.get_client_id_from_model_id(model_id)
......@@ -5,6 +5,9 @@
from bob.bio.base.database import BioFile, BioDatabase
from . import AnnotatedArray
from ..preprocessor.utils import poly_to_mask
class File(BioFile):
"""
......@@ -20,23 +23,16 @@ class File(BioFile):
def __init__(self, f):
super(File, self).__init__(client_id=f.unique_finger_name, path=f.path,
file_id=f.id)
file_id=f.id)
self.__f = f
def mask(self):
"""Returns the binary mask from the ROI annotations available"""
from ..preprocessor.utils import poly_to_mask
# The size of images in this database is (250, 665) pixels (h, w)
return poly_to_mask((250, 665), self.__f.roi())
def load(self, *args, **kwargs):
"""(Overrides base method) Loads both image and mask"""
image = super(File, self).load(*args, **kwargs)
return image, self.mask()
roi = self.__f.roi()
return AnnotatedArray(image, metadata=dict(roi=roi))
class Database(BioDatabase):
......@@ -56,28 +52,32 @@ class Database(BioDatabase):
def groups(self):
return self.convert_names_to_highlevel(self._db.groups(),
self.low_level_group_names, self.high_level_group_names)
self.low_level_group_names, self.high_level_group_names)
def client_id_from_model_id(self, model_id, group='dev'):
"""Required as ``model_id != client_id`` on this database"""
return self._db.finger_name_from_model_id(model_id)
def model_ids_with_protocol(self, groups=None, protocol=None, **kwargs):
groups = self.convert_names_to_lowlevel(groups,
self.low_level_group_names, self.high_level_group_names)
self.low_level_group_names, self.high_level_group_names)
return self._db.model_ids(groups=groups, protocol=protocol)
def objects(self, groups=None, protocol=None, purposes=None,
model_ids=None, **kwargs):
groups = self.convert_names_to_lowlevel(groups,
self.low_level_group_names, self.high_level_group_names)
self.low_level_group_names, self.high_level_group_names)
retval = self._db.objects(groups=groups, protocol=protocol,
purposes=purposes, model_ids=model_ids, **kwargs)
purposes=purposes, model_ids=model_ids,
**kwargs)
return [File(f) for f in retval]
def annotations(self, file):
return None
return None
This diff is collapsed.
This diff is collapsed.
from .FingerCrop import FingerCrop
from .crop import Cropper, FixedCrop, NoCrop
from .mask import Padder, Masker, FixedMask, NoMask, AnnotatedRoIMask
from .mask import KonoMask, LeeMask, TomesLeeMask, WatershedMask
from .normalize import Normalizer, NoNormalization, HuangNormalization
from .filters import Filter, NoFilter, HistogramEqualization
from .preprocessor import Preprocessor
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not on the import module.

    Parameters:

        *args: An iterable of objects to modify

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """
    # re-home each object so Sphinx autodoc attributes it to this package
    for obj in args:
        obj.__module__ = __name__
# re-home the public preprocessing classes on this package for Sphinx autodoc
__appropriate__(
    Cropper, FixedCrop, NoCrop,
    Padder, Masker, FixedMask, NoMask, AnnotatedRoIMask,
    KonoMask, LeeMask, TomesLeeMask, WatershedMask,
    Normalizer, NoNormalization, HuangNormalization,
    Filter, NoFilter, HistogramEqualization,
    Preprocessor,
)

# export every non-private name defined above
__all__ = [name for name in dir() if not name.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''Base utilities for pre-cropping images'''
import numpy
class Cropper(object):
    """This is the base class for all croppers

    It defines the minimum requirements for all derived cropper classes.
    """

    def __init__(self):
        pass

    def __call__(self, image):
        """Overwrite this method to implement your cropping method

        Parameters:

            image (numpy.ndarray): A 2D numpy array of type ``uint8`` with the
                input image

        Returns:

            numpy.ndarray: A 2D numpy array of the same type as the input, with
                cropped rows and columns as per request

        """
        # BUGFIX: ``NotImplemented`` is a comparison sentinel, not an
        # exception -- raising it fails with ``TypeError``; the correct
        # exception for an abstract slot is ``NotImplementedError``
        raise NotImplementedError('You must implement the __call__ slot')
class FixedCrop(Cropper):
"""Implements cropping using a fixed suppression of border pixels
The defaults supress no lines from the image and returns an image like the
original. If an :py:class:`bob.bio.vein.database.AnnotatedArray` is passed,
then we also check for its ``.metadata['roi']`` component and correct it so
that annotated RoI points are consistent on the cropped image.
.. note::
Before choosing values, note you're responsible for knowing what is the
orientation of images fed into this cropper.