Commit dbbb3a78 authored by André Anjos's avatar André Anjos 💬

Revamped Teo's code to simplify it; add thorough tests

parent 413b80be
Pipeline #5184 failed with stages
in 3 minutes and 18 seconds
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <>
# Wed 20 July 14:43:22 CEST 2016
# Thu 03 Nov 2016 12:23:52 CET
"""Single sample API"""
Verification API for bob.db.voxforge
from import BioFile
# NOTE(review): this block is a mangled diff paste -- it interleaves two
# versions of the same class: an old three-argument constructor and a new
# single-argument constructor wrapping a low-level database file object.
# Reconstruct from the original repository before relying on it.
class VeinBioFile(BioFile):
# old-style constructor: forwards client/path/file ids to the BioFile base
def __init__(self, client_id, path, file_id):
Initializes this File object with an File equivalent for
VoxForge database.
super(VeinBioFile, self).__init__(client_id=client_id, path=path, file_id=file_id)
"""A "sample" object that is specific to vein recognition experiments
f (object): Low-level file (or sample) object that is kept inside
# new-style constructor: wraps the low-level file object ``f`` and keeps a
# reference to it (arguments to the super() call were lost in the paste)
def __init__(self, f):
super(VeinBioFile, self).__init__(
# keep copy of original low-level database file object
self.f = f
......@@ -3,8 +3,36 @@
# Tue 27 Sep 2016 16:48:57 CEST
from .database import VeinBioFile
from import BioDatabase
from import BioFile, BioDatabase
from import BioFile
# NOTE(review): truncated diff paste -- the super() call arguments and the
# body of ``roi()`` after the annotation read were elided; confirm against
# the original repository.
class VerafingerBioFile(BioFile):
Implements extra properties of vein files
f (object): Low-level file (or sample) object that is kept inside
# wraps the low-level file object ``f``, keeping a reference for later use
def __init__(self, f):
super(VerafingerBioFile, self).__init__(
self.f = f
def roi(self):
"""Returns the binary mask from the ROI annotations available"""
# reads the raw ROI polygon points from the low-level file object;
# the mask-building code that followed was lost in the paste
points = self.f.roi()
class VerafingerBioDatabase(BioDatabase):
......@@ -45,4 +73,4 @@ class VerafingerBioDatabase(BioDatabase):
self.low_level_group_names, self.high_level_group_names)
retval = self.__db.objects(groups=groups, protocol=protocol,
purposes=purposes, model_ids=model_ids, **kwargs)
return [VeinBioFile(client_id=f.model_id, path=f.path, for f in retval]
return [VeinBioFile(f) for f in retval]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""Utilities for preprocessing vein imagery"""
import numpy
def assert_points(area, points):
    """Checks all points fall within the determined shape region, inclusively

    This assertion function tests that all points given in ``points`` fall
    within a certain area provided in ``area``.


    Parameters:

      area (tuple): A tuple containing the size of the limiting area where
        the points should all be in.

      points (numpy.ndarray): A 2D numpy ndarray with any number of rows
        (points) and 2 columns (representing ``y`` and ``x`` coordinates
        respectively), or any type convertible to this format. This array
        contains the points that will be checked for conformity. In case one
        of the points doesn't fall into the determined area an assertion is
        raised.


    Raises:

      AssertionError: In case one of the input points does not fall within
        the area defined.

    """

    for k in points:
        # raise explicitly instead of using the ``assert`` statement so the
        # validation also runs when Python is executed with ``-O``
        if not (0 <= k[0] < area[0] and 0 <= k[1] < area[1]):
            raise AssertionError(
                "Point (%d, %d) is not inside the region determined by area "
                "(%d, %d)" % (k[0], k[1], area[0], area[1]))
def fix_points(area, points):
    """Checks/fixes all points so they fall within the determined shape region

    Points which are lying outside the determined area will be brought into
    the area by moving the offending coordinate to the border of the said
    area.


    Parameters:

      area (tuple): A tuple containing the size of the limiting area where
        the points should all be in.

      points (numpy.ndarray): A 2D :py:class:`numpy.ndarray` with any number
        of rows (points) and 2 columns (representing ``y`` and ``x``
        coordinates respectively), or any type convertible to this format.
        This array contains the points that will be checked/fixed for
        conformity. In case one of the points doesn't fall into the
        determined area, it is silently corrected so it does.


    Returns:

      numpy.ndarray: A **new** array of points with corrected coordinates

    """

    # numpy.array() already copies its input, so the extra ``.copy()`` the
    # original code chained on was redundant; the caller's data is untouched
    retval = numpy.array(points)
    retval[retval < 0] = 0  # floor at 0 for both axes
    y, x = retval[:, 0], retval[:, 1]
    y[y >= area[0]] = area[0] - 1  # clamp y to the last valid row
    x[x >= area[1]] = area[1] - 1  # clamp x to the last valid column
    return retval
def poly_to_mask(shape, points):
    """Generates a binary mask from a set of 2D points


    Parameters:

      shape (tuple): A tuple containing the size of the output mask in height
        and width, for Bob compatibility ``(y, x)``.

      points (list): A list of tuples containing the polygon points that form
        a region on the target mask. A line connecting these points will be
        drawn and all the points in the mask that fall on or within the
        polygon line, will be set to ``True``. All other points will have a
        value of ``False``.


    Returns:

      numpy.ndarray: A 2D numpy ndarray with ``dtype=bool`` with the mask
        generated with the determined shape, using the points for the
        polygon.

    """
    from PIL import Image, ImageDraw

    # n.b.: PIL images are (x, y), while Bob shapes are represented in (y, x)!
    mask = Image.new('L', (shape[1], shape[0]))

    # converts whatever comes in into a list of tuples for PIL
    fixed = tuple(map(tuple, numpy.roll(fix_points(shape, points), 1, 1)))

    # draws polygon
    ImageDraw.Draw(mask).polygon(fixed, fill=255)

    # ``numpy.bool`` was removed in numpy 1.24; the builtin ``bool`` is the
    # equivalent, forward-compatible dtype
    return numpy.array(mask, dtype=bool)
def mask_to_image(mask, dtype=numpy.uint8):
    """Converts a binary (boolean) mask into an integer or floating-point image

    This function converts a boolean binary mask into an image of the desired
    type by setting the points where ``False`` is set to 0 and points where
    ``True`` is set to the most adequate value taking into consideration the
    destination data type ``dtype``. Here are support types and their ranges:

      * numpy.uint8: ``[0, (2^8)-1]``
      * numpy.uint16: ``[0, (2^16)-1]``
      * numpy.uint32: ``[0, (2^32)-1]``
      * numpy.uint64: ``[0, (2^64)-1]``
      * numpy.float32: ``[0, 1.0]`` (fixed)
      * numpy.float64: ``[0, 1.0]`` (fixed)
      * numpy.float128: ``[0, 1.0]`` (fixed)

    All other types are currently unsupported.


    Parameters:

      mask (numpy.ndarray): A 2D numpy ndarray with boolean data type,
        containing the mask that will be converted into an image.

      dtype (numpy.dtype): A valid numpy data-type from the list above for
        the resulting image.


    Returns:

      numpy.ndarray: With the designated data type, containing the binary
        image formed from the mask.


    Raises:

      TypeError: If the type is not supported by this function

    """

    dtype = numpy.dtype(dtype)
    retval = mask.astype(dtype)

    if dtype in (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64):
        # stretch ``True`` to the maximum representable integer value
        retval[retval == 1] = numpy.iinfo(dtype).max

    # NOTE(review): the pasted diff raised TypeError on the *supported* float
    # types and silently returned on unsupported ones; per the documented
    # contract above, float images stay in the fixed [0, 1.0] range produced
    # by ``astype`` and only unlisted dtypes are rejected.
    # ``numpy.float128`` is platform-dependent (absent e.g. on Windows).
    elif dtype in (numpy.float32, numpy.float64, numpy.float128):
        pass  # astype() already yields 0.0/1.0

    else:
        raise TypeError("Data type %s is unsupported" % dtype)

    return retval
from .utils import ManualRoiCut
from .utils import ConstructVeinImage
from .utils import NormalizeImageRotation
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
# -*- coding: utf-8 -*-
Created on Fri Aug 5 17:12:41 2016
# import what is needed:
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
import scipy.ndimage
from scipy.signal import convolve2d
import scipy.ndimage.filters as fi
import os
import six
# NOTE(review): this class is a mangled paste -- indentation, docstring
# quotes and several ``else:``/``raise`` structure lines were lost, leaving
# unreachable ``return 1`` statements after ``raise``. Reconstruct from the
# original repository before relying on it. Comments below describe the
# apparent intent of each fragment.
class ManualRoiCut():
Class for manual roi extraction -- ``ManualRoiCut``.
annotation (``File``, :py:class:`list`): The name of annotation file, with full path containing
ROI annotation data (``Bob`` format, ``(x, y)``) **or** the list of annotation
points (tuples) in ``Bob`` format -- ``(x, y)``. A *fail-safe* operation is implemented
ensuring that the annotation points are inside the image to be annotated.
image (``File``, :py:class:`numpy.ndarray`), optional: The name of the image to be annotation -
full path or image data as :py:class:`numpy.ndarray`. Image is an optional parameter
because it isn't needed to generate ROI binary mask.
sizes (``tuple``): optional - a tuple of image size in ``Bob`` format ``(x,y)``.
This parameter is used **if** no image is given to generate binary mask.
A ``uint8`` :py:class:`numpy.ndarray` 2D array (image) containing ROI mask.
Value ``1`` determines ROI area, value ``0`` -- outside ROI area. ``uint8``
is chosen so that annotations could be used in the ```` platform
(there seems to be problems when saving / loading ``bool`` objects).
- generate ROI mask::
from import ManualRoiCut
roi = ManualRoiCut(roi_annotation_points).roi_mask()
- replace image's outside-ROI with value ``pixel_value``::
from import ManualRoiCut
image_cutted = ManualRoiCut(roi_annotation_points, image).roi_image(pixel_value=0)
# constructor: accepts an annotation (file path or point list), an optional
# image (file path or ndarray) and a fall-back ``sizes`` tuple used when no
# image is given
def __init__(self,annotation, image = None, sizes = (480, 480)):
if image is not None:
# image given as a path string: load it from disk if it exists
if isinstance(image, six.string_types):
if os.path.exists(image):
# NOTE(review): the image-loading call was lost in the paste
image =
self.image = np.array(image)
# NOTE(review): error message mentions ``annotation`` but this branch
# validates ``image`` -- likely a copy/paste slip in the original;
# the ``return 1`` after ``raise`` is unreachable
raise IOError("Doesn't exist file: {}".format(annotation))
return 1
# image given directly as array data
self.image = np.array(image)
self.size_y = self.image.shape[0]
self.size_x = self.image.shape[1]
# no image: fall back to the ``sizes`` tuple (Bob (x, y) order)
self.image = None
self.size_y = sizes[1]
self.size_x = sizes[0]
# annotation given as a path string: load points from a text file
if isinstance(annotation, six.string_types):
if os.path.exists(annotation):
with open(annotation,'r') as f:
retval = np.loadtxt(f, ndmin=2)
# convert from Bob (x, y) to (y, x) order, clamping via __test_size__
self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in retval])
raise IOError("Doesn' t exist file: {}".format(annotation))
return 1
else :
# Convert from Bob format(x,y) to regular (y, x)
self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in annotation])
# fail-safe clamp: values in [0, max) pass through; values in [max, 60000)
# clamp to max; anything else (negative or huge) becomes 0
def __test_size__(self, test_value, max_value):
if test_value >= 0 and test_value < max_value:
return test_value
elif test_value >= 0 and test_value < 60000:
return max_value
return 0
def roi_mask(self):
"""Method ``roi_mask`` - generates ROI mask.
A ``uint8`` :py:class:`numpy.ndarray` 2D array (image)
containing ROI mask. Value ``1`` determines ROI area, ``0`` -- outside
ROI area.
# draw the annotation polygon on a blank PIL image, then binarize;
# NOTE(review): the ``Image.new`` call name was lost in the paste
mask ='L', (self.size_x, self.size_y), 0)
ImageDraw.Draw(mask).polygon(self.annotation, outline=1, fill=1)
mask = np.array(mask, dtype = np.uint8)
mask = 0 < mask
return mask
def roi_image(self, pixel_value = 0):
"""Method roi_image - replaces outside ROI pixel values with ``pixel_value``
(default - 0).
pixel_value (``integer``): if given, outside-ROI region is replaced with this
value. By default replaced with 0.
A copy of image that class was initialized with, outside ROI pixel
values are replaced with ``pixel_value``.
if self.image is not None:
# zero (or set) everything outside the ROI, in place on self.image
mask = self.roi_mask()
self.image[mask == 0] = pixel_value
return self.image
# NOTE(review): ``return 1`` after ``raise`` is unreachable
raise IOError("No input image given, can't perform non-ROI region removal")
return 1
def ConstructVeinImage(annotation_dictionary, center = False):
    """Constructs a binary image from manual annotations.

    The function is made to be used with the ``bob.db.biowave_v1`` database.
    The returned 2D array (see return value, below) corresponds to a person's
    vein pattern, marked by a human expert.


    Parameters:

      annotation_dictionary (:py:class:`dict`): Dictionary containing image
        and annotation data, as returned by the high level
        ``bob.db.biowave_v1`` implementation. It is supposed to contain the
        fields ``image``, ``roi_annotations`` and ``vein_annotations``,
        although only ``image.shape[0]``, ``image.shape[1]`` and
        ``vein_annotations`` are used.

      center (:py:class:`bool`): Flag; if set to ``True``, annotations are
        centered.


    Returns:

      :py:class:`numpy.ndarray`: A 2D array with ``uint8`` values - value
        ``1`` represents the annotated vein object. Each line's points are
        connected and a 5 pixel wide line is drawn. After all lines are
        drawn, the image is smoothed with a 5x5 median filter.

    Example to import the utils and run the function::

        from import ConstructVeinImage
        vein_image = ConstructVeinImage(annotation_dictionary, center = True)
    """
    image = annotation_dictionary["image"]
    vein_annotations = annotation_dictionary["vein_annotations"]

    # NOTE(review): PIL sizes are (width, height) == (x, y); the original
    # passed (shape[0], shape[1]) == (y, x).  The target database images are
    # square, so the original order is preserved here -- confirm before using
    # with non-square input.
    im = Image.new('L', (image.shape[0], image.shape[1]), (0))
    draw = ImageDraw.Draw(im)

    # when centering, shift all points so the annotation centroid lands at
    # pixel (239, 239); otherwise draw the points where they are
    if center:
        xes_all = [point[1] for line in vein_annotations for point in line]
        yes_all = [point[0] for line in vein_annotations for point in line]
        x_off = 239 - np.round(np.mean(xes_all))
        y_off = 239 - np.round(np.mean(yes_all))
    else:
        x_off = 0
        y_off = 0

    for line in vein_annotations:
        xes = [point[1] + x_off for point in line]
        yes = [point[0] + y_off for point in line]
        # connect consecutive annotation points with a 5 pixel wide line
        for k in range(len(line) - 1):
            draw.line((xes[k], yes[k], xes[k + 1], yes[k + 1]),
                      fill=1, width=5)

    # smooth the drawn lines with a 5x5 median filter
    im = im.filter(ImageFilter.MedianFilter(5))
    return np.array(im, dtype=np.uint8)
# help functions for the ``NormalizeImageRotation`` function
def __rotate_point__(x, y, angle):
    """Rotates point(s) ``(x, y)`` by ``angle`` degrees around the origin.

    [xp, yp] = __rotate_point__(x, y, angle)

    ``x`` and ``y`` may be scalars or equal-length lists of coordinates.
    Results are rounded to the nearest integer.

    Raises IOError when ``x`` and ``y`` are lists of different lengths.
    """
    # hoist the loop-invariant trigonometry out of the per-point work
    c = np.cos(np.radians(angle))
    s = np.sin(np.radians(angle))

    if type(x) is list:
        if len(x) != len(y):
            raise IOError("Length of x and y should be equal")
        # BUGFIX: the original fell through to the scalar return, calling
        # int() on a list and raising TypeError; return the rounded lists
        xp = [int(np.round(x[nr] * c - y[nr] * s)) for nr in range(len(x))]
        yp = [int(np.round(y[nr] * c + x[nr] * s)) for nr in range(len(x))]
        return xp, yp

    xp = x * c - y * s
    yp = y * c + x * s
    return int(np.round(xp)), int(np.round(yp))
def __guss_mask__(guss_size=27, sigma=6):
    """Returns a 2D Gaussian kernel array.

    Builds a unit impulse at the kernel center and Gaussian-filters it, which
    yields a (truncated) ``guss_size`` x ``guss_size`` Gaussian kernel.
    """
    inp = np.zeros((guss_size, guss_size))
    inp[guss_size // 2, guss_size // 2] = 1
    # use scipy.ndimage directly: the ``scipy.ndimage.filters`` namespace
    # (imported as ``fi`` in this module) is deprecated and removed in
    # recent SciPy releases; the module already imports ``scipy.ndimage``
    return scipy.ndimage.gaussian_filter(inp, sigma)
def __ramp__(a):
    """Half-wave rectifier: returns a copy of ``a`` with negatives set to 0."""
    # np.array() copies, so the caller's data is never modified
    rectified = np.array(a)
    return np.clip(rectified, 0, None)
# NOTE(review): mangled paste -- indentation, the ``else`` of the Z sign
# selection, and the continuation lines of the ``amplitude_rez`` expression
# (ends in ``\`` at its first line) were lost. Reconstruct from the original
# repository; comments below describe the visible intent.
def __vein_filter__(image, a = 3, b = 4, sigma = 4, guss_size = 15, only_lines = True, dark_lines = True):
Vein filter
# Z selects the line polarity: +1 searches dark lines, -1 bright ones
if dark_lines == True:
Z = 1
Z = -1
# coerce non-ndarray input to a float array
# NOTE(review): ``np.float`` is removed in numpy 1.24+; use ``float``
if type(image) != np.ndarray:
image = np.array(image, dtype = np.float)
# pad so the shifted-window reads below stay in bounds, then pre-smooth
padsize = 2*a+b
gaussian_mask = __guss_mask__(guss_size, sigma)
f2 = np.lib.pad(image, ((padsize, padsize), (padsize, padsize)), 'edge')
f2 = convolve2d(f2, gaussian_mask, mode='same')
# accumulate a complex response over oriented probes (every 5.625 deg)
result = np.zeros(image.shape)
for angle in np.arange(0,179,11.25 / 2):
# masks 1..10 are the smoothed image sampled at ten probe offsets,
# each offset rotated by ``angle`` around the current pixel
[ap, bp] = __rotate_point__(-b,-2*a, angle)
mask_1 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(-b,-1*a, angle)
mask_2 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(-b, 0, angle)
mask_3 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(-b, 1*a, angle)
mask_4 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(-b, 2*a, angle)
mask_5 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(+b,-2*a, angle)
mask_6 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(+b,-1*a, angle)
mask_7 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(+b, 0, angle)
mask_8 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(+b, 1*a, angle)
mask_9 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
[ap, bp] = __rotate_point__(+b, 2*a, angle)
mask_10 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
# amplitude response at this orientation (expression truncated in paste)
amplitude_rez = __ramp__(Z*(mask_1+mask_5+mask_6+mask_10)*3 \
# if requested, suppress responses that are not line-like: every inner
# probe must be below both outer probes on its side
if only_lines == True:
col = np.zeros((6,image.shape[0], image.shape[1]))
col[0] = np.minimum(__ramp__(-Z*mask_2+Z*mask_1),__ramp__(-Z*mask_2+Z*mask_5))
col[1] = np.minimum(__ramp__(-Z*mask_3+Z*mask_1),__ramp__(-Z*mask_3+Z*mask_5))
col[2] = np.minimum(__ramp__(-Z*mask_4+Z*mask_1),__ramp__(-Z*mask_4+Z*mask_5))
col[3] = np.minimum(__ramp__(-Z*mask_7+Z*mask_6),__ramp__(-Z*mask_7+Z*mask_10))
col[4] = np.minimum(__ramp__(-Z*mask_8+Z*mask_6),__ramp__(-Z*mask_8+Z*mask_10))
col[5] = np.minimum(__ramp__(-Z*mask_9+Z*mask_6),__ramp__(-Z*mask_9+Z*mask_10))
angle_rez = np.min(col, axis = 0)
amplitude_rez[angle_rez==0] = 0
# encode orientation in the phase of a complex accumulator (doubled
# angle so opposite directions reinforce rather than cancel)
result = result + amplitude_rez*np.exp(1j*2*(angle - 90)*np.pi/180)
# halve the phase back to the original angle range, keep magnitude
result = np.abs(result) * np.exp(1j*np.angle(result)/2)
return result
def __get_rotatation_angle__(image, dark_lines = False):
    """Returns the rotation angle in deg.

    angle = get_rotatation_angle(image)

    Estimates the dominant line orientation by vein-filtering the image and
    averaging the phase angle of the strongest responses (those above half
    the maximum magnitude).
    """
    # BUGFIX: forward the ``dark_lines`` flag -- it was previously hard-coded
    # to ``False`` in this call, silently ignoring the caller's choice of
    # line polarity
    result = __vein_filter__(image, a = 4, b = 1, sigma = 2, guss_size = 15,
                             only_lines = True, dark_lines = dark_lines)
    # keep only strong responses (magnitude above half of the maximum)
    result_nonzero = result[np.abs(result) > np.abs(result).max() / 2]
    result_angle = np.angle(result_nonzero, deg=True)
    angle = result_angle.mean()
    return angle
def __rotate_image__(image, angle):
    """Rotates ``image`` by ``angle`` degrees (same output shape).

    image = rotate_image(image, angle)

    Pixels introduced at the borders are filled with 0 and interpolated
    values are clamped back into the [0, 255] range.
    """
    rotated = scipy.ndimage.rotate(image, angle, reshape=False, cval=0)
    # interpolation can overshoot the original value range; clamp in place
    np.clip(rotated, 0, 255, out=rotated)
    return rotated
def __align_image__(image, precision = 0.5, iterations = 25, dark_lines = False):
    """Iteratively rotates ``image`` until its estimated orientation error is
    within ``precision`` degrees, or ``iterations`` attempts are exhausted.

    [image, rotation_angle, angle_error] = align_image(image, precision = 0.5, iterations = 25)

    Returns the (possibly rotated) image, the total rotation applied and the
    final residual angle error.
    """
    damping = 0.33  # apply only a third of the estimated error per step
    rotation_angle = 0
    angle_error = __get_rotatation_angle__(image, dark_lines)

    # already aligned well enough -- nothing to do
    if abs(angle_error) <= precision:
        return image, rotation_angle, angle_error

    for step in range(iterations):
        correction = angle_error * damping
        rotation_angle = rotation_angle + correction
        image = __rotate_image__(image, correction)
        angle_error = __get_rotatation_angle__(image, dark_lines)
        # stop once converged, or return whatever we have on the last pass
        if abs(angle_error) <= precision or step == iterations - 1:
            return image, rotation_angle, angle_error
def NormalizeImageRotation(image, dark_lines = False):
    """Automatically rotates an image by a self-determined angle.

    So far tested only with annotations (binary images). The algorithm
    iteratively searches for a rotation angle such that, when the image is
    filtered with the ``vein filter`` (as published in BIOSIG 2015), the
    ``mean`` filtered image's vector angle (for the pixels in the filtered
    image with a magnitude of at least 1/2 of the maximal value of the
    filtered image) is within ``+/- 0.5`` [deg].


    Parameters:

      image (:py:class:`numpy.ndarray`): A 2D array containing the input
        image. Currently tested only with binary images.

      dark_lines (:py:class:`bool`): A flag (default value - ``False``) that
        determines what kind of lines the algorithm is going to search for to
        align the image. With the default value ``False`` it will search for
        *whiter than background* lines (as is the case with annotations). If
        set to ``True`` -- it will search for *darker than background* lines
        (as is the case with vein images).


    Returns:

      :py:class:`numpy.ndarray`: A 2D array with the rotated input image.

    Example to import the utils and use the function::

        from import NormalizeImageRotation
        image = NormalizeImageRotation(image, dark_lines = False)
    """
    # NOTE(review): the docstring above lost its triple quotes in the pasted
    # diff, leaving bare expressions; restored here. The logic is unchanged.
    [rotated_image, rotation_angle, angle_error] = __align_image__(image = image, dark_lines = dark_lines)
    # preserve the caller's dtype (scipy's rotate may promote it)
    rotated_image = np.array(rotated_image, dtype = image.dtype)
    return rotated_image
11 91
8 322
76 320
114 307
140 300
176 292
225 292
269 288
330 287
405 288
436 290
456 288
468 276
473 242
472 208
470 184
466 146
455 116
440 93
424 77
397 69
358 64
298 60
247 52
201 38
160 25
130 7
106 7
81 16
46 46
22 71
......@@ -93,7 +93,7 @@ def test_finger_crop():
assert numpy.mean(numpy.abs(preproc - preproc_ref)) < 1.3e2
def test_miuramax():
def test_max_curvature():
#Maximum Curvature method against Matlab reference
......@@ -118,7 +118,7 @@ def test_miuramax():
assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 8e-3
def test_miurarlt():
def test_repeated_line_tracking():
#Repeated Line Tracking method against Matlab reference
......@@ -143,7 +143,7 @@ def test_miurarlt():
assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 0.5
def test_huangwl():
def test_wide_line_detector():
#Wide Line Detector method against Matlab reference
......@@ -187,67 +187,135 @@ def test_miura_match():
score_imp = MM.score(template_vein, probe_imp_vein)
assert numpy.isclose(score_imp, 0.172906739278421)
def test_manualRoiCut():
Test ManualRoitCut
from import ManualRoiCut
image_path = F(('preprocessors', '0019_3_1_120509-160517.png'))
annotation_path = F(('preprocessors', '0019_3_1_120509-160517.txt'))
c = ManualRoiCut(annotation_path, image_path)
mask_1 = c.roi_mask()
image_1 = c.roi_image()
# create mask using size:
c = ManualRoiCut(annotation_path, sizes=(672,380))
mask_2 = c.roi_mask()