diff --git a/bob/bio/vein/database/database.py b/bob/bio/vein/database/database.py
index 6e78633b77722d413a3334f7c8a6ff968f997083..2d43d4331eb1dd9783860c487b5a8945258e1240 100644
--- a/bob/bio/vein/database/database.py
+++ b/bob/bio/vein/database/database.py
@@ -1,19 +1,29 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Wed 20 July 14:43:22 CEST 2016
+# Thu 03 Nov 2016 12:23:52 CET
+
+"""Single sample API"""
-"""
-  Verification API for bob.db.voxforge
-"""
 
 from bob.bio.base.database.file import BioFile
 
 
 class VeinBioFile(BioFile):
-    def __init__(self, client_id, path, file_id):
-        """
-        Initializes this File object with an File equivalent for
-        VoxForge database.
-        """
-        super(VeinBioFile, self).__init__(client_id=client_id, path=path, file_id=file_id)
+    """A "sample" object that is specific to vein recognition experiments
+
+
+    Parameters:
+
+      f (object): Low-level file (or sample) object that is kept inside
+
+    """
+
+    def __init__(self, f):
+        super(VeinBioFile, self).__init__(
+            client_id=f.model_id,
+            path=f.path,
+            file_id=f.id,
+            )
+
+        # keep copy of original low-level database file object
+        self.f = f
diff --git a/bob/bio/vein/database/verafinger.py b/bob/bio/vein/database/verafinger.py
index 23d3ff269d74257aeeb20af97616b10633fb7fb8..d3f49fddea72a2ca2189574d913e76fe7cf28175 100644
--- a/bob/bio/vein/database/verafinger.py
+++ b/bob/bio/vein/database/verafinger.py
@@ -3,8 +3,36 @@
 # Tue 27 Sep 2016 16:48:57 CEST
 
-from .database import VeinBioFile
-from bob.bio.base.database import BioDatabase
+from bob.bio.base.database import BioFile, BioDatabase
+from bob.bio.base.database.file import BioFile
+
+
+class VerafingerBioFile(BioFile):
+    """
+    Implements extra properties of vein files
+
+
+    Parameters:
+
+      f (object): Low-level file (or sample) object that is kept inside
+
+    """
+
+    def __init__(self, f):
+
+        super(VerafingerBioFile, self).__init__(
+            client_id=f.model_id,
+            path=f.path,
+            file_id=f.id,
+        )
+        self.f = f
+
+
+    def roi(self):
+        """Returns the binary mask from the ROI annotations available"""
+
+        points = self.f.roi()
+
 
 
 class VerafingerBioDatabase(BioDatabase):
@@ -45,4 +73,4 @@ class VerafingerBioDatabase(BioDatabase):
             self.low_level_group_names, self.high_level_group_names)
         retval = self.__db.objects(groups=groups, protocol=protocol, purposes=purposes, model_ids=model_ids, **kwargs)
 
-        return [VeinBioFile(client_id=f.model_id, path=f.path, file_id=f.id) for f in retval]
+        return [VeinBioFile(f) for f in retval]
diff --git a/bob/bio/vein/preprocessor/utils.py b/bob/bio/vein/preprocessor/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..590390afe43801e185de8ebbde6ebec79249a6c7
--- /dev/null
+++ b/bob/bio/vein/preprocessor/utils.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+"""Utilities for preprocessing vein imagery"""
+
+import numpy
+
+
+def assert_points(area, points):
+  """Checks all points fall within the determined shape region, inclusively
+
+  This assertion function tests that all points given in ``points`` fall
+  within a certain area provided in ``area``.
+
+
+  Parameters:
+
+    area (tuple): A tuple containing the size of the limiting area where the
+      points should all be in.
+
+    points (numpy.ndarray): A 2D numpy ndarray with any number of rows (points)
+      and 2 columns (representing ``y`` and ``x`` coordinates respectively), or
+      any type convertible to this format. This array contains the points that
+      will be checked for conformity.
+      In case one of the points doesn't fall into the determined area, an
+      assertion is raised.
+
+
+  Raises:
+
+    AssertionError: In case one of the input points does not fall within the
+      area defined.
+
+  """
+
+  for k in points:
+    assert 0 <= k[0] < area[0] and 0 <= k[1] < area[1], \
+        "Point (%d, %d) is not inside the region determined by area " \
+        "(%d, %d)" % (k[0], k[1], area[0], area[1])
+
+
+def fix_points(area, points):
+  """Checks/fixes all points so they fall within the determined shape region
+
+  Points which are lying outside the determined area will be brought into the
+  area by moving the offending coordinate to the border of the said area.
+
+
+  Parameters:
+
+    area (tuple): A tuple containing the size of the limiting area where the
+      points should all be in.
+
+    points (numpy.ndarray): A 2D :py:class:`numpy.ndarray` with any number of
+      rows (points) and 2 columns (representing ``y`` and ``x`` coordinates
+      respectively), or any type convertible to this format. This array
+      contains the points that will be checked/fixed for conformity. In case
+      one of the points doesn't fall into the determined area, it is silently
+      corrected so it does.
+
+
+  Returns:
+
+    numpy.ndarray: A **new** array of points with corrected coordinates
+
+  """
+
+  retval = numpy.array(points).copy()
+
+  retval[retval<0] = 0 #floor at 0 for both axes
+  y, x = retval[:,0], retval[:,1]
+  y[y>=area[0]] = area[0] - 1
+  x[x>=area[1]] = area[1] - 1
+
+  return retval
+
+
+def poly_to_mask(shape, points):
+  """Generates a binary mask from a set of 2D points
+
+
+  Parameters:
+
+    shape (tuple): A tuple containing the size of the output mask in height and
+      width, for Bob compatibility ``(y, x)``.
+
+    points (list): A list of tuples containing the polygon points that form a
+      region on the target mask. A line connecting these points will be drawn
+      and all the points in the mask that fall on or within the polygon line,
+      will be set to ``True``. All other points will have a value of ``False``.
+
+
+  Returns:
+
+    numpy.ndarray: A 2D numpy ndarray with ``dtype=bool`` with the mask
+      generated with the determined shape, using the points for the polygon.
+
+  """
+  from PIL import Image, ImageDraw
+
+  # n.b.: PIL images are (x, y), while Bob shapes are represented in (y, x)!
+  mask = Image.new('L', (shape[1], shape[0]))
+
+  # converts whatever comes in into a list of tuples for PIL
+  fixed = tuple(map(tuple, numpy.roll(fix_points(shape, points), 1, 1)))
+
+  # draws polygon
+  ImageDraw.Draw(mask).polygon(fixed, fill=255)
+
+  return numpy.array(mask, dtype=numpy.bool)
+
+
+def mask_to_image(mask, dtype=numpy.uint8):
+  """Converts a binary (boolean) mask into an integer or floating-point image
+
+  This function converts a boolean binary mask into an image of the desired
+  type by setting the points where ``False`` is set to 0 and points where
+  ``True`` is set to the most adequate value taking into consideration the
+  destination data type ``dtype``. Here are the supported types and their
+  ranges:
+
+    * numpy.uint8: ``[0, (2^8)-1]``
+    * numpy.uint16: ``[0, (2^16)-1]``
+    * numpy.uint32: ``[0, (2^32)-1]``
+    * numpy.uint64: ``[0, (2^64)-1]``
+    * numpy.float32: ``[0, 1.0]`` (fixed)
+    * numpy.float64: ``[0, 1.0]`` (fixed)
+    * numpy.float128: ``[0, 1.0]`` (fixed)
+
+  All other types are currently unsupported.
+
+
+  Parameters:
+
+    mask (numpy.ndarray): A 2D numpy ndarray with boolean data type, containing
+      the mask that will be converted into an image.
+
+    dtype (numpy.dtype): A valid numpy data-type from the list above for the
+      resulting image
+
+
+  Returns:
+
+    numpy.ndarray: With the designated data type, containing the binary image
+      formed from the mask.
+
+
+  Raises:
+
+    TypeError: If the type is not supported by this function
+
+  """
+
+  dtype = numpy.dtype(dtype)
+  retval = mask.astype(dtype)
+
+  if dtype in (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64):
+    retval[retval == 1] = numpy.iinfo(dtype).max
+
+  elif dtype in (numpy.float32, numpy.float64, numpy.float128):
+    pass
+
+  else:
+    raise TypeError("Data type %s is unsupported" % dtype)
+
+  return retval
diff --git a/bob/bio/vein/preprocessor/utils/__init__.py b/bob/bio/vein/preprocessor/utils/__init__.py
deleted file mode 100644
index 9992cb1d2637003c25066c4930c6b9962eac2333..0000000000000000000000000000000000000000
--- a/bob/bio/vein/preprocessor/utils/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .utils import ManualRoiCut
-from .utils import ConstructVeinImage
-from .utils import NormalizeImageRotation
-
-# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/vein/preprocessor/utils/utils.py b/bob/bio/vein/preprocessor/utils/utils.py
deleted file mode 100644
index 00d749c5e433f1a32742d73e2140e70137c866d6..0000000000000000000000000000000000000000
--- a/bob/bio/vein/preprocessor/utils/utils.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Aug 5 17:12:41 2016
-"""
-
-# import what is needed:
-import numpy as np
-from PIL import Image, ImageDraw, ImageFilter
-import scipy.ndimage
-from scipy.signal import convolve2d
-import scipy.ndimage.filters as fi
-import os
-import six
-
-
-class ManualRoiCut():
-    """
-    Class for manual roi extraction -- ``ManualRoiCut``.
-
-    Args:
-        annotation (``File``, :py:class:`list`): The name of annotation file, with full path containing
-            ROI annotation data (``Bob`` format, ``(x, y)``) **or** the list of annotation
-            points (tuples) in ``Bob`` format -- ``(x, y)``. A *fail-safe* operation is implemented
-            ensuring that the annotation points are inside the image to be annotated.
-        image (``File``, :py:class:`numpy.ndarray`), optional: The name of the image to be annotation -
-            full path or image data as :py:class:`numpy.ndarray`. Image is an optional parameter
-            because it isn't needed to generate ROI binary mask.
-        sizes (``tuple``): optional - a tuple of image size in ``Bob`` format ``(x,y)``.
-            This parameter is used **if** no image is given to generate binary mask.
-
-    Returns:
-        A ``uint8`` :py:class:`numpy.ndarray` 2D array (image) containing ROI mask.
-        Value ``1`` determines ROI area, value ``0`` -- outside ROI area. ``uint8``
-        is chosen so that annotations could be used in the ``bob.bio.vein`` platform
-        (there seems to be problems when saving / loading ``bool`` objects).
-
-    Examples:
-
-        generate ROI mask::
-
-            from bob.bio.vein.preprocessors.utils import ManualRoiCut
-            roi = ManualRoiCut(roi_annotation_points).roi_mask()
-
-
-        replace image's outside-ROI with value ``pixel_value``::
-
-            from bob.bio.vein.preprocessors.utils import ManualRoiCut
-            image_cutted = ManualRoiCut(roi_annotation_points, image).roi_image(pixel_value=0)
-    """
-    def __init__(self,annotation, image = None, sizes = (480, 480)):
-        if image is not None:
-            if isinstance(image, six.string_types):
-                if os.path.exists(image):
-                    image = Image.open(image)
-                    self.image = np.array(image)
-                else:
-                    raise IOError("Doesn't exist file: {}".format(annotation))
-                    return 1
-            else:
-                self.image = np.array(image)
-            self.size_y = self.image.shape[0]
-            self.size_x = self.image.shape[1]
-        else:
-            self.image = None
-            self.size_y = sizes[1]
-            self.size_x = sizes[0]
-        if isinstance(annotation, six.string_types):
-            if os.path.exists(annotation):
-                with open(annotation,'r') as f:
-                    retval = np.loadtxt(f, ndmin=2)
-                self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in retval])
-            else:
-                raise IOError("Doesn' t exist file: {}".format(annotation))
-                return 1
-        else :
-            # Convert from Bob format(x,y) to regular (y, x)
-            self.annotation = list([tuple([self.__test_size__(k[1],self.size_y), self.__test_size__(k[0],self.size_x)]) for k in annotation])
-
-
-    def __test_size__(self, test_value, max_value):
-        if test_value >= 0 and test_value < max_value:
-            return test_value
-        elif test_value >= 0 and test_value < 60000:
-            return max_value
-        else:
-            return 0
-
-
-    def roi_mask(self):
-        """Method ``roi_mask`` - generates ROI mask.
-
-        Returns:
-            A ``uint8`` :py:class:`numpy.ndarray` 2D array (image)
-            containing ROI mask. Value ``1`` determines ROI area, ``0`` -- outside
-            ROI area.
-        """
-        mask = Image.new('L', (self.size_x, self.size_y), 0)
-        ImageDraw.Draw(mask).polygon(self.annotation, outline=1, fill=1)
-        mask = np.array(mask, dtype = np.uint8)
-        mask = 0 < mask
-        return mask
-
-
-    def roi_image(self, pixel_value = 0):
-        """Method roi_image - replaces outside ROI pixel values with ``pixel_value``
-        (default - 0).
-
-        Args:
-            pixel_value (``integer``): if given, outside-ROI region is replaced with this
-                value. By default replaced with 0.
-
-        Returns:
-            A copy of image that class was initialized with, outside ROI pixel
-            values are replaced with ``pixel_value``.
-        """
-        if self.image is not None:
-            mask = self.roi_mask()
-            self.image[mask == 0] = pixel_value
-            return self.image
-        else:
-            raise IOError("No input image given, can't perform non-ROI region removal")
-            return 1
-
-
-def ConstructVeinImage(annotation_dictionary, center = False):
-    """
-    Constructs a binary image from manual annotations. The class is made to be used with
-    the ``bob.db.biowave_v1`` database.
-
-    The returned 2D array (see ``return value``, below) corresponds to a person's
-    vein pattern, marked by human-expert.
-
-    Args:
-        annotation_dictionary (:py:class:`dict`): Dictionary containing image and annotation data.
-            Such :py:class:`dict` can be returned by the high level ``bob.db.biowave_v1``
-            implementation of the ``bob.db.biowave_v1`` database. It is supposed to contain
-            fields (as can be returned by the ``bob.db.biowave_v1`` high level implementation):
-
-            - ``image``
-            - ``roi_annotations``
-            - ``vein_annotations``
-
-            Although only the ``image.shape[0]``, ``image.shape[1]`` and variable
-            ``vein_annotations`` are used.
-        center (:py:class:`bool`): Flag, if set to ``True``, annotations are centered.
-
-    Returns:
-        :py:class:`numpy.ndarray` : A 2D array with ``uint8`` values - value ``1``
-        represents annotated vein object. The output image is constructed using
-        annotation information - points.
-        Each line's points are connected and 5 pixels wide line is drawn. After
-        all lines are drawn, lines are smoothed using Median filter with
-        size 5x5 pixels.
-
-    Examples:
-        Example to import the utils and run the function::
-
-            from bob.bio.vein.preprocessors.utils import ConstructVeinImage
-            vein_image = ConstructVeinImage(annotation_dictionary, center = True)
-    """
-    image = annotation_dictionary["image"]
-    #roi_annotations = annotation_dictionary["roi_annotations"]
-    vein_annotations = annotation_dictionary["vein_annotations"]
-
-    im = Image.new('L', (image.shape[0], image.shape[1]), (0))
-    draw = ImageDraw.Draw(im)
-    if center == True:
-        xes_all = [point[1] for line in vein_annotations for point in line]
-        yes_all = [point[0] for line in vein_annotations for point in line]
-        for line in vein_annotations:
-            xes = [point[1] - np.round(np.mean(xes_all)) + 239 for point in line]
-            yes = [point[0] - np.round(np.mean(yes_all)) + 239 for point in line]
-            for point in range(len(line) - 1):
-                draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=1, width = 5)
-    else:
-        for line in vein_annotations:
-            xes = [point[1] for point in line]
-            yes = [point[0] for point in line]
-            for point in range(len(line) - 1):
-                draw.line((xes[point],yes[point], xes[point+1], yes[point+1]), fill=1, width = 5)
-    im = im.filter(ImageFilter.MedianFilter(5))
-    im = np.array(im, dtype = np.uint8)
-    return im
-
-
-# help functions for the ``NormalizeImageRotation`` function
-def __rotate_point__(x,y, angle):
-    """
-    [xp, yp] = __rotate_point__(x,y, angle)
-    """
-    if type(x) is list:
-        if len(x) != len(y):
-            raise IOError("Length of x and y should be equal")
-        xp = []
-        yp = []
-        for nr in range(len(x)):
-            xp.append(x[nr] * np.cos(np.radians(angle)) - y[nr] * np.sin(np.radians(angle)))
-            yp.append(y[nr] * np.cos(np.radians(angle)) + x[nr] * np.sin(np.radians(angle)))
-    else:
-        xp = x * np.cos(np.radians(angle)) - y * np.sin(np.radians(angle))
-        yp = y * np.cos(np.radians(angle)) + x * np.sin(np.radians(angle))
-
-    return int(np.round(xp)), int(np.round(yp))
-
-
-def __guss_mask__(guss_size=27, sigma=6):
-    """Returns a 2D Gaussian kernel array."""
-    inp = np.zeros((guss_size, guss_size))
-    inp[guss_size//2, guss_size//2] = 1
-    return fi.gaussian_filter(inp, sigma)
-
-
-def __ramp__(a):
-    a = np.array(a)
-    a[a < 0]=0
-    return a
-
-
-def __vein_filter__(image, a = 3, b = 4, sigma = 4, guss_size = 15, only_lines = True, dark_lines = True):
-    """
-    Vein filter
-    """
-    if dark_lines == True:
-        Z = 1
-    else:
-        Z = -1
-
-    if type(image) != np.ndarray:
-        image = np.array(image, dtype = np.float)
-
-    padsize = 2*a+b
-    gaussian_mask = __guss_mask__(guss_size, sigma)
-
-    f2 = np.lib.pad(image, ((padsize, padsize), (padsize, padsize)), 'edge')
-    f2 = convolve2d(f2, gaussian_mask, mode='same')
-
-    result = np.zeros(image.shape)
-
-    for angle in np.arange(0,179,11.25 / 2):
-        [ap, bp] = __rotate_point__(-b,-2*a, angle)
-        mask_1 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(-b,-1*a, angle)
-        mask_2 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(-b, 0, angle)
-        mask_3 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(-b, 1*a, angle)
-        mask_4 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(-b, 2*a, angle)
-        mask_5 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(+b,-2*a, angle)
-        mask_6 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(+b,-1*a, angle)
-        mask_7 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(+b, 0, angle)
-        mask_8 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(+b, 1*a, angle)
-        mask_9 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        [ap, bp] = __rotate_point__(+b, 2*a, angle)
-        mask_10 = f2[padsize+ap:-padsize+ap,padsize+bp:-padsize+bp]
-
-        amplitude_rez = __ramp__(Z*(mask_1+mask_5+mask_6+mask_10)*3 \
-                       -Z*(mask_2+mask_3+mask_4+mask_7+mask_8+mask_9)*2)
-
-        if only_lines == True:
-            col = np.zeros((6,image.shape[0], image.shape[1]))
-            col[0] = np.minimum(__ramp__(-Z*mask_2+Z*mask_1),__ramp__(-Z*mask_2+Z*mask_5))
-            col[1] = np.minimum(__ramp__(-Z*mask_3+Z*mask_1),__ramp__(-Z*mask_3+Z*mask_5))
-            col[2] = np.minimum(__ramp__(-Z*mask_4+Z*mask_1),__ramp__(-Z*mask_4+Z*mask_5))
-            col[3] = np.minimum(__ramp__(-Z*mask_7+Z*mask_6),__ramp__(-Z*mask_7+Z*mask_10))
-            col[4] = np.minimum(__ramp__(-Z*mask_8+Z*mask_6),__ramp__(-Z*mask_8+Z*mask_10))
-            col[5] = np.minimum(__ramp__(-Z*mask_9+Z*mask_6),__ramp__(-Z*mask_9+Z*mask_10))
-            angle_rez = np.min(col, axis = 0)
-            amplitude_rez[angle_rez==0] = 0
-
-        result = result + amplitude_rez*np.exp(1j*2*(angle - 90)*np.pi/180)
-
-    result = np.abs(result) * np.exp(1j*np.angle(result)/2)
-    return result
-
-
-def __get_rotatation_angle__(image, dark_lines = False):
-    """
-    angle = get_rotatation_angle(image)
-
-    Returns the rotation angle in deg.
-    """
-    result = __vein_filter__(image, a = 4, b = 1, sigma = 2, guss_size = 15, only_lines = True, dark_lines = False)
-    result_nonzero = result[np.abs(result) > np.abs(result).max() / 2]
-    result_angle = np.angle(result_nonzero, deg=True)
-    angle = result_angle.mean()
-    return angle
-
-
-def __rotate_image__(image, angle):
-    """
-    image = rotate_image(image, angle)
-    """
-    image = scipy.ndimage.rotate(image, angle, reshape = False, cval=0)
-    image[image > 255] = 255
-    image[image < 0] = 0
-    return image
-
-
-def __align_image__(image, precision = 0.5, iterations = 25, dark_lines = False):
-    """
-    [image, rotation_angle, angle_error] = align_image(image, precision = 0.5, iterations = 25)
-    """
-    rotation_angle = 0
-    angle_error = __get_rotatation_angle__(image, dark_lines)
-    if abs(angle_error) <= precision:
-        return image, rotation_angle, angle_error
-    for k in range(iterations):
-        rotation_angle = rotation_angle + (angle_error * 0.33)
-        image = __rotate_image__(image, angle_error * 0.33)
-        angle_error = __get_rotatation_angle__(image, dark_lines)
-        #print(rotation_angle)
-        if abs(angle_error) <= precision or k == iterations - 1:
-            return image, rotation_angle, angle_error
-
-
-def NormalizeImageRotation(image, dark_lines = False):
-    """
-    function ``NormalizeImageRotation`` - automatically rotates image by a self-defined angle.
-
-    So far tested only with annotations (binary images). Algorithm iteratively
-    searches for rotation angle such that when image is filtered with the
-    ``vein filter`` (As published in the BIOSIG 2015), the ``mean`` filtered
-    image's vector angle (for the pixels in filtered image with a magnitude at least 1/2 of the
-    maximal value of the filtered image) is ``+/- 0.5`` [deg].
-
-    Args:
-        image (:py:class:`numpy.ndarray`) : A 2D array containing input image.
-            Currently tested only with binary images.
-        dark_lines (:py:class:`bool`) : A flag (default value - ``False``)
-            that determines what kind of lines algorithm is going to search for to
-            align the image. With default value ``False`` it will search for *whiter than
-            background* lines (as is the case with annotations). If set
-            to ``True`` -- will search for *darker than background* lines
-            (as is the case with vein images).
-
-    Returns:
-        :py:class:`numpy.ndarray` : A 2D array with rotated input image
-
-    Examples:
-        Example to import the utils and use the function::
-
-            from bob.bio.vein.preprocessors.utils import NormalizeImageRotation
-            image = NormalizeImageRotation(image, dark_lines = False)
-    """
-    [rotated_image, rotation_angle, angle_error] = __align_image__(image = image, dark_lines = dark_lines)
-    rotated_image = np.array(rotated_image, dtype = image.dtype)
-    return rotated_image
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy
deleted file mode 100644
index 33c4f26d2a7bb119f5bea4750e7daa67d3759744..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.npy and /dev/null differ
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png
deleted file mode 100644
index 3c87424906f75834b90c9158454101dc27bed444..0000000000000000000000000000000000000000
Binary files a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.png and /dev/null differ
diff --git a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt b/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt
deleted file mode 100644
index 83beedf262d2f40b46b17c4b0d248f33574954c6..0000000000000000000000000000000000000000
--- a/bob/bio/vein/tests/preprocessors/ConstructAnnotations.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-11 91
-8 322
-76 320
-114 307
-140 300
-176 292
-225 292
-269 288
-330 287
-405 288
-436 290
-456 288
-468 276
-473 242
-472 208
-470 184
-466 146
-455 116
-440 93
-424 77
-397 69
-358 64
-298 60
-247 52
-201 38
-160 25
-130 7
-106 7
-81 16
-46 46
-22 71
diff --git a/bob/bio/vein/tests/test.py b/bob/bio/vein/tests/test.py
index e55d10537c97622c6c5006856ccb2830e3534187..d1bb88fda68c2269bc32f8ad19002c46b3ce3a69 100644
--- a/bob/bio/vein/tests/test.py
+++ b/bob/bio/vein/tests/test.py
@@ -93,7 +93,7 @@ def test_finger_crop():
   assert numpy.mean(numpy.abs(preproc - preproc_ref)) < 1.3e2
 
 
-def test_miuramax():
+def test_max_curvature():
 
   #Maximum Curvature method against Matlab reference
 
@@ -118,7 +118,7 @@
   assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 8e-3
 
 
-def test_miurarlt():
+def test_repeated_line_tracking():
 
   #Repeated Line Tracking method against Matlab reference
 
@@ -143,7 +143,7 @@
   assert numpy.mean(numpy.abs(output_img - output_img_ref)) < 0.5
 
 
-def test_huangwl():
+def test_wide_line_detector():
 
   #Wide Line Detector method against Matlab reference
 
@@ -187,67 +187,135 @@ def test_miura_match():
   score_imp = MM.score(template_vein, probe_imp_vein)
   assert numpy.isclose(score_imp, 0.172906739278421)
 
-
-def test_manualRoiCut():
-    """
-    Test ManualRoitCut
-    """
-    from bob.bio.vein.preprocessor.utils import ManualRoiCut
-    image_path = F(('preprocessors', '0019_3_1_120509-160517.png'))
-    annotation_path = F(('preprocessors', '0019_3_1_120509-160517.txt'))
-
-    c = ManualRoiCut(annotation_path, image_path)
-    mask_1 = c.roi_mask()
-    image_1 = c.roi_image()
-    # create mask using size:
-    c = ManualRoiCut(annotation_path, sizes=(672,380))
-    mask_2 = c.roi_mask()
-
-    # loading image:
-    image = bob.io.base.load(image_path)
-    c = ManualRoiCut(annotation_path, image)
-    mask_3 = c.roi_mask()
-    image_3 = c.roi_image()
-    # load text file:
-    with open(annotation_path,'r') as f:
-        retval = numpy.loadtxt(f, ndmin=2)
-
-    # carefully -- this is BOB format --- (x,y)
-    annotation = list([tuple([k[0], k[1]]) for k in retval])
-    c = ManualRoiCut(annotation, image)
-    mask_4 = c.roi_mask()
-    image_4 = c.roi_image()
-
-    assert (mask_1 == mask_2).all()
-    assert (mask_1 == mask_3).all()
-    assert (mask_1 == mask_4).all()
-    assert (image_1 == image_3).all()
-    assert (image_1 == image_4).all()
-
-def test_ConstructAnnotations():
-    """
-    Test ConstructAnnotations preprocessor
-    """
-    image_filename = F( ( 'preprocessors', 'ConstructAnnotations.png' ) )
-    roi_annotations_filename = F( ( 'preprocessors', 'ConstructAnnotations.txt' ) )
-    vein_annotations_filename = F( ( 'preprocessors', 'ConstructAnnotations.npy' ) )
-
-    image = bob.io.base.load( image_filename )
-    roi_annotations = np.loadtxt(roi_annotations_filename, dtype='uint16')
-    roi_annotations = [tuple([point[0], point[1]]) for point in roi_annotations]
-    fp = open(vein_annotations_filename, 'rb')
-    vein_annotations = np.load(fp)
-    vein_annotations = vein_annotations['arr_0'].tolist()
-    fp.close()
-    vein_annotations = [[tuple([point[0], point[1]]) for point in line] for line in vein_annotations]
-
-    annotation_dictionary = {"image" : image, "roi_annotations" : roi_annotations, "vein_annotations" : vein_annotations}
-    from bob.bio.vein.preprocessor.utils import ConstructVeinImage
-    from bob.bio.vein.preprocessor.utils import NormalizeImageRotation
-    output = ConstructVeinImage(annotation_dictionary, center = True)
-    output = NormalizeImageRotation(output, dark_lines = False)
-    assert np.array_equal(output, image)
-
-
-
-
+
+
+def test_assert_points():
+
+  # Tests that point assertion works as expected
+  from ..preprocessor import utils
+
+  area = (10, 5)
+  inside = [(0,0), (3,2), (9, 4)]
+  utils.assert_points(area, inside) #should not raise
+
+  def _check_outside(point):
+    # should raise, otherwise it is an error
+    try:
+      utils.assert_points(area, [point])
+    except AssertionError as e:
+      assert str(point) in str(e)
+    else:
+      raise AssertionError("Did not assert %s is outside of %s" % (point, area))
+
+  outside = [(-1, 0), (10, 0), (0, 5), (10, 5), (15,12)]
+  for k in outside: _check_outside(k)
+
+
+def test_fix_points():
+
+  # Tests that point clipping works as expected
+  from ..preprocessor import utils
+
+  area = (10, 5)
+  inside = [(0,0), (3,2), (9, 4)]
+  fixed = utils.fix_points(area, inside)
+  assert numpy.array_equal(inside, fixed), '%r != %r' % (inside, fixed)
+
+  fixed = utils.fix_points(area, [(-1, 0)])
+  assert numpy.array_equal(fixed, [(0, 0)])
+
+  fixed = utils.fix_points(area, [(10, 0)])
+  assert numpy.array_equal(fixed, [(9, 0)])
+
+  fixed = utils.fix_points(area, [(0, 5)])
+  assert numpy.array_equal(fixed, [(0, 4)])
+
+  fixed = utils.fix_points(area, [(10, 5)])
+  assert numpy.array_equal(fixed, [(9, 4)])
+
+  fixed = utils.fix_points(area, [(15, 12)])
+  assert numpy.array_equal(fixed, [(9, 4)])
+
+
+def test_poly_to_mask():
+
+  # Tests we can generate a mask out of a polygon correctly
+  from ..preprocessor import utils
+
+  area = (10, 9) #10 rows, 9 columns
+  polygon = [(2, 2), (2, 7), (7, 7), (7, 2)] #square shape, (y, x) format
+  mask = utils.poly_to_mask(area, polygon)
+  nose.tools.eq_(mask.dtype, numpy.bool)
+
+  # This should be the output:
+  expected = numpy.array([
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, True, True, True, True, True, True, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      ])
+  assert numpy.array_equal(mask, expected)
+
+  polygon = [(3, 2), (5, 7), (8, 7), (7, 3)] #trapezoid, (y, x) format
+  mask = utils.poly_to_mask(area, polygon)
+  nose.tools.eq_(mask.dtype, numpy.bool)
+
+  # This should be the output:
+  expected = numpy.array([
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, False, False, False, False, False, False, False],
+      [False, False, True, False, False, False, False, False, False],
+      [False, False, True, True, True, False, False, False, False],
+      [False, False, False, True, True, True, True, True, False],
+      [False, False, False, True, True, True, True, True, False],
+      [False, False, False, True, True, True, True, True, False],
+      [False, False, False, False, False, False, False, True, False],
+      [False, False, False, False, False, False, False, False, False],
+      ])
+  assert numpy.array_equal(mask, expected)
+
+
+def test_mask_to_image():
+
+  # Tests we can correctly convert a boolean array into an image
+  # that makes sense according to the data types
+  from ..preprocessor import utils
+
+  sample = numpy.array([False, True])
+  nose.tools.eq_(sample.dtype, numpy.bool)
+
+  def _check_uint(n):
+    conv = utils.mask_to_image(sample, 'uint%d' % n)
+    nose.tools.eq_(conv.dtype, getattr(numpy, 'uint%d' % n))
+    target = [0, (2**n)-1]
+    assert numpy.array_equal(conv, target), '%r != %r' % (conv, target)
+
+  _check_uint(8)
+  _check_uint(16)
+  _check_uint(32)
+  _check_uint(64)
+
+  def _check_float(n):
+    conv = utils.mask_to_image(sample, 'float%d' % n)
+    nose.tools.eq_(conv.dtype, getattr(numpy, 'float%d' % n))
+    assert numpy.array_equal(conv, [0, 1.0]), '%r != %r' % (conv, [0, 1.0])
+
+  _check_float(32)
+  _check_float(64)
+  _check_float(128)
+
+
+  # This should be unsupported
+  try:
+    conv = utils.mask_to_image(sample, 'int16')
+  except TypeError as e:
+    assert 'int16' in str(e)
+  else:
+    raise AssertionError('Conversion to int16 did not trigger a TypeError')
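
The new ``bob.bio.vein.preprocessor.utils`` module introduced by this patch can be exercised roughly as follows. This is a minimal sketch and not part of the patch itself: the image shape and polygon points are illustrative only, and the last two steps show what ``VerafingerBioFile.roi()`` could do with the annotation points returned by the low-level ``f.roi()`` call (the patch leaves that return value unimplemented)::

    from bob.bio.vein.preprocessor.utils import (assert_points, fix_points,
        poly_to_mask, mask_to_image)

    # illustrative image shape in Bob order (y, x) and a hand-drawn ROI polygon
    shape = (250, 665)
    points = [(10, 10), (10, 650), (240, 650), (240, 10)]

    points = fix_points(shape, points)  # clip stray coordinates to the border
    assert_points(shape, points)        # all points now lie inside ``shape``

    mask = poly_to_mask(shape, points)    # boolean mask, True on/inside the polygon
    image = mask_to_image(mask, 'uint8')  # 0 outside the ROI, 255 inside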