diff --git a/bob/bio/face/config/extractor/luan.py b/bob/bio/face/config/extractor/luan.py
new file mode 100644
index 0000000000000000000000000000000000000000..474a8a661df465311b7d291b1ef9efc77239ae32
--- /dev/null
+++ b/bob/bio/face/config/extractor/luan.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import bob.bio.base
+import bob.bio.face
+
+extractor = bob.bio.face.extractor.LuanExtractor()
diff --git a/bob/bio/face/config/preprocessor/luan.py b/bob/bio/face/config/preprocessor/luan.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e375ac1b579127932300c6a8484395a0a63ba63
--- /dev/null
+++ b/bob/bio/face/config/preprocessor/luan.py
@@ -0,0 +1,18 @@
+import bob.bio.face
+
+# This is the size of the image that this model expects
+CROPPED_IMAGE_HEIGHT = 96
+CROPPED_IMAGE_WIDTH = 96
+
+RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
+LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
+
+face_cropper = bob.bio.face.preprocessor.FaceCrop(
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+    cropped_positions = {'leye' : LEFT_EYE_POS, 'reye' : RIGHT_EYE_POS},
+    color_channel='rgb'
+)
+
+preprocessor = bob.bio.face.preprocessor.FaceDetect(face_cropper=face_cropper, color_channel='rgb')
+
+
diff --git a/bob/bio/face/extractor/Luan.py b/bob/bio/face/extractor/Luan.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fe3559bed0abeee168aad275e9197a961319a3c
--- /dev/null
+++ b/bob/bio/face/extractor/Luan.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
+"""Features for face recognition"""
+
+import numpy
+import bob.io.base
+from bob.bio.base.extractor import Extractor
+
+import tensorflow as tf
+import bob.ip.base
+
+from bob.learn.drgan.networks import DRGAN
+
+
+class LuanExtractor(Extractor):
+    """DR-GAN encoder feature extractor.
+
+    Builds the encoder half of a pre-trained DR-GAN (TF1 graph + session)
+    and maps a 96x96 RGB face image to an identity feature vector.
+    """
+
+    def __init__(self):
+        Extractor.__init__(self, skip_extractor_training=True)
+
+        # not relevant (discriminator)
+        self.identity_dim = 208
+        self.conditional_dim = 13
+
+        # relevant and to be checked with the saved model
+        self.latent_dim = 320
+        self.image_size = [96, 96, 3]
+
+        self.session = tf.Session()
+
+        self.drgan = DRGAN(image_size=96, z_dim=100, gf_dim=32, gfc_dim=320, c_dim=3, checkpoint_dir='')
+        data_shape = [1] + self.image_size
+
+        self.X = tf.placeholder(tf.float32, shape=data_shape)
+        self.encode = self.drgan.generator_encoder(self.X, is_reuse=False, is_training=False)
+
+        self.saver = tf.train.Saver()
+
+    def __call__(self, image):
+        """__call__(image) -> feature
+
+        Extract features
+
+        **Parameters:**
+
+        image : 3D :py:class:`numpy.ndarray` (floats)
+            The image to extract the features from.
+
+        **Returns:**
+
+        feature : 2D :py:class:`numpy.ndarray` (floats)
+            The extracted features
+        """
+        def bob2skimage(bob_image):
+            """Convert a bob (planar, CxHxW) color image to scikit-image (HxWxC) layout."""
+            if len(bob_image.shape) == 3:
+                skimage = numpy.zeros(shape=(bob_image.shape[1], bob_image.shape[2], 3), dtype=numpy.uint8)
+                skimage[:, :, 0] = bob_image[0, :, :]
+                skimage[:, :, 1] = bob_image[1, :, :]
+                skimage[:, :, 2] = bob_image[2, :, :]
+            else:
+                skimage = numpy.zeros(shape=(bob_image.shape[0], bob_image.shape[1], 1))
+                skimage[:, :, 0] = bob_image[:, :]
+            return skimage
+
+        def rescaleToUint8(image):
+            """Min-max rescale each slice along axis 2 to [0, 255] and cast to uint8."""
+            # NOTE(review): this iterates image.shape[2], but it is called BEFORE
+            # bob2skimage, i.e. on a CxHxW array where axis 2 is width — confirm
+            # the intended axis / call order against the original DR-GAN code.
+            result = numpy.zeros_like(image)
+            for channel in range(image.shape[2]):
+                min_image = numpy.min(image[:, :, channel])
+                max_image = numpy.max(image[:, :, channel])
+                if (max_image - min_image) != 0:
+                    result[:, :, channel] = 255.0*((image[:, :, channel] - min_image) / (max_image - min_image))
+                else:
+                    result[:, :, channel] = 0
+            result = result.astype('uint8')
+            return result
+
+        # uint8-normalize, convert to HxWxC, then scale to [-1, 1] for the encoder
+        image = rescaleToUint8(image)
+        image = bob2skimage(image)
+        image = numpy.array(image/127.5 - 1).astype(numpy.float32)
+
+        # add the leading batch dimension expected by the placeholder self.X
+        shape = [1] + list(image.shape)
+        img = numpy.reshape(image, tuple(shape))
+
+        encoded_id = self.session.run(self.encode, feed_dict={self.X : img})
+        return encoded_id
+
+    # re-define the train function to get it non-documented
+    def train(self, *args, **kwargs):
+        raise NotImplementedError("This function is not implemented and should not be called.")
+
+    def load(self, extractor_file):
+        """Restore the pre-trained encoder weights into the running session."""
+        self.saver.restore(self.session, extractor_file)
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index b1b7826d09d9f3e5d8b24d428cbebebf3aa5562f..298d936f13aa628b8129dbeabd0a631341830292 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -2,6 +2,7 @@ from .DCTBlocks import DCTBlocks
 from .GridGraph import GridGraph
 from .LGBPHS import LGBPHS
 from .Eigenface import Eigenface
+from .Luan import LuanExtractor
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -22,5 +23,6 @@ __appropriate__(
   GridGraph,
   LGBPHS,
   Eigenface,
+  LuanExtractor,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/setup.py b/setup.py
index e55963a87f982d6d2051496c274190bcb0060452..5b12961cafb9b6e5b042e01887cc08acc0604fc8 100644
--- a/setup.py
+++ b/setup.py
@@ -163,6 +163,8 @@ setup(
       # histogram equalization w/o face-crop
       'self-quotient = bob.bio.face.config.preprocessor.self_quotient_image:preprocessor_no_crop', # self quotient image w/o face-crop
+
+      'drgan = bob.bio.face.config.preprocessor.luan:preprocessor',
     ],
 
     'bob.bio.extractor': [
 
@@ -170,6 +172,7 @@ setup(
       'grid-graph = bob.bio.face.config.extractor.grid_graph:extractor', # Grid graph
       'lgbphs = bob.bio.face.config.extractor.lgbphs:extractor', # LGBPHS
       'eigenface = bob.bio.face.config.extractor.eigenface:extractor', # Eigenface
+      'luan = bob.bio.face.config.extractor.luan:extractor',
     ],
 
     'bob.bio.algorithm': [