Commit 050c1380 authored by Manuel Günther

Added face algorithms from facereclib

parent d77ef348
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Manuel Guenther <Manuel.Guenther@idiap.ch>
import bob.ip.gabor
import bob.io.base
import numpy
import math
from bob.bio.base.algorithm import Algorithm
class GaborJet (Algorithm):
"""Algorithm chain for computing Gabor jets, Gabor graphs, and Gabor graph comparisons"""
def __init__(
self,
# parameters for the tool
gabor_jet_similarity_type,
multiple_feature_scoring = 'max_jet',
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
gabor_directions = 8,
gabor_scales = 5,
gabor_sigma = 2. * math.pi,
gabor_maximum_frequency = math.pi / 2.,
gabor_frequency_step = math.sqrt(.5),
gabor_power_of_k = 0,
gabor_dc_free = True
):
# call base class constructor
Algorithm.__init__(
self,
gabor_jet_similarity_type = gabor_jet_similarity_type,
multiple_feature_scoring = multiple_feature_scoring,
gabor_directions = gabor_directions,
gabor_scales = gabor_scales,
gabor_sigma = gabor_sigma,
gabor_maximum_frequency = gabor_maximum_frequency,
gabor_frequency_step = gabor_frequency_step,
gabor_power_of_k = gabor_power_of_k,
gabor_dc_free = gabor_dc_free,
multiple_model_scoring = None,
multiple_probe_scoring = None
)
# the Gabor wavelet transform; used by (some of) the Gabor jet similarities
gwt = bob.ip.gabor.Transform(
number_of_scales = gabor_scales,
number_of_directions = gabor_directions,
sigma = gabor_sigma,
k_max = gabor_maximum_frequency,
k_fac = gabor_frequency_step,
power_of_k = gabor_power_of_k,
dc_free = gabor_dc_free
)
# jet comparison function
self.similarity_function = bob.ip.gabor.Similarity(gabor_jet_similarity_type, gwt)
# how to proceed with multiple features per model
self.jet_scoring = {
'average_model' : None, # compute an average model
'average' : numpy.average, # compute the average similarity
'min_jet' : min, # for each jet location, compute the minimum similarity
'max_jet' : max, # for each jet location, compute the maximum similarity
'med_jet' : numpy.median, # for each jet location, compute the median similarity
'min_graph' : numpy.average, # for each model graph, compute the minimum average similarity
'max_graph' : numpy.average, # for each model graph, compute the maximum average similarity
'med_graph' : numpy.average, # for each model graph, compute the median average similarity
}[multiple_feature_scoring]
self.graph_scoring = {
'average_model' : None, # compute an average model
'average' : numpy.average, # compute the average similarity
'min_jet' : numpy.average, # for each jet location, compute the minimum similarity
'max_jet' : numpy.average, # for each jet location, compute the maximum similarity
'med_jet' : numpy.average, # for each jet location, compute the median similarity
'min_graph' : min, # for each model graph, compute the minimum average similarity
'max_graph' : max, # for each model graph, compute the maximum average similarity
'med_graph' : numpy.median, # for each model graph, compute the median average similarity
}[multiple_feature_scoring]
def _check_feature(self, feature):
assert isinstance(feature, list)
assert len(feature)
assert all(isinstance(f, bob.ip.gabor.Jet) for f in feature)
def enroll(self, enroll_features):
"""Enrolls the model by computing an average graph for each model"""
[self._check_feature(feature) for feature in enroll_features]
assert len(enroll_features)
assert all(len(feature) == len(enroll_features[0]) for feature in enroll_features)
# re-organize the jets to have a collection of jets per node
jets_per_node = [[enroll_features[g][n] for g in range(len(enroll_features))] for n in range(len(enroll_features[0]))]
if self.jet_scoring is not None:
return jets_per_node
# compute average model, and keep a list with a single jet per node
return [[bob.ip.gabor.Jet(jets_per_node[n])] for n in range(len(jets_per_node))]
def save_model(self, model, model_file):
"""Saves the enrolled model of Gabor jets to file."""
f = bob.io.base.HDF5File(model_file, 'w')
# store the collection of enrolled jets for each node
f.set("NumberOfNodes", len(model))
for g in range(len(model)):
name = "Node-" + str(g+1)
f.create_group(name)
f.cd(name)
bob.ip.gabor.save_jets(model[g], f)
f.cd("..")
f.close()
def read_model(self, model_file):
f = bob.io.base.HDF5File(model_file)
count = f.get("NumberOfNodes")
model = []
for g in range(count):
name = "Node-" + str(g+1)
f.cd(name)
model.append(bob.ip.gabor.load_jets(f))
f.cd("..")
return model
def read_probe(self, probe_file):
return bob.ip.gabor.load_jets(bob.io.base.HDF5File(probe_file))
def score(self, model, probe):
"""Computes the score of the probe and the model"""
self._check_feature(probe)
[self._check_feature(m) for m in model]
assert len(model) == len(probe)
# select jet score averaging function
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = numpy.average if self.graph_scoring is None else self.graph_scoring
local_scores = [jet_scoring([self.similarity_function(m, pro) for m in mod]) for mod, pro in zip(model, probe)]
return graph_scoring(local_scores)
def score_for_multiple_probes(self, model, probes):
"""This function computes the score between the given model graph(s) and several given probe graphs."""
[self._check_feature(probe) for probe in probes]
graph_scoring = numpy.average if self.graph_scoring is None else self.graph_scoring
return graph_scoring([self.score(model, probe) for probe in probes])
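# Illustrative usage sketch (not part of the original commit): drives the GaborJet
# algorithm above end-to-end, mirroring what the unit tests later in this commit do.
# It assumes bob.bio.face is installed so that the example graph shipped with the
# test data can be loaded; in a real experiment the jets would come from the
# Gabor-graph extractor instead.
import pkg_resources
import bob.io.base
import bob.ip.gabor
import bob.bio.face

# one Gabor graph: a list of bob.ip.gabor.Jet objects, one per node
graph = bob.ip.gabor.load_jets(bob.io.base.HDF5File(
    pkg_resources.resource_filename("bob.bio.face.test", "data/graph_regular.hdf5")))

algorithm = bob.bio.face.algorithm.GaborJet("PhaseDiffPlusCanberra")
# enroll a model from two (identical) graphs and score the probe graph against it
model = algorithm.enroll([graph, graph])
print(algorithm.score(model, graph))  # close to 1 for a perfect match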
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Manuel Guenther <Manuel.Guenther@idiap.ch>
import bob.math
import numpy
from bob.bio.base.algorithm import Algorithm
class LGBPHS (Algorithm):
"""Tool chain for computing local Gabor binary pattern histogram sequences"""
def __init__(
self,
distance_function = bob.math.chi_square,
is_distance_function = True,
multiple_probe_scoring = 'average'
):
"""Initializes the local Gabor binary pattern histogram sequence tool"""
# call base class constructor
Algorithm.__init__(
self,
distance_function = str(distance_function),
is_distance_function = is_distance_function,
multiple_model_scoring = None,
multiple_probe_scoring = multiple_probe_scoring
)
# remember distance function
self.distance_function = distance_function
self.factor = -1. if is_distance_function else 1.
def _is_sparse(self, feature):
assert isinstance(feature, numpy.ndarray)
return feature.ndim == 2
def _check_feature(self, feature, sparse):
assert isinstance(feature, numpy.ndarray)
if sparse:
# check that we have a 2D array
assert feature.ndim == 2
assert feature.shape[0] == 2
else:
assert feature.ndim == 1
def enroll(self, enroll_features):
"""Enrolling model by taking the average of all features"""
assert len(enroll_features)
sparse = self._is_sparse(enroll_features[0])
[self._check_feature(feature, sparse) for feature in enroll_features]
if sparse:
# get all indices for the sparse model
values = {}
# iterate through all sparse features
for feature in enroll_features:
# collect the values by index
for j in range(feature.shape[1]):
index = int(feature[0,j])
value = feature[1,j] / float(len(enroll_features))
# add up values
if index in values:
values[index] += value
else:
values[index] = value
# create model containing all the used indices
model = numpy.ndarray((2, len(values)), dtype = numpy.float64)
for i, index in enumerate(sorted(values.keys())):
model[0,i] = index
model[1,i] = values[index]
else:
model = numpy.zeros(enroll_features[0].shape, dtype = numpy.float64)
# add up models
for feature in enroll_features:
model += feature
# normalize by number of models
model /= float(len(enroll_features))
# return averaged model
return model
def score(self, model, probe):
"""Computes the score using the specified histogram measure; returns a similarity value (bigger -> better)"""
sparse = self._is_sparse(probe)
self._check_feature(model, sparse)
self._check_feature(probe, sparse)
if sparse:
# both model and probe are sparse: pass the index and value rows separately
return self.factor * self.distance_function(model[0,:], model[1,:], probe[0,:], probe[1,:])
else:
return self.factor * self.distance_function(model, probe)
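# Illustrative sketch (not part of the original commit): the `factor` attribute above
# flips the sign of distance functions, so a larger score always means a better match,
# whether a distance (chi-square) or a similarity (histogram intersection) is
# configured. The two small histograms below are made-up examples.
import numpy
import bob.math
import bob.bio.face

h1 = numpy.array([0.2, 0.3, 0.5])
h2 = numpy.array([0.25, 0.25, 0.5])

chi2 = bob.bio.face.algorithm.LGBPHS()  # default: chi-square distance
hist = bob.bio.face.algorithm.LGBPHS(
    distance_function = bob.math.histogram_intersection,
    is_distance_function = False
)
# for both variants, the self-comparison scores at least as high as the cross-comparison
assert chi2.score(h1, h1) >= chi2.score(h1, h2)
assert hist.score(h1, h1) >= hist.score(h1, h2)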
from .GaborJet import GaborJet
from .LGBPHS import LGBPHS
#!/usr/bin/env python
import bob.bio.face
import math
algorithm = bob.bio.face.algorithm.GaborJet(
# Gabor jet comparison
gabor_jet_similarity_type = 'PhaseDiffPlusCanberra',
multiple_feature_scoring = 'max_jet',
# Gabor wavelet setup
gabor_sigma = math.sqrt(2.) * math.pi,
)
#!/usr/bin/env python
import bob.bio.face
import bob.math
algorithm = bob.bio.face.algorithm.LGBPHS(
distance_function = bob.math.histogram_intersection,
is_distance_function = False
)
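# Illustrative sketch (not part of the original commit): the two configuration files
# above are plain Python modules that each expose an `algorithm` object. Once the
# entry points from the setup.py hunk at the end of this commit are registered, the
# same objects can be loaded by their resource names, exactly as the unit tests
# below do.
import bob.bio.base

gabor_jet = bob.bio.base.load_resource("gabor-jet", "algorithm")
lgbphs = bob.bio.base.load_resource("lgbphs", "algorithm")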
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Thu May 24 10:41:42 CEST 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bob.io.base
import bob.ip.gabor
# bob.bio.base and bob.bio.face are required by the tests below
import bob.bio.base
import bob.bio.face
import unittest
import os
import numpy
import math
import tempfile
import facereclib
from nose.plugins.skip import SkipTest
import pkg_resources
regenerate_refs = False
seed_value = 5489
def test_gabor_jet():
jets = bob.bio.base.load_resource("gabor-jet", "algorithm")
assert isinstance(jets, bob.bio.face.algorithm.GaborJet)
assert isinstance(jets, bob.bio.base.algorithm.Algorithm)
assert not jets.performs_projection
assert not jets.requires_projector_training
assert not jets.use_projected_features_for_enrollment
assert not jets.split_training_features_by_client
assert not jets.requires_enroller_training
# read input
feature = bob.ip.gabor.load_jets(bob.io.base.HDF5File(pkg_resources.resource_filename("bob.bio.face.test", "data/graph_regular.hdf5")))
# enroll
model = jets.enroll([feature, feature])
assert len(model) == len(feature)
assert all(len(m) == 2 for m in model)
assert all(model[n][i] == feature[n] for n in range(len(feature)) for i in range(2))
# score
assert abs(jets.score(model, feature) - 1.) < 1e-8
assert abs(jets.score_for_multiple_probes(model, [feature, feature]) - 1.) < 1e-8
# test averaging
jets = bob.bio.face.algorithm.GaborJet(
"PhaseDiffPlusCanberra",
multiple_feature_scoring = "average_model"
)
model = jets.enroll([feature, feature])
assert len(model) == len(feature)
assert all(len(m) == 1 for m in model)
# absolute values must be identical
assert all(numpy.allclose(model[n][0].abs, feature[n].abs) for n in range(len(model)))
# phases might differ by a multiple of 2 pi
for n in range(len(model)):
for j in range(len(model[n][0].phase)):
assert any(abs(model[n][0].phase[j] - feature[n].phase[j] - k*2.*math.pi) < 1e-5 for k in (0, -2, 2))
assert abs(jets.score(model, feature) - 1.) < 1e-8
assert abs(jets.score_for_multiple_probes(model, [feature, feature]) - 1.) < 1e-8
def test_lgbphs():
lgbphs = bob.bio.base.load_resource("lgbphs", "algorithm")
assert isinstance(lgbphs, bob.bio.face.algorithm.LGBPHS)
assert isinstance(lgbphs, bob.bio.base.algorithm.Algorithm)
assert not lgbphs.performs_projection
assert not lgbphs.requires_projector_training
assert not lgbphs.use_projected_features_for_enrollment
assert not lgbphs.split_training_features_by_client
assert not lgbphs.requires_enroller_training
# read input
feature1 = bob.bio.base.load(pkg_resources.resource_filename('bob.bio.face.test', 'data/lgbphs_sparse.hdf5'))
feature2 = bob.bio.base.load(pkg_resources.resource_filename('bob.bio.face.test', 'data/lgbphs_with_phase.hdf5'))
# enroll model from sparse features
model1 = lgbphs.enroll([feature1, feature1])
assert model1.shape == feature1.shape
assert numpy.allclose(model1, feature1)
# enroll from non-sparse features
model2 = lgbphs.enroll([feature2, feature2])
assert model2.shape == feature2.shape
assert numpy.allclose(model2, feature2)
# score without phase and sparse
reference = 40960.
assert abs(lgbphs.score(model1, feature1) - reference) < 1e-5
assert abs(lgbphs.score_for_multiple_probes(model1, [feature1, feature1]) - reference) < 1e-5
# score with phase, but non-sparse
# reference doubles since we have two times more features
reference *= 2.
assert abs(lgbphs.score(model2, feature2) - reference) < 1e-5
assert abs(lgbphs.score_for_multiple_probes(model2, [feature2, feature2]) - reference) < 1e-5
"""
def test09_plda(self):
# read input
feature = facereclib.utils.load(self.input_dir('linearize.hdf5'))
# assure that the config file is readable
tool = self.config('pca+plda')
self.assertTrue(isinstance(tool, facereclib.tools.PLDA))
# here, we use a reduced complexity for test purposes
tool = facereclib.tools.PLDA(
subspace_dimension_of_f = 2,
subspace_dimension_of_g = 2,
subspace_dimension_pca = 10,
plda_training_iterations = 1,
INIT_SEED = seed_value,
)
self.assertFalse(tool.performs_projection)
self.assertTrue(tool.requires_enroller_training)
# train the projector
t = tempfile.mkstemp('pca+plda.hdf5', prefix='frltest_')[1]
tool.train_enroller(facereclib.utils.tests.random_training_set_by_id(feature.shape, count=20, minimum=0., maximum=255.), t)
if regenerate_refs:
import shutil
shutil.copy2(t, self.reference_dir('pca+plda_enroller.hdf5'))
# load the projector file
tool.load_enroller(self.reference_dir('pca+plda_enroller.hdf5'))
# compare the resulting machines
test_file = bob.io.base.HDF5File(t)
test_file.cd('/pca')
pca_machine = bob.learn.linear.Machine(test_file)
test_file.cd('/plda')
plda_machine = bob.learn.em.PLDABase(test_file)
# TODO: compare the PCA machines
#self.assertEqual(pca_machine, tool.m_pca_machine)
# TODO: compare the PLDA machines
#self.assertEqual(plda_machine, tool.m_plda_base_machine)
os.remove(t)
# enroll model
model = tool.enroll([feature])
if regenerate_refs:
model.save(bob.io.base.HDF5File(self.reference_dir('pca+plda_model.hdf5'), 'w'))
# TODO: compare the models with the reference
#reference_model = tool.read_model(self.reference_dir('pca+plda_model.hdf5'))
#self.assertEqual(model, reference_model)
# score
sim = tool.score(model, feature)
self.assertAlmostEqual(sim, 0.)
# score with a concatenation of the probe
self.assertAlmostEqual(tool.score_for_multiple_probes(model, [feature, feature]), 0.)
"""
@@ -127,6 +127,8 @@ setup(
],
'bob.bio.algorithm': [
'gabor-jet = bob.bio.face.config.algorithm.gabor_jet:algorithm', # Gabor jet comparison
'lgbphs = bob.bio.face.config.algorithm.lgbphs:algorithm', # LGBPHS histograms
],
},
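# Illustrative sketch (not part of the original commit): the two entry points added
# above can be enumerated with pkg_resources once the package is installed, which is
# roughly how bob.bio.base resolves resource names such as 'gabor-jet' and 'lgbphs'.
import pkg_resources

for entry_point in pkg_resources.iter_entry_points('bob.bio.algorithm'):
    print(entry_point.name, '->', entry_point.module_name)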