Commit f5a99f3e authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Pickled algorithms

parent a51faf5f
Pipeline #39181 failed with stage
in 8 minutes and 52 seconds
......@@ -10,8 +10,9 @@ import math
from bob.bio.base.algorithm import Algorithm
class GaborJet (Algorithm):
"""Computes a comparison of lists of Gabor jets using a similarity function of :py:class:`bob.ip.gabor.Similarity`.
class GaborJet(Algorithm):
"""Computes a comparison of lists of Gabor jets using a similarity function of :py:class:`bob.ip.gabor.Similarity`.
The model enrollment simply stores all extracted Gabor jets for all enrollment features.
By default (i.e., ``multiple_feature_scoring = 'max_jet'``), the scoring uses an advanced local strategy.
......@@ -40,85 +41,100 @@ class GaborJet (Algorithm):
Please assure that this class and the :py:class:`bob.bio.face.extractor.GridGraph` class get the same configuration, otherwise unexpected things might happen.
"""
def __init__(
self,
# parameters for the tool
gabor_jet_similarity_type,
multiple_feature_scoring = 'max_jet',
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
gabor_directions = 8,
gabor_scales = 5,
gabor_sigma = 2. * math.pi,
gabor_maximum_frequency = math.pi / 2.,
gabor_frequency_step = math.sqrt(.5),
gabor_power_of_k = 0,
gabor_dc_free = True
):
# call base class constructor
Algorithm.__init__(
def __init__(
self,
gabor_jet_similarity_type = gabor_jet_similarity_type,
multiple_feature_scoring = multiple_feature_scoring,
gabor_directions = gabor_directions,
gabor_scales = gabor_scales,
gabor_sigma = gabor_sigma,
gabor_maximum_frequency = gabor_maximum_frequency,
gabor_frequency_step = gabor_frequency_step,
gabor_power_of_k = gabor_power_of_k,
gabor_dc_free = gabor_dc_free,
multiple_model_scoring = None,
multiple_probe_scoring = None
)
# the Gabor wavelet transform; used by (some of) the Gabor jet similarities
gwt = bob.ip.gabor.Transform(
number_of_scales = gabor_scales,
number_of_directions = gabor_directions,
sigma = gabor_sigma,
k_max = gabor_maximum_frequency,
k_fac = gabor_frequency_step,
power_of_k = gabor_power_of_k,
dc_free = gabor_dc_free
)
# jet comparison function
self.similarity_function = bob.ip.gabor.Similarity(gabor_jet_similarity_type, gwt)
# how to proceed with multiple features per model
self.jet_scoring = {
'average_model' : None, # compute an average model
'average' : numpy.average, # compute the average similarity
'min_jet' : min, # for each jet location, compute the minimum similarity
'max_jet' : max, # for each jet location, compute the maximum similarity
'med_jet' : numpy.median, # for each jet location, compute the median similarity
'min_graph' : numpy.average, # for each model graph, compute the minimum average similarity
'max_graph' : numpy.average, # for each model graph, compute the maximum average similarity
'med_graph' : numpy.average, # for each model graph, compute the median average similarity
}[multiple_feature_scoring]
self.graph_scoring = {
'average_model' : None, # compute an average model
'average' : numpy.average, # compute the average similarity
'min_jet' : numpy.average, # for each jet location, compute the minimum similarity
'max_jet' : numpy.average, # for each jet location, compute the maximum similarity
'med_jet' : numpy.average, # for each jet location, compute the median similarity
'min_graph' : min, # for each model graph, compute the minimum average similarity
'max_graph' : max, # for each model graph, compute the maximum average similarity
'med_graph' : numpy.median, # for each model graph, compute the median average similarity
}[multiple_feature_scoring]
def _check_feature(self, feature):
# import ipdb; ipdb.set_trace()
assert isinstance(feature, list) or isinstance(feature, numpy.ndarray)
assert len(feature)
assert all(isinstance(f, bob.ip.gabor.Jet) for f in feature)
def enroll(self, enroll_features):
"""enroll(enroll_features) -> model
# parameters for the tool
gabor_jet_similarity_type,
multiple_feature_scoring="max_jet",
# some similarity functions might need a GaborWaveletTransform class, so we have to provide the parameters here as well...
gabor_directions=8,
gabor_scales=5,
gabor_sigma=2.0 * math.pi,
gabor_maximum_frequency=math.pi / 2.0,
gabor_frequency_step=math.sqrt(0.5),
gabor_power_of_k=0,
gabor_dc_free=True,
):
# call base class constructor
Algorithm.__init__(
self,
gabor_jet_similarity_type=gabor_jet_similarity_type,
multiple_feature_scoring=multiple_feature_scoring,
gabor_directions=gabor_directions,
gabor_scales=gabor_scales,
gabor_sigma=gabor_sigma,
gabor_maximum_frequency=gabor_maximum_frequency,
gabor_frequency_step=gabor_frequency_step,
gabor_power_of_k=gabor_power_of_k,
gabor_dc_free=gabor_dc_free,
multiple_model_scoring=None,
multiple_probe_scoring=None,
)
self.gabor_jet_similarity_type=gabor_jet_similarity_type
self.multiple_feature_scoring=multiple_feature_scoring
self.gabor_directions=gabor_directions
self.gabor_scales=gabor_scales
self.gabor_sigma=gabor_sigma
self.gabor_maximum_frequency=gabor_maximum_frequency
self.gabor_frequency_step=gabor_frequency_step
self.gabor_power_of_k=gabor_power_of_k
self.gabor_dc_free=gabor_dc_free
self.gabor_jet_similarity_type = gabor_jet_similarity_type
self._init_non_pickables()
    def _init_non_pickables(self):
        """Creates the helper objects that wrap native bob.ip.gabor resources.

        These attributes (``gwt``, ``similarity_function``) are dropped in
        ``__getstate__`` and rebuilt here after unpickling; the scoring
        callables are re-derived from ``multiple_feature_scoring``.
        """
        # the Gabor wavelet transform; used by (some of) the Gabor jet similarities
        self.gwt = bob.ip.gabor.Transform(
            number_of_scales=self.gabor_scales,
            number_of_directions=self.gabor_directions,
            sigma=self.gabor_sigma,
            k_max=self.gabor_maximum_frequency,
            k_fac=self.gabor_frequency_step,
            power_of_k=self.gabor_power_of_k,
            dc_free=self.gabor_dc_free,
        )
        # jet comparison function
        self.similarity_function = bob.ip.gabor.Similarity(
            self.gabor_jet_similarity_type, self.gwt
        )
        # how to proceed with multiple features per model
        # (raises KeyError for an unknown multiple_feature_scoring value)
        self.jet_scoring = {
            "average_model": None,  # compute an average model
            "average": numpy.average,  # compute the average similarity
            "min_jet": min,  # for each jet location, compute the minimum similarity
            "max_jet": max,  # for each jet location, compute the maximum similarity
            "med_jet": numpy.median,  # for each jet location, compute the median similarity
            "min_graph": numpy.average,  # for each model graph, compute the minimum average similarity
            "max_graph": numpy.average,  # for each model graph, compute the maximum average similarity
            "med_graph": numpy.average,  # for each model graph, compute the median average similarity
        }[self.multiple_feature_scoring]
        self.graph_scoring = {
            "average_model": None,  # compute an average model
            "average": numpy.average,  # compute the average similarity
            "min_jet": numpy.average,  # for each jet location, compute the minimum similarity
            "max_jet": numpy.average,  # for each jet location, compute the maximum similarity
            "med_jet": numpy.average,  # for each jet location, compute the median similarity
            "min_graph": min,  # for each model graph, compute the minimum average similarity
            "max_graph": max,  # for each model graph, compute the maximum average similarity
            "med_graph": numpy.median,  # for each model graph, compute the median average similarity
        }[self.multiple_feature_scoring]
    def _check_feature(self, feature):
        """Asserts that ``feature`` is a non-empty list or array of :py:class:`bob.ip.gabor.Jet`."""
        assert isinstance(feature, list) or isinstance(feature, numpy.ndarray)
        assert len(feature)
        assert all(isinstance(f, bob.ip.gabor.Jet) for f in feature)
def enroll(self, enroll_features):
"""enroll(enroll_features) -> model
Enrolls the model using one of several strategies.
Commonly, the bunch graph strategy [WFK97]_ is applied, by storing several Gabor jets for each node.
......@@ -139,22 +155,26 @@ class GaborJet (Algorithm):
Each sub-list contains a list of jets, which correspond to the same node.
When ``multiple_feature_scoring = 'average_model'`` each sub-list contains a single :py:class:`bob.ip.gabor.Jet`.
"""
[self._check_feature(feature) for feature in enroll_features]
assert len(enroll_features)
assert all(len(feature) == len(enroll_features[0]) for feature in enroll_features)
# re-organize the jets to have a collection of jets per node
jets_per_node = [[enroll_features[g][n] for g in range(len(enroll_features))] for n in range(len(enroll_features[0]))]
[self._check_feature(feature) for feature in enroll_features]
assert len(enroll_features)
assert all(
len(feature) == len(enroll_features[0]) for feature in enroll_features
)
if self.jet_scoring is not None:
return jets_per_node
# re-organize the jets to have a collection of jets per node
jets_per_node = [
[enroll_features[g][n] for g in range(len(enroll_features))]
for n in range(len(enroll_features[0]))
]
# compute average model, and keep a list with a single jet per node
return [[bob.ip.gabor.Jet(jets_per_node[n])] for n in range(len(jets_per_node))]
if self.jet_scoring is not None:
return jets_per_node
# compute average model, and keep a list with a single jet per node
return [[bob.ip.gabor.Jet(jets_per_node[n])] for n in range(len(jets_per_node))]
def write_model(self, model, model_file):
"""Writes the model enrolled by the :py:meth:`enroll` function to the given file.
def write_model(self, model, model_file):
"""Writes the model enrolled by the :py:meth:`enroll` function to the given file.
**Parameters:**
......@@ -164,20 +184,19 @@ class GaborJet (Algorithm):
model_file : str or :py:class:`bob.io.base.HDF5File`
The name of the file or the file opened for writing.
"""
f = bob.io.base.HDF5File(model_file, 'w')
# several model graphs
f.set("NumberOfNodes", len(model))
for g in range(len(model)):
name = "Node-" + str(g+1)
f.create_group(name)
f.cd(name)
bob.ip.gabor.save_jets(model[g], f)
f.cd("..")
f.close()
def read_model(self, model_file):
"""read_model(model_file) -> model
f = bob.io.base.HDF5File(model_file, "w")
# several model graphs
f.set("NumberOfNodes", len(model))
for g in range(len(model)):
name = "Node-" + str(g + 1)
f.create_group(name)
f.cd(name)
bob.ip.gabor.save_jets(model[g], f)
f.cd("..")
f.close()
def read_model(self, model_file):
"""read_model(model_file) -> model
Reads the model written by the :py:meth:`write_model` function from the given file.
......@@ -191,19 +210,18 @@ class GaborJet (Algorithm):
model : [[:py:class:`bob.ip.gabor.Jet`]]
The list of Gabor jets read from file.
"""
f = bob.io.base.HDF5File(model_file)
count = f.get("NumberOfNodes")
model = []
for g in range(count):
name = "Node-" + str(g+1)
f.cd(name)
model.append(bob.ip.gabor.load_jets(f))
f.cd("..")
return model
def score(self, model, probe):
"""score(model, probe) -> score
f = bob.io.base.HDF5File(model_file)
count = f.get("NumberOfNodes")
model = []
for g in range(count):
name = "Node-" + str(g + 1)
f.cd(name)
model.append(bob.ip.gabor.load_jets(f))
f.cd("..")
return model
def score(self, model, probe):
"""score(model, probe) -> score
Computes the score of the probe and the model using the desired Gabor jet similarity function and the desired score fusion strategy.
......@@ -220,19 +238,23 @@ class GaborJet (Algorithm):
score : float
The fused similarity score.
"""
self._check_feature(probe)
[self._check_feature(m) for m in model]
assert len(model) == len(probe)
# select jet score averaging function
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = numpy.average if self.graph_scoring is None else self.graph_scoring
local_scores = [jet_scoring([self.similarity_function(m, pro) for m in mod]) for mod, pro in zip(model, probe)]
return graph_scoring(local_scores)
def score_for_multiple_probes(self, model, probes):
"""score(model, probes) -> score
self._check_feature(probe)
[self._check_feature(m) for m in model]
assert len(model) == len(probe)
# select jet score averaging function
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = (
numpy.average if self.graph_scoring is None else self.graph_scoring
)
local_scores = [
jet_scoring([self.similarity_function(m, pro) for m in mod])
for mod, pro in zip(model, probe)
]
return graph_scoring(local_scores)
def score_for_multiple_probes(self, model, probes):
"""score(model, probes) -> score
This function computes the score between the given model graph(s) and several given probe graphs.
The same local scoring strategy as for several model jets is applied, but this time the local scoring strategy is applied between all graphs from the model and probes.
......@@ -252,35 +274,84 @@ class GaborJet (Algorithm):
score : float
The fused similarity score.
"""
[self._check_feature(probe) for probe in probes]
[self._check_feature(m) for m in model]
assert all(len(model) == len(probe) for probe in probes)
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = numpy.average if self.graph_scoring is None else self.graph_scoring
local_scores = [jet_scoring([self.similarity_function(m, probe[n]) for m in model[n] for probe in probes]) for n in range(len(model))]
return graph_scoring(local_scores)
def score_for_multiple_models(self, models, probe):
self._check_feature(probe)
[self._check_feature(m) for model in models for m in model]
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = numpy.average if self.graph_scoring is None else self.graph_scoring
scores = []
for model in models:
local_scores = local_scores = [jet_scoring([self.similarity_function(m, pro) for m in mod]) for mod, pro in zip(model, probe)]
scores.append(graph_scoring(local_scores))
return scores
# overwrite functions to avoid them being documented.
def train_projector(*args, **kwargs) : raise NotImplementedError("This function is not implemented and should not be called.")
def load_projector(*args, **kwargs) : pass
def project(*args, **kwargs) : raise NotImplementedError("This function is not implemented and should not be called.")
def write_feature(*args, **kwargs) : raise NotImplementedError("This function is not implemented and should not be called.")
def read_feature(*args, **kwargs) : raise NotImplementedError("This function is not implemented and should not be called.")
def train_enroller(*args, **kwargs) : raise NotImplementedError("This function is not implemented and should not be called.")
def load_enroller(*args, **kwargs) : pass
[self._check_feature(probe) for probe in probes]
[self._check_feature(m) for m in model]
assert all(len(model) == len(probe) for probe in probes)
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = (
numpy.average if self.graph_scoring is None else self.graph_scoring
)
local_scores = [
jet_scoring(
[
self.similarity_function(m, probe[n])
for m in model[n]
for probe in probes
]
)
for n in range(len(model))
]
return graph_scoring(local_scores)
def score_for_multiple_models(self, models, probe):
self._check_feature(probe)
[self._check_feature(m) for model in models for m in model]
jet_scoring = numpy.average if self.jet_scoring is None else self.jet_scoring
graph_scoring = (
numpy.average if self.graph_scoring is None else self.graph_scoring
)
scores = []
for model in models:
local_scores = local_scores = [
jet_scoring([self.similarity_function(m, pro) for m in mod])
for mod, pro in zip(model, probe)
]
scores.append(graph_scoring(local_scores))
return scores
    # overwrite functions to avoid them being documented.
    def train_projector(*args, **kwargs):
        """Not supported by this algorithm; always raises."""
        raise NotImplementedError(
            "This function is not implemented and should not be called."
        )

    def load_projector(*args, **kwargs):
        """Does nothing; this algorithm has no projector to load."""
        pass

    def project(*args, **kwargs):
        """Not supported by this algorithm; always raises."""
        raise NotImplementedError(
            "This function is not implemented and should not be called."
        )

    def write_feature(*args, **kwargs):
        """Not supported by this algorithm; always raises."""
        raise NotImplementedError(
            "This function is not implemented and should not be called."
        )

    def read_feature(*args, **kwargs):
        """Not supported by this algorithm; always raises."""
        raise NotImplementedError(
            "This function is not implemented and should not be called."
        )

    def train_enroller(*args, **kwargs):
        """Not supported by this algorithm; always raises."""
        raise NotImplementedError(
            "This function is not implemented and should not be called."
        )

    def load_enroller(*args, **kwargs):
        """Does nothing; this algorithm has no enroller to load."""
        pass
def __getstate__(self):
d = dict(self.__dict__)
d.pop("gwt")
d.pop("similarity_function")
return d
    def __setstate__(self, d):
        """Restores the pickled state and re-creates the non-picklable Gabor helpers."""
        self.__dict__ = d
        self._init_non_pickables()
......@@ -4,6 +4,7 @@ from bob.pipelines.utils import assert_picklable
### Preprocessors
def test_face_crop():
CROPPED_IMAGE_HEIGHT = 64
CROPPED_IMAGE_WIDTH = 64
......@@ -39,38 +40,65 @@ def test_TanTriggs():
def test_SQI():
    # the self-quotient-image preprocessor must survive a pickling round-trip
    preprocessor = bob.bio.face.preprocessor.SelfQuotientImage(
        face_cropper="face-crop-eyes"
    )
    assert_picklable(preprocessor)
def test_HistogramEqualization():
    # the histogram-equalization preprocessor must survive a pickling round-trip
    preprocessor = bob.bio.face.preprocessor.HistogramEqualization(
        face_cropper="face-crop-eyes"
    )
    assert_picklable(preprocessor)
### Extractors
def test_DCT():
    # the DCT-blocks extractor must survive a pickling round-trip
    extractor = bob.bio.face.extractor.DCTBlocks()
    assert_picklable(extractor)
def test_GridGraph():
    # a plain grid-graph extractor must be picklable ...
    extractor = bob.bio.face.extractor.GridGraph(node_distance=24)
    assert_picklable(extractor)

    # ... and so must one configured from the eye positions of the default cropper
    cropper = bob.bio.base.load_resource(
        "face-crop-eyes", "preprocessor", preferred_package="bob.bio.face"
    )
    eyes = cropper.cropped_positions
    extractor = bob.bio.face.extractor.GridGraph(eyes=eyes)
    assert_picklable(extractor)
def test_LGBPHS():
    import math

    # the LGBPHS extractor must survive a pickling round-trip
    lgbphs = bob.bio.face.extractor.LGBPHS(
        block_size=8,
        block_overlap=0,
        gabor_directions=4,
        gabor_scales=2,
        gabor_sigma=math.sqrt(2.0) * math.pi,
        sparse_histogram=True,
    )
    assert_picklable(lgbphs)
## Algorithms
def test_GaborJet():
    # the Gabor-jet algorithm must survive a pickling round-trip
    algorithm = bob.bio.face.algorithm.GaborJet(
        "PhaseDiffPlusCanberra", multiple_feature_scoring="average_model"
    )
    assert_picklable(algorithm)
def test_Histogram():
    # the histogram algorithm must survive a pickling round-trip
    algorithm = bob.bio.face.algorithm.Histogram()
    assert_picklable(algorithm)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment