Commit b0094952 authored by Tiago de Freitas Pereira

Fixed all transformers

Updated some tests
parent f60a4f8e
Pipeline #44061 failed in 5 minutes and 31 seconds
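Every hunk below applies the same change: the transformers' transform() methods now assume a list (or SampleBatch) of inputs and always return a list, so the isinstance checks that used to branch between a single sample and a batch are removed, and callers wrap single inputs in a list. A minimal sketch of the resulting calling convention, assuming `transformer` is one of the embedding extractors touched below, constructed as in the tests (names here are illustrative, not part of this commit):

    import numpy as np

    # `transformer` is e.g. FaceNetSanderberg(), built as in the tests below
    image = np.random.rand(3, 160, 160).astype("uint8")
    # before this commit: transformer.transform(image) returned a single embedding
    # after this commit: always pass a batch (list) and index into the returned list
    embedding = transformer.transform([image])[0]
    assert embedding.size == 128  # size expected for FaceNetSanderberg in the tests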
@@ -296,7 +296,6 @@ class GaborJet(Algorithm):
         return graph_scoring(local_scores)
     def score_for_multiple_models(self, models, probe):
         self._check_feature(probe)
-        [self._check_feature(m) for model in models for m in model]
......
@@ -193,10 +193,7 @@ class FaceNetSanderberg(TransformerMixin, BaseEstimator):
             features = self.session.run(self.embeddings, feed_dict=feed_dict)
             return features
-        if isinstance(X, list):
-            return [_transform(i) for i in X]
-        else:
-            return _transform(X)
+        return [_transform(i) for i in X]
     @staticmethod
     def get_modelpath():
......
@@ -81,10 +81,7 @@ class TensorflowCompatV1(TransformerMixin, BaseEstimator):
             return self.session.run(self.embedding, feed_dict={self.input_tensor: data},)
-        if isinstance(X, SampleBatch):
-            return [_transform(x) for x in X]
-        else:
-            return _transform(X)
+        return [_transform(x) for x in X]
     def load_model(self):
         import tensorflow as tf
......
@@ -128,10 +128,7 @@ class DCTBlocks(TransformerMixin, BaseEstimator):
             # Computes DCT features
             return self.dct_features(image)
-        if isinstance(X, SampleBatch):
-            return [_extract(x) for x in X]
-        else:
-            return _extract(X)
+        return [_extract(x) for x in X]
     def _more_tags(self):
         return {"stateless": True, "requires_fit": False}
......
@@ -274,11 +274,7 @@ class LGBPHS(TransformerMixin, BaseEstimator):
             # return the concatenated list of all histograms
             return self._sparsify(lgbphs_array)
-        if isinstance(X, SampleBatch):
-            return [_extract(x) for x in X]
-        else:
-            return _extract(X)
+        return [_extract(x) for x in X]
     def __getstate__(self):
         d = self.__dict__.copy()
......
@@ -300,8 +300,7 @@ class FaceCrop(Base):
             The cropped face.
         """
-        def _crop(image, annot):
+        def _crop(image, annot):
             # if annotations are missing and cannot do anything else return None.
             if (
                 not self.is_annotations_valid(annot)
@@ -335,15 +334,10 @@ class FaceCrop(Base):
             # crop face
             return self.data_type(self.crop_face(image, annot))
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data, None) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
     def __getstate__(self):
......
@@ -224,17 +224,10 @@ class FaceDetect(Base):
             # convert data type
             return self.data_type(image)
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
     def __getstate__(self):
         d = dict(self.__dict__)
......
@@ -96,15 +96,12 @@ class HistogramEqualization(Base):
         def _crop(image, annotations):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.equalize_histogram(image)
             return self.data_type(image)
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
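The face preprocessors changed here (HistogramEqualization above, INormLBP, SelfQuotientImage and TanTriggs below) call their optional face cropper one image at a time, so the single-image call is now wrapped into a one-element list and unwrapped again to match the cropper's list-based transform(). A minimal sketch of that idiom, assuming self.cropper is a FaceCrop-style transformer (illustrative only, not the exact code of any one class):

    def _crop(image, annotations):
        image = self.color_channel(image)
        if self.cropper is not None:
            # the cropper now takes and returns lists, so wrap and unwrap
            image = self.cropper.transform([image], [annotations])[0]
        return self.data_type(image)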
@@ -120,19 +120,14 @@ class INormLBP(Base):
         def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations=annotations)
+                image = self.cropper.transform([image], annotations=[annotations])[0]
             image = self.lbp_extractor(image)
             return self.data_type(image)
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
     def __getstate__(self):
         d = dict(self.__dict__)
......
@@ -93,18 +93,14 @@ class SelfQuotientImage(Base):
         def _crop(image, annotations):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.self_quotient(image)
             return self.data_type(image)
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
     def __getstate__(self):
         d = dict(self.__dict__)
......
@@ -110,18 +110,15 @@ class TanTriggs(Base):
         def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
-                image = self.cropper.transform(image, annotations)
+                image = self.cropper.transform([image], [annotations])[0]
             image = self.tan_triggs(image)
             return self.data_type(image)
-        if isinstance(X, SampleBatch):
-            if annotations is None:
-                return [_crop(data) for data in X]
-            else:
-                return [_crop(data, annot) for data, annot in zip(X, annotations)]
+        if annotations is None:
+            return [_crop(data) for data in X]
         else:
-            return _crop(X, annotations)
+            return [_crop(data, annot) for data, annot in zip(X, annotations)]
     def __getstate__(self):
......
@@ -62,7 +62,7 @@ def run_baseline(baseline, samples_for_training=[]):
     probes = get_fake_sample_set(purpose="probe")
     # Regular pipeline
-    pipeline = load_resource(baseline, "pipeline")
+    pipeline = load_resource(baseline, "pipeline")
     scores = pipeline(samples_for_training, biometric_references, probes)
     assert len(scores) == 1
     assert len(scores[0]) == 1
@@ -72,7 +72,7 @@ def run_baseline(baseline, samples_for_training=[]):
         checkpoint_pipeline = checkpoint_vanilla_biometrics(
             copy.deepcopy(pipeline), base_dir=d
-        )
+        )
         checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
         assert len(checkpoint_scores) == 1
         assert len(checkpoint_scores[0]) == 1
......
@@ -13,14 +13,13 @@ def test_facenet():
     transformer = FaceNetSanderberg()
     # Raw data
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
     # Sample Batch
     sample = Sample(data)
     transformer_sample = wrap(["sample"], transformer)
-    output = [s.data for s in transformer_sample.transform([sample])][0]
+    output = [s.data for s in transformer_sample.transform([sample])][0]
     assert output.size == 128, output.shape
@@ -36,7 +35,7 @@ def test_idiap_inceptionv2_msceleb():
     np.random.seed(10)
     transformer = InceptionResnetv2_MsCeleb()
     data = (np.random.rand(3, 160, 160) * 255).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
     # Sample Batch
@@ -54,7 +53,7 @@ def test_idiap_inceptionv2_casia():
     np.random.seed(10)
     transformer = InceptionResnetv2_CasiaWebFace()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
     # Sample Batch
@@ -71,7 +70,7 @@ def test_idiap_inceptionv1_msceleb():
     np.random.seed(10)
     transformer = InceptionResnetv1_MsCeleb()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
     # Sample Batch
@@ -88,7 +87,7 @@ def test_idiap_inceptionv1_casia():
     np.random.seed(10)
     transformer = InceptionResnetv1_CasiaWebFace()
     data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
     # Sample Batch
@@ -109,7 +108,7 @@ def test_arface_insight_tf():
     np.random.seed(10)
     transformer = ArcFace_InsightFaceTF()
     data = np.random.rand(3, 112, 112).astype("uint8")
-    output = transformer.transform(data)
+    output = transformer.transform([data])[0]
     assert output.size == 512, output.shape
     # Sample Batch
......
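The embedding tests above also push each transformer through the sample-wrapping machinery, so the same list-based call works on Sample objects. A short sketch of that pattern as used in the test file, assuming Sample and wrap come from bob.pipelines as imported there (the variables data and transformer are the ones defined in each test):

    from bob.pipelines import Sample, wrap

    sample = Sample(data)
    transformer_sample = wrap(["sample"], transformer)
    output = [s.data for s in transformer_sample.transform([sample])][0]
    assert output.size == 128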
@@ -78,7 +78,7 @@ def test_dct_blocks():
     dct = bob.bio.face.extractor.DCTBlocks(8, (0, 0), 15)
     # extract features
-    feature = dct.transform(data)
+    feature = dct.transform([data])[0]
     assert feature.ndim == 2
     # feature dimension is one lower than the block size, since blocks are normalized by default
     assert feature.shape == (80, 14)
@@ -158,7 +158,7 @@ def test_lgbphs():
     )
     # extract feature
-    feature = lgbphs.transform(data)
+    feature = lgbphs.transform([data])[0]
     assert feature.ndim == 2
     reference = pkg_resources.resource_filename(
@@ -175,7 +175,7 @@ def test_lgbphs():
         gabor_sigma=math.sqrt(2.0) * math.pi,
         use_gabor_phases=True,
     )
-    feature = lgbphs.transform(data)
+    feature = lgbphs.transform([data])[0]
     assert feature.ndim == 1
     reference = pkg_resources.resource_filename(
......
@@ -118,7 +118,8 @@ def test_face_crop():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/cropped.hdf5"
     )
-    ref_image = _compare(cropper.transform(image, annotation), reference)
+    ref_image = _compare(cropper.transform([image], [annotation]), reference)
     # test the preprocessor with fixed eye positions (which correspond to the ones
     fixed_cropper = bob.bio.face.preprocessor.FaceCrop(
@@ -126,12 +127,12 @@ def test_face_crop():
         cropper.cropped_positions,
         fixed_positions={"reye": annotation["reye"], "leye": annotation["leye"]},
     )
-    # result must be identical to the original face cropper (same eyes are used)
-    _compare(fixed_cropper.transform(image), reference)
+    # result must be identical to the original face cropper (same eyes are used)
+    _compare(fixed_cropper.transform([image]), reference)
     # check color cropping
     cropper.channel = "rgb"
-    cropped = cropper.transform(image, annotation)
+    cropped = cropper.transform([image], [annotation])[0]
     assert cropped.ndim == 3
     assert cropped.shape[0] == 3
     assert cropped.shape[1:] == ref_image.shape
@@ -142,7 +143,7 @@ def test_face_crop():
     # test a ValueError is raised if eye annotations are swapped
     try:
         annot = dict(reye=annotation["leye"], leye=annotation["reye"])
-        cropper.transform(image, annot)
+        cropper.transform([image], [annot])
         assert 0, "FaceCrop did not raise a ValueError for swapped eye annotations"
     except ValueError:
         pass
@@ -171,7 +172,7 @@ def test_face_detect():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/detected.hdf5"
     )
-    _compare(cropper.transform(image, annotation), reference)
+    _compare(cropper.transform([image], [annotation]), reference)
     assert abs(cropper.quality - 39.209601948013685) < 1e-5
     # execute face detector with flandmark
@@ -182,7 +183,7 @@ def test_face_detect():
     reference = pkg_resources.resource_filename(
         "bob.bio.face.test", "data/flandmark.hdf5"
     )
-    _compare(cropper.transform(image, annotation), reference)
+    _compare(cropper.transform([image], [annotation]), reference)
     assert abs(cropper.quality - 39.209601948013685) < 1e-5
@@ -203,7 +204,7 @@ def test_tan_triggs():
     # execute face cropper
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/tan_triggs_cropped.hdf5"
         )
@@ -214,7 +215,7 @@ def test_tan_triggs():
     assert preprocessor.cropper is None
     # result must be identical to the original face cropper (same eyes are used)
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/tan_triggs_none.hdf5"
         )
@@ -247,7 +248,7 @@ def test_inorm_lbp():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/inorm_lbp_cropped.hdf5"
         )
@@ -288,7 +289,7 @@ def test_heq():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image],[annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/histogram_cropped.hdf5"
         ),
@@ -331,7 +332,7 @@ def test_sqi():
     assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)
     # execute preprocessor
     _compare(
-        preprocessor.transform(image, annotation),
+        preprocessor.transform([image], [annotation]),
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/self_quotient_cropped.hdf5"
         )
......
@@ -77,19 +77,16 @@ def test_gabor_graph():
     transformer = load_resource("gabor_graph", "transformer")
     fake_sample = get_fake_sample()
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
-    assert len(transformed_sample.data) == 80
+    assert len(transformed_sample.data) == 400
-def test_lgbphs():
+def test_lgbphs():
     transformer = load_resource("lgbphs", "transformer")
     fake_sample = get_fake_sample()
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
-    assert transformed_sample.data.shape == (2, 44014)
+    assert transformed_sample.data.shape == (2, 220267)