#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Thu May 24 10:41:42 CEST 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


import numpy
import pkg_resources
import pytest

import bob.bio.base
import bob.bio.face
import bob.io.base

from bob.bio.base.test.utils import is_library_available
from bob.bio.base.utils.annotations import read_annotation_file
from bob.bio.face.color import rgb_to_gray
from bob.bio.face.preprocessor import BoundingBoxAnnotatorCrop
from bob.bio.face.preprocessor.croppers import FaceCropBoundingBox, FaceEyesNorm

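# If True, newly computed data overwrites the stored reference files in _compare()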
regenerate_refs = False

# Cropping
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

# eye positions for frontal images
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)


def _compare(
    data,
    reference,
    write_function=bob.bio.base.save,
    read_function=bob.bio.base.load,
    atol=1e-5,
    rtol=1e-8,
):
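    """Compare ``data`` to the contents of the ``reference`` file.

    If ``regenerate_refs`` is set, the reference file is rewritten from
    ``data`` first. Returns the loaded reference for further checks.
    """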
    # write reference?
    if regenerate_refs:
        write_function(data, reference)

    # compare reference
    reference = read_function(reference)
    numpy.testing.assert_allclose(data, reference, rtol=rtol, atol=atol)
    return reference


def _image():
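    # load the color test image shipped with the package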
    return bob.io.base.load(
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/testimage.jpg"
        )
    )


def _annotation():
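    # load the eye annotations belonging to the test image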
    return read_annotation_file(
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/testimage.json"
        ),
        "json",
    )


def test_base():
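    # gray-scale conversion with casting to float64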
    base = bob.bio.face.preprocessor.Base(
        color_channel="gray", dtype=numpy.float64
    )
    assert isinstance(base, bob.bio.face.preprocessor.Base)

    # read input
    image = _image()

    preprocessed = base.transform([image])[0]

    assert preprocessed.ndim == 2
    assert preprocessed.dtype == numpy.float64
    assert numpy.allclose(preprocessed, rgb_to_gray(image))

    # color output
    base = bob.bio.face.preprocessor.Base(
        color_channel="rgb", dtype=numpy.uint8
    )
    colored = base.transform([rgb_to_gray(image)])[0]

    assert colored.ndim == 3
    assert colored.dtype == numpy.uint8
    assert all(
        numpy.allclose(colored[c], rgb_to_gray(image).astype("uint8"))
        for c in range(3)
    )

    colored = base.transform([image])[0]

    assert colored.ndim == 3
    assert colored.dtype == numpy.uint8
    assert numpy.all(colored == image)


def test_face_crop():
    # read input
    image, annotation = _image(), _annotation()

    # define the preprocessor
    cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
        dtype=int,
    )

    assert isinstance(cropper, bob.bio.face.preprocessor.FaceCrop)
    assert isinstance(cropper, bob.bio.face.preprocessor.Base)

    # execute face cropper
    reference = pkg_resources.resource_filename(
        "bob.bio.face.test", "data/cropped.hdf5"
    )

    ref_image = _compare(cropper.transform([image], [annotation])[0], reference)

    # test the preprocessor with fixed eye positions (which correspond to
    # the ones in the annotation of the test image)
    fixed_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropper.cropped_image_size,
        cropper.cropped_positions,
        fixed_positions={
            "reye": annotation["reye"],
            "leye": annotation["leye"],
        },
        dtype=int,
    )

    # result must be identical to the original face cropper (same eyes are used)
    _compare(fixed_cropper.transform([image])[0], reference)

    # check color cropping
    cropper.color_channel = "rgb"
    cropped = cropper.transform([image], [annotation])[0]

    assert cropped.ndim == 3
    assert cropped.shape[0] == 3
    assert cropped.shape[1:] == ref_image.shape
    assert numpy.allclose(rgb_to_gray(cropped), ref_image, atol=1.0, rtol=1.0)

    # test that a ValueError is raised if the eye annotations are swapped
    annot = dict(reye=annotation["leye"], leye=annotation["reye"])
    with pytest.raises(ValueError):
        cropper.transform([image], [annot])

    # reset the configuration, so that later tests are not affected
    cropper.color_channel = "gray"


class FakeAnnotator(bob.bio.face.annotator.Base):
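    """Stub annotator that never finds a face (always returns ``None``)."""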
    def annotate(self, X):
        return None


@is_library_available("tensorflow")
def test_bounding_box_annotator_crop():
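    # BoundingBoxAnnotatorCrop first crops the face using the bounding box,
    # then runs the given annotator (here MTCNN, hence the tensorflow
    # requirement) to locate the eyes for the final eyes-based crop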
    # read input
    image = _image()
    _, bbox_annotation = [
        read_annotation_file(
            pkg_resources.resource_filename(
                "bob.bio.face.test", "data/" + filename + ".json"
            ),
            "json",
        )
        for filename in ["testimage", "testimage_bbox"]
    ]

    final_image_size = (112, 112)
    reference_eyes_location = {
        "leye": (55, 72),
        "reye": (55, 40),
    }

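    # FaceEyesNorm geometrically normalizes the image so that the eyes end
    # up at the given reference locations in the final image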
    eyes_cropper = FaceEyesNorm(reference_eyes_location, final_image_size)
    face_cropper = BoundingBoxAnnotatorCrop(
        eyes_cropper=eyes_cropper, annotator="mtcnn"
    )

    # Cropping and checking
    crops = face_cropper.transform([image], [bbox_annotation])[0]
    assert crops.shape == (3, 112, 112)

    # test with a face annotator that fails to detect anything (returns None)
    face_cropper = BoundingBoxAnnotatorCrop(
        eyes_cropper=eyes_cropper, annotator=FakeAnnotator()
    )

    # Cropping and checking
    crops = face_cropper.transform([image], [bbox_annotation])[0]
    assert crops.shape == (3, 112, 112)


def test_multi_face_crop():
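    # MultiFaceCrop selects, per sample, the first cropper in croppers_list
    # whose expected annotations are present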
    # read input
    image = _image()
    eye_annotation, bbox_annotation = [
        read_annotation_file(
            pkg_resources.resource_filename(
                "bob.bio.face.test", "data/" + filename + ".json"
            ),
            "json",
        )
        for filename in ["testimage", "testimage_bbox"]
    ]

    # define the preprocessor
    eyes_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
        dtype=int,
    )

    face_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropper=FaceCropBoundingBox(
            final_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
        ),
        dtype=int,
    )

    cropper = bob.bio.face.preprocessor.MultiFaceCrop(
        croppers_list=[eyes_cropper, face_cropper]
    )

    # execute face cropper
    eye_reference, bbox_reference = [
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/" + filename + ".hdf5"
        )
        for filename in ["cropped", "cropped_bbox"]
    ]

    eye_cropped, bbox_cropped = cropper.transform(
        [image, image], [eye_annotation, bbox_annotation]
    )

    # Compare the cropped results to the reference (note: _compare already
    # rewrites the reference when regenerate_refs is set, so no explicit
    # save is needed here, which would make the comparison vacuous)
    _compare(eye_cropped, eye_reference)
    _compare(bbox_cropped.astype("uint8"), bbox_reference)

    # test that a ValueError is raised if the annotations don't match any cropper
    annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
    with pytest.raises(ValueError):
        cropper.transform([image], [annot])

    # test that the first matching cropper is used when multiple annotation
    # sets exist
    annot = {**eye_annotation, **bbox_annotation}
    eye_cropped = cropper.transform([image], [annot])[0]
    _compare(eye_cropped, eye_reference)


def test_tan_triggs():
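    # TanTriggs applies the Tan & Triggs illumination normalization after
    # the (optional) face cropping step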
    # read input
    image, annotation = _image(), _annotation()

    face_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
    )

    preprocessor = bob.bio.face.preprocessor.TanTriggs(
        face_cropper=face_cropper
    )

    assert isinstance(preprocessor, bob.bio.face.preprocessor.TanTriggs)
    assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
    assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)

    # execute face cropper
    _compare(
        preprocessor.transform([image], [annotation])[0],
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/tan_triggs_cropped.hdf5"
        ),
        atol=1e-3,
        rtol=1e-3,
    )

    # test the preprocessor without cropping
    preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper=None)
    assert preprocessor.cropper is None

    # the result of the un-cropped preprocessor must match its stored reference
    _compare(
        preprocessor.transform([image], [annotation])[0],
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/tan_triggs_none.hdf5"
        ),
        atol=1e-3,
        rtol=1e-3,
    )


def test_inorm_lbp():
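    # INormLBP replaces each pixel by its local binary pattern code, which
    # suppresses monotonic illumination changes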
    # read input
    image, annotation = _image(), _annotation()

    face_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
    )

    preprocessor = bob.bio.face.preprocessor.INormLBP(
        face_cropper=face_cropper, dtype=numpy.float64
    )

    assert isinstance(preprocessor, bob.bio.face.preprocessor.INormLBP)
    assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
    assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)

    # execute preprocessor
    _compare(
        preprocessor.transform([image], [annotation])[0],
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/inorm_lbp_cropped.hdf5"
        ),
    )

    # load the preprocessor without cropping
    preprocessor = bob.bio.face.preprocessor.INormLBP(
        face_cropper=None,
    )
    assert preprocessor.cropper is None


def test_heq():
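    # HistogramEqualization spreads the gray-level histogram of the cropped
    # face to enhance contrast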
    # read input
    image, annotation = _image(), _annotation()

    face_cropper = bob.bio.face.preprocessor.FaceCrop(
        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
    )
    preprocessor = bob.bio.face.preprocessor.HistogramEqualization(
        face_cropper=face_cropper
    )

    assert isinstance(
        preprocessor, bob.bio.face.preprocessor.HistogramEqualization
    )
    assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
    assert isinstance(preprocessor.cropper, bob.bio.face.preprocessor.FaceCrop)

    # execute preprocessor
    _compare(
        preprocessor.transform([image], [annotation])[0],
        pkg_resources.resource_filename(
            "bob.bio.face.test", "data/histogram_cropped.hdf5"
        ),
    )

    # load the preprocessor without cropping
    preprocessor = bob.bio.face.preprocessor.HistogramEqualization(
        face_cropper=None
    )
    assert preprocessor.cropper is None
    # load the preprocessor with landmark detection