galbally_iqm_features.py 27.7 KB
Newer Older
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

'''
Created on 25 Sep 2015

@author: sbhatta
'''


import numpy as np
import scipy.signal as ssg
import scipy.ndimage.filters as snf

import bob.ip.base


"""
Amir Mohammadi's avatar
Amir Mohammadi committed
19
20
21
22
23
24
compute_quality_features is the main function to be called, to extract a set of
image quality-features computed for the input image
:param image: 2d numpy array. Should contain input image of size [M,N] (i.e. M
    rows x N cols).
:return featSet: a tuple of float-scalars, each representing one image-quality
    measure.
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
25
"""
Amir Mohammadi's avatar
Amir Mohammadi committed
26
27


Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
28
29
def compute_quality_features(image):
    """Extract the Galbally image-quality feature set for one input image.

    Parameters:

    image (:py:class:`numpy.ndarray`): A ``uint8`` array with 2 or 3
        dimensions. A 2D array is taken to be a gray-level image of shape
        [M,N] (M rows x N cols). A 3D array must have shape [3,M,N] and is
        taken to be a channel-first RGB image.

    Returns:

    featSet (:py:class:`numpy.ndarray`): a 1D numpy array of 18 float32
        scalars, each representing one image-quality measure. These are a
        subset of the image-quality features (for face anti-spoofing)
        described by Galbally et al. in: "Image Quality Assessment for Fake
        Biometric Detection: Application to Iris, Fingerprint, and Face
        Recognition", IEEE Trans. on Image Processing Vol 23(2), 2014.
        Returns ``None`` (after printing a message) when the input shape is
        not acceptable.
    """
    ndim = len(image.shape)

    # Reduce the input to a single gray-level plane.
    if ndim == 3 and image.shape[0] == 3:
        # compute gray-level image for input color-frame
        gray_image = matlab_rgb2gray(image)
    elif ndim == 2:
        gray_image = image
    else:
        # Keep the original best-effort behaviour: report and return None.
        if ndim == 3:
            print('error. Wrong kind of input image')
        else:
            print('error -- wrong kind of input')
        return None

    # Several of the quality measures computed here need a reference image.
    # For those, the input itself serves as the reference, and the measure is
    # computed against a smoothed copy: the assumption is that smoothing
    # degrades a spoof-image more than a genuine image (see Galbally's paper
    # referenced above).
    gwin = gauss_2d((3, 3), 0.5)  # set up the smoothing-filter
    smoothed = ssg.convolve2d(
        gray_image, gwin, boundary='symm', mode='same')

    return image_quality_measures(gray_image, smoothed)


"""
Amir Mohammadi's avatar
Amir Mohammadi committed
91
92
93
94
95
actually computes various measures of similarity between the two input images,
but also returns some descriptors of the reference-image that are independent
of any other image. Returns a tuple of 18 values, each of which is a float-
scalar. The quality measures computed in this function correspond to the Image-
quality features discussed in Galbally et al., 2014.
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
96
"""
Amir Mohammadi's avatar
Amir Mohammadi committed
97
98


Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
99
def image_quality_measures(refImage, testImage):
    """Compute image-quality measures for testImage and return them as one
       array of quality-measures. Some of the quality-measures require a
       reference-image, but others are 'no-reference' measures.
       :input refImage: 2d numpy array. Should represent input 8-bit gray-
           level image of size [M,N].
       :input testImage: 2d numpy array. Should represent input 8-bit gray-
           level image of size [M,N].
       :return a 1D numpy array of 18 float32 scalars. The quality measures
            computed in this function correspond to the Image-quality
            features discussed in Galbally et al., 2014.
    """
    assert len(refImage.shape) == 2, "refImage should be a 2D array"
    assert len(testImage.shape) == 2, "testImage should be a 2D array"
    # shape[0] counts rows (height) and shape[1] counts columns (width); the
    # original assertion messages had the two swapped.
    assert (refImage.shape[0] == testImage.shape[0]
            ), "The two images should have the same height"
    assert (refImage.shape[1] == testImage.shape[1]
            ), "The two images should have the same width"

    # use the builtin float: the np.float alias was removed in numpy 1.24
    diffImg = refImage.astype(float) - testImage.astype(float)
    diffSq = np.square(diffImg)
    sumDiffSq = np.sum(diffSq)
    absDiffImg = np.absolute(diffImg)

    refSq = np.square(refImage.astype(float))
    sumRefSq = np.sum(refSq)

    # number of pixels in each image
    numPx = refImage.shape[0] * refImage.shape[1]
    maxPxVal = 255.0

    # 1 MSE
    mse00 = float(sumDiffSq) / float(numPx)

    # 2 PSNR
    psnr01 = np.inf
    if mse00 > 0:
        psnr01 = 10.0 * np.log10(maxPxVal * maxPxVal / mse00)

    # 3 AD: Average difference
    ad02 = float(np.sum(diffImg)) / float(numPx)

    # 4 SC: structural content
    testSq = np.square(testImage.astype(float))
    sumTestSq = np.sum(testSq)
    sc03 = np.inf
    if sumTestSq > 0:
        sc03 = float(sumRefSq) / float(sumTestSq)

    # 5 NK: normalized cross-correlation
    # element-wise product, computed in float: a uint8*uint8 product would
    # silently overflow (wrap modulo 256)
    imgProd = refImage.astype(float) * testImage.astype(float)
    nk04 = float(np.sum(imgProd)) / float(sumRefSq)

    # 6 MD: Maximum difference
    md05 = float(np.amax(absDiffImg))

    # 7 LMSE: Laplacian MSE. scipy's implementation of laplacian is different
    # from Matlab's version, especially at the image-borders. The significant
    # differences between scipy laplace and Matlab's del2() are:
    #    a. Matlab del2() divides the convolution result by 4, so the ratio
    #       (scipy.laplace() result)/(del2()-result) is 4
    #    b. Matlab does a different kind of processing at the boundaries, so
    #       the results at the boundaries are different in the 2 calls.
    # In Galbally's Matlab code, there is a factor of 4, which I have dropped
    # (no difference in result),
    # because this is implicit in scipy.ndimage.filters.laplace()
    # mode can be 'wrap', 'reflect', 'nearest', 'mirror', or ['constant' with
    # a specified value]
    op = snf.laplace(refImage, mode='reflect')
    opSq = np.square(op)
    sum_opSq = np.sum(opSq)
    tmp1 = (op - (snf.laplace(testImage, mode='reflect')))
    num_op = np.square(tmp1)
    lmse06 = float(np.sum(num_op)) / float(sum_opSq)

    # 8 NAE: normalized abs. error
    sumRef = np.sum(np.absolute(refImage))
    nae07 = float(np.sum(absDiffImg)) / float(sumRef)

    # 9 SNRv: SNR in db
    # guard against identical images (sumDiffSq == 0), consistent with the
    # handling of the degenerate case in the PSNR computation above
    snrv08 = np.inf
    if sumDiffSq > 0:
        snrv08 = 10.0 * np.log10(float(sumRefSq) / float(sumDiffSq))

    # 10 RAMDv: R-averaged max diff (r=10)
    # implementation below is same as what Galbally does in Matlab
    r = 10
    # the [::-1] flips the sorted vector, so that it is in descending order
    # (renamed from 'sorted' to avoid shadowing the builtin)
    sortedDiff = np.sort(diffImg.flatten())[::-1]
    topsum = np.sum(sortedDiff[0:r])
    ramdv09 = np.sqrt(float(topsum) / float(r))

    # 11,12: MAS: Mean Angle Similarity, MAMS: Mean Angle-Magnitude Similarity
    mas10, mams11 = angle_similarity(refImage, testImage, diffImg)

    fftRef = np.fft.fft2(refImage)
    # fftTest = np.fft.fft2(testImage)

    # 13, 14: SME: spectral magnitude error; SPE: spectral phase error
    # spectralSimilarity(fftRef, fftTest, numPx)
    sme12, spe13 = spectral_similarity(refImage, testImage)

    # 15 TED: Total edge difference
    # ted14 = edge_similarity(refImage, testImage)

    # 16 TCD: Total corner difference
    # tcd15 = corner_similarity(refImage, testImage)

    # 17, 18: GME: gradient-magnitude error; GPE: gradient phase error
    gme16, gpe17 = gradient_similarity(refImage, testImage)

    # 19 SSIM
    ssim18, _ = ssim(refImage, testImage)

    # 20 VIF
    vif19 = vif(refImage, testImage)

    # 21,22,23,24,25: RRED, BIQI, JQI, NIQE: these parameters are not computed
    # here.

    # 26 HLFI: high-low frequency index (implemented as done by Galbally in
    # Matlab).
    hlfi25 = high_low_freq_index(fftRef, refImage.shape[1])

    return np.asarray(
        (mse00,
         psnr01,
         ad02,
         sc03,
         nk04,
         md05,
         lmse06,
         nae07,
         snrv08,
         ramdv09,
         mas10,
         mams11,
         sme12,
         gme16,
         gpe17,
         ssim18,
         vif19,
         hlfi25),
        dtype=np.float32)
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
241
242
243
244
245


"""
Matlab-like RGB to gray...
"""
Amir Mohammadi's avatar
Amir Mohammadi committed
246
247
248
249
250


def matlab_rgb2gray(rgbImage):
    '''converts color rgbImage to gray to produce exactly the same result as
    Matlab would.

    Inputs:
    rgbimage: numpy array of shape [3, height, width] (channel-first RGB,
        8-bit channel values)
    Return:
    numpy array of shape [height, width] containing a gray-image with
    floating-point pixel values, in the range [(16.0/255) .. (235.0/255)]
    '''
    # ITU-R BT.601 luma coefficients as used by Matlab, rescaled to a [0,1]
    # input range, plus the 16/255 offset of the studio-swing (16..235) range.
    # (Standard CCIR601 would be 0.299*R + 0.587*G + 0.114*B without offset.)
    weights = (65.481 / 255.0, 128.553 / 255.0, 24.966 / 255.0)
    offset = 16.0 / 255.0

    normalized = rgbImage / 255.0
    luma = (weights[0] * normalized[0, :, :] +
            weights[1] * normalized[1, :, :] +
            weights[2] * normalized[2, :, :])
    return offset + luma


"""
Amir Mohammadi's avatar
Amir Mohammadi committed
273
274
275
SSIM: Structural Similarity index between two gray-level images. The dynamic
range is assumed to be 0..255.
Ref:Z. Wang, A.C. Bovik, H.R. Sheikh and E.P. Simoncelli:
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
276
277
278
279
    "Image Quality Assessment: From error measurement to Structural Similarity"
    IEEE Trans. on Image Processing, 13(1), 01/2004
    @param refImage: 2D numpy array (reference image)
    @param testImage: 2D numpy array (test image)
Amir Mohammadi's avatar
Amir Mohammadi committed
280
281
282
283
284
285
    Both input images should have the same dimensions. This is assumed, and not
    verified in this function
    @return ssim: float-scalar. The mean structural similarity between the 2
        input images.
    @return ssim_map: the SSIM index map of the test image (this map is smaller
        than the test image).
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
286
"""
Amir Mohammadi's avatar
Amir Mohammadi committed
287
288


Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
289
290
def ssim(refImage, testImage):
    """Compute and return SSIM between two gray-level images.

    The dynamic range of the inputs is assumed to be 0..255. Both input
    images should have the same dimensions; this is assumed, and not
    verified here.

    Parameters:

    refImage: 2D numpy array (reference image)
    testImage: 2D numpy array (test image)

    Returns:

    ssim: float-scalar. The mean structural similarity between the 2 input
        images (-inf when the images are smaller than the 11x11 filter).
    ssim_map: the SSIM index map of the test image (this map is smaller than
        the test image, since only 'valid' filter responses are kept).
    """
    M = refImage.shape[0]
    N = refImage.shape[1]

    winSz = 11  # window size for gaussian filter
    winSgm = 1.5  # sigma for gaussian filter

    # input image should be at least 11x11 in size.
    if(M < winSz) or (N < winSz):
        ssim_index = -np.inf
        ssim_map = -np.inf

        return ssim_index, ssim_map

    # work in float to avoid integer overflow in the squared terms below
    # (no-op when the inputs are already floating-point)
    refImage = refImage.astype(np.float64)
    testImage = testImage.astype(np.float64)

    # construct the gaussian filter
    gwin = gauss_2d((winSz, winSz), winSgm)

    # constants taken from the initial matlab implementation provided by
    # Bovik's lab.
    K1 = 0.01
    K2 = 0.03
    L = 255  # dynamic range.

    C1 = (K1 * L) * (K1 * L)
    C2 = (K2 * L) * (K2 * L)

    # local means (ssg is scipy.signal)
    mu1 = ssg.convolve2d(refImage, gwin, mode='valid')
    mu2 = ssg.convolve2d(testImage, gwin, mode='valid')

    mu1Sq = mu1 * mu1
    mu2Sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2

    # local variances and covariance
    sigma1_sq = ssg.convolve2d(
        (refImage * refImage),
        gwin,
        mode='valid') - mu1Sq
    # BUGFIX: the variance of the test image must subtract mu2Sq; the
    # original code subtracted mu1Sq here.
    sigma2_sq = ssg.convolve2d(
        (testImage * testImage),
        gwin,
        mode='valid') - mu2Sq
    sigma12 = ssg.convolve2d(
        (refImage * testImage),
        gwin,
        mode='valid') - mu1_mu2

    # BUGFIX: the message was split across two statements, so its second
    # half was a dead no-op string; joined into a single assert message.
    assert (C1 > 0 and C2 > 0), ("Conditions for computing ssim with this "
                                 "code are not met. Set the Ks and L to "
                                 "values > 0.")
    num1 = (2.0 * mu1_mu2 + C1) * (2.0 * sigma12 + C2)
    den1 = (mu1Sq + mu2Sq + C1) * (sigma1_sq + sigma2_sq + C2)
    ssim_map = num1 / den1

    # mean SSIM over the map (local name renamed so it does not shadow this
    # function)
    mean_ssim = np.average(ssim_map)

    return mean_ssim, ssim_map


def vif(refImage, testImage):
    """
    VIF: Visual Information Fidelity measure.
    Ref: H.R. Sheikh and A.C. Bovik: "Image Information and Visual Quality",
    IEEE Trans. Image Processing. Adapted from Galbally's matlab code, which
    was provided by Bovik et al's LIVE lab.
        @param refImage: 2D numpy array (reference image)
        @param testImage: 2D numpy array (test image)
        Both input images should have the same dimensions. This is assumed,
        and not verified in this function
        @return vifp: float-scalar. Measure of visual information fidelity
            between the 2 input images
    """
    # variance of the additive 'visual noise' assumed in the model
    sigma_nsq = 2.0
    # accumulators for the numerator/denominator of the final VIF ratio,
    # summed over all scales
    num = 0
    den = 0

    # sc is scale, taking values (1,2,3,4)
    for sc in range(1, 5):
        # gaussian window shrinks with each scale: N = 17, 9, 5, 3
        N = (2**(4 - sc + 1)) + 1
        win = gauss_2d((N, N), (float(N) / 5.0))

        if sc > 1:
            # low-pass filter, then decimate: build the next pyramid level
            refImage = ssg.convolve2d(refImage, win, mode='valid')
            testImage = ssg.convolve2d(testImage, win, mode='valid')
            # downsample by factor 2 in each direction
            refImage = refImage[::2, ::2]
            testImage = testImage[::2, ::2]

        # local means
        mu1 = ssg.convolve2d(refImage, win, mode='valid')
        mu2 = ssg.convolve2d(testImage, win, mode='valid')
        mu1Sq = mu1 * mu1
        mu2Sq = mu2 * mu2
        mu1_mu2 = mu1 * mu2

        # local variances and covariance
        sigma1_sq = ssg.convolve2d(
            (refImage * refImage), win, mode='valid') - mu1Sq
        sigma2_sq = ssg.convolve2d(
            (testImage * testImage), win, mode='valid') - mu2Sq
        sigma12 = ssg.convolve2d(
            (refImage * testImage),
            win,
            mode='valid') - mu1_mu2

        sigma1_sq[sigma1_sq < 0] = 0  # set negative filter responses to 0.
        sigma2_sq[sigma2_sq < 0] = 0

        # g: gain of the local linear model relating test to reference;
        # sv_sq: variance of the residual of that model
        g = sigma12 / (sigma1_sq + 1e-10)
        sv_sq = sigma2_sq - g * sigma12

        # mask out numerically-degenerate neighbourhoods; the exact order of
        # these masking steps mirrors the original matlab implementation and
        # must not be rearranged
        g[(sigma1_sq < 1e-10)] = 0
        sv_sq[sigma1_sq < 1e-10] = sigma2_sq[sigma1_sq < 1e-10]
        sigma1_sq[sigma1_sq < 1e-10] = 0

        g[(sigma2_sq < 1e-10)] = 0
        sv_sq[sigma2_sq < 1e-10] = 0

        sv_sq[g < 0] = sigma2_sq[g < 0]
        g[g < 0] = 0
        # sic. As implemented in the original matlab version...
        sv_sq[sv_sq <= 1e-10] = 1e-10

        # log(1 + SNR)-style information terms, accumulated over this scale
        m1 = g * g * sigma1_sq
        m2 = sv_sq + sigma_nsq
        m3 = np.log10(1 + m1 / m2)

        m4 = np.log10(1 + (sigma1_sq / sigma_nsq))

        num += np.sum(m3)
        den += np.sum(m4)

    vifp = num / den
    return vifp


def high_low_freq_index(imgFFT, ncols):
    """
    HLFI: relative difference between high- and low-frequency energy in
    image.
    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
    metrics", IEEE Trans. Image Processing, 12, 2003.
    @param imgFFT: 2D numpy array of complex numbers, representing Fourier
        transform of test image.
    @param ncols: int. Number of columns in image.
    @return float-scalar.
    """
    # A central band of columns, 15% of the image width, counts as 'low
    # frequency'; all remaining columns count as 'high frequency'.
    freqSel = 0.15
    midCol = int(round(ncols / 2))  # round up to the middle column
    bandHalfWidth = int(round(round(freqSel * ncols) / 2.0))

    magnitude = np.abs(imgFFT)
    totalEnergy = np.sum(magnitude)

    bandStart = midCol - bandHalfWidth
    bandStop = midCol + bandHalfWidth

    lowFreqTotal = np.sum(magnitude[:, bandStart:bandStop])

    # zero out the low-frequency band; whatever remains is high-frequency
    magnitude[:, bandStart:bandStop] = 0
    highFreqTotal = np.sum(magnitude)

    return np.abs(lowFreqTotal - highFreqTotal) / float(totalEnergy)

Amir Mohammadi's avatar
Amir Mohammadi committed
475

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
476
def gradient_similarity(refImage, testImage):
    """
    Image similarity based on gradient. Computes the mean phase and magnitude
    difference of gradient between input reference and test images.
    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
    metrics", IEEE Trans. Image Processing, 12, 2003.
        @param refImage: 2D numpy array (reference image)
        @param testImage: 2D numpy array (test image)
        Both input images should have the same dimensions. This is assumed,
        and not verified in this function.
        @return difGradMag: float-scalar. Mean difference in gradient-
            magnitude.
        @return difGradPhase: float-scalar. Mean difference in gradient-phase.
    """
    # we assume that testImage is of the same shape as refImage
    numPx = refImage.shape[0] * refImage.shape[1]

    # compute gradient (a la matlab) for both images.
    # 5: spacing of 5 pixels between 2 sites of gradient evaluation; one
    # scalar spacing applies to both dimensions. (The original code passed
    # the spacing inconsistently -- (5, 5) for the reference image and 5 for
    # the test image -- which is numerically equivalent but confusing.)
    refGrad = np.gradient(refImage, 5)
    testGrad = np.gradient(testImage, 5)

    # treat the two gradient components as the real and imaginary parts of a
    # complex number, to obtain per-pixel gradient magnitude and phase
    refReal = refGrad[0]
    refImag = refGrad[1]
    refGradComplex = refReal + 1j * refImag

    refMag = np.abs(refGradComplex)
    refPhase = np.arctan2(refImag, refReal)

    testReal = testGrad[0]
    testImag = testGrad[1]
    testGradComplex = testReal + 1j * testImag

    testMag = np.abs(testGradComplex)
    testPhase = np.arctan2(testImag, testReal)

    # mean absolute per-pixel differences of phase and magnitude
    absPhaseDiff = np.abs(refPhase - testPhase)
    difGradPhase = (np.sum(absPhaseDiff)) / float(numPx)

    absMagDiff = np.abs(refMag - testMag)
    difGradMag = float(np.sum(absMagDiff)) / float(numPx)

    return difGradMag, difGradPhase

Amir Mohammadi's avatar
Amir Mohammadi committed
524

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
525
def testRegionalMax():
    """Ad-hoc visual check for regionalmax(): build a small matrix with
    known plateaus and peaks, then print the input next to the detected
    regional-maxima bitmap."""
    # 10-valued background with two square plateaus and a 3-pixel ridge
    A = 10 * np.ones([10, 10])
    A[1:4, 1:4] = 22
    A[5:8, 5:8] = 33
    A[1, 6] = 44
    A[2, 7] = 45
    A[3, 8] = 44

    peaks = regionalmax(A)

    print(A)
    print(peaks)

Amir Mohammadi's avatar
Amir Mohammadi committed
536

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
537
def regionalmax(img):
    """
    find local maxima using 3x3 mask.
    Used in corner_similarity()
    Should produce results very similar to matlabs imregionalmax()
    @param img: 2d numpy array. Image-like, containing a 'cornerness'-index
        for every pixel.
    @return regmax: 2d numpy array. Binary image showing corners (which are
        regions of local maxima in input image).
    """
    h = img.shape[0]
    w = img.shape[1]

    # extend input image borders by repeating border-values
    b = np.zeros((img.shape[0] + 2, img.shape[1] + 2))
    b[1:-1, 1:-1] = img
    b[0, :] = b[1, :]
    b[:, 0] = b[:, 1]
    b[-1, :] = b[-2, :]
    b[:, -1] = b[:, -2]

    # will contain the output bitmap showing local maxima.
    regmax = np.zeros((h, w), dtype='uint8')

    # pass 1: mark each pixel that equals the maximum of its 3x3
    # neighbourhood and is strictly greater than the neighbourhood minimum
    # (so flat constant regions are not marked). i, j index the padded
    # array b; (i-1, j-1) is the corresponding pixel of img.
    for i in range(1, h + 1):
        for j in range(1, w + 1):
            subim = b[i - 1:i + 2, j - 1:j + 2]
            lmax = np.amax(subim)
            lmin = np.amin(subim)
            if b[i, j] == lmax and b[i, j] > lmin:
                regmax[i - 1, j - 1] = 1

    # pass 2: grow each marked pixel into equal-valued 3x3 neighbours, so
    # plateaus connected to a marked maximum become fully marked. This is a
    # single in-place sweep, so the iteration order matters.
    # NOTE(review): i runs over range(1, h) -- row 0 is never used as a
    # growth seed and the last row index is excluded; presumably intentional
    # to mimic the Matlab original, but worth confirming.
    for i in range(1, h):
        for j in range(w):
            if regmax[i, j] == 1:
                # clip the 3x3 window around (i, j) to the image bounds
                imin = i - 1
                if imin < 0:
                    imin = 0
                imax = i + 2
                if imax > h:
                    imax = h
                for k in range(imin, imax):
                    jmin = j - 1
                    if jmin < 0:
                        jmin = 0
                    jmax = j + 2
                    if jmax > w:
                        jmax = w
                    for l in range(jmin, jmax):
                        # propagate the mark to neighbours with the same
                        # pixel value in the ORIGINAL image
                        if(img[k, l] == img[i, j]):
                            regmax[k, l] = 1

    return regmax
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
590
591
592


def cornerMetric(image):
    """
    returns a 'cornerness' image, where each pixel-value specifies the
    'degree of cornerness' of the corresponding pixel in input image. The
    'cornerness' image is of size (N-2, M-2) for an input image of size
    (N,M) (no cornerness computed for the border pixels of the input).
    @param image: 2d numpy array. Input image for which cornerness needs to
        be computed.
    @return cornerness: 2d numpy array giving a 'cornerness'-value for the
        input image.
    """
    # use the builtin float: the np.float alias was removed in numpy 1.24
    image = image.astype(float)

    # Harris-style response: det(M) - k * trace(M)^2, with k = 0.4
    sensitivity_factor = 0.4
    gwin = gauss_2d((5, 5), 1.5)

    # [-1, 0, 1] derivative filters along each axis
    vfilt = np.array([-1, 0, 1], ndmin=2)
    hfilt = vfilt.T
    A = ssg.convolve2d(image, vfilt, boundary='symm', mode='same')
    B = ssg.convolve2d(image, hfilt, boundary='symm', mode='same')

    # crop out the valid portions of the filter-response (same size for both
    # A and B)
    A = A[1:-2, 1:-2]
    B = B[1:-2, 1:-2]

    # compute products of A, B, C (entries of the structure tensor)
    C = A * B
    A = A * A
    B = B * B

    # smooth A, B, and C with the gaussian window
    A = ssg.convolve2d(A, gwin, boundary='symm', mode='valid')
    B = ssg.convolve2d(B, gwin, boundary='symm', mode='valid')
    C = ssg.convolve2d(C, gwin, boundary='symm', mode='valid')

    # (A*B - C*C) is the determinant; ABsum is the trace
    ABsum = A + B
    cornerness = (A * B) - (C * C) - sensitivity_factor * (ABsum * ABsum)

    return cornerness

Amir Mohammadi's avatar
Amir Mohammadi committed
632

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
633
def corner_similarity(refImage, testImage):
    '''
    Compute the corner-based similarity between 2 images (how close are the
    numbers of corners found in the two images?).
    Returns an index between 0 and 1. The smaller the better.
    @param refImage: 2D numpy array (reference image)
    @param testImage: 2D numpy array (test image)
    @return float-scalar. Normalized absolute difference in corner-counts.
    '''

    # corner-response map of each image, reduced to a binary corner-map by
    # keeping only the regional maxima
    C = cornerMetric(refImage)
    C_peaks = regionalmax(C)

    CG = cornerMetric(testImage)
    CG_peaks = regionalmax(CG)

    nCornersRef = np.sum(C_peaks)
    nCornersTest = np.sum(CG_peaks)

    maxCorners = max(nCornersRef, nCornersTest)

    # Guard against division by zero: if neither image has any corners the
    # two corner-counts are trivially identical, so the difference is 0.
    if maxCorners == 0:
        return 0.0

    qCornersDiff = np.fabs(nCornersRef - nCornersTest) / float(maxCorners)

    return qCornersDiff

Amir Mohammadi's avatar
Amir Mohammadi committed
661

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
662
def edge_similarity(refImage, testImage):
    """
    Similarity between the edge-maps of the two input images.
    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
    metrics", IEEE Trans. Image Processing, 12, 2003.
    @param refImage: 2D numpy array (reference image)
    @param testImage: 2D numpy array (test image)
    @return float-scalar
    """

    # bob..sobel returns filter-responses which need to be thresholded to get
    # the edge-map
    thinning = 1
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit float64 dtype instead.
    refImage = refImage.astype(np.float64)
    # NOTE(review): testImage is NOT cast to float here, unlike refImage --
    # presumably bob.ip.base.sobel copes with integer input; confirm against
    # the callers before changing.

    # compute edge map for reference image
    # returns 3D image. 1st dim is the edge-direction. 1st component is
    # vertical; 2nd component is hor. responses
    refSobel_sep = bob.ip.base.sobel(refImage)
    refSobelX = refSobel_sep[0, :, :]
    refSobelY = refSobel_sep[1, :, :]
    refEdge = edge_thinning(refSobelX[:, :], refSobelY[:, :], thinning)

    # compute edge map for test image
    testSobel_sep = bob.ip.base.sobel(testImage)
    testSobelX = testSobel_sep[0, :, :]
    testSobelY = testSobel_sep[1, :, :]
    testEdge = edge_thinning(testSobelX[:, :], testSobelY[:, :], thinning)

    # normalized absolute difference in the number of edge-pixels
    numPx = refImage.shape[0] * refImage.shape[1]
    numRefEdgePx = np.sum(refEdge)
    numTestEdgePx = np.sum(testEdge)
    qEdgeD = np.abs(numRefEdgePx - numTestEdgePx) / float(numPx)

    return qEdgeD

Amir Mohammadi's avatar
Amir Mohammadi committed
698

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
699
def edge_thinning(bx, by, thinning=1):
    """
    Perform edge-thinning in the same way as done in Matlab. Called in
    edge_similarity().
    Returns a binary edge-map.
    @param  bx: vertical edge-filter responses (for example, response of 1 of
        the two Sobel filters)
    @param  by: horizontal edge-filter responses
    @param  thinning: [0,1]. Default:1, implies 'do edge-thinning'. If set to
        0, no edge-thinning is done. bx and by should be of the same shape
    """
    assert (bx.ndim == 2) and (by.ndim == 2), \
        "bx and by should be 2D arrays."
    assert (bx.shape[0] == by.shape[0]) and (bx.shape[1] == by.shape[1]), \
        "bx and by should have the same shape."

    # edge-strength combining the 2 directional filter-responses
    strength = np.sqrt(bx * bx + by * by)

    # threshold a la Matlab (as described in "Digital Image Processing" book
    # by W.K. Pratt)
    scale = 4
    cutoff = scale * np.mean(strength)

    # np.spacing(1) is the same as eps in matlab
    eps100 = np.spacing(1) * 100.0

    if not thinning:
        # no thinning requested: plain thresholding of the edge-strength
        return strength > cutoff

    # Neighbour comparisons written with explicit slices (equivalent to the
    # np.roll formulation): a pixel survives only when it is a local maximum
    # of edge-strength along its dominant gradient direction. Border pixels
    # default to True (no neighbour on that side).
    left_lt = np.ones(strength.shape, dtype=bool)
    right_lt = np.ones(strength.shape, dtype=bool)
    up_lt = np.ones(strength.shape, dtype=bool)
    down_lt = np.ones(strength.shape, dtype=bool)

    above_cutoff = strength > cutoff

    # horizontal direction dominates: compare with left/right neighbours
    left_lt[:, 1:] = strength[:, :-1] < strength[:, 1:]
    right_lt[:, :-1] = strength[:, 1:] < strength[:, :-1]
    horiz_max = (bx >= (by - eps100)) & left_lt & right_lt

    # vertical direction dominates: compare with up/down neighbours
    up_lt[1:, :] = strength[:-1, :] < strength[1:, :]
    down_lt[:-1, 1:] = strength[1:, 1:] < strength[:-1, 1:]
    vert_max = (by >= (bx - eps100)) & up_lt & down_lt

    return above_cutoff & (horiz_max | vert_max)


def spectral_similarity(refImage, testImage):
    """
    Difference between the Fourier spectra of the two (same-sized) images.
    @param refImage: 2D numpy array (reference image)
    @param testImage: 2D numpy array (test image)
    @return sme: float-scalar. Mean difference in magnitudes of spectra of the
        two images.
    @return spe: float-scalar. Mean difference in phases of spectra of the two
        images.
    """
    # ref and test images are assumed to share the same shape
    numPx = refImage.shape[0] * refImage.shape[1]

    specRef = np.fft.rfft2(refImage)
    specTest = np.fft.rfft2(testImage)

    # SME: spectral magnitude error (mean squared magnitude difference)
    magDiff = np.abs(specRef) - np.abs(specTest)
    sme = np.sum(magDiff * magDiff) / float(numPx)

    # SPE: spectral phase error (mean squared phase difference)
    phaseDiff = np.abs(np.angle(specRef) - np.angle(specTest))
    spe = np.sum(phaseDiff * phaseDiff) / float(numPx)

    return sme, spe


def angle_similarity(refImage, testImage, diffImage):
    """
    Cosine-Similarity between the rows of the two input images.
    Ref: I. Avcibas, N. Memon, B. Sankur: "Steganalysis using image quality
    metrics", IEEE Trans. Image Processing, 12, 2003.
    @param refImage: 2D numpy array (reference image)
    @param testImage: 2D numpy array (test image)
    @param diffImage: 2D numpy array. Difference between refImage and
        testImage. Not strictly necessary as input but precomputed here, to
        save computation time.
    @return mas: float-scalar. Mean angle-similarity.
    @return mams: float-scalar. Mean angle-magnitude similarity.
    """
    nRows = refImage.shape[0]
    numPx = nRows * refImage.shape[1]

    # per-row L2 norms of the three images
    refNorm = np.linalg.norm(refImage, axis=1)
    testNorm = np.linalg.norm(testImage, axis=1)
    diffNorm = np.linalg.norm(diffImage, axis=1)

    # Galbally divides by sqrt(255**2)
    magnitVec = (diffNorm / 255.0).reshape((nRows, 1))

    # row-wise cosine of the angle between ref and test;
    # np.einsum('ij,ij->i',a,b) is equivalent to np.diag(np.dot(a,b.T))
    cosTheta = np.einsum('ij,ij->i', refImage, testImage) / \
        (refNorm * testNorm)
    # clamp to the valid arccos domain and map NaNs (zero-norm rows) to 0
    cosTheta = np.nan_to_num(np.clip(cosTheta, -1.0, 1.0))
    thetaVec = np.arccos(cosTheta).reshape((nRows, 1))

    # per-row angles normalized to [0, 1]
    angleDev = thetaVec * 2.0 / np.pi

    # MAS: mean angle similarity
    mas = 1.0 - (sum(angleDev) / float(numPx))

    # MAMS: mean angle-magnitude similarity
    chi = 1.0 - (1.0 - angleDev) * (1.0 - magnitVec)
    mams = sum(chi) / float(numPx)

    return (float(mas), float(mams))
Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
830
831


Amir Mohammadi's avatar
Amir Mohammadi committed
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
def gauss_2d(shape=(3, 3), sigma=0.5):
    """
    Return a 2D gaussian-filter matrix, the equivalent of matlab
    fspecial('gaussian', [shape], [sigma]).

    @param shape: tuple defining the size of the desired filter in each
        dimension. Elements of tuple should be 2 positive odd-integers.
    @param sigma: float-scalar, standard deviation of the gaussian.
    @return h: 2D numpy array of filter weights, normalized to sum to 1.
    """
    halfRows, halfCols = ((dim - 1.) / 2. for dim in shape)
    yy, xx = np.ogrid[-halfRows:halfRows + 1, -halfCols:halfCols + 1]
    weights = np.exp(-(xx * xx + yy * yy) / (2. * sigma * sigma))
    # zero-out negligible entries, exactly as Matlab's fspecial does
    weights[weights < np.finfo(weights.dtype).eps * weights.max()] = 0
    total = weights.sum()
    if total != 0:
        weights /= total
    return weights

Sushil BHATTACHARJEE's avatar
Sushil BHATTACHARJEE committed
854

Amir Mohammadi's avatar
Amir Mohammadi committed
855
856
857
858
859
860
861
862
863
def gaussianSmooth(image):
    '''
    Return a smoothed version of the input gray-level image.

    Smoothing uses a fixed 3x3 gaussian filter (sigma=0.5).
    '''
    # radius=1 yields a 3x3 kernel
    smoother = bob.ip.base.Gaussian(sigma=(0.5, 0.5), radius=(1, 1))
    return smoother(image)