Commit dceded86 authored by Sushil BHATTACHARJEE's avatar Sushil BHATTACHARJEE

initial commit

parent d8c4a37a
.. vim: set fileencoding=utf-8 :
.. Sat 3 Dec 20:18:15 2016 CET
.. image:: http://img.shields.io/badge/docs-stable-yellow.png
:target: http://pythonhosted.org/bob.ip.qualitymeasure/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.png
:target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.ip.qualitymeasure/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure/badges/master/build.svg
:target: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure
.. image:: http://img.shields.io/pypi/v/bob.ip.qualitymeasure.png
:target: https://pypi.python.org/pypi/bob.ip.qualitymeasure
.. image:: http://img.shields.io/pypi/dm/bob.ip.qualitymeasure.png
:target: https://pypi.python.org/pypi/bob.ip.qualitymeasure
==================================================
Bob's library of image-quality feature-extractors
==================================================
This package is part of the signal-processing and machine learning toolbox
Bob_. It provides functions for extracting image-quality features proposed
for PAD experiments by different research groups.
Installation
------------
Follow our `installation`_ instructions. Then, using the Python interpreter
provided by the distribution, bootstrap and buildout this package::
$ python bootstrap-buildout.py
$ ./bin/buildout
Contact
-------
For questions or reporting issues to this software package, contact our
development `mailing list`_.
.. Place your references here:
.. _bob: https://www.idiap.ch/software/bob
.. _installation: https://gitlab.idiap.ch/bob/bob/wikis/Installation
.. _mailing list: https://groups.google.com/forum/?fromgroups#!forum/bob-devel
# see https://docs.python.org/3/library/pkgutil.html
# Declare this directory as part of a namespace package, so that other
# distributions can also install modules under the same top-level package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
# Namespace-package boilerplate: extend __path__ so sibling distributions
# contributing to the same package are importable.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
def get_config():
    """Return a string describing this package's build and dependency configuration."""
    # bob.extension assembles the configuration report for the given module name.
    import bob.extension
    config_text = bob.extension.get_config(__name__)
    return config_text
# gets sphinx autodoc done right - don't remove it
# Export every non-underscore name defined above as this module's public API.
__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''
Created on 13 Oct 2015
@author: sbhatta
'''
import os, sys
import argparse
import bob.io.base
import bob.io.video
import bob.ip.color
import numpy as np
import galbally_iqm_features as iqm
import antispoofing.utils.db as bobdb
#'''
#Matlab-like RGB to gray...
# @param: rgbImage : numpy array for the form: [3,h,w] where h is the height of the image and w is the width of the image.
# Returns a y-image in floating-point format (range [(16/255) .. (235/255)])
#'''
#def matlab_rgb2gray(rgbImage):
# #g1 = 0.299*rgbFrame[0,:,:] + 0.587*rgbFrame[1,:,:] + 0.114*rgbFrame[2,:,:] #standard coeffs CCIR601
#
# #this is how it's done in matlab...
# rgbImage = rgbImage / 255.0
# C0 = 65.481/255.0
# C1 = 128.553/255.0
# C2 = 24.966/255.0
# scaleMin = 16.0/255.0
# #scaleMax = 235.0/255.0
# gray = scaleMin + (C0*rgbImage[0,:,:] + C1*rgbImage[1,:,:] + C2*rgbImage[2,:,:])
#
# return gray
# """
# loads a video, and returns a feature-vector for each frame of video
# """
# def computeIQM_1video(vidPath):
# inputVideo = bob.io.video.reader(vidPath)
# vin = inputVideo.load()
# numframes = vin.shape[0]
# fset = np.zeros([numframes, 21])
# for f in range(numframes):
# rgbFrame = vin[f,:,:,:]
# grayFrame = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
# bobQFeats = np.asarray(iqm.computeQualityFeatures(grayFrame)) # computeQualityFeatures() returns a tuple
# fset[f,:] = bobQFeats
#
# return fset
#
'''
computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
'''
def computeVideoIQM(video4d):
    """Compute image-quality (IQM) features for every frame of a video.

    :param video4d: 4d numpy array (N frames, each frame an RGB image of
        shape [3, M, N]).
    :return: 2d numpy array of shape [N, numIQMs] -- one feature-vector per
        frame of video4d.
    """
    numframes = video4d.shape[0]
    # Process the first frame separately, to discover the number of IQM
    # features before allocating storage for all frames.
    rgbFrame = video4d[0, :, :, :]
    # NOTE(fix): the original called matlab_rgb2gray() here, which is not
    # defined in this module (it is commented out above) and would raise a
    # NameError. The remaining frames already pass the RGB frame directly to
    # compute_quality_features(), so the first frame now does the same.
    iqmSet = iqm.compute_quality_features(rgbFrame)
    numIQMs = len(iqmSet)
    # Now initialize fset to store iqm features for all frames of input video.
    fset = np.zeros([numframes, numIQMs])
    fset[0, :] = np.asarray(iqmSet)  # compute_quality_features() returns a tuple
    for f in range(1, numframes):
        rgbFrame = video4d[f, :, :, :]
        fset[f, :] = np.asarray(iqm.compute_quality_features(rgbFrame))
    return fset
'''
loads a video, and returns a feature-vector for each frame of video
'''
def computeIQM_1video(vidPath):
    """Load the video at ``vidPath`` and return one IQM feature-vector per frame."""
    # Decode the entire video into a 4d array, then delegate the per-frame
    # feature extraction to computeVideoIQM().
    frames = bob.io.video.reader(vidPath).load()
    return computeVideoIQM(frames)
def main(command_line_parameters=None):
    """Command-line entry point: compute IQM features for one video.

    Parses the command line, computes one feature-vector per frame of the
    input video, and stores the resulting matrix in an HDF5 file.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '-f', '--print_num_files', action='store_true', dest='printNumFiles',
        default=False,
        help='Option to print no. of files that will be processed. (Default: %(default)s)')
    parser.add_argument(
        '-i', '--input_videofile', dest='inpFile', default=None,
        help='filename of video to be processed (including complete path). Video expected in .mov format.')
    parser.add_argument(
        '-o', '--output_featurefile', dest='outFile', default=None,
        help='filename where computed features will be stored. Output file will be in hdf5 format.')
    parsed = parser.parse_args(command_line_parameters)

    # Both an input video and an output feature-file must be specified.
    if not parsed.inpFile:
        parser.error('Specify parameter --input_videofile')
    if not parsed.outFile:
        parser.error('Specify parameter --output_featurefile')

    # Compute one quality-feature vector per frame of the input video.
    featureMatrix = computeIQM_1video(parsed.inpFile)

    # Persist the features; deleting the handle closes the HDF5 file.
    outStore = bob.io.base.HDF5File(parsed.outFile, 'w')
    outStore.set('bobiqm', featureMatrix)
    del outStore
# Run the command-line interface when this file is executed as a script.
if __name__ == '__main__':
    main(sys.argv[1:])
'''
Created on 13 Oct 2015
@author: sbhatta
'''
import os, sys
import argparse
import bob.io.base
import bob.io.image #under the hood: loads Bob plugin for image file
import bob.io.video
import bob.ip.color
import numpy as np
import msu_iqa_features as iqa
#import MSU_MaskedIQAFeats as iqa
import antispoofing.utils.db as bobdb
'''
computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
'''
def computeVideoIQA(video4d, validFrames):
    """Compute MSU image-quality (IQA) features for the valid frames of a video.

    :param video4d: 4d numpy array (N frames, each frame an RGB image).
    :param validFrames: 1d array of N frame-quality flags; only frames whose
        flag equals 1 are processed.
    :return: 2d numpy array with one feature-vector per *valid* frame.
    """
    numframes = video4d.shape[0]
    # NOTE(fix): np.sum() over a float array returns a float, which newer
    # numpy rejects as an array dimension; cast to int explicitly.
    numValidFrames = int(np.sum(validFrames))

    # Process the first valid frame separately, to learn the number of IQA
    # features before allocating storage for all valid frames.
    k = 0
    while validFrames[k] == 0:
        k += 1
    # NOTE(fix): converted Python-2-only print statements to print() calls.
    print('first valid frame: ', k)
    rgbFrame = video4d[k, :, :, :]
    iqmSet = iqa.computeMsuIQAFeatures(rgbFrame)
    numIQMs = len(iqmSet)

    # One row per valid frame only.
    fset = np.zeros([numValidFrames, numIQMs])
    fset[0, :] = np.asarray(iqmSet)  # computeMsuIQAFeatures() returns a tuple
    print('fset shape:', fset.shape)

    j = 1
    for f in range(k + 1, numframes):
        if validFrames[f] == 1:
            rgbFrame = video4d[f, :, :, :]
            fset[j, :] = np.asarray(iqa.computeMsuIQAFeatures(rgbFrame))
            j += 1
    return fset
'''
loads a video, and returns a feature-vector for each frame of video
'''
def computeIQA_1video(videoFile, frameQualFile):
    """Load a video and return one MSU-IQA feature-vector per valid frame.

    :param videoFile: path of the input video file.
    :param frameQualFile: path of an hdf5 file whose '/frameQuality' dataset
        flags usable frames (1 == usable), or None to treat every frame as
        valid.
    :return: 2d numpy array of per-frame feature-vectors.
    """
    inputVideo = bob.io.video.reader(videoFile)
    # Load the whole input video into memory as a 4d array.
    vin = inputVideo.load()
    numFrames = vin.shape[0]
    if frameQualFile is not None:
        f = bob.io.base.HDF5File(frameQualFile)  # opened read-only
        # List of per-frame quality indicators; normalize anything != 1 to 0.
        validFrames = (f.read('/frameQuality')).flatten()
        # NOTE(fix): the original used the Python-2-only '<>' operator here.
        validFrames[np.where(validFrames != 1)] = 0
    else:
        validFrames = np.ones(numFrames)
    numValidFrames = np.sum(validFrames)
    # NOTE(fix): converted Python-2-only print statements to print() calls.
    print('valid frames:', numValidFrames, 'of', numFrames)

    import time
    startTime = time.time()
    fset = computeVideoIQA(vin, validFrames)
    print("Time for one video --- %s seconds ---" % (time.time() - startTime))
    return fset
'''
'''
def parse_arguments(arguments):
    """Parse the command line and instantiate the selected anti-spoofing database.

    :param arguments: list of command-line tokens (e.g. sys.argv[1:]).
    :return: tuple (args, database) holding the parsed namespace and the
        database object built from the user's database selection.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '-v', '--verbose', dest='verbose', metavar='INT', type=int,
        choices=[0, 1, 2], default=1,
        help='Prints (hopefully helpful) messages (Default: %(default)s)')
    parser.add_argument(
        '-db', '--dbase_path', dest='dbPath', default=None,
        help='path where database videos exist.')
    parser.add_argument(
        '-op', '--output_path', dest='outPath', default=None,
        help='path where face-files will be stored.')
    parser.add_argument(
        '-nf', '--numFiles', action='store_true', dest='numFiles', default=False,
        help='Option to print no. of files that will be processed. (Default: %(default)s)')
    parser.add_argument(
        '-f', '--singleFile', dest='singleFile', default=None,
        help='filename (including complete path) of video to be used to test this code: %(default)s)')
    parser.add_argument(
        '-ve', '--video_ext', dest='vidExtn', default=None,
        choices=['avi', 'mov', 'mp4'],
        help='filename (including complete path) of video to be used to test this code: %(default)s)')

    # Let the bob database interface register its database-selection options.
    bobdb.Database.create_parser(parser, implements_any_of='video')
    args = parser.parse_args(arguments)

    # Instantiate the database class chosen on the command line.
    database = args.cls(args)

    if args.singleFile is None:
        # Batch mode: a database root folder is mandatory.
        if not args.dbPath:
            parser.error('Specify parameter --dbase_path')
    else:
        # Single-file test mode: split the given path into folder + filename.
        args.dbPath, args.singleFile = os.path.split(args.singleFile)

    if not args.outPath:
        parser.error('Specify parameter --output_path')

    return (args, database)
'''
'''
def main(arguments):
    """Batch driver: compute MSU-IQA features for database videos (or one file).

    In batch mode, every train/devel/test file (real and attack) of the
    selected database is processed; with --singleFile only that one video is
    processed. One hdf5 feature-file (dataset 'msuiqa') is written per input
    video.
    """
    args, database = parse_arguments(arguments)
    inpDir = args.dbPath
    outDir = args.outPath
    assert os.path.exists(inpDir), "Input database folder %s does not exist" % inpDir
    # NOTE(fix): Python-2-only print statements converted to print() calls
    # throughout this function.
    if args.verbose > 0:
        print('Loading data from', inpDir)

    if args.singleFile is None:
        # Batch mode: gather all real + attack files of all three subsets.
        tr_realFiles, tr_attackFiles = database.get_train_data()
        dv_realFiles, dv_attackFiles = database.get_devel_data()
        ts_realFiles, ts_attackFiles = database.get_test_data()
        allFiles = tr_realFiles + dv_realFiles + ts_realFiles + \
            tr_attackFiles + dv_attackFiles + ts_attackFiles
        del tr_realFiles, tr_attackFiles, dv_realFiles, dv_attackFiles, \
            ts_realFiles, ts_attackFiles
        numFiles = len(allFiles)
        if args.numFiles:
            print('Number of files to be processed:', numFiles)
            print('exiting')
            return

        # If we are on a grid environment, just find what I have to process.
        fileSet = allFiles[0:numFiles]
        # NOTE(fix): os.environ.has_key() is Python-2-only; use 'in' instead.
        if 'SGE_TASK_ID' in os.environ:
            pos = int(os.environ['SGE_TASK_ID']) - 1
            if pos >= numFiles:
                # NOTE(fix): 'raise RuntimeError, msg' is Python-2-only syntax.
                raise RuntimeError(
                    "Grid request for job %d on a setup with %d jobs" % (pos, numFiles))
            fileSet = [allFiles[pos]]

        print('processing', len(fileSet), ' files')
        k1 = 0
        for k in fileSet:
            # 1. determine input-video and output-feature file names.
            print('filenum:', k1)
            print(k)
            if args.vidExtn is None:
                inVidFile = k.videofile(inpDir)
            else:
                inVidFile = k.make_path(inpDir, ('.' + args.vidExtn))
            inFrameQFile = None  # no frame-quality file in batch mode
            outFeatFile = k.make_path(outDir, '.h5')
            head, tail = os.path.split(outFeatFile)
            if not os.path.exists(head):
                os.makedirs(head)  # create output folder, if it doesn't exist
            print(inFrameQFile)
            print(outFeatFile)
            # 2. compute features (1 vector per frame) and save them.
            msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
            _save_msuiqa_features(outFeatFile, msuIQAFeats)
            k1 += 1
    else:
        # Test feature-computation with a single file specified as input.
        filePart = os.path.splitext(args.singleFile)[0]
        inVidFile = os.path.join(args.dbPath, filePart) + '.avi'
        inFrameQFile = os.path.join(args.dbPath, filePart) + '_frameQ.h5'
        outFeatFile = os.path.join(outDir, filePart) + '.h5'
        head, tail = os.path.split(outFeatFile)
        if not os.path.exists(head):
            os.makedirs(head)  # create output folder, if it doesn't exist
        print('single file:', inVidFile)
        print(inFrameQFile)
        print(outFeatFile)
        msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
        _save_msuiqa_features(outFeatFile, msuIQAFeats)


def _save_msuiqa_features(outFeatFile, feats):
    """Write the feature-matrix to outFeatFile (hdf5, dataset 'msuiqa')."""
    ohf = bob.io.base.HDF5File(outFeatFile, 'w')
    ohf.set('msuiqa', feats)
    del ohf  # deleting the handle closes the hdf5 file
# special fn to extract first frame from video-file and store it as hdf5
def extractTestFrame(
        videoFile='/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real/real_client022_android_SD_scene01.mp4',
        outfile='/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real_client022_android_SD_scene01_frame0_correct.hdf5'):
    """Extract the first frame of ``videoFile`` and store it in ``outfile`` (hdf5).

    Generalized from the original hard-coded version: both paths are now
    parameters, with the original paths kept as defaults for backward
    compatibility.
    """
    inputVideo = bob.io.video.reader(videoFile)
    # Load the whole input video; only frame 0 is used below.
    vin = inputVideo.load()
    outframe = vin[0]
    ohf = bob.io.base.HDF5File(outfile, 'w')
    ohf.set('color_frame', outframe)
    del ohf  # deleting the handle closes the hdf5 file
# Script entry point. (The commented-out call below was used once to generate
# a test fixture; it is kept for reference.)
if __name__ == '__main__':
    # extractTestFrame()
    main(sys.argv[1:])
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''
Created on 25 Sep 2015
@author: sbhatta
'''
#import re
#import os
import math
import numpy as np
import scipy as sp
import scipy.signal as ssg
import scipy.ndimage.filters as snf
import bob.ip.base
"""
Main function to be called, to extract a set of image quality-features computed for the input image
:param image: 2d numpy array. Should contain input image of size [M,N] (i.e. M rows x N cols).
:return featSet: a tuple of float-scalars, each representing one image-quality measure.
"""
def compute_quality_features(image):
    """Extract a set of image quality-features computed for the input image.

    :param image: 2d or 3d numpy array. Should represent input image of shape [M,N] (M rows x N cols).
        If 2D, image should contain a gray-image of shape [M,N].
        If 3d, image should have a shape [3,M,N], and should contain an RGB-image.
    :return featSet: a tuple of float-scalars, each representing one image-quality measure,
        or None if the input is neither a valid gray nor a valid RGB image.

    This function returns a subset of the image-quality features (for face anti-spoofing)
    that have been described by Galbally et al. in their paper:
    "Image Quality Assessment for Fake Biometric Detection: Application to Iris, Fingerprint,
    and Face Recognition", IEEE Trans. on Image Processing Vol 23(2), 2014.
    """
    gray_image = None
    if len(image.shape) == 3:
        if image.shape[0] == 3:
            # NOTE(fix): the original referenced an undefined name 'rgbFrame'
            # here (NameError); the intent was to gray-convert 'image'.
            gray_image = matlab_rgb2gray(image)  # compute gray-level image for input color-frame
        else:
            print('error. Wrong kind of input image')
    elif len(image.shape) == 2:
        gray_image = image
    else:
        print('error -- wrong kind of input')

    # NOTE(fix): the original tested 'if gray_image:', which raises ValueError
    # for a multi-element numpy array (ambiguous truth value) and NameError
    # when the error branches above were taken. Test against None instead.
    if gray_image is None:
        return None

    gwin = gauss_2d((3, 3), 0.5)
    smoothed = ssg.convolve2d(gray_image, gwin, boundary='symm', mode='same')
    # Some of the image-quality measures computed here require a reference
    # image. We use the input image itself as the reference, and compute the
    # quality-measures of a smoothed version of it. The assumption (see the
    # Galbally paper referenced above) is that smoothing degrades a
    # spoof-image more than it does a genuine image.
    featSet = image_quality_measures(gray_image, smoothed)
    return featSet
"""
actually computes various measures of similarity between the two input images, but also returns some descriptors of the reference-image that are independent of any other image.
Returns a tuple of 18 values, each of which is a float-scalar.
The quality measures computed in this function correspond to the Image-quality features discussed in Galbally et al., 2014.
"""
def image_quality_measures(refImage, testImage):
    """Compute image-quality measures for testImage and return a tuple of quality-measures.

    Some of the quality-measures require a reference-image, but others are 'no-reference' measures.
    :input refImage: 2d numpy array. Should represent input 8-bit gray-level image of size [M,N].
    :input testImage: 2d numpy array. Should represent input 8-bit gray-level image of size [M,N].
    :return a tuple of 18 values, each of which is a float-scalar.
    The quality measures computed in this function correspond to the Image-quality features
    discussed in Galbally et al., 2014.
    """
    assert len(refImage.shape) == 2, "refImage should be a 2D array"
    assert len(testImage.shape) == 2, "testImage should be a 2D array"
    assert (refImage.shape[0] == testImage.shape[0]), "The two images should have the same width"
    assert (refImage.shape[1] == testImage.shape[1]), "The two images should have the same height"

    # NOTE(fix): np.float was a deprecated alias for the builtin float and was
    # removed in numpy >= 1.24; np.float64 is the identical dtype.
    diffImg = refImage.astype(np.float64) - testImage.astype(np.float64)
    diffSq = np.square(diffImg)
    sumDiffSq = np.sum(diffSq)
    absDiffImg = np.absolute(diffImg)
    refSq = np.square(refImage.astype(np.float64))
    sumRefSq = np.sum(refSq)
    numPx = refImage.shape[0] * refImage.shape[1]  # number of pixels in each image
    maxPxVal = 255.0
    # 1 MSE
    mse00 = float(sumDiffSq) / float(numPx)
    # 2 PSNR (infinite when the two images are identical, i.e. MSE == 0)
    psnr01 = np.inf
    if mse00 > 0:
        psnr01 = 10.0 * np.log10(maxPxVal * maxPxVal / mse00)
    # 3 AD: Average difference
    ad02 = float(np.sum(diffImg)) / float(numPx)
    # 4 SC: structural content
    testSq = np.square(testImage.astype(np.float64))
    sumTestSq = np.sum(testSq)
    sc03 = np.inf
    if sumTestSq > 0:
        sc03 = float(sumRefSq) / float(sumTestSq)
    # 5 NK: normalized cross-correlation
    imgProd = refImage * testImage  # element-wise product
    nk04 = float(np.sum(imgProd)) / float(sumRefSq)
    # 6 MD: Maximum difference
    md05 = float(np.amax(absDiffImg))
    # 7 LMSE: Laplacian MSE
    # scipy's laplacian differs from Matlab's del2(), especially at the image borders:
    #  a. Matlab del2() divides the convolution result by 4, so the ratio
    #     (scipy laplace result)/(del2 result) is 4;
    #  b. Matlab handles boundaries differently, so border values differ.
    # In Galbally's Matlab code there is a factor of 4, dropped here (no
    # difference in the ratio) because it is implicit in snf.laplace().
    op = snf.laplace(refImage, mode='reflect')  # mode can be 'wrap', 'reflect', 'nearest', 'mirror', or 'constant'
    opSq = np.square(op)
    sum_opSq = np.sum(opSq)
    tmp1 = (op - (snf.laplace(testImage, mode='reflect')))
    num_op = np.square(tmp1)
    lmse06 = float(np.sum(num_op)) / float(sum_opSq)
    # 8 NAE: normalized abs. error
    sumRef = np.sum(np.absolute(refImage))
    nae07 = float(np.sum(absDiffImg)) / float(sumRef)
    # 9 SNRv: SNR in db
    snrv08 = 10.0 * np.log10(float(sumRefSq) / float(sumDiffSq))
    # 10 RAMDv: R-averaged max diff (r=10)
    # implementation below is same as what Galbally does in Matlab
    r = 10
    # NOTE(fix): renamed from 'sorted', which shadowed the builtin.
    sorted_diffs = np.sort(diffImg.flatten())[::-1]  # [::-1] flips to descending order
    topsum = np.sum(sorted_diffs[0:r])
    ramdv09 = np.sqrt(float(topsum) / float(r))
    # 11, 12 MAS: Mean Angle Similarity; MAMS: Mean Angle-Magnitude Similarity
    mas10, mams11 = angle_similarity(refImage, testImage, diffImg)
    fftRef = np.fft.fft2(refImage)
    # 13, 14 SME: spectral magnitude error; SPE: spectral phase error
    sme12, spe13 = spectral_similarity(refImage, testImage)
    # 15 TED: Total edge difference
    ted14 = edge_similarity(refImage, testImage)
    # 16 TCD: Total corner difference
    tcd15 = corner_similarity(refImage, testImage)
    # 17, 18 GME: gradient-magnitude error; GPE: gradient phase error
    gme16, gpe17 = gradient_similarity(refImage, testImage)
    # 19 SSIM
    ssim18, _ = ssim(refImage, testImage)
    # 20 VIF
    vif19 = vif(refImage, testImage)
    # 21..25 (RRED, BIQI, JQI, NIQE): these parameters are not computed here.
    # 26 HLFI: high-low frequency index (implemented as done by Galbally in Matlab).
    hlfi25 = high_low_freq_index(fftRef, refImage.shape[1])
    # NOTE: spe13, ted14 and tcd15 are computed but deliberately NOT returned,
    # matching the original selection of 18 features.
    return (mse00, psnr01, ad02, sc03, nk04, md05, lmse06, nae07, snrv08,
            ramdv09, mas10, mams11, sme12, gme16, gpe17, ssim18, vif19, hlfi25)
"""
Matlab-like RGB to gray...
"""
def matlab_rgb2gray(rgbImage):
    '''Convert a color image to gray, reproducing Matlab's result exactly.

    Inputs:
        rgbImage: numpy array of shape [3, height, width]
    Return:
        numpy array of shape [height, width] containing a gray-image with
        floating-point pixel values, in the range [(16.0/255) .. (235.0/255)]
    '''
    # Matlab's luma channel: scale the image to [0, 1], apply the CCIR601
    # (ITU-R BT.601) YCbCr luma coefficients (65.481, 128.553, 24.966)/255,
    # then add the 16/255 black-level offset.
    scaled = rgbImage / 255.0
    coeffs = (65.481 / 255.0, 128.553 / 255.0, 24.966 / 255.0)
    luma = sum(c * scaled[ch, :, :] for ch, c in enumerate(coeffs))
    return 16.0 / 255.0 + luma
"""
SSIM: Structural Similarity index between two gray-level images. The dynamic range is assumed to be 0..255.
Ref:Z. Wang, A.C. Bovik, H.R. Sheikh and E.P. Simoncelli:
"Image Quality Assessment: From error measurement to Structural Similarity"
IEEE Trans. on Image Processing, 13(1), 01/2004
@param refImage: 2D numpy array (reference image)
@param testImage: 2D numpy array (test image)
Both input images should have the same dimensions. This is assumed, and not verified in this function
@return ssim: float-scalar. The mean structural similarity between the 2 input images.
@return ssim_map: the SSIM index map of the test image (this map is smaller than the test image).
"""
def ssim(refImage, testImage):
"""Compute and return SSIM between two images.
@param refImage: 2D numpy array (reference image)
@param testImage: 2D numpy array (test image)
Returns ssim and ssim_map
@return ssim: float-scalar. The mean structural similarity between the 2 input images.
@return ssim_map: the SSIM index map of the test image (this map is smaller than the test image).
"""
M=refImage.shape[0]
N=refImage.shape[1]
winSz=11 #window size for gaussian filter
winSgm = 1.5 # sigma for gaussian filter
#input image should be at least 11x11 in size.
if(M<winSz) or (N<winSz):
ssim_index = -np.inf
ssim_map = -np.inf
return ssim_index, ssim_map
#construct the gaussian filter
gwin = gauss_2d((winSz, winSz), winSgm)
K1 = 0.01 # constants taken from the initial matlab implementation provided by Bovik's lab.
K2 = 0.03
L = 255 #dynamic range.
C1 = (K1*L)*(K1*L)
C2 = (K2*L)*(K2*L)
#refImage=refImage.astype(np.float)
#testImage=testImage.astype(np.float)