Commit a12ea594 authored by Sushil BHATTACHARJEE

added specularity-features code; updated user-guide; removed prints

parent b9a272d1
@@ -44,20 +44,20 @@ def compute_quality_features(image):
    if len(image.shape) == 3:
        if(image.shape[0]==3):
            gray_image = matlab_rgb2gray(image) #compute gray-level image for input color-frame
            print(gray_image.shape)
            # print(gray_image.shape)
        else:
            print('error. Wrong kind of input image')
    else:
        if len(image.shape) == 2:
            gray_image = image
            print(gray_image.shape)
            # print(gray_image.shape)
        else:
            print('error -- wrong kind of input')
    if gray_image is not None:
        gwin = gauss_2d((3,3), 0.5) # set up the smoothing-filter
        print("computing degraded version of image")
        # print("computing degraded version of image")
        smoothed = ssg.convolve2d(gray_image, gwin, boundary='symm', mode='same')
        """
@@ -67,7 +67,7 @@ def compute_quality_features(image):
        approach is that smoothing degrades a spoof-image more than it does a genuine image
        (see Galbally's paper referenced above).
        """
        print("computing galbally quality features")
        # print("computing galbally quality features")
        featSet = image_quality_measures(gray_image, smoothed)
        return featSet
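# Aside (not part of the commit): a minimal, self-contained sketch of the idea above,
# using numpy/scipy only. Galbally's measures compare the gray image with a smoothed
# ("degraded") copy of itself; MSE and PSNR shown here are just two of the 18 measures
# returned by image_quality_measures(), and the function name below is illustrative.
import numpy as np
from scipy.ndimage import gaussian_filter

def reference_degradation_sketch(gray_image):
    smoothed = gaussian_filter(gray_image.astype('float64'), sigma=0.5)
    mse = np.mean((gray_image.astype('float64') - smoothed) ** 2)
    psnr = 10.0 * np.log10((255.0 ** 2) / mse) if mse > 0 else np.inf
    return mse, psnr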
......
@@ -4,19 +4,17 @@ Created on 9 Feb 2016
@author: sbhatta
'''
#import re
#import os
import math
import numpy as np
import scipy as sp
import scipy.signal as ssg
import scipy.ndimage.filters as snf
import galbally_iqm_features as iqm
import bob.ip.base
import bob.ip.color
import galbally_iqm_features as iqm
import tan_specular_highlights as tsh
########## Utility functions ###########
'''
@@ -103,12 +101,12 @@ def sobelEdgeMap(image, orientation='both'):
########### End of Aux. functions ##############
'''
'''
#def computeMsuIQAFeatures(rgbImage, printFV=False):
def compute_msu_iqa_features(rgbImage):
    print("computing msu iqa features")
    assert len(rgbImage.shape)==3, 'computeMsuIQAFeatures():: image should be a 3D array (containing a rgb image)'
    # print("computing msu iqa features")
    assert len(rgbImage.shape)==3, 'compute_msu_iqa_features():: image should be a 3D array (containing a rgb image)'
    # hsv = np.zeros_like(rgbImage)
    # bob.ip.color.rgb_to_hsv(rgbImage, hsv)
    # h = hsv[0,:,:]
@@ -135,48 +133,76 @@ def compute_msu_iqa_features(rgbImage):
    # calculate mean, deviation and skewness of each channel
    # use histogram shifting for the hue channel
    #print h.shape
    # print h.shape
    momentFeatsH = calmoment_shift(h)
    #print 'H-moments:', momentFeatsH
    # print 'H-moments:', momentFeatsH
    momentFeats = momentFeatsH.copy()
    momentFeatsS = calmoment(s)
    #print 'S-moments:', momentFeatsS
    # print 'S-moments:', momentFeatsS
    momentFeats = np.hstack((momentFeats, momentFeatsS))
    momentFeatsV = calmoment(v)
    #print 'V-moments:', momentFeatsV
    # print 'V-moments:', momentFeatsV
    momentFeats = np.hstack((momentFeats, momentFeatsV))
    speckleFeats = compute_iqa_specularity_features(rgbImage, startEps=0.06)
    fv = momentFeats.copy()
    #print('moment features: ')
    #print(fv)
    #stack the various feature-values in the same order as in MSU's matlab code.
    fv = speckleFeats.copy()
    fv = np.hstack((fv, momentFeats))
    fv = np.hstack((fv, colorHist))
    fv = np.hstack((fv, totNumColors))
    fv = np.hstack((fv, blurFeat))
    fv = np.hstack((fv, pinaBlur))
    # print('computed msu features')
    return fv
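    # Note (added for clarity): with the specularity features stacked first, fv holds
    # 3 specularity values, 15 color-moment values (5 per H/S/V channel, from
    # calmoment/calmoment_shift), and then the color-diversity and blur features,
    # i.e. the 121-D vector described in the user guide.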
"""
Implements the method proposed by Marziliano et al. for determining the average width of vertical edges, as a measure of blurredness in an image.
This function is a Python version of the Matlab code provided by MSU.
def compute_iqa_specularity_features(rgbImage, startEps=0.05):
"""Returns three features characterizing the specularity present in input color image.
First the specular and diffuse components of the input image are separated using the
"""
#separate the specular and diffuse components of input color image.
speckleFreeImg, diffuseImg, speckleImg = tsh.remove_highlights(rgbImage, startEps, verboseFlag=False)
#speckleImg contains the specular-component
if len(speckleImg.shape)==3:
speckleImg = speckleImg[0]
speckleImg = speckleImg.clip(min=0)
speckleMean = np.mean(speckleImg)
lowSpeckleThresh = speckleMean*1.5 #factors 1.5 and 4.0 are proposed by Wen et al. in their paper and matlab code.
hiSpeckleThresh = speckleMean*4.0
# print speckleMean, lowSpeckleThresh, hiSpeckleThresh
specklePixels = speckleImg[np.where(np.logical_and(speckleImg >= lowSpeckleThresh, speckleImg<hiSpeckleThresh))]
r = float(specklePixels.flatten().shape[0])/(speckleImg.shape[0]*speckleImg.shape[1]) #percentage of specular pixels in image
m = np.mean(specklePixels) #mean-specularity (of specular-pixels)
s = np.std(specklePixels) #std. of specularity (of specular-pixels)
return np.asarray((r,m/150.0,s/150.0), dtype=np.float32) #scaling by factor of 150 is as done by Wen et al. in their matlab code.
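# Aside (illustration only, not part of the module): the thresholding used above, on a
# toy "specular component" array; only pixels in [1.5*mean, 4*mean) count as specular.
_toy_speckle = np.array([[0., 10., 120.], [5., 150., 300.]])
_toy_mean = _toy_speckle.mean()                                                             # 97.5
_toy_band = _toy_speckle[(_toy_speckle >= 1.5*_toy_mean) & (_toy_speckle < 4.0*_toy_mean)]  # [150., 300.]
_toy_r = _toy_band.size / float(_toy_speckle.size)                                          # 2/6 ~ 0.33
_toy_m, _toy_s = _toy_band.mean(), _toy_band.std()                                          # 225.0, 75.0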
def marzilianoBlur(image):
    """Method proposed by Marziliano et al. for determining the average width of vertical edges, as a measure of blurredness in an image.
    (Reimplemented from the Matlab code provided by MSU.)
    :param image: 2D gray-level (face) image
    :param regionMask: (optional) 2D matrix (binary image), where 1s mark the pixels belonging to a region of interest, and 0s indicate pixels outside ROI.
    """
:param image: 2D gray-level (face) image
:param regionMask: (optional) 2D matrix (binary image), where 1s mark the pixels belonging to a region of interest, and 0s indicate pixels outside ROI.
"""
def marzilianoBlur(image):
    assert len(image.shape)==2, 'marzilianoBlur():: input image should be a 2D array (gray level image)'
    edgeMap = sobelEdgeMap(image, 'vertical') # compute vertical edge-map of image using sobel
    #There will be some difference between the result of this function and the Matlab version, because the
    #edgeMap produced by sobelEdgeMap() is not exactly the same as that produced by Matlab's edge() function.
    # Test edge-map generated in Matlab produces the same result as the matlab version of MarzilianoBlur().
    # edgeMap = bob.io.base.load('/idiap/temp/sbhatta/msudb_faceEdgeMap.png')
    # imshow(edgeMap)
    #Test edge-map generated in Matlab produces the same result as the matlab version of MarzilianoBlur().
    blurImg = image
    C = blurImg.shape[1] #number of cols in image
@@ -249,19 +275,14 @@ def marzilianoBlur(image):
    return blurMetric
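# Aside (rough sketch, not the package's marzilianoBlur() above): the core of Marziliano's
# metric is to take each vertical-edge pixel and measure the width of the intensity ramp
# around it along its row; the mean ramp width is the blur score (larger = blurrier).
# Relies on the np/snf imports at the top of this module.
def edge_width_sketch(gray):
    gray = gray.astype(float)
    grad = snf.sobel(gray, axis=1)                     # horizontal gradient -> vertical edges
    thresh = 0.5 * np.abs(grad).max()
    widths = []
    for rr, cc in zip(*np.where(np.abs(grad) > thresh)):
        row, sign = gray[rr], np.sign(grad[rr, cc])
        left = cc
        while left > 0 and sign * (row[left] - row[left - 1]) > 0:
            left -= 1                                  # walk to the local extremum on the left
        right = cc
        while right < row.size - 1 and sign * (row[right + 1] - row[right]) > 0:
            right += 1                                 # walk to the local extremum on the right
        widths.append(right - left)
    return float(np.mean(widths)) if widths else 0.0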
"""
returns the first 3 statistical moments (mean, standard-dev., skewness) and 2 other first-order statistical measures of input image
:param channel: 2D array containing gray-image-like data
"""
def calmoment( channel, regionMask=None ):
""" returns the first 3 statistical moments (mean, standard-dev., skewness) and 2 other first-order statistical measures of input image
:param channel: 2D array containing gray-image-like data
"""
assert len(channel.shape) == 2, 'calmoment():: channel should be a 2D array (a single color-channel)'
t = np.arange(0.05, 1.05, 0.05) + 0.025 # t = 0.05:0.05:1;
# t = np.arange(0.05, 1.05, 0.05) + 0.025 # t = 0.05:0.05:1;
# np.insert(t, 0, -np.inf)
# t[-1]= np.inf
# print type(t)
# print t
nPix = np.prod(channel.shape) # pixnum = length(channel(:));
m = np.mean(channel) # m = mean(channel(:));
......
@@ -29,17 +29,12 @@ def computeVideoIQM(video4d):
    #process first frame separately, to get the no. of iqm features
    f=0
    #rgbFrame = video4d[f,:,:,:]
    rgbFrame = video4d[f]
    print(rgbFrame.shape)
    print('processing frame #: %d' %f)
    iqmSet = iqm.compute_quality_features(rgbFrame) #iqmSet = iqm.compute_quality_features(grayFrame)
    numIQM = len(iqmSet)
    print(numIQM)
    iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
    numIQA = len(iqaSet)
    print(numIQA)
    print(iqaSet.shape)
    print(iqmSet.shape)
    #now initialize fset to store iqm features for all frames of input video.
    bobfset = np.zeros([numframes, numIQM])
@@ -48,9 +43,9 @@ def computeVideoIQM(video4d):
    msufset[f] = iqaSet
    for f in range(1,numframes):
        print('frame #: %d' %f)
        print('processing frame #: %d' %f)
        rgbFrame = video4d[f]
        print(rgbFrame.shape)
        # print(rgbFrame.shape)
        bobQFeats = iqm.compute_quality_features(rgbFrame)
        msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
        bobfset[f] = bobQFeats
@@ -91,10 +86,12 @@ def main(command_line_parameters=None):
    (bobIqmFeats, msuIqaFeats) = computeIQM_1video(infile)
    #2. save features in file
    outfile = args.outFile
    print("Saving features in output file: %s" %outfile)
    ohf = bob.io.base.HDF5File(outfile, 'w')
    ohf.set('bobiqm', bobIqmFeats)
    ohf.set('msuiqa', msuIqaFeats)
    del ohf
    print('Done')
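# Aside (usage sketch, not part of the commit): reading the saved features back,
# assuming bob.io.base.HDF5File also provides a matching read() method.
def load_saved_features(h5file):
    ihf = bob.io.base.HDF5File(h5file, 'r')
    bob_iqm_feats = ihf.read('bobiqm')   # num_frames x 18 Galbally features
    msu_iqa_feats = ihf.read('msuiqa')   # num_frames x 121 MSU (Wen) features
    del ihf
    return (bob_iqm_feats, msu_iqa_feats)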
if __name__ == '__main__':
......
This diff is collapsed.
@@ -40,13 +40,16 @@ The examples below show how to use the functions in the two modules.
Note that both feature-sets are extracted from still-images. However, in face-PAD experiments, we typically process videos.
Therefore, the examples below use a video as input, but show how to extract image-quality features for a single frame.
Note also that, in the examples below, the inputs to the feature-extraction functions are full frames. If you wish to extract features only for the face-region, you will first have to construct an image containing only the region of interest, and pass that as the parameter to the feature-extraction functions.
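For example, a minimal sketch of such a crop (the bounding-box coordinates below are hypothetical; use any face detector of your choice to obtain them):

.. code-block:: python

   y0, x0, y1, x1 = 100, 80, 260, 220          # hypothetical face bounding-box
   face_region = rgb_frame[:, y0:y1, x0:x1]    # frame layout: channels x height x width
   face_features = iqa.compute_msu_iqa_features(face_region)
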
Computing Galbally's image-quality measures
-------------------------------------------
The function ``compute_quality_features()`` (in the module ``galbally_iqm_features``) can be used to compute 18 image-quality measures
proposed by Galbally et al. Their paper proposes 25 features; this package implements the following
18 of them, namely:
[mse, psnr, ad, sc, nk, md, lmse, nae, snrv, ramdv, mas, mams, sme, gme, gpe, ssim, vif, hlfi].
Therefore, the function ``galbally_iqm_features::compute_quality_features()`` returns a tuple of 18 scalars, in the order listed above.
The function ``galbally_iqm_features::compute_quality_features()`` returns an 18-D numpy array containing the feature-values in the order listed above.
.. doctest::
@@ -70,13 +73,14 @@ is considered to represent a color RGB image, and is first converted to a gray-l
If the input is 2-dimensional (say, a numpy array of shape [480, 720]), then it is considered to represent a gray-level
image, and the RGB-to-gray conversion step is skipped.
Computing Wen's image-quality measures
--------------------------------------
Computing Wen's (MSU) image-quality measures
--------------------------------------------
The code below shows how to compute the image-quality features proposed by Wen et al. (Here, we refer to these features as
'MSU features'.)
These features are computed from a RGB color-image. The 2 feature-types (image-blur, color-diversity) all together form
a 118-D feature-vector.
The function ``compute_msu_iqa_features()`` (from the module ``msu_iqa_features``) returns a 1D numpy array of length 118.
These features are computed from an RGB color-image. The 3 feature-types (specularity, image-blur, color-diversity) together form
a 121-D feature-vector.
The function ``compute_msu_iqa_features()`` (from the module ``msu_iqa_features``) returns a 1D numpy array of length 121.
.. doctest::
@@ -85,7 +89,7 @@ The function ``compute_msu_iqa_features()`` (from the module ``msu_iqa_features``
>>> rgb_frame = video4d[0]
>>> msuf_set = iqa.compute_msu_iqa_features(rgb_frame)
>>> print(len(msuf_set))
118
121
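Since the specularity features are stacked at the beginning of the vector returned by ``compute_msu_iqa_features()``, the three specularity values (ratio, mean and standard-deviation of specular pixels) can be retrieved as:

.. code-block:: python

   specularity_feats = msuf_set[:3]   # ratio, mean and std. of specular pixels
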
.. _Bob: https://www.idiap.ch/software/bob/
......