Commit 8559a22f authored by Sushil BHATTACHARJEE's avatar Sushil BHATTACHARJEE

code update; added example script

parent 9988d042
......@@ -38,20 +38,26 @@ def compute_quality_features(image):
"Image Quality Assessment for Fake Biometric Detection: Application to Iris, Fingerprint, and Face Recognition",
IEEE Trans. on Image Processing Vol 23(2), 2014.
"""
gray_image = None
#print("shape of input image:")
#print(image.shape)
if len(image.shape) == 3:
if(image.shape[0]==3):
gray_image = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
gray_image = matlab_rgb2gray(image) #compute gray-level image for input color-frame
print(gray_image.shape)
else:
print('error. Wrong kind of input image')
else:
if len(image.shape) == 2:
gray_image = image
print(gray_image.shape)
else:
print('error -- wrong kind of input')
gwin = gauss_2d((3,3), 0.5)
if gray_image:
if gray_image is not None:
gwin = gauss_2d((3,3), 0.5) # set up the smoothing-filter
print("computing degraded version of image")
smoothed = ssg.convolve2d(gray_image, gwin, boundary='symm', mode='same')
"""
......@@ -61,6 +67,7 @@ def compute_quality_features(image):
approach is that smoothing degrades a spoof-image more than it does a genuine image
(see Galbally's paper referenced above).
"""
print("computing galbally quality features")
featSet = image_quality_measures(gray_image, smoothed)
return featSet
......
......@@ -91,12 +91,12 @@ def sobelEdgeMap(image, orientation='both'):
refSobelX = refSobel_sep[0,:,:]
refSobelY = refSobel_sep[1,:,:]
if orientation is 'horizontal':
refEdge = iqm.edgeThinning(refSobelX[:,:], refSobelX[:,:], thinning)
refEdge = iqm.edge_thinning(refSobelX[:,:], refSobelX[:,:], thinning)
else:
if orientation is 'vertical':
refEdge = iqm.edgeThinning(refSobelY[:,:], refSobelY[:,:], thinning)
refEdge = iqm.edge_thinning(refSobelY[:,:], refSobelY[:,:], thinning)
else:
refEdge = iqm.edgeThinning(refSobelX[:,:], refSobelY[:,:], thinning)
refEdge = iqm.edge_thinning(refSobelX[:,:], refSobelY[:,:], thinning)
return refEdge
......@@ -106,7 +106,8 @@ def sobelEdgeMap(image, orientation='both'):
'''
'''
#def computeMsuIQAFeatures(rgbImage, printFV=False):
def computeMsuIQAFeatures(rgbImage):
def compute_msu_iqa_features(rgbImage):
print("computing msu iqa features")
assert len(rgbImage.shape)==3, 'computeMsuIQAFeatures():: image should be a 3D array (containing a rgb image)'
# hsv = np.zeros_like(rgbImage)
# bob.ip.color.rgb_to_hsv(rgbImage, hsv)
......@@ -115,7 +116,7 @@ def computeMsuIQAFeatures(rgbImage):
# v = hsv[2,:,:]
h,s,v = matlab_rgb2hsv(rgbImage) #defined above. Calls Bob's rgb_to_hsv() after rescaling the input image.
#print "computeMsuIQAFeatures():: check bob.ip.color.rgb_to_hsv conversion"
#print "compute_msu_iqa_features():: check bob.ip.color.rgb_to_hsv conversion"
grayImage = np.zeros_like(h, dtype='uint8')
bob.ip.color.rgb_to_gray(rgbImage, grayImage)
......@@ -147,7 +148,8 @@ def computeMsuIQAFeatures(rgbImage):
momentFeats = np.hstack((momentFeats, momentFeatsV))
fv = momentFeats.copy()
#print 'moment features:', fv
#print('moment features: ')
#print(fv)
fv = np.hstack((fv, colorHist))
fv = np.hstack((fv, totNumColors))
......@@ -367,7 +369,7 @@ def rgbhist(image, maxval, nBins, normType=0):
for i in range(0, numPix): # for i=1:size(I,1)*size(I,2)
p = (im[i,:]).astype(float) # p = double(im(i,:));
p = np.floor(p/decimator) # p = floor(p/(maxval/nBins))+1;
p = (np.floor(p/decimator)).astype(np.uint32) # p = floor(p/(maxval/nBins))+1;
H[p[0], p[1], p[2]] += 1 # H(p(1),p(2),p(3)) = H(p(1),p(2),p(3)) + 1;
# end
#totalNBins = np.prod(H.shape)
......
'''
Created on 13 Oct 2015
@author: sbhatta
'''
import os, sys
import argparse
import bob.io.base
import bob.io.image #under the hood: loads Bob plugin for image file
import bob.io.video
import bob.ip.color
import numpy as np
import msu_iqa_features as iqa
#import MSU_MaskedIQAFeats as iqa
import antispoofing.utils.db as bobdb
def computeVideoIQA(video4d, validFrames):
    """Compute MSU image-quality features for every valid frame of a video.

    @param video4d: a '4d' video (N frames, each frame representing an r-g-b
        image, i.e. shape [N, 3, h, w]).
    @param validFrames: 1-d array of N per-frame quality indicators; frames
        whose indicator is 0 are skipped.
    Returns a 2-d array of feature-vectors, one row per valid frame of video4d.
    Raises ValueError when no frame is marked valid.
    """
    numframes = video4d.shape[0]
    numValidFrames = np.sum(validFrames)
    if numValidFrames == 0:
        # guard: the original scan below would run past the end of validFrames
        raise ValueError('computeVideoIQA(): no valid frames in input video')
    # process the first valid frame separately, to learn the no. of iqa
    # features per frame (needed to size the output array).
    k = 0
    while validFrames[k] == 0:
        k += 1
    print('first valid frame: %d' % k)
    rgbFrame = video4d[k, :, :, :]
    # NOTE(review): this commit renames the iqa entry point elsewhere
    # (computeMsuIQAFeatures -> compute_msu_iqa_features) -- confirm which
    # name the installed msu_iqa_features module exposes.
    iqmSet = iqa.computeMsuIQAFeatures(rgbFrame)
    numIQMs = len(iqmSet)
    # now initialize fset to store iqa features for all valid frames.
    fset = np.zeros([numValidFrames, numIQMs])
    fset[0, :] = np.asarray(iqmSet)  # computeMsuIQAFeatures() returns a tuple
    print('fset shape: %s' % str(fset.shape))
    j = 1
    for f in range(k + 1, numframes):
        if validFrames[f] == 1:
            rgbFrame = video4d[f, :, :, :]
            fset[j, :] = np.asarray(iqa.computeMsuIQAFeatures(rgbFrame))
            j += 1
    return fset
def computeIQA_1video(videoFile, frameQualFile):
    """Load a video and return a feature-vector for each valid frame.

    @param videoFile: path of the input video file.
    @param frameQualFile: path of an hdf5 file holding per-frame quality
        indicators in dataset '/frameQuality', or None, in which case every
        frame is treated as valid.
    Returns the array of per-frame feature-vectors from computeVideoIQA().
    """
    # 1. load input video
    inputVideo = bob.io.video.reader(videoFile)
    vin = inputVideo.load()
    numFrames = vin.shape[0]
    # 2. determine which frames to process
    if frameQualFile is not None:
        f = bob.io.base.HDF5File(frameQualFile)  # opened read-only
        validFrames = (f.read('/frameQuality')).flatten()
        # '<>' was removed in python 3; '!=' is equivalent in python 2 too
        validFrames[validFrames != 1] = 0
    else:
        validFrames = np.ones(numFrames)
    numValidFrames = np.sum(validFrames)
    print('valid frames: %d of %d' % (numValidFrames, numFrames))
    # 3. compute (and time) the feature-set
    import time
    startTime = time.time()
    fset = computeVideoIQA(vin, validFrames)
    print("Time for one video --- %s seconds ---" % (time.time() - startTime))
    return fset
def parse_arguments(arguments):
    """Parse the command line and instantiate the requested database.

    @param arguments: list of command-line tokens (e.g. sys.argv[1:]).
    Returns a tuple (args, database): the parsed argparse namespace and the
    antispoofing database object built from it.
    Calls argParser.error() (which exits) when required paths are missing.
    """
    argParser = argparse.ArgumentParser(description=__doc__,
                                        formatter_class=argparse.RawDescriptionHelpFormatter)
    argParser.add_argument('-v', '--verbose', dest='verbose', metavar='INT', type=int,
                           choices=[0, 1, 2], default=1,
                           help='Prints (hopefully helpful) messages (Default: %(default)s)')
    argParser.add_argument('-db', '--dbase_path', dest='dbPath', default=None,
                           help='path where database videos exist.')
    argParser.add_argument('-op', '--output_path', dest='outPath', default=None,
                           help='path where face-files will be stored.')
    argParser.add_argument('-nf', '--numFiles', action='store_true', dest='numFiles',
                           default=False,
                           help='Option to print no. of files that will be processed. (Default: %(default)s)')
    argParser.add_argument('-f', '--singleFile', dest='singleFile', default=None,
                           help='filename (including complete path) of video to be used to test this code: %(default)s)')
    # help text below was previously a copy-paste of the --singleFile help;
    # it now describes what the option actually does.
    argParser.add_argument('-ve', '--video_ext', dest='vidExtn', default=None,
                           choices=['avi', 'mov', 'mp4'],
                           help='extension of the video-files to be processed (Default: %(default)s)')
    bobdb.Database.create_parser(argParser, implements_any_of='video')
    args = argParser.parse_args(arguments)
    database = args.cls(args)
    if args.singleFile is None:
        # make sure the user specifies a folder where feature-files exist
        if not args.dbPath:
            argParser.error('Specify parameter --dbase_path')
    else:
        # split the single file into its folder (used as dbPath) and basename
        folder = os.path.dirname(args.singleFile)
        filename = os.path.basename(args.singleFile)
        args.dbPath = folder
        args.singleFile = filename
    if not args.outPath:
        argParser.error('Specify parameter --output_path')
    return (args, database)
def main(arguments):
    """Compute MSU-IQA features for database videos (or one single video).

    For each input video, computes one feature-vector per valid frame and
    stores the resulting array in an hdf5 file (dataset 'msuiqa') under the
    specified output path.
    @param arguments: command-line tokens, forwarded to parse_arguments().
    """
    args, database = parse_arguments(arguments)
    inpDir = args.dbPath
    outDir = args.outPath
    assert os.path.exists(inpDir), "Input database folder %s does not exist" % inpDir
    if args.verbose > 0:
        print('Loading data from %s' % inpDir)
    if args.singleFile is None:
        # process all files (train + devel + test, real + attack) of the db
        tr_realFiles, tr_attackFiles = database.get_train_data()
        dv_realFiles, dv_attackFiles = database.get_devel_data()
        ts_realFiles, ts_attackFiles = database.get_test_data()
        allFiles = tr_realFiles + dv_realFiles + ts_realFiles + tr_attackFiles + dv_attackFiles + ts_attackFiles
        del tr_realFiles, tr_attackFiles, dv_realFiles, dv_attackFiles, ts_realFiles, ts_attackFiles
        numFiles = len(allFiles)
        if args.numFiles:
            print('Number of files to be processed: %d' % numFiles)
            print('exiting')
            return
        # if we are on a grid environment, just find what I have to process.
        fileSet = allFiles[0:numFiles]
        # 'has_key' was removed in python 3; the 'in' test works in both.
        if 'SGE_TASK_ID' in os.environ:
            pos = int(os.environ['SGE_TASK_ID']) - 1
            if pos >= numFiles:
                # python-3 compatible raise syntax (was: raise RuntimeError, "...")
                raise RuntimeError("Grid request for job %d on a setup with %d jobs" % (pos, numFiles))
            fileSet = [allFiles[pos]]
        print('processing %d files' % len(fileSet))
        k1 = 0
        for k in fileSet:
            # 1. load video file
            print('filenum: %d' % k1)
            print(k)
            if args.vidExtn is None:
                inVidFile = k.videofile(inpDir)
            else:
                inVidFile = k.make_path(inpDir, ('.' + args.vidExtn))
            inFrameQFile = None  # frame-quality file not used in db mode
            outFeatFile = k.make_path(outDir, '.h5')
            head, tail = os.path.split(outFeatFile)
            if not os.path.exists(head):
                os.makedirs(head)  # create output folder, if it doesn't exist
            print(inFrameQFile)
            print(outFeatFile)
            # 2. compute features, 1 vector per valid frame of input video
            msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
            # 3. save features in file
            ohf = bob.io.base.HDF5File(outFeatFile, 'w')
            ohf.set('msuiqa', msuIQAFeats)
            del ohf
            k1 += 1
    else:
        # test feature-computation with a single file specified as input
        filePart = os.path.splitext(args.singleFile)[0]
        inVidFile = os.path.join(args.dbPath, filePart) + '.avi'
        inFrameQFile = os.path.join(args.dbPath, filePart) + '_frameQ.h5'
        outFeatFile = os.path.join(outDir, filePart) + '.h5'
        head, tail = os.path.split(outFeatFile)
        if not os.path.exists(head):
            os.makedirs(head)  # create output folder, if it doesn't exist
        print('single file: %s' % inVidFile)
        print(inFrameQFile)
        print(outFeatFile)
        msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
        # save features in file
        ohf = bob.io.base.HDF5File(outFeatFile, 'w')
        ohf.set('msuiqa', msuIQAFeats)
        del ohf
# special fn to extract first frame from video-file and store it as hdf5
def extractTestFrame(videoFile='/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real/real_client022_android_SD_scene01.mp4',
                     outfile='/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real_client022_android_SD_scene01_frame0_correct.hdf5'):
    """Extract the first frame of a video and save it in an hdf5 file.

    Paths were previously hard-coded; they are now parameters whose defaults
    preserve the original behavior when called with no arguments.
    @param videoFile: path of the input video.
    @param outfile: path of the output hdf5 file (frame stored in dataset
        'color_frame').
    """
    inputVideo = bob.io.video.reader(videoFile)
    vin = inputVideo.load()  # load the whole input video
    outframe = vin[0]        # first frame only
    ohf = bob.io.base.HDF5File(outfile, 'w')
    ohf.set('color_frame', outframe)
    del ohf
# script entry point: compute features for the videos named on the command line
if __name__ == '__main__':
    # extractTestFrame()
    main(sys.argv[1:])
......@@ -2,7 +2,7 @@
# vim: set fileencoding=utf-8 :
'''
Created on 13 Oct 2015
Created on 08 Mar 2017
@author: sbhatta
'''
......@@ -14,113 +14,86 @@ import bob.io.base
import bob.io.video
import bob.ip.color
import numpy as np
import galbally_iqm_features as iqm
import antispoofing.utils.db as bobdb
#'''
#Matlab-like RGB to gray...
# @param: rgbImage : numpy array for the form: [3,h,w] where h is the height of the image and w is the width of the image.
# Returns a y-image in floating-point format (range [(16/255) .. (235/255)])
#'''
#def matlab_rgb2gray(rgbImage):
# #g1 = 0.299*rgbFrame[0,:,:] + 0.587*rgbFrame[1,:,:] + 0.114*rgbFrame[2,:,:] #standard coeffs CCIR601
#
# #this is how it's done in matlab...
# rgbImage = rgbImage / 255.0
# C0 = 65.481/255.0
# C1 = 128.553/255.0
# C2 = 24.966/255.0
# scaleMin = 16.0/255.0
# #scaleMax = 235.0/255.0
# gray = scaleMin + (C0*rgbImage[0,:,:] + C1*rgbImage[1,:,:] + C2*rgbImage[2,:,:])
#
# return gray
# """
# loads a video, and returns a feature-vector for each frame of video
# """
# def computeIQM_1video(vidPath):
# inputVideo = bob.io.video.reader(vidPath)
# vin = inputVideo.load()
# numframes = vin.shape[0]
# fset = np.zeros([numframes, 21])
# for f in range(numframes):
# rgbFrame = vin[f,:,:,:]
# grayFrame = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
# bobQFeats = np.asarray(iqm.computeQualityFeatures(grayFrame)) # computeQualityFeatures() returns a tuple
# fset[f,:] = bobQFeats
#
# return fset
#
from bob.ip.qualitymeasure import galbally_iqm_features as iqm
from bob.ip.qualitymeasure import msu_iqa_features as iqa
'''
computes image-quality features for a set of frames comprising a video.
def computeVideoIQM(video4d):
'''computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
'''
def computeVideoIQM(video4d):
'''
numframes = video4d.shape[0]
numframes=3
#print(numframes)
#process first frame separately, to get the no. of iqm features
f=0
rgbFrame = video4d[f,:,:,:]
grayFrame = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
iqmSet = iqm.compute_quality_features(grayFrame)
numIQMs = len(iqmSet)
#rgbFrame = video4d[f,:,:,:]
rgbFrame = video4d[f]
print(rgbFrame.shape)
iqmSet = iqm.compute_quality_features(rgbFrame) #iqmSet = iqm.compute_quality_features(grayFrame)
numIQM = len(iqmSet)
iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
numIQA = len(iqaSet)
#now initialize fset to store iqm features for all frames of input video.
fset = np.zeros([numframes, numIQMs])
bobQFeats = np.asarray(iqmSet) # computeQualityFeatures() returns a tuple
fset[f,:] = bobQFeats
bobfset = np.zeros([numframes, numIQM])
bobQFeats = np.asarray(iqmSet) # compute_quality_features() returns a tuple
bobfset[f] = bobQFeats
msufset = np.zeros([numframes, numIQA])
msuQFeats = np.asarray(iqaSet)
msufset[f] = msuQFeats
for f in range(1,numframes):
rgbFrame = video4d[f,:,:,:]
print('frame #: %d' %f)
rgbFrame = video4d[f]
print(rgbFrame.shape)
# grayFrame = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
# bobQFeats = np.asarray(iqm.compute_quality_features(grayFrame)) # computeQualityFeatures() returns a tuple
bobQFeats = np.asarray(iqm.compute_quality_features(rgbFrame)) # computeQualityFeatures() returns a tuple
fset[f,:] = bobQFeats
msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
bobfset[f] = bobQFeats
msufset[f] = msuQFeats
return fset
return (bobfset, msufset)
'''
loads a video, and returns a feature-vector for each frame of video
'''
def computeIQM_1video(vidPath):
""" loads a video, and returns 2 arrays of feature-vectors -- one per feature-family.
Each array contains one feature-vector per frame
"""
#1. load video from input path
inputVideo = bob.io.video.reader(vidPath)
vin = inputVideo.load()
#2. compute and return feature-sets
return computeVideoIQM(vin)
def main(command_line_parameters=None):
"""Computes image-quality features for specified video-file, and stores the feature-arrays in specified output hdf5 file"""
#code for parsing command line args.
argParser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
argParser.add_argument('-f', '--print_num_files', action='store_true', dest='printNumFiles',
default=False, help='Option to print no. of files that will be processed. (Default: %(default)s)')
argParser.add_argument('-i', '--input_videofile', dest='inpFile', default = None,
help='filename of video to be processed (including complete path). Video expected in .mov format.')
argParser.add_argument('-i', '--input_videofile', dest='inpVidFile', default = None,
help='filename of video to be processed (including complete path). ')
argParser.add_argument('-o', '--output_featurefile', dest='outFile', default = None,
help='filename where computed features will be stored. Output file will be in hdf5 format.')
args = argParser.parse_args(command_line_parameters)
#make sure the user specifies a folder where feature-files exist
if not args.inpFile: argParser.error('Specify parameter --input_videofile')
if not args.inpVidFile: argParser.error('Specify parameter --input_videofile')
if not args.outFile: argParser.error('Specify parameter --output_featurefile')
#1. load video file
infile = args.inpFile #k.make_path(videoRoot, '.mov')
#2. compute features, 1 vector per frame of input video.
bobIqmFeats = computeIQM_1video(infile)
#3. save features in file
outfile = args.outFile #k.make_path(featRoot, '.h5')
#1. compute features, 1 vector per frame of input video, per feature-family (galbally,msu).
infile = args.inpVidFile
(bobIqmFeats, msuIqaFeats) = computeIQM_1video(infile)
#2. save features in file
outfile = args.outFile
ohf = bob.io.base.HDF5File(outfile, 'w')
ohf.set('bobiqm', bobIqmFeats)
ohf.set('msuiqa', msuIqaFeats)
del ohf
......
......@@ -34,7 +34,7 @@ setup(
entry_points={
# scripts should be declared using this entry:
'console_scripts': [
'detect_landmarks.py = bob.ip.facelandmarks.script.detect_landmarks:main',
'compute_qualityfeatures.py = bob.ip.qualitymeasure.script.compute_qualitymeasures:main',
],
},
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment