Commit dceded86 authored by Sushil BHATTACHARJEE's avatar Sushil BHATTACHARJEE
Browse files

initial commit

parent d8c4a37a
.. vim: set fileencoding=utf-8 :
.. Sat 3 Dec 20:18:15 2016 CET
.. image:: http://img.shields.io/badge/docs-stable-yellow.png
:target: http://pythonhosted.org/bob.ip.qualitymeasure/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.png
:target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.ip.qualitymeasure/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure/badges/master/build.svg
:target: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/bob/bob.ip.qualitymeasure
.. image:: http://img.shields.io/pypi/v/bob.ip.qualitymeasure.png
:target: https://pypi.python.org/pypi/bob.ip.qualitymeasure
.. image:: http://img.shields.io/pypi/dm/bob.ip.qualitymeasure.png
:target: https://pypi.python.org/pypi/bob.ip.qualitymeasure
==================================================
Bob's library of image-quality feature-extractors
==================================================
This package is part of the signal-processing and machine learning toolbox
Bob_. It provides functions for extracting image-quality features proposed
for PAD experiments by different research groups.
Installation
------------
Follow our `installation`_ instructions. Then, using the Python interpreter
provided by the distribution, bootstrap and buildout this package::
$ python bootstrap-buildout.py
$ ./bin/buildout
Contact
-------
For questions or reporting issues to this software package, contact our
development `mailing list`_.
.. Place your references here:
.. _bob: https://www.idiap.ch/software/bob
.. _installation: https://gitlab.idiap.ch/bob/bob/wikis/Installation
.. _mailing list: https://groups.google.com/forum/?fromgroups#!forum/bob-devel
# Declare this directory as part of a namespace package, so that sibling
# distributions can contribute sub-modules under the same package name.
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# Same namespace-package boilerplate as the parent package's __init__.py:
# extend __path__ so other distributions can add modules to this package.
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
def get_config():
    """Return a string describing this package's configuration.

    Delegates to bob.extension, which reports versions of this package
    and of its dependencies.
    """
    from bob.extension import get_config as _bob_get_config
    return _bob_get_config(__name__)
# gets sphinx autodoc done right - don't remove it
# Export every public (non-underscore-prefixed) name defined above.
__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
'''
Created on 13 Oct 2015
@author: sbhatta
'''
import os, sys
import argparse
import bob.io.base
import bob.io.video
import bob.ip.color
import numpy as np
import galbally_iqm_features as iqm
import antispoofing.utils.db as bobdb
#'''
#Matlab-like RGB to gray...
# @param: rgbImage : numpy array for the form: [3,h,w] where h is the height of the image and w is the width of the image.
# Returns a y-image in floating-point format (range [(16/255) .. (235/255)])
#'''
#def matlab_rgb2gray(rgbImage):
# #g1 = 0.299*rgbFrame[0,:,:] + 0.587*rgbFrame[1,:,:] + 0.114*rgbFrame[2,:,:] #standard coeffs CCIR601
#
# #this is how it's done in matlab...
# rgbImage = rgbImage / 255.0
# C0 = 65.481/255.0
# C1 = 128.553/255.0
# C2 = 24.966/255.0
# scaleMin = 16.0/255.0
# #scaleMax = 235.0/255.0
# gray = scaleMin + (C0*rgbImage[0,:,:] + C1*rgbImage[1,:,:] + C2*rgbImage[2,:,:])
#
# return gray
# """
# loads a video, and returns a feature-vector for each frame of video
# """
# def computeIQM_1video(vidPath):
# inputVideo = bob.io.video.reader(vidPath)
# vin = inputVideo.load()
# numframes = vin.shape[0]
# fset = np.zeros([numframes, 21])
# for f in range(numframes):
# rgbFrame = vin[f,:,:,:]
# grayFrame = matlab_rgb2gray(rgbFrame) #compute gray-level image for input color-frame
# bobQFeats = np.asarray(iqm.computeQualityFeatures(grayFrame)) # computeQualityFeatures() returns a tuple
# fset[f,:] = bobQFeats
#
# return fset
#
'''
computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
'''
def computeVideoIQM(video4d):
    """Compute image-quality (IQM) features for every frame of a video.

    @param video4d: a '4d' numpy array (N frames, each frame an r-g-b
        image laid out as [3, height, width]).
    @return: a 2d numpy array of shape (N, num_features) holding one
        feature-vector per frame of video4d.
    """
    numframes = video4d.shape[0]

    # Process the first frame separately, just to discover how many IQM
    # features the extractor produces, so fset can be allocated up front.
    # FIX: the original passed a gray-level first frame obtained from
    # matlab_rgb2gray(), a function that is commented out in this module
    # (NameError at runtime), while every remaining frame was processed in
    # RGB; feed the RGB frame consistently instead.
    firstFeats = np.asarray(iqm.compute_quality_features(video4d[0, :, :, :]))
    fset = np.zeros([numframes, len(firstFeats)])
    fset[0, :] = firstFeats

    # Remaining frames reuse the pre-allocated feature matrix.
    for f in range(1, numframes):
        rgbFrame = video4d[f, :, :, :]
        # compute_quality_features() returns a tuple; coerce to an array row.
        fset[f, :] = np.asarray(iqm.compute_quality_features(rgbFrame))
    return fset
'''
loads a video, and returns a feature-vector for each frame of video
'''
def computeIQM_1video(vidPath):
    """Load the video at *vidPath* and return one IQM feature-vector per frame."""
    video_data = bob.io.video.reader(vidPath).load()
    return computeVideoIQM(video_data)
def main(command_line_parameters=None):
    """Command-line driver: compute IQM features for one video.

    Parses the arguments, loads the input video, computes one
    feature-vector per frame, and stores the result in an hdf5 file.
    """
    # Command-line parsing.
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-f', '--print_num_files', action='store_true',
                        dest='printNumFiles', default=False,
                        help='Option to print no. of files that will be processed. (Default: %(default)s)')
    parser.add_argument('-i', '--input_videofile', dest='inpFile', default=None,
                        help='filename of video to be processed (including complete path). Video expected in .mov format.')
    parser.add_argument('-o', '--output_featurefile', dest='outFile', default=None,
                        help='filename where computed features will be stored. Output file will be in hdf5 format.')
    args = parser.parse_args(command_line_parameters)

    # Both the input video and the output feature-file are mandatory.
    if not args.inpFile:
        parser.error('Specify parameter --input_videofile')
    if not args.outFile:
        parser.error('Specify parameter --output_featurefile')

    # Compute features (1 vector per frame of the input video) and save them.
    frameFeats = computeIQM_1video(args.inpFile)
    ohf = bob.io.base.HDF5File(args.outFile, 'w')
    ohf.set('bobiqm', frameFeats)
    del ohf  # releases/closes the hdf5 handle
# Script entry point: forward the command-line args (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
'''
Created on 13 Oct 2015
@author: sbhatta
'''
import os, sys
import argparse
import bob.io.base
import bob.io.image #under the hood: loads Bob plugin for image file
import bob.io.video
import bob.ip.color
import numpy as np
import msu_iqa_features as iqa
#import MSU_MaskedIQAFeats as iqa
import antispoofing.utils.db as bobdb
'''
computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
'''
def computeVideoIQA(video4d, validFrames):
    """Compute MSU image-quality (IQA) features for the valid frames of a video.

    @param video4d: a '4d' numpy array (N frames, each frame an r-g-b image).
    @param validFrames: length-N array of per-frame quality flags; only
        frames whose flag equals 1 are processed.
    @return: a 2d array with one feature-vector per *valid* frame.
    """
    numframes = video4d.shape[0]
    numValidFrames = np.sum(validFrames)

    # Find the first valid frame; it is processed separately just to learn
    # the number of features, so fset can be pre-allocated.
    k = 0
    while validFrames[k] == 0:
        k += 1
    # FIX: Python-2 print statements converted to the print() function.
    print('first valid frame: ', k)

    rgbFrame = video4d[k, :, :, :]
    iqmSet = iqa.computeMsuIQAFeatures(rgbFrame)
    numIQMs = len(iqmSet)
    # Now initialize fset to store the features for all valid frames.
    fset = np.zeros([numValidFrames, numIQMs])
    fset[0, :] = np.asarray(iqmSet)  # computeMsuIQAFeatures() returns a tuple
    print('fset shape:', fset.shape)

    # Process the remaining valid frames; j tracks the output row.
    j = 1
    for f in range(k + 1, numframes):
        if validFrames[f] == 1:
            rgbFrame = video4d[f, :, :, :]
            fset[j, :] = np.asarray(iqa.computeMsuIQAFeatures(rgbFrame))
            j += 1
    return fset
'''
loads a video, and returns a feature-vector for each frame of video
'''
def computeIQA_1video(videoFile, frameQualFile):
    """Load a video and return one IQA feature-vector per valid frame.

    @param videoFile: path to the input video.
    @param frameQualFile: optional hdf5 file holding a '/frameQuality'
        dataset of per-frame quality flags; pass None to treat every frame
        as valid.
    @return: 2d array of feature-vectors, one row per valid frame.
    """
    inputVideo = bob.io.video.reader(videoFile)
    vin = inputVideo.load()
    numFrames = vin.shape[0]

    if frameQualFile is not None:
        f = bob.io.base.HDF5File(frameQualFile)  # read only
        # Reads the list of frame-quality indicators.
        validFrames = (f.read('/frameQuality')).flatten()
        # FIX: the removed Python-2 '<>' operator replaced by '!=';
        # any flag other than 1 marks the frame as invalid.
        validFrames[np.where(validFrames != 1)] = 0
    else:
        validFrames = np.ones(numFrames)

    numValidFrames = np.sum(validFrames)
    # FIX: Python-2 print statement converted to the print() function.
    print('valid frames:', numValidFrames, 'of', numFrames)

    import time
    startTime = time.time()
    fset = computeVideoIQA(vin, validFrames)
    print("Time for one video --- %s seconds ---" % (time.time() - startTime))
    return fset
'''
'''
def parse_arguments(arguments):
    """Build the command-line parser, parse *arguments*, and return (args, database)."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # Verbosity level.
    parser.add_argument('-v', '--verbose', dest='verbose', metavar='INT', type=int,
                        choices=[0, 1, 2], default=1,
                        help='Prints (hopefully helpful) messages (Default: %(default)s)')
    parser.add_argument('-db', '--dbase_path', dest='dbPath', default=None,
                        help='path where database videos exist.')
    parser.add_argument('-op', '--output_path', dest='outPath', default=None,
                        help='path where face-files will be stored.')
    parser.add_argument('-nf', '--numFiles', action='store_true', dest='numFiles',
                        default=False,
                        help='Option to print no. of files that will be processed. (Default: %(default)s)')
    parser.add_argument('-f', '--singleFile', dest='singleFile', default=None,
                        help='filename (including complete path) of video to be used to test this code: %(default)s)')
    parser.add_argument('-ve', '--video_ext', dest='vidExtn', default=None,
                        choices=['avi', 'mov', 'mp4'],
                        help='filename (including complete path) of video to be used to test this code: %(default)s)')
    # Let the antispoofing database layer add its own options.
    bobdb.Database.create_parser(parser, implements_any_of='video')

    args = parser.parse_args(arguments)
    database = args.cls(args)

    if args.singleFile is None:
        # Whole-database mode: the database root folder is mandatory.
        if not args.dbPath:
            parser.error('Specify parameter --dbase_path')
    else:
        # Single-file mode: split the given path into folder + filename.
        args.dbPath = os.path.dirname(args.singleFile)
        args.singleFile = os.path.basename(args.singleFile)

    if not args.outPath:
        parser.error('Specify parameter --output_path')

    return (args, database)
'''
'''
def main(arguments):
    """Compute MSU-IQA features for a database of videos (or a single file).

    In database mode, iterates over all train/devel/test files (optionally
    only the file selected by the SGE grid task id) and writes one hdf5
    feature-file per video. In single-file mode (--singleFile), processes
    just that one video.
    """
    args, database = parse_arguments(arguments)

    inpDir = args.dbPath
    outDir = args.outPath
    assert os.path.exists(inpDir), "Input database folder %s does not exist" % inpDir
    # FIX throughout: Python-2 print statements converted to print() calls.
    if args.verbose > 0:
        print('Loading data from', inpDir)

    if args.singleFile is None:
        # Gather every file (real + attack) across all protocol partitions.
        tr_realFiles, tr_attackFiles = database.get_train_data()
        dv_realFiles, dv_attackFiles = database.get_devel_data()
        ts_realFiles, ts_attackFiles = database.get_test_data()
        allFiles = tr_realFiles + dv_realFiles + ts_realFiles + tr_attackFiles + dv_attackFiles + ts_attackFiles
        del tr_realFiles, tr_attackFiles, dv_realFiles, dv_attackFiles, ts_realFiles, ts_attackFiles

        numFiles = len(allFiles)
        if args.numFiles:
            print('Number of files to be processed:', numFiles)
            print('exiting')
            return

        # If we are on a grid environment, just find what we have to process.
        fileSet = allFiles[0:numFiles]
        # FIX: dict.has_key() was removed in Python 3; use 'in'.
        if 'SGE_TASK_ID' in os.environ:
            pos = int(os.environ['SGE_TASK_ID']) - 1
            if pos >= numFiles:
                # FIX: Python-2 'raise E, msg' syntax replaced.
                raise RuntimeError("Grid request for job %d on a setup with %d jobs" % (pos, numFiles))
            fileSet = [allFiles[pos]]

        print('processing', len(fileSet), ' files')
        k1 = 0
        for k in fileSet:
            # 1. determine input video path for this database entry
            print('filenum:', k1)
            print(k)
            if args.vidExtn is None:
                inVidFile = k.videofile(inpDir)
            else:
                inVidFile = k.make_path(inpDir, ('.' + args.vidExtn))
            inFrameQFile = None  # per-frame quality files are not used here
            outFeatFile = k.make_path(outDir, '.h5')
            head, tail = os.path.split(outFeatFile)
            if not os.path.exists(head):
                os.makedirs(head)  # create output folder, if it doesn't exist
            print(inFrameQFile)
            print(outFeatFile)

            # 2. compute features and 3. save them in an hdf5 file.
            msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
            ohf = bob.io.base.HDF5File(outFeatFile, 'w')
            ohf.set('msuiqa', msuIQAFeats)
            del ohf
            k1 += 1
    else:
        # Test feature-computation with a single file specified as input.
        filePart = os.path.splitext(args.singleFile)[0]
        inVidFile = os.path.join(args.dbPath, filePart) + '.avi'
        inFrameQFile = os.path.join(args.dbPath, filePart) + '_frameQ.h5'
        outFeatFile = os.path.join(outDir, filePart) + '.h5'
        head, tail = os.path.split(outFeatFile)
        if not os.path.exists(head):
            os.makedirs(head)  # create output folder, if it doesn't exist
        print('single file:', inVidFile)
        print(inFrameQFile)
        print(outFeatFile)

        msuIQAFeats = computeIQA_1video(inVidFile, inFrameQFile)
        # Save features in file.
        ohf = bob.io.base.HDF5File(outFeatFile, 'w')
        ohf.set('msuiqa', msuIQAFeats)
        del ohf
# special fn to extract first frame from video-file and store it as hdf5
def extractTestFrame():
    """Extract the first frame of a fixed test video and store it as hdf5.

    One-off utility with hard-coded idiap paths, used to generate reference
    test data; not part of the normal processing pipeline.
    """
    videoFile = '/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real/real_client022_android_SD_scene01.mp4'
    inputVideo = bob.io.video.reader(videoFile)
    # Load the whole video and keep only frame 0 (unused numFrames removed).
    vin = inputVideo.load()
    outframe = vin[0]
    outfile = '/idiap/home/sbhatta/work/git/refactoring/bob.db.msu_mfsd_mod/bob/db/msu_mfsd_mod/test_images/real_client022_android_SD_scene01_frame0_correct.hdf5'
    ohf = bob.io.base.HDF5File(outfile, 'w')
    ohf.set('color_frame', outframe)
    del ohf
# Script entry point: forward the command-line args (minus the program name).
if __name__ == '__main__':
    # extractTestFrame()
    main(sys.argv[1:])
This diff is collapsed.
'''
Created on 9 Feb 2016
@author: sbhatta
'''
#import re
#import os
import math
import numpy as np
import scipy as sp
import scipy.signal as ssg
import scipy.ndimage.filters as snf
import IQMFeatures as iqm
import bob.ip.base
import bob.ip.color
########## Utility functions ###########
'''
Matlab-like RGB to gray...
@param: rgbImage : numpy array for the form: [3,h,w] where h is the height of the image and w is the width of the image.
Returns a y-image in floating-point format (range [(16/255) .. (235/255)])
'''
def matlab_rgb2gray(rgbImage):
    """Matlab-like RGB to gray conversion.

    @param rgbImage: numpy array of the form [3, h, w] (h = image height,
        w = image width), with channel values in [0, 255].
    Returns a y-image in floating-point format, in the range
    [(16/255) .. (235/255)] — mirroring how Matlab computes luma.
    """
    # Normalize to [0, 1] first, as Matlab does internally.
    scaled = rgbImage / 255.0
    # BT.601 luma coefficients, pre-divided by 255 so the result stays in [0, 1].
    C0 = 65.481 / 255.0
    C1 = 128.553 / 255.0
    C2 = 24.966 / 255.0
    offset = 16.0 / 255.0  # black level
    luma = C0 * scaled[0, :, :] + C1 * scaled[1, :, :] + C2 * scaled[2, :, :]
    return offset + luma
'''
'''
def matlab_rgb2hsv(rgbImage):
    """Convert an rgb image ([3, h, w]) to hsv and return the (h, s, v) planes.

    The input is first rescaled to the [0, 1] range (as Matlab does) before
    calling Bob's rgb_to_hsv().
    """
    # FIX: the original hard-coded a flag (isUint8 = True) instead of
    # inspecting the data, so float inputs already in [0, 1] would have been
    # divided by 255 a second time; check the actual dtype instead.
    if rgbImage.dtype == np.uint8:
        rgbImage = rgbImage.astype(np.float64) / 255.0
    hsv = np.zeros_like(rgbImage)
    bob.ip.color.rgb_to_hsv(rgbImage, hsv)
    h = hsv[0, :, :]
    s = hsv[1, :, :]
    v = hsv[2, :, :]
    return (h, s, v)
def imshow(image):
    """Display an image with matplotlib.

    Accepts either a color image laid out as [3, h, w] or a 2d gray image.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    if len(image.shape) == 3:
        # imshow() expects color images channels-last, so rearrange the
        # [3, h, w] data into [h, w, 3] before display.
        outImg = image.tolist()
        # FIX: Python-2 print statement converted to the print() function.
        print(len(outImg))
        result = np.dstack((outImg[0], outImg[1]))
        outImg = np.dstack((result, outImg[2]))
        plt.imshow((outImg * 255.0).astype(np.uint8))
    else:
        if (len(image.shape) == 2):
            # Display gray image.
            plt.imshow(image.astype(np.uint8), cmap=matplotlib.cm.gray)
    plt.show()
########### End of Utilities ##############
########### Auxilliary functions ##############
"""
"""
def sobelEdgeMap(image, orientation='both'):
    """Compute a thinned Sobel edge-map of *image*.

    @param image: 2d gray-level image.
    @param orientation: 'horizontal' or 'vertical' to use only that
        filter response; any other value combines both.
    """
    # bob's sobel returns raw filter-responses which need thresholding
    # (edge-thinning) to become an edge-map.
    thinning = 1
    # FIX: np.float was removed in modern numpy; use the builtin float.
    refImage = image.astype(float)

    # Returns a 3D array: 1st dim is the edge direction — component 0 holds
    # vertical responses, component 1 horizontal responses.
    refSobel_sep = bob.ip.base.sobel(refImage)
    refSobelX = refSobel_sep[0, :, :]
    refSobelY = refSobel_sep[1, :, :]

    # FIX: 'is' compares object identity, not string content; comparing a
    # variable against a string literal with 'is' works only by CPython
    # interning accident — use '==' for value equality.
    if orientation == 'horizontal':
        refEdge = iqm.edgeThinning(refSobelX[:, :], refSobelX[:, :], thinning)
    elif orientation == 'vertical':
        refEdge = iqm.edgeThinning(refSobelY[:, :], refSobelY[:, :], thinning)
    else:
        refEdge = iqm.edgeThinning(refSobelX[:, :], refSobelY[:, :], thinning)
    return refEdge
########### End of Aux. functions ##############
'''
'''
#def computeMsuIQAFeatures(rgbImage, printFV=False):
def computeMsuIQAFeatures(rgbImage):
assert len(rgbImage.shape)==3, 'computeMsuIQAFeatures():: image should be a 3D array (containing a rgb image)'
# hsv = np.zeros_like(rgbImage)
# bob.ip.color.rgb_to_hsv(rgbImage, hsv)
# h = hsv[0,:,:]
# s = hsv[1,:,:]
# v = hsv[2,:,:]
h,s,v = matlab_rgb2hsv(rgbImage) #defined above. Calls Bob's rgb_to_hsv() after rescaling the input image.
#print "computeMsuIQAFeatures():: check bob.ip.color.rgb_to_hsv conversion"
grayImage = np.zeros_like(h, dtype='uint8')
bob.ip.color.rgb_to_gray(rgbImage, grayImage)
blurFeat = blurriness(grayImage)
# print 'blurriness:', blurFeat
pinaBlur = marzilianoBlur(grayImage)
pinaBlur /= 30.0
# print 'pinaBlur:',pinaBlur
colorHist, totNumColors = calColorHist(rgbImage)
totNumColors /= 2000.0 #as done in Matlab code provided by MSU.
# print "color hist shape:", colorHist.shape
# print colorHist[0:11]
# print 'totNumColors', totNumColors
# calculate mean, deviation and skewness of each channel
# use histogram shifting for the hue channel
#print h.shape
momentFeatsH = calmoment_shift(h)
#print 'H-moments:', momentFeatsH
momentFeats = momentFeatsH.copy()
momentFeatsS = calmoment(s)
#print 'S-moments:', momentFeatsS
momentFeats = np.hstack((momentFeats, momentFeatsS))
momentFeatsV = calmoment(v)
#print 'V-moments:', momentFeatsV
momentFeats = np.hstack((momentFeats, momentFeatsV))
fv = momentFeats.copy()
#print 'moment features:', fv
fv = np.hstack((fv, colorHist))
fv = np.hstack((fv, totNumColors))
fv = np.hstack((fv, blurFeat))
fv = np.hstack((fv, pinaBlur))