Commit 25391245 authored by Sushil BHATTACHARJEE

Merge branch 'cleanup' into 'master'

Code clean-up

See merge request !1
parents 208a18f3 dbdd3b4f
Pipeline #8443 passed with stages in 11 minutes and 44 seconds
@@ -4,6 +4,7 @@
from .galbally_iqm_features import compute_quality_features
from .msu_iqa_features import compute_msu_iqa_features
def get_config():
"""
Returns a string containing the configuration information.
@@ -13,7 +14,5 @@ def get_config():
return bob.extension.get_config(__name__)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
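As a usage note, the get_config() helper touched in this hunk only wraps bob.extension.get_config(__name__); a minimal sketch, assuming the package is importable as bob.ip.qualitymeasure:

import bob.ip.qualitymeasure

# Print the build/dependency summary assembled by bob.extension.get_config().
print(bob.ip.qualitymeasure.get_config())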
@@ -2,7 +2,6 @@
# vim: set fileencoding=utf-8 :
def get_config():
"""
Returns a string containing the configuration information.
@@ -12,7 +11,5 @@ def get_config():
return bob.extension.get_config(__name__)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -7,7 +7,7 @@ Created on 08 Mar 2017
@author: sbhatta
'''
import os, sys
import sys
import argparse
import bob.io.base
@@ -18,81 +18,97 @@ from bob.ip.qualitymeasure import galbally_iqm_features as iqm
from bob.ip.qualitymeasure import msu_iqa_features as iqa
def computeVideoIQM(video4d):
'''computes image-quality features for a set of frames comprising a video.
@param video4d: a '4d' video (N frames, each frame representing an r-g-b image).
returns a set of feature-vectors, 1 vector per frame of video4d
@param video4d: a '4d' video (N frames, each frame representing an r-g-b
image). returns a set of feature-vectors, 1 vector per frame of
video4d
'''
numframes = video4d.shape[0]
numframes=3
#print(numframes)
#process first frame separately, to get the no. of iqm features
f=0
numframes = 3
# print(numframes)
# process first frame separately, to get the no. of iqm features
f = 0
rgbFrame = video4d[f]
print('processing frame #: %d' %f)
iqmSet = iqm.compute_quality_features(rgbFrame) #iqmSet = iqm.compute_quality_features(grayFrame)
print('processing frame #: %d' % f)
# iqmSet = iqm.compute_quality_features(grayFrame)
iqmSet = iqm.compute_quality_features(rgbFrame)
numIQM = len(iqmSet)
iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
numIQA = len(iqaSet)
#now initialize fset to store iqm features for all frames of input video.
# now initialize fset to store iqm features for all frames of input video.
bobfset = np.zeros([numframes, numIQM])
bobfset[f] = iqmSet
msufset = np.zeros([numframes, numIQA])
msufset[f] = iqaSet
for f in range(1,numframes):
print('processing frame #: %d' %f)
for f in range(1, numframes):
print('processing frame #: %d' % f)
rgbFrame = video4d[f]
# print(rgbFrame.shape)
bobQFeats = iqm.compute_quality_features(rgbFrame)
msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
bobfset[f] = bobQFeats
msufset[f] = msuQFeats
return (bobfset, msufset)
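As a usage note for the per-frame calls above, a minimal sketch, assuming a video file readable by bob.io.video (the path is a placeholder) and the per-frame feature lengths checked in the tests below (18 Galbally IQM values, 121 MSU IQA values):

import bob.io.video
from bob.ip.qualitymeasure import galbally_iqm_features as iqm
from bob.ip.qualitymeasure import msu_iqa_features as iqa

frame = bob.io.video.reader('/path/to/video.mp4').load()[0]  # first RGB frame of the video
iqm_feats = iqm.compute_quality_features(frame)   # Galbally IQM feature-vector (18 values in the tests)
iqa_feats = iqa.compute_msu_iqa_features(frame)   # MSU IQA feature-vector (121 values in the tests)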
def computeIQM_1video(vidPath):
""" loads a video, and returns 2 arrays of feature-vectors -- one per feature-family.
Each array contains one feature-vector per frame
""" loads a video, and returns 2 arrays of feature-vectors -- one per
feature-family. Each array contains one feature-vector per frame
"""
#1. load video from input path
# 1. load video from input path
inputVideo = bob.io.video.reader(vidPath)
vin = inputVideo.load()
#2. compute and return feature-sets
# 2. compute and return feature-sets
return computeVideoIQM(vin)
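A short sketch of how computeIQM_1video() would be called (the video path is a placeholder); each returned array holds one feature-vector per processed frame:

bob_iqm_feats, msu_iqa_feats = computeIQM_1video('/path/to/video.mp4')
print(bob_iqm_feats.shape, msu_iqa_feats.shape)  # one row per processed frame; 18 and 121 columns per the tests below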
def main(command_line_parameters=None):
"""Computes image-quality features for specified video-file, and stores the feature-arrays in specified output hdf5 file"""
argParser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
argParser.add_argument('-i', '--input_videofile', dest='inpVidFile', default = None,
help='filename of video to be processed (including complete path). ')
argParser.add_argument('-o', '--output_featurefile', dest='outFile', default = None,
help='filename where computed features will be stored. Output file will be in hdf5 format.')
"""Computes image-quality features for specified video-file, and stores the
feature-arrays in specified output hdf5 file"""
argParser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
argParser.add_argument(
'-i',
'--input_videofile',
dest='inpVidFile',
default=None,
help='filename of video to be processed (including complete path). ')
argParser.add_argument(
'-o',
'--output_featurefile',
dest='outFile',
default=None,
help='filename where computed features will be stored. Output file '
'will be in hdf5 format.')
args = argParser.parse_args(command_line_parameters)
#make sure the user specifies a folder where feature-files exist
if not args.inpVidFile: argParser.error('Specify parameter --input_videofile')
if not args.outFile: argParser.error('Specify parameter --output_featurefile')
# make sure the user specifies a folder where feature-files exist
if not args.inpVidFile:
argParser.error('Specify parameter --input_videofile')
if not args.outFile:
argParser.error('Specify parameter --output_featurefile')
#1. compute features, 1 vector per frame of input video, per feature-family (galbally,msu).
# 1. compute features, 1 vector per frame of input video, per
# feature-family (galbally,msu).
infile = args.inpVidFile
(bobIqmFeats, msuIqaFeats) = computeIQM_1video(infile)
#2. save features in file
# 2. save features in file
outfile = args.outFile
print("Saving features in output file: %s" %outfile)
print("Saving features in output file: %s" % outfile)
ohf = bob.io.base.HDF5File(outfile, 'w')
ohf.set('bobiqm', bobIqmFeats)
ohf.set('msuiqa', msuIqaFeats)
del ohf
print('Done')
if __name__ == '__main__':
main(sys.argv[1:])
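For completeness, a hedged sketch of driving main() and reading the stored features back; the file names are placeholders, and the HDF5 keys match the ohf.set() calls above:

# Equivalent to running the script with -i/-o on the command line.
main(['-i', '/path/to/video.mp4', '-o', 'features.h5'])

import bob.io.base
hf = bob.io.base.HDF5File('features.h5')
galbally_feats = hf.read('/bobiqm')   # Galbally IQM features, one row per frame
msu_feats = hf.read('/msuiqa')        # MSU IQA features, one row per frame
del hf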
@@ -15,9 +15,6 @@ import bob.io.base.test_utils
import bob.io.video
import bob.ip.color
#from .utils import detect_landmarks, draw_landmarks, save_landmarks, Result
#from .utils import detect_landmarks_on_boundingbox
#from .script.detect_landmarks import main as app
from . import galbally_iqm_features as iqm
from . import msu_iqa_features as iqa
@@ -25,81 +22,87 @@ from . import msu_iqa_features as iqa
REF_VIDEO_FILE = 'real_client001_android_SD_scene01.mp4'
REF_FEATURE_FILE = 'real_client001_android_SD_scene01_ref_feats.h5'
F = lambda n: pkg_resources.resource_filename(__name__, os.path.join('data', n))
def F(n):
return pkg_resources.resource_filename(
__name__, os.path.join('data', n))
input_video_file = F(REF_VIDEO_FILE)
assert os.path.isfile(input_video_file), "File: not found: %s" % input_video_file
assert os.path.isfile(
input_video_file), "File: not found: %s" % input_video_file
inputVideo = bob.io.video.reader(input_video_file)
video_data = inputVideo.load()
numframes=3
numframes = 3
def load_reference_features():
ref_feat_file = F(REF_FEATURE_FILE)
assert os.path.isfile(ref_feat_file), "File: not found: %s" % ref_feat_file
rf = bob.io.base.HDF5File(ref_feat_file)
assert rf.has_key('/bobiqm'), "Key: /bobiqm not found in file %s" % ref_feat_file
assert rf.has_key('/msuiqa'), "Key: /msuiqa not found in file %s" % ref_feat_file
galbally_ref_features = rf.read('/bobiqm')
msu_ref_features = rf.read('/msuiqa')
del rf
return (galbally_ref_features, msu_ref_features)
# load reference-features into global vars.
galbally_ref_features, msu_ref_features = load_reference_features()
def test_galbally_feat_extr():
iqm_len=18 # change this, if you add more features to galbally_iqm_features module.
bobfset = np.zeros([numframes, iqm_len]) # feature-array to hold features for several frames
f=0
# change this, if you add more features to galbally_iqm_features module.
iqm_len = 18
# feature-array to hold features for several frames
bobfset = np.zeros([numframes, iqm_len])
f = 0
#process first frame separately, to get the no. of iqm features
# process first frame separately, to get the no. of iqm features
rgbFrame = video_data[f]
iqmSet = iqm.compute_quality_features(rgbFrame)
numIQM = len(iqmSet)
#test: check that numIQM is the same as expected iqm_len (18)
nose.tools.eq_(numIQM, iqm_len)
#store features for first frame in feature-array
# test: check that numIQM is the same as expected iqm_len (18)
nose.tools.eq_(numIQM, iqm_len)
# store features for first frame in feature-array
bobfset[f] = iqmSet
#now store iqm features for remaining test-frames of input video.
for f in range(1,numframes):
# now store iqm features for remaining test-frames of input video.
for f in range(1, numframes):
rgbFrame = video_data[f]
bobfset[f] = iqm.compute_quality_features(rgbFrame)
#bobQFeats = iqm.compute_quality_features(rgbFrame)
#bobfset[f] = bobQFeats
#test: compare feature-values in bobfset[] with those loaded from hdf5 file
nose.tools.assert_true((bobfset==galbally_ref_features).all())
#np.allclose(A,B)
# test: compare feature-values in bobfset[] with those loaded from hdf5 file
nose.tools.assert_true((bobfset == galbally_ref_features).all())
# np.allclose(A,B)
def test_msu_feat_extr():
iqa_len = 121 # change this, if you change the no. of features in msu_iqa_features module.
msufset = np.zeros([numframes, iqa_len]) # feature-array to hold features for several frames
f=0
# change this, if you change the no. of features in msu_iqa_features module.
iqa_len = 121
# feature-array to hold features for several frames
msufset = np.zeros([numframes, iqa_len])
f = 0
#process first frame separately, to get the no. of iqa features
# process first frame separately, to get the no. of iqa features
rgbFrame = video_data[f]
iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
numIQA = len(iqaSet)
#test: check that numIQA matches the expected iqa_len(121)
nose.tools.eq_(numIQA, iqa_len)
#store features for first frame in feature-array
# test: check that numIQA matches the expected iqa_len(121)
nose.tools.eq_(numIQA, iqa_len)
# store features for first frame in feature-array
msufset[f] = iqaSet
#now store iqm features for remaining test-frames of input video.
for f in range(1,numframes):
# now store iqm features for remaining test-frames of input video.
for f in range(1, numframes):
rgbFrame = video_data[f]
msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
msufset[f] = msuQFeats
#test: compare feature-values in bobfset[] with those loaded from hdf5 file
nose.tools.assert_true((msufset==msu_ref_features).all())
#np.allclose(A,B)
# test: compare feature-values in bobfset[] with those loaded from hdf5 file
nose.tools.assert_true((msufset == msu_ref_features).all())
@@ -2,6 +2,8 @@
.. Sushil Bhattacharjee <sushil.bhattacharjee@idiap.ch>
.. Tue 08 Mar 2017 15:42:29 CET
.. _bob.ip.qualitymeasure:
===============================================================
Bob's Routines for Image-Quality Measures for PAD Applications
===============================================================