Commit a12ea594 authored by Sushil BHATTACHARJEE

added specularity-features code; updated user-guide; removed prints

parent b9a272d1
......@@ -44,20 +44,20 @@ def compute_quality_features(image):
if len(image.shape) == 3:
if(image.shape[0]==3):
gray_image = matlab_rgb2gray(image) #compute gray-level image for input color-frame
print(gray_image.shape)
# print(gray_image.shape)
else:
print('error. Wrong kind of input image')
else:
if len(image.shape) == 2:
gray_image = image
print(gray_image.shape)
# print(gray_image.shape)
else:
print('error -- wrong kind of input')
if gray_image is not None:
gwin = gauss_2d((3,3), 0.5) # set up the smoothing-filter
print("computing degraded version of image")
# print("computing degraded version of image")
smoothed = ssg.convolve2d(gray_image, gwin, boundary='symm', mode='same')
"""
......@@ -67,7 +67,7 @@ def compute_quality_features(image):
approach is that smoothing degrades a spoof-image more than it does a genuine image
(see Galbally's paper referenced above).
"""
print("computing galbally quality features")
# print("computing galbally quality features")
featSet = image_quality_measures(gray_image, smoothed)
return featSet
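# Illustrative usage sketch (editor's note, not part of the original file): assuming this
# module is importable as galbally_iqm_features and that frames are numpy arrays of shape
# (3, H, W), as checked above, the per-frame quality features can be obtained as:
#
#   import numpy as np
#   import galbally_iqm_features as iqm
#   frame = np.random.randint(0, 256, (3, 64, 64)).astype(np.uint8)  # stand-in for a real face frame
#   feats = iqm.compute_quality_features(frame)  # 1D vector, one entry per Galbally quality measure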
......
......@@ -4,19 +4,17 @@ Created on 9 Feb 2016
@author: sbhatta
'''
#import re
#import os
import math
import numpy as np
import scipy as sp
import scipy.signal as ssg
import scipy.ndimage.filters as snf
import bob.ip.base
import bob.ip.color
import galbally_iqm_features as iqm
import tan_specular_highlights as tsh
########## Utility functions ###########
'''
......@@ -103,12 +101,12 @@ def sobelEdgeMap(image, orientation='both'):
########### End of Aux. functions ##############
'''
'''
#def computeMsuIQAFeatures(rgbImage, printFV=False):
def compute_msu_iqa_features(rgbImage):
print("computing msu iqa features")
assert len(rgbImage.shape)==3, 'computeMsuIQAFeatures():: image should be a 3D array (containing a rgb image)'
# print("computing msu iqa features")
assert len(rgbImage.shape)==3, 'compute_msu_iqa_features():: image should be a 3D array (containing a rgb image)'
# hsv = np.zeros_like(rgbImage)
# bob.ip.color.rgb_to_hsv(rgbImage, hsv)
# h = hsv[0,:,:]
......@@ -135,48 +133,76 @@ def compute_msu_iqa_features(rgbImage):
# calculate mean, deviation and skewness of each channel
# use histogram shifting for the hue channel
#print h.shape
# print h.shape
momentFeatsH = calmoment_shift(h)
#print 'H-moments:', momentFeatsH
# print 'H-moments:', momentFeatsH
momentFeats = momentFeatsH.copy()
momentFeatsS = calmoment(s)
#print 'S-moments:', momentFeatsS
# print 'S-moments:', momentFeatsS
momentFeats = np.hstack((momentFeats, momentFeatsS))
momentFeatsV = calmoment(v)
#print 'V-moments:', momentFeatsV
# print 'V-moments:', momentFeatsV
momentFeats = np.hstack((momentFeats, momentFeatsV))
speckleFeats = compute_iqa_specularity_features(rgbImage, startEps=0.06)
#stack the various feature-values in the same order as in MSU's matlab code.
fv = speckleFeats.copy()
fv = np.hstack((fv, momentFeats))
fv = np.hstack((fv, colorHist))
fv = np.hstack((fv, totNumColors))
fv = np.hstack((fv, blurFeat))
fv = np.hstack((fv, pinaBlur))
# print('computed msu features')
return fv
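# Editor's note on the returned vector (derived from the hstack calls above): fv is ordered as
# [specularity features (3), HSV moment features, colorHist, totNumColors, blurFeat, pinaBlur],
# i.e. the same ordering as in MSU's matlab code. Illustrative call (the module name
# msu_iqa_features is an assumption):
#   import msu_iqa_features as iqa
#   fv = iqa.compute_msu_iqa_features(rgb_frame)   # rgb_frame: (3, H, W) numpy array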
"""
Implements the method proposed by Marziliano et al. for determining the average width of vertical edges, as a measure of blurredness in an image.
This function is a Python version of the Matlab code provided by MSU.
def compute_iqa_specularity_features(rgbImage, startEps=0.05):
"""Returns three features characterizing the specularity present in input color image.
First the specular and diffuse components of the input image are separated using the
"""
#separate the specular and diffuse components of input color image.
speckleFreeImg, diffuseImg, speckleImg = tsh.remove_highlights(rgbImage, startEps, verboseFlag=False)
#speckleImg contains the specular-component
if len(speckleImg.shape)==3:
speckleImg = speckleImg[0]
speckleImg = speckleImg.clip(min=0)
speckleMean = np.mean(speckleImg)
lowSpeckleThresh = speckleMean*1.5 #factors 1.5 and 4.0 are proposed by Wen et al. in their paper and matlab code.
hiSpeckleThresh = speckleMean*4.0
# print speckleMean, lowSpeckleThresh, hiSpeckleThresh
specklePixels = speckleImg[np.where(np.logical_and(speckleImg >= lowSpeckleThresh, speckleImg<hiSpeckleThresh))]
r = float(specklePixels.flatten().shape[0])/(speckleImg.shape[0]*speckleImg.shape[1]) #percentage of specular pixels in image
m = np.mean(specklePixels) #mean-specularity (of specular-pixels)
s = np.std(specklePixels) #std. of specularity (of specular-pixels)
return np.asarray((r,m/150.0,s/150.0), dtype=np.float32) #scaling by factor of 150 is as done by Wen et al. in their matlab code.
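# Worked numeric sketch of the thresholding above (editor's illustration with made-up numbers):
# if the specular residue has mean 20, pixels with values in [30, 80) (i.e. 1.5x to 4.0x the mean)
# are counted as specular; their fraction of the image gives r, and their mean/150 and std/150
# give the remaining two features.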
def marzilianoBlur(image):
"""Method proposed by Marziliano et al. for determining the average width of vertical edges, as a measure of blurredness in an image.
(Reimplemented from the Matlab code provided by MSU.)
:param image: 2D gray-level (face) image
:param regionMask: (optional) 2D matrix (binary image), where 1s mark the pixels belonging to a region of interest, and 0s indicate pixels outside ROI.
"""
assert len(image.shape)==2, 'marzilianoBlur():: input image should be a 2D array (gray level image)'
edgeMap = sobelEdgeMap(image, 'vertical') # compute vertical edge-map of image using sobel
#There will be some difference between the result of this function and the Matlab version, because the
#edgeMap produced by sobelEdgeMap() is not exactly the same as that produced by Matlab's edge() function.
# Test edge-map generated in Matlab produces the same result as the matlab version of MarzilianoBlur().
# edgeMap = bob.io.base.load('/idiap/temp/sbhatta/msudb_faceEdgeMap.png')
# imshow(edgeMap)
blurImg = image
C = blurImg.shape[1] #number of cols in image
......@@ -249,19 +275,14 @@ def marzilianoBlur(image):
return blurMetric
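# Illustrative usage (editor's sketch): for a 2D gray-level numpy array 'gray_face',
#   blur = marzilianoBlur(gray_face)
# returns the average width of vertical edges; wider edges indicate a blurrier image.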
"""
returns the first 3 statistical moments (mean, standard-dev., skewness) and 2 other first-order statistical measures of input image
:param channel: 2D array containing gray-image-like data
"""
def calmoment( channel, regionMask=None ):
""" returns the first 3 statistical moments (mean, standard-dev., skewness) and 2 other first-order statistical measures of input image
:param channel: 2D array containing gray-image-like data
"""
assert len(channel.shape) == 2, 'calmoment():: channel should be a 2D array (a single color-channel)'
t = np.arange(0.05, 1.05, 0.05) + 0.025 # t = 0.05:0.05:1;
# t = np.arange(0.05, 1.05, 0.05) + 0.025 # t = 0.05:0.05:1;
# np.insert(t, 0, -np.inf)
# t[-1]= np.inf
# print type(t)
# print t
nPix = np.prod(channel.shape) # pixnum = length(channel(:));
m = np.mean(channel) # m = mean(channel(:));
......
......@@ -29,17 +29,12 @@ def computeVideoIQM(video4d):
#process first frame separately, to get the no. of iqm features
f=0
#rgbFrame = video4d[f,:,:,:]
rgbFrame = video4d[f]
print(rgbFrame.shape)
print('processing frame #: %d' %f)
iqmSet = iqm.compute_quality_features(rgbFrame) #iqmSet = iqm.compute_quality_features(grayFrame)
numIQM = len(iqmSet)
print(numIQM)
iqaSet = iqa.compute_msu_iqa_features(rgbFrame)
numIQA = len(iqaSet)
print(numIQA)
print(iqaSet.shape)
print(iqmSet.shape)
#now initialize fset to store iqm features for all frames of input video.
bobfset = np.zeros([numframes, numIQM])
......@@ -48,9 +43,9 @@ def computeVideoIQM(video4d):
msufset[f] = iqaSet
for f in range(1,numframes):
print('frame #: %d' %f)
print('processing frame #: %d' %f)
rgbFrame = video4d[f]
print(rgbFrame.shape)
# print(rgbFrame.shape)
bobQFeats = iqm.compute_quality_features(rgbFrame)
msuQFeats = iqa.compute_msu_iqa_features(rgbFrame)
bobfset[f] = bobQFeats
......@@ -91,10 +86,12 @@ def main(command_line_parameters=None):
(bobIqmFeats, msuIqaFeats) = computeIQM_1video(infile)
#2. save features in file
outfile = args.outFile
print("Saving features in output file: %s" %outfile)
ohf = bob.io.base.HDF5File(outfile, 'w')
ohf.set('bobiqm', bobIqmFeats)
ohf.set('msuiqa', msuIqaFeats)
del ohf
print('Done')
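# Editor's note: the two feature-sets saved above can later be read back with, e.g.:
#   hf = bob.io.base.HDF5File(outfile)
#   bob_iqm = hf.read('bobiqm'); msu_iqa = hf.read('msuiqa')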
if __name__ == '__main__':
......
'''
Created on May 9, 2016
@author: sbhatta
#------------------------------#
reference:
"separating reflection components of textured surfaces using a single image"
by Robby T. Tan, Katsushi Ikeuchi,
IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI),
27(2), pp.179-193, February, 2005
This Python implementation is inspired by the C++ code provided by Prof. Robby Tan:
http://tanrobby.github.io/code.html#
http://tanrobby.github.io/code/highlight.zip
The main difference between this implementation and the original C++ implementation is that
here the init_labels() function also ignores pixels marked G_DIFFUSE. This leads to a smaller
number of iterations per epsilon value, while producing the same result as the C++ code.
#------------------------------#
'''
import os, sys
import argparse
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
import math
from scipy import ndimage
from scipy import misc
import bob.io.image
import bob.io.base
#special-pixel-markers. The values don't matter, but they should all remain different from each other.
G_SPECULARX = 10
G_SPECULARY = 11
G_DIFFUSE = 12
G_BOUNDARY = 13
G_NOISE = 14
G_CAMERA_DARK = 15
'''
main function that processes the input srcImage and returns the separate components.
'''
def remove_highlights(srcImage, startEps=0.5, verboseFlag=True):
"""Returns the separate reflection components (highlights and diffuse components) comprising the input color RGB image.
Input:
srcImage: numpy array of shape (3, maxY, maxX), containing a RGB image. (maxY, maxX) give the image-size (num. rows, num. cols).
startEps: floating point (small value), determines the number of iterations of algorithm. epsilon is initialized to this value.
At each iteration epsilon is reduced by 0.01, and if it is not less than 0.0, another iteration is performed.
Thus, a value of 0.5 leads to roughly 50 iterations. Specify a smaller value if fewer iterations are desired.
verboseFlag: flag to indicate whether to print some intermediate values or not. Specify 'False' if you want no message printed.
Outputs:
sfi: numpy array of shape (3, maxY, maxX), containing speckle-free image
diffuse: numpy array of shape (3, maxY, maxX), containing the diffuse component
speckleResidue: numpy array of shape (3, maxY, maxX), containing specular component.
"""
assert (len(srcImage.shape) == 3 and srcImage.shape[0]==3), "remove_highlights():: input srcImage should be a numpy array of shape (3, maxY, maxX) representing a RGB image"
srcImage = srcImage.astype(float)
#initialize resulting diffuse-image
diffuse = np.copy(srcImage) #this copy will be updated in zIteration()
srcPixelStatus, sfi, srcMaxChroma, srcMax, srcTotal = specular_free_image(diffuse)
# sfi is the specular-free image.
epsilon=startEps
step=0.01
while(epsilon>=0.0):
if verboseFlag: print('*')
diffuse, srcPixelStatus = iteration(diffuse, srcPixelStatus, sfi, epsilon, verboseFlag)
epsilon -= step
if verboseFlag: print('ep:', epsilon)
speckleResidue = srcImage - diffuse
return sfi, diffuse, speckleResidue #speckleResidue is 3D but for every pixel all channels have the same value.
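# Illustrative usage (editor's sketch; the file name is hypothetical):
#   import bob.io.base
#   img = bob.io.base.load('face.png').astype(float)   # numpy array of shape (3, H, W)
#   sfi, diffuse, speckle = remove_highlights(img, startEps=0.06, verboseFlag=False)
#   # 'speckle' holds the specular residue; 'diffuse' the highlight-free image.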
'''
returns the specular-free image corresponding to the input color image 'src'.
Called in zRemoveHighlights()
'''
def specular_free_image(src, srcPixelStatus=None):
"""Generates specular-free version of input RGB color image src.
Inputs:
src: numpy array of shape (3, maxY, maxX) containing a RGB image.
srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker per pixel, indicating the type of pixel.
This array is updated inside this function, so if the input param. is None, it is first allocated and initialized to 0-values.
Outputs:
srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker per pixel, indicating the type of pixel.
sfi: numpy array of shape (3, maxY, maxX) containing the specular-free image corresponding to src.
srcMaxChroma: numpy array of shape (maxY, maxX) containing the max. chroma for each pixel.
srcMax: numpy array of shape (maxY, maxX) containing the max. among the 3 color-intensities, for each pixel in src.
srcTotal: numpy array of shape (maxY, maxX) containing pixel-wise sum of color-intensities in src.
"""
# allocate output image
cLambda = 0.6 # arbitrary constant. See paper referenced above.
camDark = 10.0 # threshold to determine if a pixel is too dark
lambdaConst = 3.0*cLambda - 1.0
# assert(len(src.shape) == 3 and src.shape[0] == 3), "zSpecularFreeImage():: input src should be a 3D numpy array representing a RGB image (first dim should be 3)"
if srcPixelStatus is None:
srcPixelStatus = np.zeros((src.shape[1], src.shape[2]))
else:
assert(src.shape[1:] == srcPixelStatus.shape), "specular_free_image():: srcPixelStatus should be None, or should match the pixel-layout of src."
for y in range(src.shape[1]):
for x in range(src.shape[2]):
if np.all(src[:,y,x]<camDark):
srcPixelStatus[y,x] = G_CAMERA_DARK
srcMaxChroma, srcMax, srcTotal = max_chroma(src)
numer = srcMax*(3.0*srcMaxChroma - 1.0)
denom = srcMaxChroma*lambdaConst
dI = np.divide(numer, denom)
sI = (srcTotal - dI)/3.0
drgb = src.astype(float) - sI #src: 3d array (rgb image); sI: 2D array matching pixel-layout of src
drgb[np.where(np.isinf(drgb))]=0
drgb[np.where(np.isnan(drgb))]=0
drgb[np.where(drgb<0)]=0
drgb[np.where(drgb>255)]=255
sfi=drgb
return srcPixelStatus, sfi, srcMaxChroma, srcMax, srcTotal
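# Worked numeric check of the formula above (editor's illustration, hypothetical pixel):
# for a pixel (R,G,B) = (200, 180, 120): srcTotal = 500, srcMax = 200, srcMaxChroma = 0.4;
# with cLambda = 0.6 (lambdaConst = 0.8): dI = 200*(3*0.4 - 1)/(0.4*0.8) = 125,
# sI = (500 - 125)/3 = 125, so the specular-free pixel is (75, 55, -5) -> clipped to (75, 55, 0).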
'''
implements one step of the iterative process to convert specular pixels in src to diffuse.
Called in zRemoveHighlights()
'''
def iteration(src, srcPixelStatus, sfi, epsilon, verboseFlag=True):
"""Iteratively converts each specular-pixel to diffuse.
Inputs:
src: numpy array of shape (3, maxY, maxX) containing a RGB image.
srcPixelStatus: numpy array of shape (maxY, maxX) containing a marker per pixel, indicating the type of pixel.
sfi: numpy array of shape (3, maxY, maxX) containing the speckle-free image corresponding to src
epsilon: floating-point (small) value.
verboseFlag: indicator to print something in every loop.
Outputs:
src: numpy array of shape (3, maxY, maxX) containing updated input image
srcPixelStatus: numpy array of shape (maxY, maxX) containing updated pixel-markers
"""
thR=0.1
thG=0.1
pcount=0
# assert len(src.shape)==3, "zIteration():: input src should be a 3D numpy array representing a RGB image"
count, srcPixelStatus = init_labels(src, sfi, epsilon, srcPixelStatus)
while(1):
srcMaxChroma, srcMax, srcTotal = max_chroma(src)
srcChroma, srcTotal = rgb_chroma(src, srcTotal)
maxY = src.shape[1]
maxX = src.shape[2]
red_chroma_diff_x = np.diff(srcChroma[0,:,:], axis=1)
red_chroma_diff_y = np.diff(srcChroma[0,:,:], axis=0)
grn_chroma_diff_x = np.diff(srcChroma[1,:,:], axis=1)
grn_chroma_diff_y = np.diff(srcChroma[1,:,:], axis=0)
if verboseFlag: print('.')
for y in range(maxY-1):
for x in range(maxX-1):
if(srcPixelStatus[y,x] != G_CAMERA_DARK):
drx = red_chroma_diff_x[y,x]
dgx = grn_chroma_diff_x[y,x]
dry = red_chroma_diff_y[y,x]
dgy = grn_chroma_diff_y[y,x]
if(srcPixelStatus[y,x] == G_SPECULARX):
if((abs(drx) > thR) and (abs(dgx) > thG)): # if it is a boundary in the x direction
#pixel right
srcPixelStatus[y,x] = G_BOUNDARY
continue
elif (abs( srcMaxChroma[y,x]- srcMaxChroma[y,x+1] ) <0.01): # if it is a noise
srcPixelStatus[y,x] = G_NOISE
continue
else: # reduce the specularity at x direction
if (srcMaxChroma[y,x] < srcMaxChroma[y,x+1]):
iro=src[:,y,x]
pStat = srcPixelStatus[y,x]
refMaxChroma = srcMaxChroma[y,x+1]
iroTotal = srcTotal[y,x]
iroMax = srcMax[y,x]
iroMaxChroma = srcMaxChroma[y,x]
pStat, iroRes = specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
#zSpecular2Diffuse(src(y,x),src(y,x+1).zMaxChroma())
if pStat == G_NOISE: #update pixelStatus only if zSpecular2Diffuse() returned pStat as G_NOISE.
srcPixelStatus[y,x]=pStat
else:
src[:,y,x]=iroRes
else:
iro=src[:,y,x+1]
pStat = srcPixelStatus[y,x+1]
refMaxChroma = srcMaxChroma[y,x]
iroTotal = srcTotal[y,x+1]
iroMax = srcMax[y,x+1]
iroMaxChroma = srcMaxChroma[y,x+1]
pStat, iroRes = specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
#zSpecular2Diffuse(src(y,x+1),src(y,x).zMaxChroma())
if pStat == G_NOISE: #update pixelStatus only if zSpecular2Diffuse() returned pStat as G_NOISE.
srcPixelStatus[y,x+1]=pStat
else:
src[:,y,x+1]=iroRes
srcPixelStatus[y,x] = G_DIFFUSE
srcPixelStatus[y,x+1] = G_DIFFUSE
if(srcPixelStatus[y,x] == G_SPECULARY):
if((abs(dry) > thR) and (abs(dgy) > thG)): # if it is a boundary in the y direction
# pixel right
srcPixelStatus[y,x] = G_BOUNDARY
continue
elif (abs(srcMaxChroma[y,x]-srcMaxChroma[y+1,x])<0.01): # if it is a noise
srcPixelStatus[y,x] = G_NOISE
continue
else: # reduce the specularity in y direction
if(srcMaxChroma[y,x] < srcMaxChroma[y+1,x]):
iro=src[:,y,x]
pStat = srcPixelStatus[y,x]
refMaxChroma = srcMaxChroma[y+1,x]
iroTotal = srcTotal[y,x]
iroMax = srcMax[y,x]
iroMaxChroma = srcMaxChroma[y,x]
pStat, iroRes = specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma)
#C call: zSpecular2Diffuse(src(y,x),src(y+1,x).zMaxChroma())
if pStat == G_NOISE: #update pixelStatus only if zSpecular2Diffuse() returned pStat as G_NOISE.
srcPixelStatus[y,x]=pStat
else:
src[:,y,x]=iroRes
else:
iro=src[:,y+1,x]
pStat = srcPixelStatus[y+1,x]
pMaxChroma = srcMaxChroma[y+1,x]
iroTotal = srcTotal[y+1,x]
iroMax = srcMax[y+1,x]
iroMaxChroma = srcMaxChroma[y+1,x]
pStat, iroRes = specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, pMaxChroma)
#zSpecular2Diffuse(src(y+1,x),src(y,x).max_chroma())
if pStat == G_NOISE: #update pixelStatus only if zSpecular2Diffuse() returned pStat as G_NOISE.
srcPixelStatus[y+1,x]=pStat
else:
src[:,y+1,x]=iroRes
srcPixelStatus[y,x] = G_DIFFUSE
srcPixelStatus[y+1,x] = G_DIFFUSE
pcount=count
count, srcPixelStatus = init_labels(src, sfi, epsilon, srcPixelStatus)
if(count==0 or pcount<=count): #Robby Tan's original C++ code checks if count<0, but that doesn't make sense as count cannot be negative.
break # break out of the while-loop
srcPixelStatus = reset_labels(srcPixelStatus)
return src, srcPixelStatus
'''
initializes the labels at the beginning and end of each iteration to compute diffuse pixels.
Called in zIteration()
'''
def init_labels(src, sfi, epsilon, srcPixelStatus):
"""Generates initial labels for all pixels in src (input RGB image).
Inputs:
src: numpy array of shape (3, maxY, maxX) containing a RGB image.
sfi: numpy array of shape (3, maxY, maxX) containing a speckle-free image corresponding to src.
epsilon: positive floating point (small) value
srcPixelStatus: numpy array of shape (maxY, maxX) containing pixel-markers corresponding to src.
Returns:
count: number of pixels marked as specular in src.
srcPixelStatus: numpy array of shape (maxY, maxX) containing updated pixel-markers corresponding to src.
"""
# to have initial labels
count=0
zTotalSrc = np.sum(src, axis=0)
diff_x_src = np.diff(zTotalSrc, axis=1)
diff_y_src = np.diff(zTotalSrc, axis=0)
zTotalSfi = np.sum(sfi, axis=0)
diff_x_sfi = np.diff(zTotalSfi, axis=1)
diff_y_sfi = np.diff(zTotalSfi, axis=0)
dlog_src_x = np.log(abs(diff_x_src))
dlog_src_y = np.log(abs(diff_y_src))
dlog_sfi_x = np.log(abs(diff_x_sfi))
dlog_sfi_y = np.log(abs(diff_y_sfi))
dlogx = abs(dlog_src_x - dlog_sfi_x)
dlogy = abs(dlog_src_y - dlog_sfi_y)
maxY = src.shape[1]
maxX = src.shape[2]
for y in range(1, maxY-1):
for x in range(1, maxX-1):
pStat = srcPixelStatus[y,x]
# if pStat <> G_BOUNDARY and pStat <> G_NOISE and pStat <> G_CAMERA_DARK and pStat <> G_DIFFUSE:
if pStat not in (G_BOUNDARY, G_NOISE, G_CAMERA_DARK, G_DIFFUSE):
#Robby Tan's original C++ code doesn't check for pStat<>G_DIFFUSE, but this speeds up the processing a lot, and doesn't seem to affect the results.
if dlogx[y,x] > epsilon:
pStat = G_SPECULARX
count+=1
elif dlogy[y,x] > epsilon:
pStat = G_SPECULARY
count+=1
else:
pStat = G_DIFFUSE
srcPixelStatus[y,x]=pStat
return count, srcPixelStatus # count is the number of specular pixels
'''
Converts a single specular pixel to diffuse.
Called in zIteration()
'''
def specular_to_diffuse(iro, iroMax, iroMaxChroma, iroTotal, refMaxChroma):
"""Converts a color-pixel from speckled to diffuse, by subtracting a certain amount from its intensity value in each color channel.
Inputs:
iro: 3-element column vector containing the rgb-color values of the pixel to be processed.
iroMax: max value among the elements of iro
iroMaxChroma: max-chroma for this pixel
iroTotal: sum of the 3 elements of iro.
refMaxChroma: chroma-value of a neighboring pixel for comparison
Returns:
pixelStatus: a value (marker) indicating whether the pixel is considered as noise or diffuse, after processing.
iro: updated pixel-color-values.
"""
c=iroMaxChroma
pixelStatus = 0 #arbitrary initialization
numr = (iroMax*(3.0*c - 1.0))
denm = (c*(3.0*refMaxChroma - 1.0))
if abs(denm)> 0.000000001: #to avoid div. by zero.
dI = numr / denm
sI = (iroTotal - dI)/3.0
nrgb = iro - sI
if np.any(nrgb<0):
pixelStatus = G_NOISE
else:
iro = nrgb
else:
pixelStatus = G_NOISE
return pixelStatus, iro
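# Worked example of the subtraction above (editor's illustration, hypothetical values):
# iro = (200, 180, 120) => iroTotal = 500, iroMax = 200, iroMaxChroma c = 0.4;
# for a neighbour with refMaxChroma = 0.5: dI = 200*(3*0.4 - 1)/(0.4*(3*0.5 - 1)) = 200,
# sI = (500 - 200)/3 = 100, nrgb = (100, 80, 20) >= 0, so the pixel is updated (not marked as noise).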
'''
reset all pixelStatus labels, except for DARK pixels
src is numpy-array of shape (3,maxY, maxX)
pixelLabels is a numpy-array of shape (maxY, maxX)
Called in zIteration().
'''
def reset_labels(pixelLabels):
"""Resets all pixel-markers (except those indicating "dark" pixels, to 0.
Input:
pixelLabels: numpy array of shape (maxX, maxY) containing pixel-markers.
Returns:
pixelLabels: numpy array of shape (maxX, maxY) containing updated pixel-markers..
"""
# to reset the label of the pixels
pixelLabels[np.where(pixelLabels != G_CAMERA_DARK)] = 0
return pixelLabels
#####
#functions from globals
#####
'''
returns the max of r,g,b values for a given pixel
'''
def max_color(rgbImg):
"""For input RGB image, this function computes max intensity among all color-channels, for each pixel position.
Input:
rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
Returns:
rgbMax: (2D numpy array of shape (maxY, maxX)) contains pixel-wise max. among the three color values.
"""
return np.amax(rgbImg, axis=0)
# '''
# returns min. of r,g,b values for a pixel
# '''
# def zMin(rgbImg):
# return np.amin(rgbImg, axis=0)
'''
returns total of r,g,b values for pixel
'''
def total_color(rgbImg):
"""For input RGB image, this function computes sum of intensities in each color-channel for each pixel position.
Input:
rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
Returns:
rgbTotal: (2D numpy array of shape (maxY, maxX)) contains pixel-wise sum of the 3 color-values.
"""
return np.sum(rgbImg.astype(float), axis=0)
'''
returns max_chroma of input image rgbImg
'''
def max_chroma(rgbImg, rgbMax=None, rgbTotal=None):
"""Given an input RGB image (rgbImg, with shape (3, maxY, maxX)), this function computes the maximum chroma-value for each pixel position.
Inputs:
rgbImg: numpy array of shape (3, maxY, maxX) containing a RGB image.
rgbMax: numpy array of shape (maxY, maxX) containing pixel-wise max. color intensity. If None, it is computed.
rgbTotal: numpy array of shape (maxY, maxX) containing pixel-wise sum of color-intensities. If None, it is computed.
Returns:
maxChroma: (2D numpy array of shape (maxY, maxX)) contains the pixel-wise maximum chroma value (rgbMax/rgbTotal).
rgbMax: (2D numpy array of shape (maxY, maxX)) contains pixel-wise max. among the three color values.
rgbTotal: (2D numpy array of shape (maxY, maxX)) contains pixel-wise sum of the 3 color-values.
"""
if rgbMax is None:
rgbMax = max_color(rgbImg)
if rgbTotal is None:
rgbTotal = total_color(rgbImg)
#srcChroma = np.zeros((src.shape[1], src.shape[2]))
maxChroma = np.divide(rgbMax.astype(float), rgbTotal.astype(float))
maxChroma[np.where(rgbTotal==0)]=0
return maxChroma, rgbMax, rgbTotal
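# Quick numeric check (editor's illustration): for a pixel with (R,G,B) = (200, 100, 100),
# rgbTotal = 400 and rgbMax = 200, so the max-chroma is 200/400 = 0.5;
# a perfectly grey pixel (equal R, G, B) gives the minimum value 1/3.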