Commit 72713d16 authored by Rakesh MEHTA

LBP test added and comments in boosting and trainer

parent b18bc0d0
@@ -162,10 +162,12 @@ class Boost:
        # For each round of boosting initialize a new weak trainer
-       if(self.weak_trainer_type == 'LutTrainer'):
+       if self.weak_trainer_type == 'LutTrainer':
            weak_trainer = trainers.LutTrainer(self.num_entries, self.lut_selection, num_op)
-       elif (self.weak_trainer_type == 'StumpTrainer'):
+       elif self.weak_trainer_type == 'StumpTrainer':
            weak_trainer = trainers.StumpTrainer()
+       elif self.weak_trainer_type == 'GaussTrainer':
+           weak_trainer = trainers.GaussianTrainer(3)

        # Start boosting iterations for num_rnds rounds
@@ -188,6 +190,7 @@ class Boost:
        # Perform lbfgs minimization and compute the scale (alpha_r) for the current weak trainer
        lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_func.loss_sum, init_point, fprime = loss_func.loss_grad_sum, args = (targets, pred_scores, curr_pred_scores))
        alpha = lbfgs_struct[0]
+       print alpha

        # Update the prediction score by adding the scaled score from the current weak classifier: f(x) = f(x) + alpha_r*g_r(x)
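For reference, a minimal sketch of this line search, assuming a simple exponential loss in place of the package's loss_func and made-up toy numbers (the variable names mirror the call above):

import numpy
import scipy.optimize

def loss_sum(alpha, targets, pred_scores, curr_pred_scores):
    # L(alpha) = sum_i exp(-y_i * (f(x_i) + alpha * g(x_i)))  -- assumed toy loss
    return numpy.sum(numpy.exp(-targets * (pred_scores + alpha * curr_pred_scores)))

def loss_grad_sum(alpha, targets, pred_scores, curr_pred_scores):
    # dL/dalpha, returned as an array as fmin_l_bfgs_b expects
    e = numpy.exp(-targets * (pred_scores + alpha * curr_pred_scores))
    return numpy.array([numpy.sum(-targets * curr_pred_scores * e)])

targets = numpy.array([1., -1., 1., -1.])           # toy labels y_i
pred_scores = numpy.array([0.5, -0.2, 0.1, -0.4])   # f(x) accumulated so far
curr_pred_scores = numpy.array([1., -1., -1., -1.]) # g_r(x) of the new weak learner
init_point = numpy.zeros(1)

lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_sum, init_point,
        fprime = loss_grad_sum, args = (targets, pred_scores, curr_pred_scores))
alpha = lbfgs_struct[0]
pred_scores = pred_scores + alpha * curr_pred_scores  # f(x) <- f(x) + alpha_r * g_r(x)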
......
class GaussianMachine():

    def __init__(self, num_classes):
        self.means = numpy.zeros(num_classes)
        self.variance = numpy.zeros(num_classes)
        self.selected_index = 0

    def get_weak_scores(self, features):
        num_classes = self.means.shape[0]
        num_samples = features.shape[0]
        scores = numpy.zeros([num_samples, num_classes])
        for i in range(num_classes):
            mean_i = self.means[i]
            variance_i = self.variance[i]
            feature_i = features[:, self.selected_index]
            denom = numpy.sqrt(2*numpy.pi*variance_i)
            scores[:, i] = numpy.exp(-((feature_i - mean_i)**2)/(2*variance_i))/denom
        return scores


class GaussianTrainer():

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def compute_weak_trainer(self, features, loss_grad):
        num_features = features.shape[1]
        means = numpy.zeros([num_features, self.num_classes])
        variances = numpy.zeros([num_features, self.num_classes])
        summed_loss = numpy.zeros(num_features)
        gauss_machine = GaussianMachine(self.num_classes)

        for feature_index in range(num_features):
            single_feature = features[:, feature_index]
            means[feature_index,:], variances[feature_index,:], summed_loss[feature_index] = self.compute_current_loss(single_feature, loss_grad)

        selected_index = numpy.argmin(summed_loss)
        gauss_machine.selected_index = selected_index
        gauss_machine.means = means[selected_index,:]
        gauss_machine.variance = variances[selected_index,:]
        return gauss_machine

    def compute_current_loss(self, feature, loss_grad):
        num_samples = feature.shape[0]
        mean = numpy.zeros(self.num_classes)
        variance = numpy.zeros(self.num_classes)
        scores = numpy.zeros([num_samples, self.num_classes])
        for class_index in range(self.num_classes):
            # Fit a Gaussian on the samples with a negative loss gradient for this class
            samples_i = feature[loss_grad[:, class_index] < 0]
            mean[class_index] = numpy.mean(samples_i)
            variance[class_index] = numpy.std(samples_i)**2
            denom = numpy.sqrt(2*numpy.pi*variance[class_index])
            scores[:, class_index] = numpy.exp(-((feature - mean[class_index])**2)/(2*variance[class_index]))/denom
        scores_sum = numpy.sum(scores)
        return mean, variance, scores_sum
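A minimal usage sketch for this weak learner on hypothetical toy data (shapes follow compute_weak_trainer above; assumes every class sees at least one sample with a negative gradient):

import numpy

numpy.random.seed(0)
features = numpy.random.rand(20, 5)         # 20 samples, 5 features
loss_grad = numpy.random.rand(20, 3) - 0.5  # a negative gradient marks a "positive" sample per class

trainer = GaussianTrainer(3)
machine = trainer.compute_weak_trainer(features, loss_grad)
scores = machine.get_weak_scores(features)  # Gaussian likelihood per class, shape (20, 3)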
@@ -252,7 +252,7 @@ class LutTrainer():
        type: integer numpy array (#number of samples x #number of features)

        loss_grad: The loss gradient values for the training samples
-       type: numpy array (#number of samples)
+       type: numpy array (#number of samples x #number of outputs)

        Return:
        self: a trained LUT trainer
@@ -360,7 +360,199 @@ class LutTrainer():
return hist_grad
"""
class GaussianMachine():

    def __init__(self, num_classes):
        self.means = numpy.zeros(num_classes)
        self.variance = numpy.zeros(num_classes)
        self.selected_index = 0

    def get_weak_scores(self, features):
        num_classes = self.means.shape[0]
        num_samples = features.shape[0]
        scores = numpy.zeros([num_samples, num_classes])
        for i in range(num_classes):
            mean_i = self.means[i]
            variance_i = self.variance[i]
            feature_i = features[:, self.selected_index]
            denom = numpy.sqrt(2*numpy.pi*variance_i)
            temp = ((feature_i - mean_i)**2)/(2*variance_i)
            numerator = numpy.exp(-temp)
            scores[:, i] = numerator/denom
        return scores


class GaussianTrainer():

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def compute_weak_trainer(self, features, loss_grad):
        num_features = features.shape[1]
        means = numpy.zeros([num_features, self.num_classes])
        variances = numpy.zeros([num_features, self.num_classes])
        summed_loss = numpy.zeros(num_features)
        gauss_machine = GaussianMachine(self.num_classes)

        for feature_index in range(num_features):
            single_feature = features[:, feature_index]
            means[feature_index,:], variances[feature_index,:], summed_loss[feature_index] = self.compute_current_loss(single_feature, loss_grad)

        selected_index = numpy.argmin(summed_loss)
        gauss_machine.selected_index = selected_index
        gauss_machine.means = means[selected_index,:]
        gauss_machine.variance = variances[selected_index,:]
        return gauss_machine

    def compute_current_loss(self, feature, loss_grad):
        num_samples = feature.shape[0]
        mean = numpy.zeros(self.num_classes)
        variance = numpy.zeros(self.num_classes)
        scores = numpy.zeros([num_samples, self.num_classes])
        for class_index in range(self.num_classes):
            samples_i = feature[loss_grad[:, class_index] < 0]
            mean[class_index] = numpy.mean(samples_i)
            variance[class_index] = numpy.std(samples_i)**2
            denom = numpy.sqrt(2*numpy.pi*variance[class_index])
            scores[:, class_index] = numpy.exp(-((feature - mean[class_index])**2)/(2*variance[class_index]))/denom
        scores_sum = numpy.sum(scores)
        return mean, variance, scores_sum
"""
"""
class BayesMachine():

    def __init__(self, num_outputs, num_entries):
        self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
        self.selected_indices = numpy.zeros([num_outputs,1], 'int16')

    def get_weak_scores(self, features):
        # Initialize
        num_samp = len(features)
        num_outputs = len(self.luts[0])
        weak_scores = numpy.zeros([num_samp, num_outputs])

        # Compute weak scores
        for output_index in range(num_outputs):
            weak_scores[:,output_index] = numpy.transpose(self.luts[features[:,self.selected_indices[output_index]],output_index])
        return weak_scores


class BayesTrainer():

    def __init__(self, num_entries, num_outputs, selection_type):
        self.num_entries = num_entries
        self.num_outputs = num_outputs
        self.selection_type = selection_type

    def compute_weak_trainer(self, fea, loss_grad):
        # Initializations
        fea_grad = numpy.zeros([self.num_entries, self.num_outputs])
        lut_machine = LutMachine(self.num_outputs, self.num_entries)

        # Compute the sum of the gradient based on the feature values, i.e. the loss
        # associated with each feature index
        sum_loss = self.compute_grad_sum(loss_grad, fea)

        # Select the most discriminative index (or indices) for classification, i.e. the
        # one that minimizes the loss, and compute the sum of the gradient for that index
        if self.selection_type == 'indep':
            # indep (independent) feature selection is used if all the output dimensions
            # use different features; each selected feature minimizes one dimension of the loss
            for output_index in range(self.num_outputs):
                curr_id = sum_loss[:,output_index].argmin()
                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index], fea[:,curr_id])
                lut_machine.selected_indices[output_index] = curr_id

        elif self.selection_type == 'shared':
            # for 'shared' feature selection the loss function is summed over the output
            # dimensions and the feature that minimizes this cumulative loss is used for all the outputs
            accum_loss = numpy.sum(sum_loss,1)
            selected_findex = accum_loss.argmin()
            lut_machine.selected_indices = selected_findex*numpy.ones([self.num_outputs,1],'int16')
            for output_index in range(self.num_outputs):
                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index], fea[:,selected_findex])

        # Assign the values to the LookUp Table
        lut_machine.luts[fea_grad <= 0.0] = -1
        return lut_machine

    def compute_grad_sum(self, loss_grad, fea):
        # initialize values
        num_fea = len(fea[0])
        sum_loss = numpy.zeros([num_fea, self.num_outputs])

        # Compute the loss for each feature (the original left the inner loop as a stub;
        # this sketch scores a feature by the negated total magnitude of its gradient histogram)
        for feature_index in range(num_fea):
            for output_index in range(self.num_outputs):
                hist_grad = self.compute_grad_hist(loss_grad[:,output_index], fea[:,feature_index])
                sum_loss[feature_index, output_index] = -numpy.sum(numpy.abs(hist_grad))
        return sum_loss

    def compute_grad_hist(self, loss_grado, features):
        # initialize the values
        hist_grad = numpy.zeros([self.num_entries])

        # compute the sum of the gradient for each feature value
        for feature_value in range(self.num_entries):
            hist_grad[feature_value] = sum(loss_grado[features == feature_value])
        return hist_grad
"""
@@ -7,7 +7,7 @@ features. """
import numpy
coord = [[0,0],[0,1],[0,2],[1,2],[2,2],[2,1],[2,0],[1,0]]
class lbp_feature():
""" The class to extract block based LBP type features from the image.
@@ -77,9 +77,7 @@ class lbp_feature():
        # Compute the sum of the blocks for the current scale
        block_sum = integral_img[scale_y+1:,scale_x+1:] + integral_img[0:-(scale_y+1),0:-(scale_x+1)] - integral_img[scale_y+1:,0:-(scale_x+1)] - integral_img[0:-(scale_y+1),scale_x+1:]

-       # Initialize the size of the final feature map that will be obtained
-       feature_map_dimy = block_sum.shape[0] - 2
-       feature_map_dimx = block_sum.shape[1] - 2

        # extract the specific feature from the image
        if self.ftype == 'lbp':
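As a worked sketch of that four-corner lookup, assuming the plain cumulative-sum integral image that the tests below verify:

import numpy

img = numpy.ones((3, 3))
integral_img = img.cumsum(0).cumsum(1)   # 2-D cumulative sum, as the tests below expect

scale_y = scale_x = 0
block_sum = (integral_img[scale_y+1:, scale_x+1:]
           + integral_img[0:-(scale_y+1), 0:-(scale_x+1)]
           - integral_img[scale_y+1:, 0:-(scale_x+1)]
           - integral_img[0:-(scale_y+1), scale_x+1:])
print block_sum   # a 2x2 map; each entry is the sum of one (scale_y+1) x (scale_x+1) block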
@@ -100,7 +98,7 @@ class lbp_feature():

-   def lbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+   def lbp(self, block_sum):
        """Function to compute the LBP for an image at a single scale.
        The LBP features of the given image are computed and the feature map is returned
@@ -113,6 +111,8 @@ class lbp_feature():
        Return:
        feature_map: The lbp feature map
        """
+       feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)

        num_neighbours = 8
        blk_center = block_sum[1:1+feature_map_dimy, 1:1+feature_map_dimx]
        feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
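A minimal re-statement of the encoding this builds, assuming bit i is set wherever neighbour p_i is at least the centre value (the convention the tests below confirm):

import numpy

coord = [[0,0],[0,1],[0,2],[1,2],[2,2],[2,1],[2,0],[1,0]]

def lbp_sketch(block_sum):
    dimy, dimx = block_sum.shape[0] - 2, block_sum.shape[1] - 2
    center = block_sum[1:1+dimy, 1:1+dimx]
    feature_map = numpy.zeros([dimy, dimx])
    for ind in range(8):
        # shifted view of neighbour p_ind for every block position
        pi = block_sum[coord[ind][0]:coord[ind][0]+dimy, coord[ind][1]:coord[ind][1]+dimx]
        feature_map = feature_map + (2**ind)*(pi >= center)
    return feature_map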
@@ -122,7 +122,7 @@ class lbp_feature():

-   def tlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+   def tlbp(self, block_sum):
        """Function to compute the tLBP for an image at a single scale.
        The tLBP features of the given image are computed and the feature map is returned
@@ -135,7 +135,7 @@ class lbp_feature():
        Return:
        feature_map: The lbp feature map
        """
+       feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
        feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
        num_neighbours = 8
@@ -151,7 +151,7 @@ class lbp_feature():

-   def dlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+   def dlbp(self, block_sum):
        """Function to compute the dLBP for an image at a single scale.
        The dLBP features of the given image are computed and the feature map is returned
@@ -165,6 +165,7 @@ class lbp_feature():
        feature_map: The lbp feature map
        """
+       feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
        pc = block_sum[1:1+feature_map_dimy, 1:1+feature_map_dimx]

        num_neighbours = 8
        feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
@@ -175,13 +176,13 @@ class lbp_feature():
            pi4 = block_sum[coord[ind+4][0]:coord[ind+4][0]+feature_map_dimy, coord[ind+4][1]:coord[ind+4][1]+feature_map_dimx]

            """ Compare the neighbours and increment the feature map. """
-           feature_map = feature_map + (2**ind)*((pi-pc)*(pi4 - pc) > 0) + (4**ind)*(abs(pi - pc) >= abs(pi4 - pc))
+           feature_map = feature_map + (2**(2*ind))*((pi-pc)*(pi4 - pc) >= 0) + (2**(2*ind+1))*(abs(pi - pc) >= abs(pi4 - pc))

        return feature_map
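With four neighbour pairs (p_ind, p_ind+4), the corrected line packs the two comparisons of each pair into bits 2*ind and 2*ind+1, so the eight bits cover the full 0-255 range; the old 2**ind and 4**ind weights collided on bits 0 and 2.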
-   def mlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+   def mlbp(self, block_sum):
        """Function to compute the mLBP for an image at a single scale.
        The mLBP features of the given image are computed and the feature map is returned.
@@ -195,6 +196,8 @@ class lbp_feature():
        feature_map: The lbp feature map
        """
+       feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)

        num_neighbours = 8
        pm = numpy.zeros([feature_map_dimy, feature_map_dimx])
@@ -231,4 +234,10 @@ class lbp_feature():
        feature_vector = self.get_features(img, scale_y, scale_x)
        return feature_vector.shape[0]

+   def get_map_dimension(self, block_sum):
+       # Initialize the size of the final feature map that will be obtained
+       feature_map_dimy = block_sum.shape[0] - 2
+       feature_map_dimx = block_sum.shape[1] - 2
+       return feature_map_dimx, feature_map_dimy
@@ -56,7 +56,7 @@ def main():

    # Initialize the trainer with 'LutTrainer', 'StumpTrainer' or 'GaussTrainer'
-   boost_trainer = boosting.Boost('LutTrainer')
+   boost_trainer = boosting.Boost('GaussTrainer')

    # Set the parameters for the boosting
    boost_trainer.num_rnds = args.num_rnds
......
import unittest
import random
import xbob.boosting
import numpy
def get_image_3x3(val):
    img = numpy.zeros([3,3])
    img[0,0] = val[0]
    img[0,1] = val[1]
    img[0,2] = val[2]
    img[1,2] = val[3]
    img[2,2] = val[4]
    img[2,1] = val[5]
    img[2,0] = val[6]
    img[1,0] = val[7]
    img[1,1] = val[8]
    return img
class TestdlbpFeatures(unittest.TestCase):
    """Perform tests for dlbp features.

    The neighbourhood is defined as
        p0 | p1 | p2
        p7 | pc | p3
        p6 | p5 | p4 """
    def test_dlbp_image(self):
        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('dlbp')

        img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.dlbp(img)
        self.assertTrue(returned_lbp == 255)

        img_values = numpy.array([20,1,1,1,10,10,10,10,5])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.dlbp(img)
        self.assertTrue(returned_lbp == 3)
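        # Worked check for the image above: only the (p0, p4) pair fires, since
        # (20-5)*(10-5) >= 0 sets bit 0 and |20-5| >= |10-5| sets bit 1, giving
        # 2**0 + 2**1 = 3; the remaining pairs fail both comparisons.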
        img_values = numpy.array([1,20,1,1,10,10,10,10,5])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.dlbp(img)
        self.assertTrue(returned_lbp == 12)

        img_values = numpy.array([1,1,20,1,10,10,10,10,5])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.dlbp(img)
        self.assertTrue(returned_lbp == 48)

        img_values = numpy.array([1,1,1,20,10,10,10,10,5])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.dlbp(img)
        self.assertTrue(returned_lbp == 192)
import unittest
import random
import xbob.boosting
import numpy
def get_image_3x3(val):
    img = numpy.zeros([3,3])
    img[0,0] = val[0]
    img[0,1] = val[1]
    img[0,2] = val[2]
    img[1,2] = val[3]
    img[2,2] = val[4]
    img[2,1] = val[5]
    img[2,0] = val[6]
    img[1,0] = val[7]
    img[1,1] = val[8]
    return img
class TestIntegralImage(unittest.TestCase):
    """Perform tests on integral images."""

    def test_integral_image(self):
        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
        img = numpy.array([[1,1,1],
                           [1,1,1],
                           [1,1,1]])
        int_img = numpy.array([[1,2,3],
                               [2,4,6],
                               [3,6,9]])
        returned_integral = feature_extractor.compute_integral_image(img)
        self.assertEqual(returned_integral.shape[0], int_img.shape[0])
        self.assertEqual(returned_integral.shape[1], int_img.shape[1])
        self.assertTrue((returned_integral == int_img).all())
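        # The expected matrix above is just the 2-D cumulative sum of the input:
        #     numpy.ones((3,3)).cumsum(0).cumsum(1)  ->  [[1,2,3],[2,4,6],[3,6,9]]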
class TestLbpFeatures(unittest.TestCase):
    """Perform tests on LBP features.

    The neighbourhood is defined as
        p0 | p1 | p2
        p7 | pc | p3
        p6 | p5 | p4 """

    def test_lbp_image(self):
        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')

        img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 255)

        img_values = numpy.array([1,1,1,1,1,1,1,1,0])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 255)

        img_values = numpy.array([0,0,0,0,0,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 0)

        img_values = numpy.array([1,0,0,0,0,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 1)

        img_values = numpy.array([0,1,0,0,0,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 2)

        img_values = numpy.array([0,0,1,0,0,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 4)

        img_values = numpy.array([0,0,0,1,0,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 8)

        img_values = numpy.array([0,0,0,0,2,0,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 16)

        img_values = numpy.array([0,0,0,0,0,4,0,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 32)

        img_values = numpy.array([0,0,0,0,0,0,5,0,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 64)

        img_values = numpy.array([0,0,0,0,0,0,0,5,1])
        img = get_image_3x3(img_values)
        returned_lbp = feature_extractor.lbp(img)
        self.assertTrue(returned_lbp == 128)
import unittest
import random
import xbob.boosting
import numpy
def get_image_3x3(val):
    img = numpy.zeros([3,3])
    img[0,0] = val[0]
    img[0,1] = val[1]
    img[0,2] = val[2]
    img[1,2] = val[3]
    img[2,2] = val[4]
    img[2,1] = val[5]
    img[2,0] = val[6]
    img[1,0] = val[7]
    img[1,1] = val[8]
    return img
class TesttlbpFeatures(unittest.TestCase):
    """Perform tests for tlbp features.

    The neighbourhood is defined as
        p0 | p1 | p2
        p7 | pc | p3
        p6 | p5 | p4 """