Commit 9a419ab0 authored by Rakesh MEHTA's avatar Rakesh MEHTA
Browse files

Modification in tests and optimization of code

parent 1a51ef4d
File added
...@@ -53,6 +53,7 @@ setup( ...@@ -53,6 +53,7 @@ setup(
'setuptools', 'setuptools',
'bob', # base signal proc./machine learning library 'bob', # base signal proc./machine learning library
'xbob.db.mnist', 'xbob.db.mnist',
'xbob.db.banca',
], ],
# Your project should be called something like 'xbob.<foo>' or # Your project should be called something like 'xbob.<foo>' or
......
...@@ -190,7 +190,6 @@ class Boost: ...@@ -190,7 +190,6 @@ class Boost:
# Perform lbfgs minimization and compute the scale (alpha_r) for current weak trainer # Perform lbfgs minimization and compute the scale (alpha_r) for current weak trainer
lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_func.loss_sum, init_point, fprime = loss_func.loss_grad_sum, args = (targets, pred_scores, curr_pred_scores)) lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_func.loss_sum, init_point, fprime = loss_func.loss_grad_sum, args = (targets, pred_scores, curr_pred_scores))
alpha = lbfgs_struct[0] alpha = lbfgs_struct[0]
print alpha
# Update the prediction score after adding the score from the current weak classifier f(x) = f(x) + alpha_r*g_r # Update the prediction score after adding the score from the current weak classifier f(x) = f(x) + alpha_r*g_r
......
...@@ -350,12 +350,15 @@ class LutTrainer(): ...@@ -350,12 +350,15 @@ class LutTrainer():
return: hist_grad: The sum of the loss gradient""" return: hist_grad: The sum of the loss gradient"""
# initialize the values # initialize the values
num_samp = len(features) # hist_grad = numpy.zeros([self.num_entries])
hist_grad = numpy.zeros([self.num_entries])
# compute the sum of the gradient # compute the sum of the gradient
for feature_value in range(self.num_entries): hist_grad, bin_val = numpy.histogram(features,bins = self.num_entries, range = (0,self.num_entries-1), weights = loss_grado)
hist_grad[feature_value] = sum(loss_grado[features == feature_value]) # hist_grad = [sum(loss_grado[features == feature_value]) for feature_value in xrange(self.num_entries)]
#for feature_value in range(self.num_entries):
# hist_grad[feature_value] = sum(loss_grado[features == feature_value])
return hist_grad return hist_grad
......
...@@ -82,13 +82,13 @@ class lbp_feature(): ...@@ -82,13 +82,13 @@ class lbp_feature():
# extract the specific feature from the image # extract the specific feature from the image
if self.ftype == 'lbp': if self.ftype == 'lbp':
feature_map = self.lbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum) feature_map = self.lbp(block_sum)
elif self.ftype == 'tlbp': elif self.ftype == 'tlbp':
feature_map = self.tlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum) feature_map = self.tlbp(block_sum)
elif self.ftype == 'dlbp': elif self.ftype == 'dlbp':
feature_map = self.dlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum) feature_map = self.dlbp(block_sum)
elif self.ftype == 'mlbp': elif self.ftype == 'mlbp':
feature_map = self.mlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum) feature_map = self.mlbp(block_sum)
# reshape feature image into vector # reshape feature image into vector
temp_vector = numpy.reshape(feature_map,feature_map.shape[0]*feature_map.shape[1],1) temp_vector = numpy.reshape(feature_map,feature_map.shape[0]*feature_map.shape[1],1)
......
...@@ -85,8 +85,7 @@ def main(): ...@@ -85,8 +85,7 @@ def main():
boost_trainer.selection_type = args.selection_type boost_trainer.selection_type = args.selection_type
boost_trainer.num_entries = args.num_entries boost_trainer.num_entries = args.num_entries
print "Starting boosting the features" print "Start boosting the features"
print train_fea.shape
# Perform boosting of the feature set samp # Perform boosting of the feature set samp
machine = boost_trainer.train(train_fea, train_targets) machine = boost_trainer.train(train_fea, train_targets)
......
...@@ -28,6 +28,7 @@ class TestdlbpFeatures(unittest.TestCase): ...@@ -28,6 +28,7 @@ class TestdlbpFeatures(unittest.TestCase):
p6 | p5 | p4 """ p6 | p5 | p4 """
def test_dlbp_image(self): def test_dlbp_image(self):
""" Test for dlbp features with different neighbouring pixel values """
feature_extractor = xbob.boosting.features.local_feature.lbp_feature('dlbp') feature_extractor = xbob.boosting.features.local_feature.lbp_feature('dlbp')
img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc
img = get_image_3x3(img_values) img = get_image_3x3(img_values)
......
...@@ -22,6 +22,7 @@ class TestIntegralImage(unittest.TestCase): ...@@ -22,6 +22,7 @@ class TestIntegralImage(unittest.TestCase):
"""Perform test on integral images""" """Perform test on integral images"""
def test_integral_image(self): def test_integral_image(self):
""" Test on the integral images """
feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp') feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
img = numpy.array([[1,1,1], img = numpy.array([[1,1,1],
[1,1,1], [1,1,1],
...@@ -46,7 +47,8 @@ class TestLbpFeatures(unittest.TestCase): ...@@ -46,7 +47,8 @@ class TestLbpFeatures(unittest.TestCase):
p7 | pc | p3 p7 | pc | p3
p6 | p5 | p4 """ p6 | p5 | p4 """
def test_integral_image(self): def test_lbp_features(self):
"""Test for LBP features with different neighbouring values."""
feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp') feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc
img = get_image_3x3(img_values) img = get_image_3x3(img_values)
......
...@@ -7,6 +7,7 @@ class TestExpLossFunctions(unittest.TestCase): ...@@ -7,6 +7,7 @@ class TestExpLossFunctions(unittest.TestCase):
"""Perform test on exponential loss function """ """Perform test on exponential loss function """
def test_exp_positive_target(self): def test_exp_positive_target(self):
""" Loss values computation test for postitive targets. """
loss_function = xbob.boosting.core.losses.ExpLossFunction() loss_function = xbob.boosting.core.losses.ExpLossFunction()
target = 1 target = 1
...@@ -47,6 +48,7 @@ class TestExpLossFunctions(unittest.TestCase): ...@@ -47,6 +48,7 @@ class TestExpLossFunctions(unittest.TestCase):
self.assertEqual(val4, grad_sum_val) self.assertEqual(val4, grad_sum_val)
def test_exp_negative_target(self): def test_exp_negative_target(self):
""" Exponential Loss values computation test for negative targets. """
loss_function = xbob.boosting.core.losses.ExpLossFunction() loss_function = xbob.boosting.core.losses.ExpLossFunction()
target = -1 target = -1
......
...@@ -28,6 +28,7 @@ class TestmlbpFeatures(unittest.TestCase): ...@@ -28,6 +28,7 @@ class TestmlbpFeatures(unittest.TestCase):
p6 | p5 | p4 """ p6 | p5 | p4 """
def test_mlbp_image(self): def test_mlbp_image(self):
""" Test on mlbp feature with different neighbourhood values."""
feature_extractor = xbob.boosting.features.local_feature.lbp_feature('mlbp') feature_extractor = xbob.boosting.features.local_feature.lbp_feature('mlbp')
img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc, mean = 1 img_values = numpy.array([1,1,1,1,1,1,1,1,1]) # p0,p1,p2,p3,p4,p5,p6,p7,pc, mean = 1
img = get_image_3x3(img_values) img = get_image_3x3(img_values)
......
...@@ -152,9 +152,9 @@ class TestStumpTrainer(unittest.TestCase): ...@@ -152,9 +152,9 @@ class TestStumpTrainer(unittest.TestCase):
self.assertEqual(trained_threshold, threshold) self.assertEqual(trained_threshold, threshold)
if(fea1 < fea2): if(fea1 < fea2):
polarity = 1
else:
polarity = -1 polarity = -1
else:
polarity = 1
self.assertEqual(trained_polarity, polarity) self.assertEqual(trained_polarity, polarity)
...@@ -181,9 +181,9 @@ class TestStumpTrainer(unittest.TestCase): ...@@ -181,9 +181,9 @@ class TestStumpTrainer(unittest.TestCase):
self.assertEqual(trained_threshold, threshold) self.assertEqual(trained_threshold, threshold)
if(fea1 < fea2): if(fea1 < fea2):
polarity = 1
else:
polarity = -1 polarity = -1
else:
polarity = 1
self.assertEqual(trained_polarity, polarity) self.assertEqual(trained_polarity, polarity)
...@@ -208,9 +208,9 @@ class TestStumpTrainer(unittest.TestCase): ...@@ -208,9 +208,9 @@ class TestStumpTrainer(unittest.TestCase):
if(fea1 < fea2): if(fea1 < fea2):
polarity = 1
else:
polarity = -1 polarity = -1
else:
polarity = 1
self.assertEqual(trained_polarity, polarity) self.assertEqual(trained_polarity, polarity)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment