Commit 26c6da62 authored by Rakesh MEHTA

added tests and changed variable names

parent 746d7f9a
File added
File added
......@@ -89,11 +89,11 @@ class Boost:
def __init__(self, trainer_type):
def __init__(self, trainer_type, num_rnds = 20, num_entries = 256, loss_type = 'log', lut_selection = 'indep'):
""" The function to initialize the boosting parameters.
The function sets the default values for the following boosting parameters:
The number of rounds for boosting: 100
The number of rounds for boosting: 20
The number of entries in LUT: 256 (For LBP type features)
The loss function type: logit
The LUT selection type: independent
......@@ -101,13 +101,25 @@ class Boost:
Inputs:
trainer_type: The type of trainer for boosting.
Type: string
Values: LutTrainer or StumpTrainer
Values: LutTrainer or StumpTrainer
num_rnds: The number of rounds of boosting
Type: int
Values: 20 (Default)
num_entries: The number of entries for the lookup table
Type: int
Values: 256 (Default)
loss_type: The loss function to be minimized
Type: string
Values: 'log' or 'exp'
lut_selection: The selection type for the LUT based trainers
Type: string
Values: 'indep' or 'shared'
"""
self.num_rnds = 100
self.num_entries = 256
self.loss_type = 'log'
self.lut_selection = 'indep'
self.num_rnds = num_rnds
self.num_entries = num_entries
self.loss_type = loss_type
self.lut_selection = lut_selection
self.weak_trainer_type = trainer_type
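A minimal usage sketch of the new constructor defaults; the module path xbob.boosting.core.boosting is an assumption (only xbob.boosting.core.losses appears elsewhere in this commit), and the overridden values below are illustrative:

import xbob.boosting

# defaults: 20 rounds, 256 LUT entries, logit loss, independent LUT selection
booster = xbob.boosting.core.boosting.Boost('LutTrainer')

# the same constructor with explicit keyword overrides
booster = xbob.boosting.core.boosting.Boost('LutTrainer', num_rnds=50, loss_type='exp', lut_selection='shared')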
......
import numpy
import math
from scipy import optimize
......@@ -42,14 +41,15 @@ class ExpLossFunction():
return loss_grad
#return loss_grad
def loss_sum(self, *args):
#def loss_sum(self, *args):
def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
"""The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
This function is given as the input for the lbfgs optimization function.
Inputs:
x: The current value of the alpha.
alpha: The current value of the alpha.
type: float
targets: The targets for the samples
......@@ -65,23 +65,24 @@ class ExpLossFunction():
Return:
sum_loss: The sum of the loss values for the current value of the alpha
type: float"""
"""
# initialize the values
x = args[0]
targets = args[1]
pred_scores = args[2]
weak_scores = args[3]
"""
# compute the scores and loss for the current alpha
curr_scores_x = pred_scores + x*weak_scores
loss = self.update_loss(targets, curr_scores_x)
curr_scores = prediction_scores + alpha * weak_scores
loss = self.update_loss(targets, curr_scores)
# compute the sum of the loss
sum_loss = numpy.sum(loss,0)
return sum_loss
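For reference, loss_sum evaluates the exponential-loss line-search objective sum_i exp(-y_i * (s_i + alpha * g_i)). A standalone numpy restatement with illustrative arrays:

import numpy

targets = numpy.array([1., -1., 1.])               # y
prediction_scores = numpy.array([0.5, -0.2, 0.1])  # cumulative scores s
weak_scores = numpy.array([0.3, 0.1, -0.4])        # current weak scores g
alpha = 0.5

# same computation as loss_sum above
curr_scores = prediction_scores + alpha * weak_scores
sum_loss = numpy.sum(numpy.exp(-targets * curr_scores), 0)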
def loss_grad_sum(self, *args):
def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
"""The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
......@@ -104,15 +105,18 @@ class ExpLossFunction():
Return:
sum_loss: The sum of the loss gradient values for the current value of the alpha
type: float"""
"""
# initialize the values
x = args[0]
targets = args[1]
pred_scores = args[2]
weak_scores = args[3]
"""
# compute the loss gradient for the updated score
curr_scores_x = pred_scores + x*weak_scores
loss_grad = self.update_loss_grad(targets, curr_scores_x)
curr_scores = prediction_scores + alpha *weak_scores
loss_grad = self.update_loss_grad(targets, curr_scores)
# take the sum of the loss gradient values
sum_grad = numpy.sum(loss_grad*weak_scores, 0)
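A hedged sketch of how loss_sum and loss_grad_sum are typically handed to the L-BFGS line search over alpha; the actual call site is in the boosting trainer, outside this diff, and the arrays are illustrative:

import numpy
from scipy import optimize
from xbob.boosting.core.losses import ExpLossFunction

loss_function = ExpLossFunction()
targets = numpy.array([1., -1., 1.])
prediction_scores = numpy.array([0.5, -0.2, 0.1])
weak_scores = numpy.array([0.3, 0.1, -0.4])

# objective = sum of losses at the trial alpha, fprime = its derivative w.r.t. alpha
alpha_opt, min_loss, info = optimize.fmin_l_bfgs_b(
    lambda a: loss_function.loss_sum(a, targets, prediction_scores, weak_scores),
    numpy.array([0.0]),
    fprime=lambda a: numpy.atleast_1d(loss_function.loss_grad_sum(a, targets, prediction_scores, weak_scores)))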
......@@ -149,7 +153,7 @@ class LogLossFunction():
type: numpy array (# number of samples x #number of outputs)
scores: The current prediction scores for the samples.
type: numpy array (# number of samples)
type: numpy array (# number of samples x # number of outputs)
Return:
gradient: The loss gradient values for the samples """
......@@ -157,7 +161,7 @@ class LogLossFunction():
denom = 1/(1 + e)
return - targets* e* denom
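The expression above follows from L(y, s) = log(1 + exp(-y*s)), whose derivative with respect to s is -y * exp(-y*s) / (1 + exp(-y*s)). A quick finite-difference sanity check with illustrative values:

import numpy

y, s, eps = 1.0, 0.3, 1e-6
e = numpy.exp(-y * s)
analytic = -y * e / (1 + e)
numeric = (numpy.log(1 + numpy.exp(-y * (s + eps))) - numpy.log(1 + numpy.exp(-y * (s - eps)))) / (2 * eps)
assert abs(analytic - numeric) < 1e-6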
def loss_sum(self, *args):
def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
"""The function computes the sum of the logit loss which is used to find the optimized values of alpha (x).
The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
......@@ -170,10 +174,10 @@ class LogLossFunction():
targets: The targets for the samples
type: numpy array (# number of samples x #number of outputs)
pred_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
prediction_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
type: numpy array (# number of samples)
curr_scores: The prediction scores of the samples for the current round of the boosting.
weak_scores: The prediction scores of the samples for the current round of the boosting.
type: numpy array (# number of samples)
......@@ -181,17 +185,18 @@ class LogLossFunction():
sum_loss: The sum of the loss values for the current value of the alpha
type: float"""
"""
x = args[0]
targets = args[1]
pred_scores = args[2]
weak_scores = args[3]
curr_scores_x = pred_scores + x*weak_scores
loss = self.update_loss(targets, curr_scores_x)
sum_l = numpy.sum(loss,0)
return sum_l
"""
curr_scores = prediction_scores + alpha*weak_scores
loss = self.update_loss(targets, curr_scores)
sum_loss = numpy.sum(loss,0)
return sum_loss
#@abstractmethod
def loss_grad_sum(self, *args):
def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
"""The function computes the sum of the logit loss gradient which is used to find the optimized values of alpha (x).
The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
......@@ -214,14 +219,16 @@ class LogLossFunction():
Return:
sum_loss: The sum of the loss gradient values for the current value of the alpha
type: float"""
"""
x = args[0]
targets = args[1]
pred_scores = args[2]
weak_scores = args[3]
curr_scores_x = pred_scores + x*weak_scores
loss_grad = self.update_loss_grad( targets, curr_scores_x)
sum_g = numpy.sum(loss_grad*weak_scores, 0)
return sum_g
"""
curr_scores = prediction_scores + alpha*weak_scores
loss_grad = self.update_loss_grad( targets, curr_scores)
sum_grad = numpy.sum(loss_grad*weak_scores, 0)
return sum_grad
"""def loss_sum(self, targets, scores):
......
......@@ -107,15 +107,16 @@ class StumpTrainer():
# Find the corresponding threshold value
threshold = 0.0
if(opt_id == num_samp-1):
if (opt_id == num_samp-1):
threshold = fea[opt_id]
else:
threshold = (float(fea[opt_id]) + float(fea[opt_id+1]))*0.5
# Find the polarity or the directionality of the current trainer
if(gain_max == gain[opt_id]):
polarity = -1
else:
polarity = 1
polarity = 1
return polarity, threshold, gain_max
......@@ -131,7 +132,7 @@ class StumpTrainer():
scores are either +1 or -1.
Input: self: a weak stump trainer
test_features: A matrix of the test features of dimension.
Num. of Test images x Num of features
Num. of Test images x Num. of features
Return: weak_scores: classification scores of the test features using the weak classifier self
Array of dimension = Num. of samples
"""
......@@ -157,7 +158,7 @@ class LutTrainer():
def __init__(self, num_entries, selection_type, num_op):
def __init__(self, num_entries, selection_type, num_outputs):
""" Function to initialize the parameters.
Function to initialize the weak LutTrainer. Each weak LutTrainer is specified with a
......@@ -175,14 +176,14 @@ class LutTrainer():
and a single feature is used for all the outputs. See Cosmin's thesis for more details.
Type: string {'indep', 'shared'}
num_op: The number of outputs for the classification task.
num_outputs: The number of outputs for the classification task.
type: Integer
"""
self.num_entries = num_entries
self.luts = numpy.ones((num_entries, num_op), dtype = numpy.int)
self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
self.selection_type = selection_type
self.selected_indices = numpy.zeros([num_op,1], 'int16')
self.selected_indices = numpy.zeros([num_outputs,1], 'int16')
......@@ -208,12 +209,13 @@ class LutTrainer():
"""
# Initializations
num_op = loss_grad.shape[1]
fea_grad = numpy.zeros([self.num_entries,num_op])
num_outputs = loss_grad.shape[1]
fea_grad = numpy.zeros([self.num_entries,num_outputs])
# Compute the sum of the gradient based on the feature values or the loss associated with each
# feature index
sum_loss = self.compute_fgrad(loss_grad, fea)
sum_loss = self.compute_grad_sum(loss_grad, fea)
# Select the most discriminative index (or indices) for classification which minimizes the loss
......@@ -226,9 +228,10 @@ class LutTrainer():
selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]
for oi in range(num_op):
for oi in range(num_outputs):
curr_id = sum_loss[:,oi].argmin()
fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,curr_id])
fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,curr_id])
self.selected_indices[oi] = curr_id
......@@ -239,10 +242,10 @@ class LutTrainer():
accum_loss = numpy.sum(sum_loss,1)
selected_findex = accum_loss.argmin()
self.selected_indices = selected_findex*numpy.ones([num_op,1],'int16')
self.selected_indices = selected_findex*numpy.ones([num_outputs,1],'int16')
for oi in range(num_op):
fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,selected_findex])
for oi in range(num_outputs):
fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,selected_findex])
# Assign the values to LookUp Table
self.luts[fea_grad <= 0.0] = -1
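The two selection modes above differ only in how the best feature index is picked from sum_loss; a small sketch with an illustrative 4-feature, 3-output loss table (more negative = more discriminative):

import numpy

sum_loss = numpy.array([[-0.2, -0.9, -0.1],
                        [-0.8, -0.3, -0.2],
                        [-0.1, -0.4, -0.7],
                        [-0.5, -0.1, -0.3]])

# 'indep': every output gets its own best feature index
indep_indices = sum_loss.argmin(axis=0)               # -> [1 0 2]

# 'shared': a single feature index minimizes the loss accumulated over all outputs
shared_index = numpy.sum(sum_loss, axis=1).argmin()   # row sums [-1.2 -1.3 -1.2 -0.9] -> 1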
......@@ -252,7 +255,7 @@ class LutTrainer():
def compute_fgrad(self, loss_grad, fea):
def compute_grad_sum(self, loss_grad, fea):
""" The function to compute the loss gradient for all the features.
The function computes the loss for the whole set of features. The loss refers to the sum of the loss gradient
......@@ -269,13 +272,13 @@ class LutTrainer():
# initialize values
num_fea = len(fea[0])
num_samp = len(fea)
num_op = len(loss_grad[0])
sum_loss = numpy.zeros([num_fea,num_op])
num_outputs = len(loss_grad[0])
sum_loss = numpy.zeros([num_fea,num_outputs])
# Compute the loss for each feature
for fi in range(num_fea):
for oi in range(num_op):
hist_grad = self.compute_hgrad(loss_grad[:,oi],fea[:,fi])
for oi in range(num_outputs):
hist_grad = self.compute_grad_hist(loss_grad[:,oi],fea[:,fi])
sum_loss[fi,oi] = - sum(abs(hist_grad))
......@@ -285,7 +288,7 @@ class LutTrainer():
def compute_hgrad(self, loss_grado,fval):
def compute_grad_hist(self, loss_grado,fval):
""" The function computes the loss for a single feature.
The function computes the sum of the loss gradients that have the same feature values.
......@@ -318,9 +321,9 @@ class LutTrainer():
return:
weak_scores: The classification scores of the features based on current weak classifier"""
num_samp = len(fset)
num_op = len(self.luts[0])
weak_scores = numpy.zeros([num_samp,num_op])
for oi in range(num_op):
num_outputs = len(self.luts[0])
weak_scores = numpy.zeros([num_samp,num_outputs])
for oi in range(num_outputs):
a = self.luts[fset[:,self.selected_indices[oi]],oi]
weak_scores[:,oi] = numpy.transpose(self.luts[fset[:,self.selected_indices[oi]],oi])
return weak_scores
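A sketch of the lookup performed above: for each output the discrete value of the selected feature indexes directly into that output's LUT column. The 4-entry LUT, selected indices and quantized features below are illustrative:

import numpy

luts = numpy.array([[ 1, -1],
                    [-1, -1],
                    [ 1,  1],
                    [-1,  1]])
selected_indices = numpy.array([0, 1])   # feature index chosen per output
fset = numpy.array([[2, 0],
                    [3, 1],
                    [0, 3]])             # quantized feature values in [0, 4)

weak_scores = numpy.zeros((fset.shape[0], luts.shape[1]))
for oi in range(luts.shape[1]):
    weak_scores[:, oi] = luts[fset[:, selected_indices[oi]], oi]
# weak_scores -> [[ 1. -1.]
#                 [-1. -1.]
#                 [ 1.  1.]]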
......
......@@ -137,13 +137,13 @@ class lbp_feature():
"""
feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
num_neighbour = 8
num_neighbours = 8
""" Compute the feature map for the tLBP features. """
for ind in range(num_neighbours):
"""The comparison of pixel is done with the adjacent neighbours."""
comparing_img = block_sum[coord[(ind+1)%num_neighbour][0]:coord[(ind+1)%num_neighbour][0] + feature_map_dimy,coord[(ind+1)%num_neighbour][1]:coord[(ind+1)%num_neighbour][1] + feature_map_dimx]
comparing_img = block_sum[coord[(ind+1)%num_neighbours][0]:coord[(ind+1)%num_neighbours][0] + feature_map_dimy,coord[(ind+1)%num_neighbours][1]:coord[(ind+1)%num_neighbours][1] + feature_map_dimx]
""" Compare the neighbours and increment the feature map. """
feature_map = feature_map + (2**ind)*(block_sum[coord[ind][0]:coord[ind][0] + feature_map_dimy,coord[ind][1]:coord[ind][1] + feature_map_dimx]>= comparing_img)
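A worked sketch of the tLBP comparison above for a single neighbourhood: going clockwise, each neighbour is compared with the next one (wrapping around) and contributes 2**ind when it is greater or equal. The pixel values are illustrative:

import numpy

neighbours = numpy.array([5, 3, 8, 8, 2, 7, 4, 6])   # clockwise ring around the centre pixel
code = 0
for ind in range(8):
    code += (2 ** ind) * (neighbours[ind] >= neighbours[(ind + 1) % 8])
print(code)   # 5>=3, 8>=8, 8>=2, 7>=4 and 6>=5 contribute: 1+4+8+32+128 = 173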
......
......@@ -4,43 +4,86 @@ import xbob.boosting
import numpy
class TestExpLossFunctions(unittest.TestCase):
"""Perform test on loss function """
"""Perform test on exponential loss function """
def test_exp_loss(self):
def test_exp_positive_target(self):
exp_ = xbob.boosting.core.losses.ExpLossFunction()
loss_function = xbob.boosting.core.losses.ExpLossFunction()
target = 1
score = numpy.random.rand()
score = 0.34
alpha = 0.5
targets = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
# check the loss values
l1 = exp_.update_loss(target, score)
val1 = numpy.exp(- target * score)
self.assertEqual(l1,val1)
loss_value = loss_function.update_loss(target, score)
val = numpy.exp(- target * score)
self.assertEqual(loss_value,val)
self.assertTrue(loss_value >= 0)
# Check loss gradient
l2 = exp_.update_loss_grad( target, score)
loss_grad = loss_function.update_loss_grad( target, score)
temp = numpy.exp(-target * score)
val2 = -target * temp
self.assertEqual(loss_grad,val2)
# Check loss sum
loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
val3 = sum(numpy.exp(-targets * curr_scores))
self.assertEqual(val3, loss_sum_val)
# Check the gradient sum
grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
temp = numpy.exp(-targets * curr_scores)
grad = -target * temp
val4 = numpy.sum(grad * weak_scores,0)
self.assertEqual(val4, grad_sum_val)
def test_exp_negative_target(self):
loss_function = xbob.boosting.core.losses.ExpLossFunction()
target = -1
score = 0.34
alpha = 0.5
targets = numpy.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])
weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
# check the loss values
loss_value = loss_function.update_loss(target, score)
val = numpy.exp(- target * score)
self.assertEqual(loss_value,val)
self.assertTrue(loss_value >= 0)
# Check loss gradient
loss_grad = loss_function.update_loss_grad( target, score)
temp = numpy.exp(-target * score)
val2 = -target * temp
self.assertEqual(l2,val2)
self.assertEqual(loss_grad,val2)
# Check loss sum
weak_scores = numpy.random.rand(10)
prev_scores = numpy.random.rand(10)
x = numpy.random.rand(1)
curr_scores = prev_scores + x*weak_scores
l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
val3 = sum(numpy.exp(-target * curr_scores))
self.assertEqual(val3, l3)
loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
val3 = sum(numpy.exp(-targets * curr_scores))
self.assertEqual(val3, loss_sum_val)
# Check the gradient sum
weak_scores = numpy.random.rand(10)
prev_scores = numpy.random.rand(10)
x = numpy.random.rand(1)
curr_scores = prev_scores + x*weak_scores
l4 = exp_.loss_grad_sum(x, target, prev_scores, weak_scores)
temp = numpy.exp(-target * curr_scores)
grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
temp = numpy.exp(-targets * curr_scores)
grad = -target * temp
val4 = numpy.sum(grad * weak_scores,0)
self.assertEqual(val4, l4)
self.assertEqual(val4, grad_sum_val)
import unittest
import random
import xbob.boosting
import numpy
class TestExpLossMulti(unittest.TestCase):
""" Test the loss function using multivariate data """
def test_log_multivariate_dimensions(self):
""" Check the loss function values for multivariate targets """
loss_function = xbob.boosting.core.losses.ExpLossFunction()
num_samples = 2
num_dimension = 2
targets = numpy.array([[1, -1], [-1, 1]])
score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
alpha = 0.5
weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
# check the loss dimensions
loss_value = loss_function.update_loss(targets, score)
self.assertTrue(loss_value.shape[0] == num_samples)
self.assertTrue(loss_value.shape[1] == num_dimension)
# Check loss gradient
grad_value = loss_function.update_loss_grad( targets, score)
self.assertTrue(grad_value.shape[0] == num_samples)
self.assertTrue(grad_value.shape[1] == num_dimension)
# Check loss sum
loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
self.assertTrue(loss_sum.shape[0] == num_samples)
# Check the gradient sum
grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
self.assertTrue(grad_sum.shape[0] == num_samples)
def test_exp_multivariate_values(self):
loss_function = xbob.boosting.core.losses.ExpLossFunction()
num_samples = 2
num_dimension = 2
targets = numpy.array([[1, -1], [-1, 1]])
score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
alpha = 0.5
weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
# check the loss values
loss_value = loss_function.update_loss(targets, score)
val1 = numpy.exp(- targets * score)
self.assertTrue((loss_value == val1).all())
# Check loss gradient
loss_grad = loss_function.update_loss_grad( targets, score)
temp = numpy.exp(-targets * score)
val2 = -targets * temp
self.assertTrue((loss_grad == val2).all())
# Check loss sum
loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
val3 = sum(numpy.exp(-targets * curr_scores))
self.assertTrue((val3 == loss_sum_val).all())
# Check the gradient sum
grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
temp = numpy.exp(-targets * curr_scores)
grad = -targets * temp
val4 = numpy.sum(grad * weak_scores,0)
self.assertTrue((val4 == grad_sum_val).all())
......@@ -6,42 +6,81 @@ import numpy
class TestLogLossFunctions(unittest.TestCase):
"""Perform test on loss function """
def test_log_loss(self):
def test_log_positive_target(self):
""" Check the loss function value for positive targets """
exp_ = xbob.boosting.core.losses.LogLossFunction()
loss_function = xbob.boosting.core.losses.LogLossFunction()
target = 1
score = numpy.random.rand()
score = 0.34
alpha = 0.5
targets = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
# check the loss values
l1 = exp_.update_loss(target, score)
loss_value = loss_function.update_loss(target, score)
val1 = numpy.log(1 + numpy.exp(- target * score))
self.assertEqual(l1,val1)
self.assertEqual(loss_value,val1)
# Check loss gradient
l2 = exp_.update_loss_grad( target, score)
grad_value = loss_function.update_loss_grad( target, score)
temp = numpy.exp(-target * score)
val2 = -(target * temp* (1/(1 + temp)) )
self.assertEqual(l2,val2)
self.assertEqual(grad_value,val2)
# Check loss sum
weak_scores = numpy.random.rand(10)
prev_scores = numpy.random.rand(10)
x = numpy.random.rand(1)
curr_scores = prev_scores + x*weak_scores
l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
val3 = sum(numpy.log(1 + numpy.exp(-target * curr_scores)))
self.assertEqual(val3, l3)
loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))