Commit f20bd685 authored by Rakesh MEHTA

Trainer tests added

parent 27d8ccae
@@ -181,7 +181,7 @@ class LutMachine():

-    def get_weak_scores(self, fset):
+    def get_weak_scores(self, features):
         """ Function computes classification results according to the LUT machine

         Function classifies the features based on a single LUT machine.
@@ -193,13 +193,13 @@ class LutMachine():
            weak_scores: The classification scores of the features based on current weak classifier"""

         # Initialize
-        num_samp = len(fset)
+        num_samp = len(features)
         num_outputs = len(self.luts[0])
         weak_scores = numpy.zeros([num_samp,num_outputs])

         # Compute weak scores
-        for oi in range(num_outputs):
-            weak_scores[:,oi] = numpy.transpose(self.luts[fset[:,self.selected_indices[oi]],oi])
+        for output_index in range(num_outputs):
+            weak_scores[:,output_index] = numpy.transpose(self.luts[features[:,self.selected_indices[output_index]],output_index])
         return weak_scores
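For context, a minimal standalone sketch (not part of the commit) of the lookup that get_weak_scores performs, assuming the features are already quantized to integers in [0, num_entries); all array values below are hypothetical:

    import numpy

    luts = numpy.array([[1.], [1.], [-1.], [-1.]])  # hypothetical 4-entry LUT, one output
    selected_indices = numpy.array([2])             # hypothetical: feature column 2 was selected
    features = numpy.array([[5, 7, 0],
                            [9, 1, 3]])             # two samples, three integer features
    # index the LUT by each sample's value in the selected column -> [ 1. -1.]
    scores = luts[features[:, selected_indices[0]], 0]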
@@ -233,9 +233,7 @@ class LutTrainer():
         """
         self.num_entries = num_entries
         self.num_outputs = num_outputs
-        self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
         self.selection_type = selection_type
-        self.selected_indices = numpy.zeros([num_outputs,1], 'int16')
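Judging from how the tests added below construct it, the trainer takes (num_entries, selection_type, num_outputs); a minimal usage sketch:

    import xbob.boosting

    # 20 discrete feature values, independent per-output feature selection, one output
    trainer = xbob.boosting.core.trainers.LutTrainer(20, 'indep', 1)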
@@ -261,8 +259,8 @@ class LutTrainer():
         """

         # Initializations
-        num_outputs = loss_grad.shape[1]
-        fea_grad = numpy.zeros([self.num_entries, num_outputs])
+        # num_outputs = loss_grad.shape[1]
+        fea_grad = numpy.zeros([self.num_entries, self.num_outputs])
         lut_machine = LutMachine(self.num_outputs, self.num_entries)

         # Compute the sum of the gradient based on the feature values or the loss associated with each
@@ -270,6 +268,7 @@ class LutTrainer():

         sum_loss = self.compute_grad_sum(loss_grad, fea)

         # Select the most discriminative index (or indices) for classification which minimizes the loss
         # and compute the sum of gradient for that index
@@ -280,10 +279,10 @@ class LutTrainer():

             selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]

-            for oi in range(num_outputs):
-                curr_id = sum_loss[:,oi].argmin()
-                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,curr_id])
-                lut_machine.selected_indices[oi] = curr_id
+            for output_index in range(self.num_outputs):
+                curr_id = sum_loss[:,output_index].argmin()
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index],fea[:,curr_id])
+                lut_machine.selected_indices[output_index] = curr_id

         elif self.selection_type == 'shared':
@@ -293,10 +292,11 @@ class LutTrainer():

             accum_loss = numpy.sum(sum_loss,1)
             selected_findex = accum_loss.argmin()
-            lut_machine.selected_indices = selected_findex*numpy.ones([num_outputs,1],'int16')
+            lut_machine.selected_indices = selected_findex*numpy.ones([self.num_outputs,1],'int16')

-            for oi in range(num_outputs):
-                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,selected_findex])
+            for output_index in range(self.num_outputs):
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index],fea[:,selected_findex])

         # Assign the values to LookUp Table
         lut_machine.luts[fea_grad <= 0.0] = -1
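For intuition, a toy sketch (not from the commit; the sum_loss values are made up) contrasting the two selection modes on a 3-feature, 2-output loss matrix:

    import numpy

    sum_loss = numpy.array([[-5., -1.],
                            [-2., -4.],
                            [-3., -4.]])  # rows = candidate features, cols = outputs

    # 'indep': each output picks its own minimum-loss feature
    indep = [sum_loss[:, o].argmin() for o in range(sum_loss.shape[1])]  # -> [0, 1]

    # 'shared': every output shares the feature minimizing the loss summed over outputs
    shared = numpy.sum(sum_loss, 1).argmin()  # row sums are -6, -6, -7 -> 2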
@@ -323,14 +323,14 @@ class LutTrainer():

         # initialize values
         num_fea = len(fea[0])
         num_samp = len(fea)
-        num_outputs = len(loss_grad[0])
-        sum_loss = numpy.zeros([num_fea,num_outputs])
+        sum_loss = numpy.zeros([num_fea,self.num_outputs])

         # Compute the loss for each feature
-        for fi in range(num_fea):
-            for oi in range(num_outputs):
-                hist_grad = self.compute_grad_hist(loss_grad[:,oi],fea[:,fi])
-                sum_loss[fi,oi] = - sum(abs(hist_grad))
+        for feature_index in range(num_fea):
+            for output_index in range(self.num_outputs):
+                hist_grad = self.compute_grad_hist(loss_grad[:,output_index],fea[:,feature_index])
+                sum_loss[feature_index,output_index] = - sum(abs(hist_grad))
         return sum_loss
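Why the negative absolute sum works as a selection score: a feature whose values separate positive from negative gradients concentrates mass of one sign in each histogram bin, giving a large |hist_grad| and hence a more negative sum_loss, which argmin then prefers. A standalone check with made-up values:

    import numpy

    loss_grad_col = numpy.array([-1., -1., 1., 1.])
    fea_separating = numpy.array([0, 0, 1, 1])  # hypothetical: value predicts gradient sign
    fea_mixed = numpy.array([0, 1, 0, 1])       # hypothetical: value is uninformative

    for fea in (fea_separating, fea_mixed):
        hist_grad = numpy.zeros(2)
        for value in range(2):
            hist_grad[value] = loss_grad_col[fea == value].sum()
        print(-numpy.sum(numpy.abs(hist_grad)))  # prints -4.0, then -0.0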
@@ -339,7 +339,7 @@ class LutTrainer():

-    def compute_grad_hist(self, loss_grado,fval):
+    def compute_grad_hist(self, loss_grado,features):
         """ The function computes the loss for a single feature.

         Function computes sum of the loss gradient that have same feature values.
@@ -350,12 +350,13 @@ class LutTrainer():
            return: hist_grad: The sum of the loss gradient"""

         # initialize the values
-        num_samp = len(fval)
+        num_samp = len(features)
         hist_grad = numpy.zeros([self.num_entries])

         # compute the sum of the gradient
-        for hi in range(self.num_entries):
-            hist_grad[hi] = sum(loss_grado[fval == hi])
+        for feature_value in range(self.num_entries):
+            hist_grad[feature_value] = sum(loss_grado[features == feature_value])
         return hist_grad
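Aside: this per-value summation is equivalent to a weighted bincount, which would avoid the Python loop; a standalone sketch with hypothetical inputs:

    import numpy

    features = numpy.array([0, 2, 2, 1])             # hypothetical quantized feature column
    loss_grado = numpy.array([0.5, -1.0, 0.25, 2.0])
    num_entries = 4
    hist_grad = numpy.bincount(features, weights=loss_grado, minlength=num_entries)
    # -> [ 0.5, 2.0, -0.75, 0.0 ], matching the loop above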
File added: presumably the HDF5 test data that the new tests below read (xbob/boosting/tests/datafile.hdf5).
@@ -4,9 +4,6 @@ import xbob.boosting

 import numpy
 import bob

-def get_single_feature():
-
-    num_feature = 100

 class TestLutTrainer(unittest.TestCase):
     """Class to test the LUT trainer """
@@ -32,10 +29,63 @@ class TestLutTrainer(unittest.TestCase):

+    def test_lut_values(self):
+
+        num_samples = 100
+        max_feature = 20
+        dimension_feature = 10
+        selected_index = 5
+        range_feature = max_feature
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature, 'indep', 1)
+
+        data_file = bob.io.File('xbob/boosting/tests/datafile.hdf5', 'r')
+        #features = bob.io.load('test_data.hdf5')
+        features = data_file.read()
+
+        x_train1 = numpy.copy(features)
+        x_train1[x_train1[:,selected_index] >= 10, selected_index] = 9
+        x_train2 = numpy.copy(features)
+        x_train2[x_train2[:,selected_index] < 10, selected_index] = 10
+        x_train = numpy.vstack((x_train1, x_train2))
+        y_train = numpy.vstack((numpy.ones([num_samples,1]), -numpy.ones([num_samples,1])))
+
+        scores = numpy.zeros([2*num_samples,1])
+        loss_grad = -y_train*(numpy.exp(y_train*scores))
+
+        machine = trainer.compute_weak_trainer(x_train, loss_grad)
+
+        self.assertTrue((machine.luts[0:9] == -1).all())  # the LUT values take the negative of the class sign
+        self.assertTrue((machine.luts[10:] == 1).all())
+    def test_lut_selected_index(self):
+
+        num_samples = 100
+        max_feature = 20
+        dimension_feature = 10
+        delta = 5
+        selected_index = 5
+        range_feature = max_feature + delta
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature, 'indep', 1)
+
+        data_file = bob.io.File('xbob/boosting/tests/datafile.hdf5', 'r')
+        features = data_file.read()
+
+        x_train1 = numpy.copy(features)
+        x_train2 = numpy.copy(features)
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta
+        y_train = numpy.vstack((numpy.ones([num_samples,1]), -numpy.ones([num_samples,1])))
+
+        scores = numpy.zeros([2*num_samples,1])
+        loss_grad = -y_train*(numpy.exp(y_train*scores))
+
+        machine = trainer.compute_weak_trainer(x_train, loss_grad)
+
+        self.assertEqual(machine.selected_indices[0], selected_index)
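A note on the gradient used in both tests: with all scores initialized to zero, loss_grad = -y_train*numpy.exp(y_train*scores) reduces to -y_train, so positive samples carry gradient -1 and negative samples +1; compute_weak_trainer then sets each LUT entry to -1 exactly where the summed gradient is non-positive. A quick standalone check:

    import numpy

    y = numpy.array([[1.], [-1.]])
    scores = numpy.zeros([2, 1])
    loss_grad = -y*(numpy.exp(y*scores))
    print(loss_grad.ravel())  # -> [-1.  1.], i.e. simply -y at zero scores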