diff --git a/data1.hdf5 b/data1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..68c7b19ea89c102deae65a07ccf3a5087998a52c
Binary files /dev/null and b/data1.hdf5 differ
diff --git a/datafile1.hdf5 b/datafile1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e16e27279ee8b566359bdbd0556cf107ef228b3f
Binary files /dev/null and b/datafile1.hdf5 differ
diff --git a/xbob/boosting/core/boosting.py b/xbob/boosting/core/boosting.py
index d5ec85fdb78984bb7fdc001dd2551a31adf678e2..47dfacc3c072412b08b27be8d0b1db5d9051d768 100644
--- a/xbob/boosting/core/boosting.py
+++ b/xbob/boosting/core/boosting.py
@@ -89,11 +89,11 @@ class Boost:
 
 
 
-    def __init__(self, trainer_type):
+    def __init__(self, trainer_type, num_rnds = 20, num_entries = 256, loss_type = 'log', lut_selection = 'indep'):
         """ The function to initialize the boosting parameters. 
 
         The function set the default values for the following boosting parameters:
-        The number of rounds for boosting: 100
+        The number of rounds for boosting: 20
         The number of entries in LUT: 256 (For LBP type features)
         The loss function type: logit
         The LUT selection type: independent
@@ -101,13 +101,25 @@ class Boost:
         Inputs:
         trainer_type: The type of trainer for boosting.
                       Type: string
-                      Values: LutTrainer or StumpTrainer    
+                      Values: LutTrainer or StumpTrainer
+        num_rnds:     The number of rounds of boosting
+                      Type: int
+                      Values: 20 (Default)    
+        num_entries:  The number of entries for the lookup table
+                      Type: int
+                      Values: 256 (Default)
+        loss_type:    The loss function to be minimized
+                      Type: string
+                      Values: 'log' or 'exp'
+        lut_selection: The selection type for the LUT-based trainers
+                       Type: string
+                       Values: 'indep' or 'shared'
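+
+        Example (illustrative only; the keyword values below are arbitrary choices,
+        not recommended settings):
+
+            booster = Boost('LutTrainer', num_rnds = 10, num_entries = 256,
+                            loss_type = 'exp', lut_selection = 'shared')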
                    
         """
-        self.num_rnds = 100
-        self.num_entries = 256
-        self.loss_type = 'log' 
-        self.lut_selection = 'indep'
+        self.num_rnds = num_rnds
+        self.num_entries = num_entries
+        self.loss_type = loss_type
+        self.lut_selection = lut_selection
         self.weak_trainer_type = trainer_type
 							
 	
diff --git a/xbob/boosting/core/losses.py b/xbob/boosting/core/losses.py
index b1936f35f87e8e0198b22868e5f929bcf5e6470c..f73d95212df959d97c82dff35ecbb555d60723cd 100644
--- a/xbob/boosting/core/losses.py
+++ b/xbob/boosting/core/losses.py
@@ -1,6 +1,5 @@
 import numpy
 import math
-from scipy import optimize
 
 
 
@@ -42,14 +41,15 @@ class ExpLossFunction():
         return loss_grad
         #return loss_grad
 
-    def loss_sum(self, *args):
+    def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
         This function is given as the input for the lbfgs optimization function. 
 
         Inputs: 
-        x: The current value of the alpha.
+        alpha: The current value of the alpha.
            type: float
 
         targets: The targets for the samples
@@ -65,23 +65,24 @@ class ExpLossFunction():
         Return:
         sum_loss: The sum of the loss values for the current value of the alpha    
                  type: float"""
-
-        # initialize the values
-        x = args[0]
-        targets = args[1]
-        pred_scores = args[2]
-        weak_scores = args[3]
-
+
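+        # Illustrative sketch of the intended use (the actual line search is carried
+        # out by the caller; `loss` stands for an ExpLossFunction instance):
+        #   scipy.optimize.fmin_l_bfgs_b(loss.loss_sum, 0.0, fprime = loss.loss_grad_sum,
+        #                                args = (targets, prediction_scores, weak_scores))
+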
         # compute the scores and loss for the current alpha
-        curr_scores_x = pred_scores + x*weak_scores
-        loss = self.update_loss(targets, curr_scores_x)
+        curr_scores = prediction_scores + alpha * weak_scores
+        loss = self.update_loss(targets, curr_scores)
 
         # compute the sum of the loss
         sum_loss = numpy.sum(loss,0)
         return sum_loss
         
 
-    def loss_grad_sum(self, *args):
+    def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -104,15 +105,18 @@ class ExpLossFunction():
         Return:
         sum_loss: The sum of the loss gradient values for the current value of the alpha    
                  type: float"""
-        # initialize the values
-        x = args[0]
-        targets = args[1]
-        pred_scores = args[2]
-        weak_scores = args[3]
 
         # compute the loss gradient for the updated score
-        curr_scores_x = pred_scores + x*weak_scores
-        loss_grad = self.update_loss_grad(targets, curr_scores_x)
+        curr_scores = prediction_scores + alpha * weak_scores
+        loss_grad = self.update_loss_grad(targets, curr_scores)
 
         # take the sum of the loss gradient values
         sum_grad = numpy.sum(loss_grad*weak_scores, 0)
@@ -149,7 +153,7 @@ class LogLossFunction():
                  type: numpy array (# number of samples x #number of outputs)
         
         scores: The current prediction scores for the samples.
-                type: numpy array (# number of samples) 
+                type: numpy array (# number of samples x # number of outputs) 
 
         Return:
         gradient: The loss gradient values for the samples     """
@@ -157,7 +161,7 @@ class LogLossFunction():
         denom = 1/(1 + e)
         return - targets* e* denom
 
-    def loss_sum(self, *args):
+    def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the logit loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -170,10 +174,10 @@ class LogLossFunction():
         targets: The targets for the samples
                  type: numpy array (# number of samples x #number of outputs)
         
-        pred_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
+        prediction_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
                  type: numpy array (# number of samples) 
 
-        curr_scores: The prediction scores of the samples for the current round of the boosting.
+        weak_scores: The prediction scores of the samples for the current round of the boosting.
                  type: numpy array (# number of samples) 
 
 
@@ -181,17 +185,18 @@ class LogLossFunction():
         sum_loss: The sum of the loss values for the current value of the alpha    
                  type: float"""
 
+        """
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
-        curr_scores_x = pred_scores + x*weak_scores
-        loss = self.update_loss(targets, curr_scores_x)
-        sum_l = numpy.sum(loss,0)
-        return sum_l
+        """
+        curr_scores = prediction_scores + alpha*weak_scores
+        loss = self.update_loss(targets, curr_scores)
+        sum_loss = numpy.sum(loss,0)
+        return sum_loss
         
-    #@abstractmethod
-    def loss_grad_sum(self, *args):
+    def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the logit loss gradient which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -214,14 +219,16 @@ class LogLossFunction():
         Return:
         sum_loss: The sum of the loss gradient values for the current value of the alpha    
                  type: float"""
+        """
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
-        curr_scores_x = pred_scores + x*weak_scores
-        loss_grad = self.update_loss_grad( targets, curr_scores_x)
-        sum_g = numpy.sum(loss_grad*weak_scores, 0)
-        return sum_g
+        """
+        curr_scores = prediction_scores + alpha*weak_scores
+        loss_grad = self.update_loss_grad( targets, curr_scores)
+        sum_grad = numpy.sum(loss_grad*weak_scores, 0)
+        return sum_grad
 
 
     """def loss_sum(self, targets, scores):
diff --git a/xbob/boosting/core/trainers.py b/xbob/boosting/core/trainers.py
index 7c6bb2e893b51c49afd9f28ccb59b56080d1e872..2599e5d43aa7d39305f85bc7ee24c0a4e12b8a52 100644
--- a/xbob/boosting/core/trainers.py
+++ b/xbob/boosting/core/trainers.py
@@ -107,15 +107,16 @@ class StumpTrainer():
 
         # Find the corresponding threshold value
         threshold = 0.0
-        if(opt_id == num_samp-1):
+        if (opt_id == num_samp-1):
             threshold = fea[opt_id]
         else:
             threshold = (float(fea[opt_id]) + float(fea[opt_id+1]))*0.5
+
         # Find the polarity or the directionality of the current trainer
         if(gain_max == gain[opt_id]):
             polarity = -1
         else:
-            polarity = 1
+            polarity =  1
 
         return polarity, threshold, gain_max
 
@@ -131,7 +132,7 @@ class StumpTrainer():
         scores are either +1 or -1.
         Input: self: a weak stump trainer
                test_features: A matrix of the test features of dimension. 
-                              Num. of Test images x Num of features
+                              Num. of Test images x Num. of features
         Return: weak_scores: classification scores of the test features use the weak classifier self
                              Array of dimension =  Num. of samples 
         """
@@ -157,7 +158,7 @@ class LutTrainer():
  
 
     
-    def __init__(self, num_entries, selection_type, num_op):
+    def __init__(self, num_entries, selection_type, num_outputs):
         """ Function to initialize the parameters.
 
         Function to initialize the weak LutTrainer. Each weak Luttrainer is specified with a 
@@ -175,14 +176,14 @@ class LutTrainer():
                         and a single feature is used for all the outputs. See Cosmin's thesis for more details.
                        Type: string {'indep', 'shared'}
 
-        num_op: The number of outputs for the classification task. 
+        num_outputs: The number of outputs for the classification task. 
                     type: Integer
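+
+        Example (illustrative; a single-output LUT trainer over features that take
+        256 distinct values):
+
+            trainer = LutTrainer(256, 'indep', 1)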
 
         """
         self.num_entries = num_entries
-        self.luts = numpy.ones((num_entries, num_op), dtype = numpy.int)
+        self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
         self.selection_type = selection_type
-        self.selected_indices = numpy.zeros([num_op,1], 'int16')
+        self.selected_indices = numpy.zeros([num_outputs,1], 'int16')
     
 
 
@@ -208,12 +209,13 @@ class LutTrainer():
         """
 
         # Initializations
-        num_op = loss_grad.shape[1]
-        fea_grad = numpy.zeros([self.num_entries,num_op])
+        num_outputs = loss_grad.shape[1]
+        fea_grad = numpy.zeros([self.num_entries,num_outputs])
 
         # Compute the sum of the gradient based on the feature values or the loss associated with each 
         # feature index
-        sum_loss = self.compute_fgrad(loss_grad, fea)
+        sum_loss = self.compute_grad_sum(loss_grad, fea)
 
 
         # Select the most discriminative index (or indices) for classification which minimizes the loss
@@ -226,9 +228,10 @@ class LutTrainer():
 
             selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]
 
-            for oi in range(num_op):
+            for oi in range(num_outputs):
                 curr_id = sum_loss[:,oi].argmin()
-                fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,curr_id])
+                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,curr_id])
                 self.selected_indices[oi] = curr_id
 
 
@@ -239,10 +242,10 @@ class LutTrainer():
 
             accum_loss = numpy.sum(sum_loss,1)
             selected_findex = accum_loss.argmin()
-            self.selected_indices = selected_findex*numpy.ones([num_op,1],'int16')
+            self.selected_indices = selected_findex*numpy.ones([num_outputs,1],'int16')
 
-            for oi in range(num_op):
-                fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,selected_findex])
+            for oi in range(num_outputs):
+                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,selected_findex])
      
         # Assign the values to LookUp Table
         self.luts[fea_grad <= 0.0] = -1
@@ -252,7 +255,7 @@ class LutTrainer():
 
 
      
-    def compute_fgrad(self, loss_grad, fea):
+    def compute_grad_sum(self, loss_grad, fea):
         """ The function to compute the loss gradient for all the features.
 
         The function computes the loss for whole set of features. The loss refers to the sum of the loss gradient
@@ -269,13 +272,13 @@ class LutTrainer():
         # initialize values
         num_fea = len(fea[0])
         num_samp = len(fea)
-        num_op = len(loss_grad[0])
-        sum_loss = numpy.zeros([num_fea,num_op])
+        num_outputs = len(loss_grad[0])
+        sum_loss = numpy.zeros([num_fea,num_outputs])
        
         # Compute the loss for each feature
         for fi in range(num_fea):
-            for oi in range(num_op):
-                hist_grad = self.compute_hgrad(loss_grad[:,oi],fea[:,fi])
+            for oi in range(num_outputs):
+                hist_grad = self.compute_grad_hist(loss_grad[:,oi],fea[:,fi])
                 sum_loss[fi,oi] = - sum(abs(hist_grad))
 
 
@@ -285,7 +288,7 @@ class LutTrainer():
 
 
 
-    def compute_hgrad(self, loss_grado,fval):
+    def compute_grad_hist(self, loss_grado,fval):
         """ The function computes the loss for a single feature.
 
         Function computes sum of the loss gradient that have same feature values. 
@@ -318,9 +321,9 @@ class LutTrainer():
         return: 
         weak_scores: The classification scores of the features based on current weak classifier"""
         num_samp = len(fset)
-        num_op = len(self.luts[0])
-        weak_scores = numpy.zeros([num_samp,num_op])
-        for oi in range(num_op):
+        num_outputs = len(self.luts[0])
+        weak_scores = numpy.zeros([num_samp,num_outputs])
+        for oi in range(num_outputs):
             a = self.luts[fset[:,self.selected_indices[oi]],oi]
             weak_scores[:,oi] = numpy.transpose(self.luts[fset[:,self.selected_indices[oi]],oi])
         return weak_scores
diff --git a/xbob/boosting/features/local_feature.py b/xbob/boosting/features/local_feature.py
index 7057b33f36d9719534429f9155ec33a066ba2530..c451c3cb495d4422cbe9bf451c093a9029beac68 100644
--- a/xbob/boosting/features/local_feature.py
+++ b/xbob/boosting/features/local_feature.py
@@ -137,13 +137,13 @@ class lbp_feature():
         """
 
         feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
-        num_neighbour = 8
+        num_neighbours = 8
 
         """ Compute the feature map for the tLBP features. """
         for ind in range(num_neighbours):
             
             """The comparison of pixel is done with the adjacent neighbours."""
-            comparing_img = block_sum[coord[(ind+1)%num_neighbour][0]:coord[(ind+1)%num_neighbour][0] + feature_map_dimy,coord[(ind+1)%num_neighbour][1]:coord[(ind+1)%num_neighbour][1] + feature_map_dimx]
+            comparing_img = block_sum[coord[(ind+1)%num_neighbours][0]:coord[(ind+1)%num_neighbours][0] + feature_map_dimy,coord[(ind+1)%num_neighbours][1]:coord[(ind+1)%num_neighbours][1] + feature_map_dimx]
             
             """ Compare the neighbours and increment the feature map. """
             feature_map = feature_map + (2**ind)*(block_sum[coord[ind][0]:coord[ind][0] + feature_map_dimy,coord[ind][1]:coord[ind][1] + feature_map_dimx]>= comparing_img)
diff --git a/xbob/boosting/tests/data1.hdf5 b/xbob/boosting/tests/data1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..68c7b19ea89c102deae65a07ccf3a5087998a52c
Binary files /dev/null and b/xbob/boosting/tests/data1.hdf5 differ
diff --git a/xbob/boosting/tests/test_loss_exp.py b/xbob/boosting/tests/test_loss_exp.py
index 9eba786d8d590dc3071195f8618b9e8aae505a06..420f5120e6b29632f286d7dd01c106384d43533e 100644
--- a/xbob/boosting/tests/test_loss_exp.py
+++ b/xbob/boosting/tests/test_loss_exp.py
@@ -4,43 +4,86 @@ import xbob.boosting
 import numpy
 
 class TestExpLossFunctions(unittest.TestCase):
-    """Perform test on loss function """
+    """Perform test on exponential loss function """
 
-    def test_exp_loss(self):
+    def test_exp_positive_target(self):
 
-        exp_ = xbob.boosting.core.losses.ExpLossFunction()
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
         target = 1
-        score = numpy.random.rand()
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
         
         # check the loss values
-        l1 = exp_.update_loss(target, score) 
-        val1 = numpy.exp(- target * score)
-        self.assertEqual(l1,val1)
+        loss_value = loss_function.update_loss(target, score) 
+        val = numpy.exp(- target * score)
+        self.assertEqual(loss_value,val)
+        self.assertTrue(loss_value >= 0)
 
         # Check loss gradient
-        l2 = exp_.update_loss_grad( target, score)
+        loss_grad = loss_function.update_loss_grad( target, score)
+
+        temp = numpy.exp(-target * score)
+        val2 = -target * temp
+        self.assertEqual(loss_grad,val2)
+
+        # Check loss sum
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertEqual(val3, loss_sum_val)
+
+        # Check the gradient sum
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp
+        val4 = numpy.sum(grad * weak_scores,0)
+
+        self.assertEqual(val4, grad_sum_val)
+
+    def test_exp_negative_target(self):
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        target = -1
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(target, score) 
+        val = numpy.exp(- target * score)
+        self.assertEqual(loss_value,val)
+        self.assertTrue(loss_value >= 0)
+
+        # Check loss gradient
+        loss_grad = loss_function.update_loss_grad( target, score)
+
         temp = numpy.exp(-target * score)
         val2 = -target * temp
-        self.assertEqual(l2,val2)
+        self.assertEqual(loss_grad,val2)
 
         # Check loss sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
-        val3 = sum(numpy.exp(-target * curr_scores))
-        self.assertEqual(val3, l3)
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertEqual(val3, loss_sum_val)
 
         # Check the gradient sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l4 = exp_.loss_grad_sum(x, target, prev_scores, weak_scores)
-        temp = numpy.exp(-target * curr_scores)
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
         grad = -target * temp
         val4 = numpy.sum(grad * weak_scores,0)
-        self.assertEqual(val4, l4)
+
+        self.assertEqual(val4, grad_sum_val)
             
    
diff --git a/xbob/boosting/tests/test_loss_exp_multivariate.py b/xbob/boosting/tests/test_loss_exp_multivariate.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e974870e2fe289714e4086c4370fdc5b5cd0609
--- /dev/null
+++ b/xbob/boosting/tests/test_loss_exp_multivariate.py
@@ -0,0 +1,83 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestExpLossMulti(unittest.TestCase):
+
+    """ Test the loss function using multivariate data  """
+
+    def test_exp_multivariate_dimensions(self):
+
+        """ Check the exponential loss function output dimensions for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss dimensions
+        loss_value = loss_function.update_loss(targets, score) 
+        self.assertTrue(loss_value.shape[0] == num_samples)
+        self.assertTrue(loss_value.shape[1] == num_dimension)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        self.assertTrue(grad_value.shape[0] == num_samples)
+        self.assertTrue(grad_value.shape[1] == num_dimension)
+
+        # Check loss sum
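+        # loss_sum sums over the sample axis (axis 0), leaving one value per output dimension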
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(loss_sum.shape[0] == num_dimension)
+
+
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(grad_sum.shape[0] == num_dimension)
+
+
+
+    def test_exp_multivariate(self):
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(targets, score) 
+        val1 = numpy.exp(- targets * score)
+        self.assertTrue((loss_value == val1).all())
+
+        # Check loss gradient
+        loss_grad = loss_function.update_loss_grad( targets, score)
+
+        temp = numpy.exp(-targets * score)
+        val2 = -targets * temp
+        self.assertTrue((loss_grad == val2).all())
+
+        # Check loss sum
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertTrue((val3 == loss_sum_val).all())
+
+        # Check the gradient sum
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp
+        val4 = numpy.sum(grad * weak_scores,0)
+
+        self.assertTrue((val4 == grad_sum_val).all())
diff --git a/xbob/boosting/tests/test_loss_log.py b/xbob/boosting/tests/test_loss_log.py
index b334def66292d64faed2c5cff36d29b67372ce89..ec1cf28996eaf9cdbcad8e6c832ad39e4d2a6be2 100644
--- a/xbob/boosting/tests/test_loss_log.py
+++ b/xbob/boosting/tests/test_loss_log.py
@@ -6,42 +6,81 @@ import numpy
 class TestLogLossFunctions(unittest.TestCase):
     """Perform test on loss function """
             
-    def test_log_loss(self):
+    def test_log_positive_target(self):
+        """ Check the loss function value for positive targets """
 
-        exp_ = xbob.boosting.core.losses.LogLossFunction()
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
         target = 1
-        score = numpy.random.rand()
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
         
         # check the loss values
-        l1 = exp_.update_loss(target, score) 
+        loss_value = loss_function.update_loss(target, score) 
         val1 = numpy.log(1 + numpy.exp(- target * score))
-        self.assertEqual(l1,val1)
+        self.assertEqual(loss_value,val1)
 
         # Check loss gradient
-        l2 = exp_.update_loss_grad( target, score)
+        grad_value = loss_function.update_loss_grad( target, score)
         temp = numpy.exp(-target * score)
         val2 = -(target * temp* (1/(1 + temp)) )
-        self.assertEqual(l2,val2)
+        self.assertEqual(grad_value,val2)
 
         # Check loss sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
-        val3 = sum(numpy.log(1 + numpy.exp(-target * curr_scores)))
-        self.assertEqual(val3, l3)
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertEqual(val3, loss_sum)
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = numpy.sum(grad * weak_scores)
+        self.assertEqual(val4, grad_sum)
+
+    def test_log_negative_target(self):
+
+        """ Check the loss function value for negative targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        target = -1
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(target, score) 
+        val1 = numpy.log(1 + numpy.exp(- target * score))
+        self.assertEqual(loss_value,val1)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( target, score)
+        temp = numpy.exp(-target * score)
+        val2 = -(target * temp* (1/(1 + temp)) )
+        self.assertEqual(grad_value,val2)
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertEqual(val3, loss_sum)
 
         # Check the gradient sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_grad_sum(x, target, prev_scores, weak_scores)
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
         temp = numpy.exp(-target * curr_scores)
-        grad = -target * temp *(1/ (1 + temp))
-        val3 = numpy.sum(grad * weak_scores)
-        self.assertEqual(val3, l3)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = numpy.sum(grad * weak_scores)
+        self.assertEqual(val4, grad_sum)
+
 
              
 
diff --git a/xbob/boosting/tests/test_loss_log_multivariate.py b/xbob/boosting/tests/test_loss_log_multivariate.py
new file mode 100644
index 0000000000000000000000000000000000000000..74fec89908f90572ba48cd89c6625b2790aaebdd
--- /dev/null
+++ b/xbob/boosting/tests/test_loss_log_multivariate.py
@@ -0,0 +1,84 @@
+
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestLogLossMulti(unittest.TestCase):
+
+    """ Test the loss function using multivariate data  """
+
+    def test_log_multivariate_dimensions(self):
+
+        """ Check the loss function values for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss dimensions
+        loss_value = loss_function.update_loss(targets, score) 
+        self.assertTrue(loss_value.shape[0] == num_samples)
+        self.assertTrue(loss_value.shape[1] == num_dimension)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        self.assertTrue(grad_value.shape[0] == num_samples)
+        self.assertTrue(grad_value.shape[1] == num_dimension)
+
+        # Check loss sum
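+        # loss_sum sums over the sample axis (axis 0), leaving one value per output dimension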
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(loss_sum.shape[0] == num_dimension)
+
+
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(grad_sum.shape[0] == num_dimension)
+
+
+
+    def test_log_multivariate(self):
+
+        """ Check the loss function values for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(targets, score) 
+        val1 = numpy.log(1 + numpy.exp(- targets * score))
+        self.assertTrue((loss_value == val1).all())
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        temp = numpy.exp(-targets * score)
+        val2 = -(targets * temp* (1/(1 + temp)) )
+        self.assertTrue((grad_value == val2).all())
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertTrue((val3 == loss_sum).all())
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = sum(grad * weak_scores)
+        self.assertTrue((val4 == grad_sum).all())
+
+
+
diff --git a/xbob/boosting/tests/test_trainer_lut.py b/xbob/boosting/tests/test_trainer_lut.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a4c519fb86da3b5f43abbab64536761252e379f
--- /dev/null
+++ b/xbob/boosting/tests/test_trainer_lut.py
@@ -0,0 +1,41 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+import bob
+
+
+class TestLutTrainer(unittest.TestCase):
+    """Class to test the LUT trainer """
+
+    def test_hist_grad(self):
+
+        num_feature = 100
+        range_feature = 10
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature,'indep', 1)
+
+        features = numpy.array([2, 8, 4, 7, 1, 0, 6, 3, 6, 1, 7, 0, 6, 8, 3, 6, 8, 2, 6, 9, 4, 6,
+                                2, 0, 4, 9, 7, 4, 1, 3, 9, 9, 3, 3, 5, 2, 4, 0, 1, 3, 8, 8, 6, 7,
+                                3, 0, 6, 7, 4, 0, 6, 4, 1, 2, 4, 2, 1, 9, 3, 5, 5, 8, 8, 4, 7, 4,
+                                1, 5, 1, 8, 5, 4, 2, 4, 5, 3, 0, 0, 6, 2, 4, 7, 1, 4, 1, 4, 4, 4,
+                                1, 4, 7, 5, 6, 9, 7, 5, 3, 3, 6, 6])
+
+        loss_grad = numpy.ones(100)
+
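+        # With unit loss gradients, summing the gradient per feature value reduces to
+        # counting occurrences, i.e. the histogram of the feature values.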
+        hist_value, bins = numpy.histogram(features,range(range_feature +1))
+        sum_grad = trainer.compute_grad_hist(loss_grad,features)
+        self.assertEqual(sum_grad.shape[0],range_feature)
+        self.assertTrue((sum_grad == hist_value).all())
diff --git a/xbob/boosting/tests/test_trainer_stump.py b/xbob/boosting/tests/test_trainer_stump.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7ea5933b47b0ef005daead02d8de417882f4de3
--- /dev/null
+++ b/xbob/boosting/tests/test_trainer_stump.py
@@ -0,0 +1,217 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestStumpTrainer(unittest.TestCase):
+    """Perform test on stump weak trainer"""
+
+    def test_stump_limits(self):
+        # test the stump trainer and check the basic limits on stump parameters
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+
+        n_samples = 4
+        dim = 5
+        x_train1 = rand_matrix + 4
+        x_train2 = rand_matrix - 4
+        x_train = numpy.vstack((x_train1, x_train2))
+        y_train = numpy.hstack((numpy.ones(n_samples),-numpy.ones(n_samples)))
+
+        scores = numpy.zeros(2*n_samples)
+        t = y_train*scores
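+        # the gradient of the exponential loss at the all-zero initial scores (reduces to -y_train)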
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertTrue(stump.threshold <= numpy.max(x_train))
+        self.assertTrue(stump.threshold >= numpy.min(x_train))
+        self.assertTrue(stump.selected_indices >= 0)
+        self.assertTrue(stump.selected_indices < dim)
+
+
+
+    def test_stump_index(self):
+        # test the stump trainer if the correct feature indices are selected
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+
+       
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        delta = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] - delta
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertEqual(stump.selected_indices, selected_index)
+
+    def test_stump_polarity(self):
+        # test the stump trainer if the polarity is reversed with change in targets sign
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        delta = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] - delta
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        t = y_train*scores
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertEqual(stump.selected_indices, selected_index)
+
+        polarity = stump.polarity
+
+        # test the check on polarity when the labels are reversed
+        y_train = - y_train
+        t = y_train*scores
+        loss = -y_train*(numpy.exp(y_train*scores))
+        
+        stump = trainer.compute_weak_trainer(x_train,loss)
+        polarity_rev = stump.polarity 
+        self.assertEqual(polarity, -polarity_rev)
+
+    def test_threshold(self):
+        # test to check the threshold value of the weak trainer
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        delta1 = 4
+        delta2 = 2
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta1
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] + delta2
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertTrue(stump.threshold > delta2)
+        self.assertTrue(stump.threshold < delta1)
+
+
+    def test_compute_thresh(self):
+        # Test the threshold for a single feature 
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 1                          
+        # The value of the feature for class 2   
+        fea2 = 10
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1,fea1,fea1,fea1,fea2,fea2,fea2,fea2,fea2])
+        label = numpy.array([1,1,1,1,1,-1, -1, -1,-1,-1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        threshold = float(fea1 + fea2)/2
+        self.assertEqual(trained_threshold, threshold)
+
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+    def test_compute_thresh_rearrange(self):
+        # test the threshold for single feature using a different permutation
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 1                          
+        # The value of the feature for class 2   
+        fea2 = 10
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1, fea2, fea1, fea2, fea1, fea2, fea1, fea2, fea2])
+        label =     numpy.array([ 1,    1,   -1,   1,    -1,    1,   -1,   1,    -1,  -1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        threshold = float(fea1 + fea2)/2
+        self.assertEqual(trained_threshold, threshold)
+
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+    def test_compute_polarity(self):
+        # test the threshold for single feature using a different permutation
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 10                          
+        # The value of the feature for class 2   
+        fea2 = 1
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1, fea2, fea1, fea2, fea1, fea2, fea1, fea2, fea2])
+        label =     numpy.array([ 1,    1,   -1,   1,    -1,    1,   -1,   1,    -1,  -1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+