From 72713d16c15c19d711740170bd1f09331017a7b0 Mon Sep 17 00:00:00 2001
From: Rakesh Mehta <rakesh.mehta@idiap.ch>
Date: Mon, 26 Aug 2013 18:58:11 +0200
Subject: [PATCH] LBP test added and comments in boosting and trainer

---
 xbob/boosting/core/boosting.py            |   7 +-
 xbob/boosting/core/gaussiantrainer.py     |  61 +++++++
 xbob/boosting/core/trainers.py            | 194 +++++++++++++++++++++-
 xbob/boosting/features/__init__.py        |   1 +
 xbob/boosting/features/local_feature.py   |  29 ++--
 xbob/boosting/scripts/mnist_multi.py      |   2 +-
 xbob/boosting/tests/test_dlbp_features.py |  59 +++++++
 xbob/boosting/tests/test_lbp_features.py  | 104 ++++++++++++
 xbob/boosting/tests/test_mlpb_features.py |  85 ++++++++++
 xbob/boosting/tests/test_tlpb_features.py |  81 +++++++++
 xbob/boosting/tests/test_trainer_stump.py |   2 +-
 11 files changed, 610 insertions(+), 15 deletions(-)
 create mode 100644 xbob/boosting/core/gaussiantrainer.py
 create mode 100644 xbob/boosting/tests/test_dlbp_features.py
 create mode 100644 xbob/boosting/tests/test_lbp_features.py
 create mode 100644 xbob/boosting/tests/test_mlpb_features.py
 create mode 100644 xbob/boosting/tests/test_tlpb_features.py

diff --git a/xbob/boosting/core/boosting.py b/xbob/boosting/core/boosting.py
index 3f91edc..1d48c01 100644
--- a/xbob/boosting/core/boosting.py
+++ b/xbob/boosting/core/boosting.py
@@ -162,10 +162,12 @@ class Boost:
 	
 
         # For each round of boosting initialize a new weak trainer
-        if(self.weak_trainer_type == 'LutTrainer'):
+        if self.weak_trainer_type == 'LutTrainer':
             weak_trainer = trainers.LutTrainer(self.num_entries, self.lut_selection, num_op )
-        elif (self.weak_trainer_type == 'StumpTrainer'):
+        elif self.weak_trainer_type == 'StumpTrainer':
             weak_trainer = trainers.StumpTrainer()
+        elif self.weak_trainer_type == 'GaussTrainer':
+            weak_trainer = trainers.GaussianTrainer(3)
 
 
         # Start boosting iterations for num_rnds rounds
@@ -188,6 +190,7 @@ class Boost:
             # Perform lbfgs minimization and compute the scale (alpha_r) for current weak trainer
             lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_func.loss_sum, init_point, fprime = loss_func.loss_grad_sum, args = (targets, pred_scores, curr_pred_scores)) 
             alpha = lbfgs_struct[0]
+            # print alpha  # debug output; keep disabled
 
 
             # Update the prediction score after adding the score from the current weak classifier f(x) = f(x) + alpha_r*g_r
diff --git a/xbob/boosting/core/gaussiantrainer.py b/xbob/boosting/core/gaussiantrainer.py
new file mode 100644
index 0000000..851fef0
--- /dev/null
+++ b/xbob/boosting/core/gaussiantrainer.py
@@ -0,0 +1,61 @@
+import numpy
+class GaussianMachine():
+
+    def __init__(self, num_classes):
+        self.means = numpy.zeros(num_classes)
+        self.variance = numpy.zeros(num_classes)
+        self.selected_index = 0
+
+
+    def get_weak_scores(self, features):
+        num_classes = self.means.shape[0]
+        scores = numpy.zeros([features.shape[0], num_classes])
+
+        for i in range(num_classes):
+            mean_i = self.means[i]
+            variance_i = self.variance[i]
+            feature_i = features[:,self.selected_index]
+            denom = numpy.sqrt(2*numpy.pi*variance_i)
+            scores[:,i] = numpy.exp(-(((feature_i - mean_i)**2)/(2*variance_i)))/denom
+
+        return scores
+             
+class GaussianTrainer():
+
+    def __init__(self, num_classes):
+        self.num_classes = num_classes
+
+
+    def compute_weak_trainer(self, features, loss_grad):
+
+        num_features = features.shape[1]
+        means = numpy.zeros([num_features,self.num_classes])
+        variances = numpy.zeros([num_features,self.num_classes])
+        summed_loss = numpy.zeros(num_features)
+        gauss_machine = GaussianMachine(self.num_classes)
+
+        for feature_index in range(num_features):
+            single_feature = features[:,feature_index]
+            means[feature_index,:], variances[feature_index,:], summed_loss[feature_index] = self.compute_current_loss(single_feature, loss_grad)
+
+        gauss_machine.selected_index = numpy.argmin(summed_loss)
+        gauss_machine.means = means[gauss_machine.selected_index,:]
+        gauss_machine.variance = variances[gauss_machine.selected_index,:]
+        return gauss_machine
+        
+
+    def compute_current_loss(self, feature, loss_grad):
+        num_samples = feature.shape[0]
+        scores = numpy.zeros([num_samples, self.num_classes])
+        mean = numpy.zeros(self.num_classes)
+        variance = numpy.zeros(self.num_classes)
+        for class_index in range(self.num_classes):
+            samples_i = feature[loss_grad[:,class_index] < 0]
+            mean[class_index] = numpy.mean(samples_i)
+            variance[class_index] = numpy.std(samples_i)**2
+            denom = numpy.sqrt(2*numpy.pi*variance[class_index])
+            scores[:,class_index] = numpy.exp(-(((feature - mean[class_index])**2)/(2*variance[class_index])))/denom
+
+        scores_sum = numpy.sum(scores)
+        return mean, variance, scores_sum
+
diff --git a/xbob/boosting/core/trainers.py b/xbob/boosting/core/trainers.py
index 0446277..8e6638b 100644
--- a/xbob/boosting/core/trainers.py
+++ b/xbob/boosting/core/trainers.py
@@ -252,7 +252,7 @@ class LutTrainer():
              type: integer numpy array (#number of samples x number of features)
 
         loss_grad: The loss gradient values for the training samples
-              type: numpy array (#number of samples)
+              type: numpy array (#number of samples x #number of outputs)
 
         Return:
         self: a trained LUT trainer
@@ -360,7 +360,199 @@ class LutTrainer():
         return hist_grad
 
 
+"""
+class GaussianMachine():
+
+    def __init__(self, num_classes):
+        self.means = numpy.zeros(num_classes)
+        self.variance = numpy.zeros(num_classes)
+        self.selected_index = 0
+
+
+    def get_weak_scores(self, features):
+        num_classes = self.means.shape[0]
+        num_features = features.shape[0]
+        scores = numpy.zeros([num_features,num_classes])
+        
+
+        for i in range(num_classes):
+            mean_i = self.means[i]
+            variance_i = self.variance[i]
+            feature_i = features[:,self.selected_index]
+            denom = numpy.sqrt(2*numpy.pi*variance_i) 
+            temp = ((feature_i - mean_i)**2)/(2*variance_i)
+            numerator =  numpy.exp(-temp)
+            
+            scores[:,i] = numerator/denom
+
+
+        return scores
+             
+class GaussianTrainer():
+
+    def __init__(self, num_classes):
+        self.num_classes = num_classes
+
+
+    def compute_weak_trainer(self, features, loss_grad):
+
+        num_features = features.shape[1]
+        means = numpy.zeros([num_features,self.num_classes])
+        variances = numpy.zeros([num_features,self.num_classes])
+        summed_loss = numpy.zeros(num_features)
+        gauss_machine = GaussianMachine(self.num_classes)
+
+        for feature_index in range(num_features):
+            single_feature = features[:,feature_index]
+            means[feature_index,:], variances[feature_index,:], summed_loss[feature_index] = self.compute_current_loss(single_feature,  loss_grad)
+        selected_index = numpy.argmin(summed_loss)
+        gauss_machine.selected_index = selected_index
+        gauss_machine.means = means[selected_index,:]
+        gauss_machine.variance = variances[selected_index,:]
+        return gauss_machine
+        
+        
+
+    def compute_current_loss(self, feature, loss_grad):
+        num_samples = feature.shape[0]
+        mean = numpy.zeros([self.num_classes])
+        variance = numpy.zeros(self.num_classes)
+        scores = numpy.zeros([num_samples, self.num_classes])
+
+        for class_index in range(self.num_classes):
+            samples_i = feature[loss_grad[:,class_index] < 0]
+            mean[class_index] = numpy.mean(samples_i)
+            variance[class_index] = numpy.std(samples_i)**2
+            denom = numpy.sqrt(2*numpy.pi*variance[class_index])
+            scores[:,class_index] = numpy.exp(-(((feature - mean[class_index])**2)/(2*variance[class_index])))/denom
+            
+
+        # print mean
+        scores_sum = numpy.sum(scores)
+        return mean, variance, scores_sum
+
+"""
+"""
+class BayesMachine():
+    
+    def __init__(self, num_outputs, num_entries):
+        
+        self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
+        self.selected_indices = numpy.zeros([num_outputs,1], 'int16')
+
+
+
+    def get_weak_scores(self, features):
+	
+
+        # Initialize
+        num_samp = len(features)
+        num_outputs = len(self.luts[0])
+        weak_scores = numpy.zeros([num_samp,num_outputs])
+
+        # Compute weak scores
+        for output_index in range(num_outputs):
+            weak_scores[:,output_index] = numpy.transpose(self.luts[features[:,self.selected_indices[output_index]],output_index])
+        return weak_scores
+
+
+class BayesTrainer():
+    
+ 
+
+    
+    def __init__(self, num_entries, num_outputs):
+
+        self.num_entries = num_entries
+        self.num_outputs = num_outputs
+        self.selection_type = selection_type
+    
+
+
+
+    def compute_weak_trainer(self, fea, loss_grad):
+
+        # Initializations
+        # num_outputs = loss_grad.shape[1]
+        fea_grad = numpy.zeros([self.num_entries, self.num_outputs])
+        lut_machine = LutMachine(self.num_outputs, self.num_entries)
+
+        # Compute the sum of the gradient based on the feature values or the loss associated with each 
+        # feature index
+        sum_loss = self.compute_grad_sum(loss_grad, fea)
 
 
 
+        # Select the most discriminative index (or indices) for classification which minimizes the loss
+        #  and compute the sum of gradient for that index
+       
+        if self.selection_type == 'indep':
+
+            # indep (independent) feature selection is used if all the dimension of output use different feature
+            # each of the selected feature minimize a dimension of the loss function
+
+            selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]
+
+            for output_index in range(self.num_outputs):
+                curr_id = sum_loss[:,output_index].argmin()
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index],fea[:,curr_id])
+                lut_machine.selected_indices[output_index] = curr_id
+
+
+        elif self.selection_type == 'shared':
+
+            # for 'shared' feature selection the loss function is summed over multiple dimensions and 
+            # the feature that minimized this cumulative loss is used for all the outputs
+
+            accum_loss = numpy.sum(sum_loss,1)
+            selected_findex = accum_loss.argmin()
+            lut_machine.selected_indices = selected_findex*numpy.ones([self.num_outputs,1],'int16')
+
+            for output_index in range(self.num_outputs):
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index],fea[:,selected_findex])
+
+     
+        # Assign the values to LookUp Table
+        lut_machine.luts[fea_grad <= 0.0] = -1
+        return lut_machine
+    
+
+
+
+     
+    def compute_grad_sum(self, loss_grad, fea):
+
+
+        # initialize values
+        num_fea = len(fea[0])
+        num_samp = len(fea)
+        sum_loss = numpy.zeros([num_fea,self.num_outputs])
+       
+        # Compute the loss for each feature
+        for feature_index in range(num_fea):
+            for output_index in range(self.num_outputs):
+                for feature_value in range(self.num_entries):
+                    luts[]
+                
+
+
+        return sum_loss
+
+
+
+
+
+    def compute_grad_hist(self, loss_grado,features):
+
+        # initialize the values
+        num_samp = len(features)
+        hist_grad = numpy.zeros([self.num_entries])
+
+        # compute the sum of the gradient
+        for output_index in range(self.num_outputs):
+            for feature_value in range(self.num_entries):
+                num_feature_i = sum(features == feature_value)
+                luts[feature_value,output_index] = sum(loss_grado[features == feature_value])
+        return hist_grad
+"""
 
diff --git a/xbob/boosting/features/__init__.py b/xbob/boosting/features/__init__.py
index e69de29..edc2396 100644
--- a/xbob/boosting/features/__init__.py
+++ b/xbob/boosting/features/__init__.py
@@ -0,0 +1 @@
+import local_feature
diff --git a/xbob/boosting/features/local_feature.py b/xbob/boosting/features/local_feature.py
index c451c3c..11d6da5 100644
--- a/xbob/boosting/features/local_feature.py
+++ b/xbob/boosting/features/local_feature.py
@@ -7,7 +7,7 @@ features. """
 
 
 import numpy
-
+coord = [[0,0],[0,1],[0,2],[1,2],[2,2],[2,1],[2,0],[1,0]]
 
 class lbp_feature():
     """ The class to extract block based LBP type features from the image.
@@ -77,9 +77,7 @@ class lbp_feature():
                 # Compute the sum of the blocks for the current scale
                 block_sum = integral_img[scale_y+1:,scale_x+1:] + integral_img[0:-(scale_y+1),0:-(scale_x+1)] - integral_img[scale_y+1:,0:-(scale_x+1)] - integral_img[0:-(scale_y+1),scale_x+1:]
 
-                # Initialize the size of the final feature map that will be obtained
-                feature_map_dimy = block_sum.shape[0] -2    
-                feature_map_dimx = block_sum.shape[1] -2
+
 
                 # extract the specific feature from the image
                 if self.ftype == 'lbp':
@@ -100,7 +98,7 @@ class lbp_feature():
 
 
 
-    def lbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+    def lbp(self, block_sum):
         """Function to compute the LBP for a image at single scale. 
 
         The LBP features of the given image is computed and the feature map is returned
@@ -113,6 +111,8 @@ class lbp_feature():
         Return:
         feature_map: The lbp feature map
         """
+
+        feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
         num_neighbours = 8
         blk_center = block_sum[1:1+feature_map_dimy,1:1+feature_map_dimx]
         feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
@@ -122,7 +122,7 @@ class lbp_feature():
 
 
 
-    def tlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+    def tlbp(self, block_sum):
         """Function to compute the tLBP for a image at single scale. 
 
         The tLBP features of the given image is computed and the feature map is returned
@@ -135,7 +135,7 @@ class lbp_feature():
         Return:
         feature_map: The lbp feature map
         """
-
+        feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
         feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
         num_neighbours = 8
 
@@ -151,7 +151,7 @@ class lbp_feature():
 
 
 
-    def dlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+    def dlbp(self, block_sum):
         """Function to compute the dLBP for a image at single scale. 
 
         The dLBP features of the given image is computed and the feature map is returned
@@ -165,6 +165,7 @@ class lbp_feature():
         feature_map: The lbp feature map
         """
 
+        feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
         pc = block_sum[1:1+feature_map_dimy,1:1+feature_map_dimx]
         num_neighbours = 8
         feature_map = numpy.zeros([feature_map_dimy,feature_map_dimx])
@@ -175,13 +176,13 @@ class lbp_feature():
             pi4 = block_sum[coord[ind+4][0]:coord[ind+4][0]+ feature_map_dimy,coord[ind+4][1]:coord[ind+4][1] + feature_map_dimx]
 
             """ Compare the neighbours and increment the feature map. """
-            feature_map = feature_map + (2**ind)*((pi-pc)*(pi4 - pc) > 0) + (4**ind)*(abs(pi - pc) >= abs(pi4 -pc))
+            feature_map = feature_map + (2**(2*ind))*((pi-pc)*(pi4 - pc) >= 0) + (2**(2*ind+1))*(abs(pi - pc) >= abs(pi4 -pc))
 
         return feature_map
 
 
 
-    def mlbp(self, coord, feature_map_dimx, feature_map_dimy, block_sum):
+    def mlbp(self, block_sum):
         """Function to compute the mLBP for a image at single scale. 
 
         The mLBP features of the given image is computed and the feature map is returned. 
@@ -195,6 +196,8 @@ class lbp_feature():
         feature_map: The lbp feature map
         """
 
+        feature_map_dimx, feature_map_dimy = self.get_map_dimension(block_sum)
+
         num_neighbours = 8
         pm = numpy.zeros([feature_map_dimy,feature_map_dimx])
 
@@ -231,4 +234,10 @@ class lbp_feature():
         feature_vector = self.get_features(img, scale_y, scale_x)
         return feature_vector.shape[0]
 
+    def get_map_dimension(self, block_sum):
+
+        # Initialize the size of the final feature map that will be obtained
+        feature_map_dimy = block_sum.shape[0] -2    
+        feature_map_dimx = block_sum.shape[1] -2
+        return feature_map_dimx, feature_map_dimy
 
diff --git a/xbob/boosting/scripts/mnist_multi.py b/xbob/boosting/scripts/mnist_multi.py
index 781edb3..9d189cc 100755
--- a/xbob/boosting/scripts/mnist_multi.py
+++ b/xbob/boosting/scripts/mnist_multi.py
@@ -56,7 +56,7 @@ def main():
 
 
     # Initilize the trainer with 'LutTrainer' or 'StumpTrainer'
-    boost_trainer = boosting.Boost('LutTrainer')
+    boost_trainer = boosting.Boost('GaussTrainer')
 
     # Set the parameters for the boosting
     boost_trainer.num_rnds = args.num_rnds     
diff --git a/xbob/boosting/tests/test_dlbp_features.py b/xbob/boosting/tests/test_dlbp_features.py
new file mode 100644
index 0000000..99926b4
--- /dev/null
+++ b/xbob/boosting/tests/test_dlbp_features.py
@@ -0,0 +1,59 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+def get_image_3x3(val):
+    img = numpy.zeros([3,3])
+    img[0,0] = val[0]
+    img[0,1] = val[1]
+    img[0,2] = val[2]
+    img[1,2] = val[3]
+    img[2,2] = val[4]
+    img[2,1] = val[5]
+    img[2,0] = val[6]
+    img[1,0] = val[7]
+    img[1,1] = val[8]
+    return img
+
+
+
+
+class TestdlbpFeatures(unittest.TestCase):
+    """Perform test for dlbp features"""
+
+    """ The neighbourhood is defined as 
+        p0 | p1 | p2
+        p7 | pc | p3
+        p6 | p5 | p4 """
+
+    def test_dlbp_image(self):
+        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('dlbp')
+        img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.dlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+
+        img_values = numpy.array([20,1,1,1,10,10,10,10,5]) 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.dlbp(img)
+        # print returned_lbp
+        self.assertTrue(returned_lbp == 3)
+
+        img_values = numpy.array([1,20,1,1,10,10,10,10,5]) 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.dlbp(img)
+        self.assertTrue(returned_lbp == 12)
+
+        img_values = numpy.array([1,1,20,1,10,10,10,10,5]) 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.dlbp(img)
+        self.assertTrue(returned_lbp == 48)
+
+        img_values = numpy.array([1,1,1,20,10,10,10,10,5]) 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.dlbp(img)
+        self.assertTrue(returned_lbp == 192)
+
+        
diff --git a/xbob/boosting/tests/test_lbp_features.py b/xbob/boosting/tests/test_lbp_features.py
new file mode 100644
index 0000000..a2c47fc
--- /dev/null
+++ b/xbob/boosting/tests/test_lbp_features.py
@@ -0,0 +1,104 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+def get_image_3x3(val):
+    img = numpy.zeros([3,3])
+    img[0,0] = val[0]
+    img[0,1] = val[1]
+    img[0,2] = val[2]
+    img[1,2] = val[3]
+    img[2,2] = val[4]
+    img[2,1] = val[5]
+    img[2,0] = val[6]
+    img[1,0] = val[7]
+    img[1,1] = val[8]
+    return img
+
+
+
+class TestIntegralImage(unittest.TestCase):
+    """Perform test on integral images"""
+
+    def test_integral_image(self):
+        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
+        img = numpy.array([[1,1,1],
+                           [1,1,1],
+                           [1,1,1]])
+
+        int_img = numpy.array([[1,2,3],
+                               [2,4,6],
+                               [3,6,9]])
+
+        returned_integral = feature_extractor.compute_integral_image(img)
+        self.assertEqual(returned_integral.shape[0],int_img.shape[0])
+        self.assertEqual(returned_integral.shape[1],int_img.shape[1])
+        self.assertTrue((returned_integral == int_img).all())
+
+
+
+class TestLbpFeatures(unittest.TestCase):
+    """Perform test on integral images"""
+
+    """ The neighbourhood is defined as 
+        p0 | p1 | p2
+        p7 | pc | p3
+        p6 | p5 | p4 """
+
+    def test_lbp_image(self):
+        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
+        img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([1,1,1,1,1,1,1,1,0])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([0,0,0,0,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 0)
+
+        img_values = numpy.array([1,0,0,0,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 1)
+
+        img_values = numpy.array([0,1,0,0,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 2)
+
+        img_values = numpy.array([0,0,1,0,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 4)
+
+        img_values = numpy.array([0,0,0,1,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 8)
+
+        img_values = numpy.array([0,0,0,0,2,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 16)
+
+        img_values = numpy.array([0,0,0,0,0,4,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 32)
+
+        img_values = numpy.array([0,0,0,0,0,0,5,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 64)
+
+        img_values = numpy.array([0,0,0,0,0,0,0,5,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.lbp(img)
+        self.assertTrue(returned_lbp == 128)
diff --git a/xbob/boosting/tests/test_mlpb_features.py b/xbob/boosting/tests/test_mlpb_features.py
new file mode 100644
index 0000000..be385db
--- /dev/null
+++ b/xbob/boosting/tests/test_mlpb_features.py
@@ -0,0 +1,85 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+def get_image_3x3(val):
+    img = numpy.zeros([3,3])
+    img[0,0] = val[0]
+    img[0,1] = val[1]
+    img[0,2] = val[2]
+    img[1,2] = val[3]
+    img[2,2] = val[4]
+    img[2,1] = val[5]
+    img[2,0] = val[6]
+    img[1,0] = val[7]
+    img[1,1] = val[8]
+    return img
+
+
+
+
+class TesttlbpFeatures(unittest.TestCase):
+    """Perform test for mlbp features"""
+
+    """ The neighbourhood is defined as 
+        p0 | p1 | p2
+        p7 | pc | p3
+        p6 | p5 | p4 """
+
+    def test_tlbp_image(self):
+        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('tlbp')
+        img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([1,1,1,1,1,1,1,1,0])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([0,0,0,0,0,0,0,0,1])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([7,0,1,2,3,4,5,6,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 1)
+
+        img_values = numpy.array([6,7,0,1,2,3,4,5,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 2)
+
+        img_values = numpy.array([5,6,7,0,1,2,3,4,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 4)
+
+        img_values = numpy.array([4,5,6,7,0,1,2,3,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 8)
+
+        img_values = numpy.array([3,4,5,6,7,0,1,2,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 16)
+
+        img_values = numpy.array([2,3,4,5,6,7,0,1,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 32)
+
+        img_values = numpy.array([1,2,3,4,5,6,7,0,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 64)
+
+        img_values = numpy.array([0,1,2,3,4,5,6,7,100])
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.tlbp(img)
+        self.assertTrue(returned_lbp == 128)
diff --git a/xbob/boosting/tests/test_tlpb_features.py b/xbob/boosting/tests/test_tlpb_features.py
new file mode 100644
index 0000000..debd4a2
--- /dev/null
+++ b/xbob/boosting/tests/test_tlpb_features.py
@@ -0,0 +1,81 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+def get_image_3x3(val):
+    img = numpy.zeros([3,3])
+    img[0,0] = val[0]
+    img[0,1] = val[1]
+    img[0,2] = val[2]
+    img[1,2] = val[3]
+    img[2,2] = val[4]
+    img[2,1] = val[5]
+    img[2,0] = val[6]
+    img[1,0] = val[7]
+    img[1,1] = val[8]
+    return img
+
+
+
+
+class TestmlbpFeatures(unittest.TestCase):
+    """Perform test for mlbp features"""
+
+    """ The neighbourhood is defined as 
+        p0 | p1 | p2
+        p7 | pc | p3
+        p6 | p5 | p4 """
+
+    def test_mlbp_image(self):
+        feature_extractor = xbob.boosting.features.local_feature.lbp_feature('mlbp')
+        img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc, mean = 1
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([1,1,1,1,1,1,1,1,0])  # mean = 1
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([0,0,0,0,0,0,0,0,1])  # mean = 0
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 255)
+
+        img_values = numpy.array([1,0,0,0,0,0,0,0,100]) # mean = 0.125, first bit pass 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 1)
+
+        img_values = numpy.array([1,1,0,0,0,0,0,0,100]) # mean = 0.25, first two bits pass 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 3)
+
+        img_values = numpy.array([1,1,1,0,0,0,0,0,100]) # mean = 3/8, first three bits pass 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 7)
+
+        img_values = numpy.array([1,1,1,1,0,0,0,0,100]) # mean = 4/8, first four bits pass 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 15)
+
+        img_values = numpy.array([1,1,1,1,1,0,0,0,100]) # mean = 5/8, first five bits pass 
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 31)
+
+        img_values = numpy.array([1,1,1,1,1,1,0,0,100]) # mean = 6/8, first six bits pass  
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 63)
+
+        img_values = numpy.array([1,1,1,1,1,1,1,0,100]) # mean = 7/8, first seven bits pass  
+        img = get_image_3x3(img_values)
+        returned_lbp = feature_extractor.mlbp(img)
+        self.assertTrue(returned_lbp == 127)
+
diff --git a/xbob/boosting/tests/test_trainer_stump.py b/xbob/boosting/tests/test_trainer_stump.py
index c7ea593..2179cc2 100644
--- a/xbob/boosting/tests/test_trainer_stump.py
+++ b/xbob/boosting/tests/test_trainer_stump.py
@@ -188,7 +188,7 @@ class TestStumpTrainer(unittest.TestCase):
         self.assertEqual(trained_polarity, polarity)
 
     def test_compute_polarity(self):
-        # test the threshold for single feature using a different permutation
+        # test the polarity of the classifier
         trainer = xbob.boosting.core.trainers.StumpTrainer()
 
         num_samples = 10
-- 
GitLab