diff --git a/setup.py b/setup.py
index 886c3b576c38314ecdcf5bc297b3612859530c00..457b46b06ad4471f7fd61c4750c0811efb34630c 100644
--- a/setup.py
+++ b/setup.py
@@ -53,6 +53,7 @@ setup(
       'setuptools',
       'bob', # base signal proc./machine learning library
       'xbob.db.mnist',
+      'xbob.db.banca',
     ],
 
     # Your project should be called something like 'xbob.<foo>' or 
diff --git a/xbob/boosting/core/boosting.py b/xbob/boosting/core/boosting.py
index 1d48c0131820beb7933a3da6c17f82f6fd554407..8a1f09bfb6a04e88e5f08bffef445179d2418516 100644
--- a/xbob/boosting/core/boosting.py
+++ b/xbob/boosting/core/boosting.py
@@ -190,7 +190,6 @@ class Boost:
             # Perform lbfgs minimization and compute the scale (alpha_r) for current weak trainer
             lbfgs_struct = scipy.optimize.fmin_l_bfgs_b(loss_func.loss_sum, init_point, fprime = loss_func.loss_grad_sum, args = (targets, pred_scores, curr_pred_scores)) 
             alpha = lbfgs_struct[0]
-            print alpha
 
 
             # Update the prediction score after adding the score from the current weak classifier f(x) = f(x) + alpha_r*g_r
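For orientation, a minimal standalone sketch of how scipy.optimize.fmin_l_bfgs_b is called and what its return tuple contains; the quadratic objective below is purely illustrative and is not the package's loss_func:

    import numpy
    import scipy.optimize

    def loss_sum(alpha, targets, scores):
        # illustrative quadratic surrogate, not the package's boosting loss
        return numpy.sum((targets - alpha * scores) ** 2)

    def loss_grad_sum(alpha, targets, scores):
        # gradient of the surrogate with respect to alpha
        return numpy.array([-2.0 * numpy.sum(scores * (targets - alpha * scores))])

    targets = numpy.array([1.0, -1.0, 1.0])
    scores = numpy.array([0.5, -0.2, 0.8])
    init_point = numpy.zeros(1)

    # fmin_l_bfgs_b returns a tuple (x, f, info): the minimizer, the final
    # objective value and a dictionary with convergence information, so the
    # scale alpha is the first element of the result.
    result = scipy.optimize.fmin_l_bfgs_b(loss_sum, init_point,
                                          fprime=loss_grad_sum,
                                          args=(targets, scores))
    alpha = result[0]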
diff --git a/xbob/boosting/core/trainers.py b/xbob/boosting/core/trainers.py
index 98228faa2ccba6a07522290d127c8a8a19b1b006..575f3acbf6f352d561d8530466a9735436671ae9 100644
--- a/xbob/boosting/core/trainers.py
+++ b/xbob/boosting/core/trainers.py
@@ -350,12 +350,9 @@ class LutTrainer():
 
         return: hist_grad: The sum of the loss gradient"""
         # initialize the values
-        num_samp = len(features)
-        hist_grad = numpy.zeros([self.num_entries])
 
         # compute the sum of the gradient
-        for feature_value in range(self.num_entries):
-            hist_grad[feature_value] = sum(loss_grado[features == feature_value])
+        hist_grad, bin_edges = numpy.histogram(features, bins = self.num_entries, range = (0, self.num_entries-1), weights = loss_grado)
         return hist_grad
 
 
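The replacement relies on a weighted numpy.histogram producing the same per-bin sums as the explicit loop over feature values. A small self-contained check of that equivalence (the sizes below are illustrative, not the trainer's actual num_entries):

    import numpy

    num_entries = 8                                        # illustrative LUT size
    features = numpy.random.randint(0, num_entries, 100)   # integer feature values in [0, num_entries)
    loss_grado = numpy.random.randn(100)                   # per-sample loss gradients

    # weighted histogram: each bin accumulates the gradients of the samples that fall into it
    hist_grad, bin_edges = numpy.histogram(features, bins=num_entries,
                                           range=(0, num_entries - 1),
                                           weights=loss_grado)

    # explicit loop, as in the removed code
    loop_grad = numpy.array([loss_grado[features == v].sum() for v in range(num_entries)])

    assert numpy.allclose(hist_grad, loop_grad)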
diff --git a/xbob/boosting/features/local_feature.py b/xbob/boosting/features/local_feature.py
index 755971d8aebe5728479244a36410a445ae646945..604ddaf78b4037c7ff36ae00be07b0c78fb99644 100644
--- a/xbob/boosting/features/local_feature.py
+++ b/xbob/boosting/features/local_feature.py
@@ -82,13 +82,13 @@ class lbp_feature():
 
                 # extract the specific feature from the image
                 if self.ftype == 'lbp':
-                    feature_map = self.lbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum)
+                    feature_map = self.lbp(block_sum)
                 elif self.ftype == 'tlbp':
-                    feature_map = self.tlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum)
+                    feature_map = self.tlbp(block_sum)
                 elif self.ftype == 'dlbp':
-                    feature_map = self.dlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum)
+                    feature_map = self.dlbp(block_sum)
                 elif self.ftype == 'mlbp':
-                    feature_map = self.mlbp(self.coord, feature_map_dimx, feature_map_dimy, block_sum)
+                    feature_map = self.mlbp(block_sum)
 
                 # reshape feature image into vector
                 temp_vector = numpy.reshape(feature_map,feature_map.shape[0]*feature_map.shape[1],1)
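The four extractor calls now take only the pre-computed block sums. As a possible further cleanup (a sketch only; the stand-in callables below are not the real lbp_feature.lbp/.tlbp/.dlbp/.mlbp methods), the if/elif ladder could be replaced by a dispatch table keyed on the feature type:

    import numpy

    def extract_feature_map(ftype, block_sum, extractors):
        # dispatch table: map the feature type string to the extractor callable
        return extractors[ftype](block_sum)

    # stand-in callables for illustration only
    extractors = {
        'lbp':  lambda s: s,
        'tlbp': lambda s: s + 1,
        'dlbp': lambda s: 2 * s,
        'mlbp': lambda s: s - s.mean(),
    }
    feature_map = extract_feature_map('lbp', numpy.ones((4, 4)), extractors)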
diff --git a/xbob/boosting/scripts/mnist_multi_block_lbp.py b/xbob/boosting/scripts/mnist_multi_block_lbp.py
index a22c8f7ed5b107e338dd41997700a23652286490..87116baa8bb3d8c2caeb55822db799d678af0054 100755
--- a/xbob/boosting/scripts/mnist_multi_block_lbp.py
+++ b/xbob/boosting/scripts/mnist_multi_block_lbp.py
@@ -85,8 +85,7 @@ def main():
     boost_trainer.selection_type = args.selection_type
     boost_trainer.num_entries = args.num_entries
 
-    print "Starting boosting the features"
-    print train_fea.shape
+    print "Start boosting the features"
     # Perform boosting of the feature set samp
     machine = boost_trainer.train(train_fea, train_targets)
 
diff --git a/xbob/boosting/tests/test_dlbp_features.py b/xbob/boosting/tests/test_dlbp_features.py
index 99926b404fc6ffc793388fb0094f619aa3a73891..44de37686e313275ec1e760ba2002cf133e7b8f3 100644
--- a/xbob/boosting/tests/test_dlbp_features.py
+++ b/xbob/boosting/tests/test_dlbp_features.py
@@ -28,6 +28,7 @@ class TestdlbpFeatures(unittest.TestCase):
         p6 | p5 | p4 """
 
     def test_dlbp_image(self):
+        """ Test for dlbp features with different neighbouring pixel values """
         feature_extractor = xbob.boosting.features.local_feature.lbp_feature('dlbp')
         img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc
         img = get_image_3x3(img_values)
diff --git a/xbob/boosting/tests/test_lbp_features.py b/xbob/boosting/tests/test_lbp_features.py
index a2c47fc152f77622fd99d54ef3f5aca68bd6aa12..e057869f07d13b51046722d0326b1e2394252131 100644
--- a/xbob/boosting/tests/test_lbp_features.py
+++ b/xbob/boosting/tests/test_lbp_features.py
@@ -22,6 +22,7 @@ class TestIntegralImage(unittest.TestCase):
     """Perform test on integral images"""
 
     def test_integral_image(self):
+        """ Test on the integral images """
         feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
         img = numpy.array([[1,1,1],
                            [1,1,1],
@@ -46,7 +47,8 @@ class TestLbpFeatures(unittest.TestCase):
         p7 | pc | p3
         p6 | p5 | p4 """
 
-    def test_integral_image(self):
+    def test_lbp_features(self):
+        """Test for LBP features with different neighbouring values."""
         feature_extractor = xbob.boosting.features.local_feature.lbp_feature('lbp')
         img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc
         img = get_image_3x3(img_values)
diff --git a/xbob/boosting/tests/test_loss_exp.py b/xbob/boosting/tests/test_loss_exp.py
index 420f5120e6b29632f286d7dd01c106384d43533e..4f6d670b92667f828f8bee560d21cee47258182c 100644
--- a/xbob/boosting/tests/test_loss_exp.py
+++ b/xbob/boosting/tests/test_loss_exp.py
@@ -7,6 +7,7 @@ class TestExpLossFunctions(unittest.TestCase):
     """Perform test on exponential loss function """
 
     def test_exp_positive_target(self):
+        """ Loss values computation test for postitive targets. """
 
         loss_function = xbob.boosting.core.losses.ExpLossFunction()
         target = 1
@@ -47,6 +48,7 @@ class TestExpLossFunctions(unittest.TestCase):
         self.assertEqual(val4, grad_sum_val)
 
     def test_exp_negative_target(self):
+        """ Exponential Loss values computation test for negative targets. """
 
         loss_function = xbob.boosting.core.losses.ExpLossFunction()
         target = -1
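The tests above exercise the exponential loss. For orientation, a minimal sketch of the standard exponential loss and its gradient with respect to the scores (the exact API of xbob.boosting.core.losses.ExpLossFunction is not reproduced here):

    import numpy

    def exp_loss(targets, scores):
        # standard exponential loss L(y, f) = exp(-y * f), element-wise
        return numpy.exp(-targets * scores)

    def exp_loss_gradient(targets, scores):
        # derivative of exp(-y * f) with respect to f
        return -targets * numpy.exp(-targets * scores)

    targets = numpy.array([1.0, -1.0])
    scores = numpy.array([0.5, 0.5])
    print(exp_loss(targets, scores))           # smaller loss where the score sign matches the target
    print(exp_loss_gradient(targets, scores))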
diff --git a/xbob/boosting/tests/test_tlpb_features.py b/xbob/boosting/tests/test_tlpb_features.py
index debd4a2a2a405a070c5a123bfd8f0095265f677a..8cd7b2f3ba56fa942fe5aa0f03fd5571117875f7 100644
--- a/xbob/boosting/tests/test_tlpb_features.py
+++ b/xbob/boosting/tests/test_tlpb_features.py
@@ -28,6 +28,7 @@ class TestmlbpFeatures(unittest.TestCase):
         p6 | p5 | p4 """
 
     def test_mlbp_image(self):
+        """ Test on mlbp feature with different neighbourhood values."""
         feature_extractor = xbob.boosting.features.local_feature.lbp_feature('mlbp')
         img_values = numpy.array([1,1,1,1,1,1,1,1,1])  # p0,p1,p2,p3,p4,p5,p6,p7,pc, mean = 1
         img = get_image_3x3(img_values)
diff --git a/xbob/boosting/tests/test_trainer_stump.py b/xbob/boosting/tests/test_trainer_stump.py
index 2179cc26c998f724920f662ca1463f960c98cc59..27445e69efc016d167affb2c0bfb036baefd2c61 100644
--- a/xbob/boosting/tests/test_trainer_stump.py
+++ b/xbob/boosting/tests/test_trainer_stump.py
@@ -152,9 +152,9 @@ class TestStumpTrainer(unittest.TestCase):
         self.assertEqual(trained_threshold, threshold)
 
         if(fea1 < fea2):
-            polarity = 1
-        else:
             polarity = -1
+        else:
+            polarity = 1
 
         self.assertEqual(trained_polarity, polarity)
 
@@ -181,9 +181,9 @@ class TestStumpTrainer(unittest.TestCase):
         self.assertEqual(trained_threshold, threshold)
 
         if(fea1 < fea2):
-            polarity = 1
-        else:
             polarity = -1
+        else:
+            polarity = 1
 
         self.assertEqual(trained_polarity, polarity)
 
@@ -208,9 +208,9 @@ class TestStumpTrainer(unittest.TestCase):
 
         
         if(fea1 < fea2):
-            polarity = 1
-        else:
             polarity = -1
+        else:
+            polarity = 1
 
         self.assertEqual(trained_polarity, polarity)
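The corrected expectations encode the polarity convention implied by these tests: when the first class's feature values lie below the second's (fea1 < fea2), the trained stump is expected to use polarity -1, otherwise +1. As a hedged sketch of how such a decision stump classifies (the exact decision rule of the package's StumpTrainer is an assumption here):

    import numpy

    def stump_predict(features, threshold, polarity):
        # decision stump: polarity flips which side of the threshold is the positive class
        return polarity * numpy.sign(features - threshold)

    # with polarity = -1, values below the threshold are classified as +1
    print(stump_predict(numpy.array([0.2, 0.8]), 0.5, -1))   # -> [ 1. -1.]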