From 26c6da625596528ff003aa4e8278c63f33d92506 Mon Sep 17 00:00:00 2001
From: Rakesh Mehta <rakesh.mehta@idiap.ch>
Date: Mon, 19 Aug 2013 18:36:07 +0200
Subject: [PATCH] added tests and changed variable names

---
 data1.hdf5                                    | Bin 0 -> 10144 bytes
 datafile1.hdf5                                | Bin 0 -> 12536 bytes
 xbob/boosting/core/boosting.py                |  26 ++-
 xbob/boosting/core/losses.py                  |  55 +++--
 xbob/boosting/core/trainers.py                |  51 ++--
 xbob/boosting/features/local_feature.py       |   4 +-
 xbob/boosting/tests/data1.hdf5                | Bin 0 -> 10144 bytes
 xbob/boosting/tests/test_loss_exp.py          |  89 +++++--
 .../tests/test_loss_exp_multivariate.py       |  83 +++++++
 xbob/boosting/tests/test_loss_log.py          |  83 +++++--
 .../tests/test_loss_log_multivariate.py       |  84 +++++++
 xbob/boosting/tests/test_trainer_lut.py       |  41 ++++
 xbob/boosting/tests/test_trainer_stump.py     | 217 ++++++++++++++++++
 13 files changed, 631 insertions(+), 102 deletions(-)
 create mode 100644 data1.hdf5
 create mode 100644 datafile1.hdf5
 create mode 100644 xbob/boosting/tests/data1.hdf5
 create mode 100644 xbob/boosting/tests/test_loss_exp_multivariate.py
 create mode 100644 xbob/boosting/tests/test_loss_log_multivariate.py
 create mode 100644 xbob/boosting/tests/test_trainer_lut.py
 create mode 100644 xbob/boosting/tests/test_trainer_stump.py

diff --git a/data1.hdf5 b/data1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..68c7b19ea89c102deae65a07ccf3a5087998a52c
GIT binary patch
literal 10144
zcmeD5aB<`1lHy_j0S*oZ76t(j3y%Lo!2)%N5S05L!ed}afHD}NbO)4P!31G2GJqfh
zg9L=jAP6-dU0q0!t1ANoBLmEQ7!B3NV88-laX_e1a)gC|hpS@%$jcERf`I{=iWnG9
zK+_pim?5#KD6x_Ol#(DK9uSIbl97Rp0i3qM1jt+suvv^u5P=jhkAVS_ffyLj`OFLg
zVEqCd3=EL8>j2il%D@2@XJUdF#K9l|<}))UK$I~!$b%I^2_dL%kgdXQLO~1+1yC9K
zQo(-yE}$F@btHoVGkxtJwQV#6MnhoegaEkEW?*1ogF0ChO1nU5J17k+*HxkXASkU2
zrD5s?p?sJ+PAFd=N{2(~U?{Bzr4^tw%pJT?z892++3ybJ!`e08P(I8YSh)wQcKo37
zuwqag$`6Io{!kibZ#0w-D#t-g9uUF6z~BU>)u6NylxBs}nowE<O2gb63+2P)wV`}i
zxL85?)=)YbO2haTP(DADwujPkP#WeAF(@BaDMdp0F#kJ3`Osz>1I%8S|7D@_u=-dF
z%7@9r?19B&AXFaa9$35@K*eG9!omlZK4AWbg%2#gVCe}K58P05VxTn4U2#x8Ed7{5
z`6^Hv7A`_iJ}f+8>R{mn3s0E4Vd=#Xs?QBd!_0;0Lr<45{V?-k`k>8m21%$nu=EEr
z50<WB@$3Ut2lIC#ln-gmFfhR4Ll`P<1Epc<O$W-ChSIS3hov7_{(_}DSU!i*u<(Mp
z8y2sy_=M$iSiXV9Crm%g9+<smP<O!6BTT;(R2=3GSh&N|A1r@Cl`(ii88Gu<_CZ={
z3=HN_d06@ggYseShoy5^JSjruVfMiE!_o;XUBluF=1)tgK3I7GOW&}31xrsb_rk&x
z79KDfmcC)`jE9;Fi(gkLAC`__;SWpiu<(SXLs-1?LG{7fxiI&@@-NIiu=s=d1J;{_
z>4%lyNl^1(<rXY{VCe`JZ_ZG4#!wm-KQQxP;SAFU%b&1v8&;nCLiH;_X&4_?e!{{P
zmY-nZ1Tzn24=g@l@dXPfSh|C$gXJSw{(<=e7S2#-Fu>vsW<M+(Y@rHZ`50DC$w0+n
z;R~ymVC5#P{(+@0SosKZC#?K|rEgex!o*?aCM?~++zl&NVCf20uE6R^n7?4*0t*M2
z`=g-lhLx`{ahN|spz<(#VC4j?UVym^R&K%gFn_`H!@>z>E-XG_{)5FgtUQHU%K&q~
z2~+@PKFr-Pcfibp#S^UjgoPt4-N5n*JJdW_K7*-)<qKFkgryVMI0?+1Fn7T6AuQj(
z+EuXdf|&=4Z&>`n(l4w$hlK;wSq!lB1hW@bp25t6)k`q{z}yK-7cl?8$`@Gq1}k4+
z;Q{jxtp0%IPguH#m7lQig5@7reubF}OK-6H9Ht-EeuC8luyBBt+pzouGY3|F!NL(1
zPOx+bOQ$e@!`uVQSFrei$-~kGtbBmE17;p9++q0@RxZKHF_=GL;R}loSbGN6ZiI=$
z@;NMh!16P!UWCO5tR95bKd^9tl~=HG0~VjK_<`j!n7d%@2UvRw7OyaSVC`{OeuUWz
zOGmJD0&5?_!UdLZVdlWn8?1c{OGhyCVBrYU53BEB>R{;rR<FRyAy|D1OE0i|2g~2E
z`Ua*C7LKrRhUIry{RWF~m^m<a!O}4--ND*hFn7WH2MY&Sy#`BfFnL(`!rTSR_po?^
zxeJ!gVC@%Jc)-eG7!51eVD5nBA6R~Z^}}HK9~R!QbOlTQFg`4tVBrAE2e5h=RzJYp
z4+{^N`7nRN+yQexEZxEU4@-Zrat`J$So(nZ3+8^9JgmHh#Sg4LfVmHr{$cGkSU!Z6
z6EO2(=>TRvEIeWH3d^ss^bHFiSh|OmvoQa{${m=$VdW1jJ?KI;z{){bc>*gpVE%%o
z7g#w8b2qG>f`v0I-eB&8wbx+w!P=j&d;!b9u<(HS6IM>Z%!QRBuy}*Dhhgeq`3hz)
zEZxEKBP<+Y<rmc346yiw<s(=<4NFI`d=HBsn0c^vE6hAty9!qB!NL>fURe0S%5hjd
z2rHLi`3n|5uyP;Po`=N?%zd!*4$GggbPFpNVCfoG&cW2d+z&Gs7N4+o7A)Pv@-?g+
zgP99UZ?N<PD^FnQ0Ol`P`h?{}Sh&F44{MLX$~RcPf%zXM4{ImE`~wSjSosK(hqd!z
z{Q_9NgUQ3<2^O!g`VAHzuzC(=Kg|EI`UjRTVdlW{1uVV8+GVix4l^H?PGRX6mM>xP
z2MaG4AC@0r;RSOKEFZwqE36!anGcIcSopxwA1q(N;tgg#EM3CV0WADr<p?a@z}j^%
zf5GxEEWg3ZWmtU(%fGO2hou{sIk5Bxvj<iVz``3=@4(_0mTqDGhP6*%G%S2z=D^|y
z7S1sJuzU>5_po#eOE0kSg5^h8yujM`Fn_`737C6f`2ZFVFnzG}4AT!QKVa%%?t-}=
z)-Hj?2fF)U`4HwWSop)rRaiX0?1kk&SiXdni?DJ8mOo(rh1Kh@c!cFYm^m<gFn7Yr
zU0D8rxeHc5!@>dPUl<=24lsLR@c~l@%b&1v0oKlg)%UP;1Peb{xWK|0CJ!seVBrGG
zkFfLwvj-MFF#p2p4Osqyg)^+Z4ofF6|G?@`Sp5#mCouQG(jzRr!s-cF{)2@(%>OWd
z!{lM<4wgS*;Sb9nuzU|IA7JeZSiXamx3Kt!r59Ma1#=%P++hBNmA5b&W<HFDl~1sA
z3iB5%eqr$niyv5j5axbZ`vF!Sz{(4lI#_)Kb1$qt1&cqJzhLnT%SSMG!Q2f?pD_Kf
z^bNBYmj7Yp5iGo6=>?YlVDSkHZ&?0^r8Ag*SUkhZD_D69GY^)ZV0>6P3(JqN^Z@fW
zEIq=)0~W8a`UjQ{Vfh0V4lsAX`cW|d!`uyvcUXMF+yhG|u=)%ZkFa(wEc{^pf`tc6
z9A+M@9D{`?EM39e1uIWr_QBFK%>S@>gw<cLd<N@J!|F?zeX#ZqtQ`l-&#-a==3ZDm
z2rI8(=^j?E!R&#lhoxs&c?Sz8SiHf?1z0+Sr8gK23x8O+!^$04yus2v%$=}yBFrDK
z^aoQ1YZt=80TxcM{0GZlu=D_{7h(AnmVRLBVd)nZPcZvn<qgc=uzmzAJ;Kr%tlb0)
zZ&?0>m0vJE%zRkBgQag+_`&=QiwBthVd)E29>d%PYq!D7hs6^tpTPV9OP4TvVdXt6
z-NEW1Sh));KVj(*RxZQ*53A>4@d!(IuyG7nc){`utUQC!Fn7SpCzyG#aDasqEM3Fm
z6P8Y4?t_H~tX~5QKbSl$oM7n%)-HghLs<C2!U?7y7Je{uVC_X%IKk?7n15mM4ht_>
zI~Nu|u=EKtALcKZc`$dt%0E~>fR&Rl|HI-J)}Mft3o!d&;RZ|JuyBU?1J?e7xeFFP
zuyhJ@4=kQw@eAX_%6C|N!SWL<AHnJ~SUC%eFIafP`~~wTtQ>)r6R`9JOBXQz!`uZ+
i=dk#Mm18h-VDSb^53uwBONX#}4p#2M)WO0NrVaq0r16OW

literal 0
HcmV?d00001

diff --git a/datafile1.hdf5 b/datafile1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e16e27279ee8b566359bdbd0556cf107ef228b3f
GIT binary patch
literal 12536
zcmeD5aB<`1lHy_j0S*oZ76t(j3y%Lo!4Ct75S05L!ed}afHD}NbO)4P!31G2GJqfh
zg9L=jAP6-dU0q0!t1ANoBLmEQ7!B3NV88-laX_e1a)gC|hpS@%$jcERf&r19PC(Nc
zRG1;Ls3@_rIJE@Ehowsgm;i}1BLf=)*bFcMN}(2D6BwBx0uYjcAq6bTzyQfe4DfV}
zF3!v#05(s6gMk5(mLV=<W#9mdGchxPjpSfZ0P~rdSSnZ{W`o2*#(}JWXk}nv5Q3Tl
zl4Nuh3SwY@g%Cs&waH*Ve-}`WhB-iinc6mt4D&$c9Vi@O<s6K5kSEUnI|NbHf#M$|
z0!knt8k9&tG%TJ#>`^=#0;3@?8UmvsFd71*Aut*OqaiRF0;3@?G(!N~8D?N$V1qhe
z6iT~5X*(zl>kX<x`9V-x8A`*{!@3hNb(~OneJC9crGufg9+Xyq(lB@MLit`$8fL#c
zln)y(@`mzZ=D_+Xu<pDcR36s76o>Ldp|n4ghS?hp<-__xJW#$9lvab%Mo=2sr)JQE
z@<pID%)PNtK1^O4%7=xE6_jrcrIVpFj1L(lU|`^firYhJIVcTthZvL(>-I%L`7r-G
zLiy0)F9w*sF#pR!<vpOZ7L<m`!|Z{@V<1!><{ntQLWlPlVD`eo2bMly{)dGREWTjr
z2^J6BQ1fG;G|XLbP(CdEm_qp~P#P94LQp;|JYni!;RFj$n7d)=#Sp5`4NAkzh3P|2
zmoWV>^I`hbq537EG%WqW%!8$CSUmec)xrFo2<2-)X;^#+L-{sP8kXL4pnPd44U2zR
z`hn#ySh|Dda~KT^FPOVw@d}GiSU!j48(4h8^uz3d*=q)M2P{3p^h-g-VeWv1J1qUd
z@)uMYgC~>$GaqIjWVDNc!5k_NOCMoSKFs~FbPkIrMW{T?9+-YuI)SBYSbV|!X$jQ_
zD-U4l8<wwN=?UguSa`z114hHrH_V;!P;+7N>k8$=(h)5DVd)(fp0IQXi+4V#KG=8-
z%ssID3v&-F{$T!qjnc#P!^-a@sClq*3l=}HbOehxXQ(=3C=H7rn0c^phUtUlPguDP
zE6;tQ`jwzGj1Mb6Vc`nPPq1);nFq5679X(qf`t<--NDqs@)0cm!2AIVXQ(q6VDSdC
z9~KU_PzA7j3@fK(pyIIbh1E;2auZhnz|t42e1y3ZR{p@!H!M71;;?cPmTq9~hLtO@
zbOkF{VD%)-U$Ah2g#*m}QBZfo%2${;%pW08d6+%0aspN_z}y8Zw_tpjzhL@d;RG`m
z7N0Qx!QvZMo<gl<fVtlUDgZMd=5ClfVCKQ%308i>!V#8kVEKd{Y91_~!PLR>1uPxH
z(g|$-3Fc0iJ7D<`mTzF~Dp+{I%!9=@EdF5W7gnCb!U5_m23UH6*$XSrVCKQ<C76F;
z?u4Zan15j93#@#Dl`pXHfcXbjf57r5EZxJ(Pgr=t@((P(!pw!GH&}fR(+_Ju!Ri56
zIKawnSbl<;11rB^;Rp*SSh|CyQ<%SD?t$eiSbV_bVd(-^KET`oGY=N-u>1-umtf@>
z%%8CEg~bP~Jp*eu!o*?u9F{&{`59I(!r}u~55npnSh&E-D_FS!i%(em!15W)U9k28
ztUU#bSC~Dp_Bbp*!t8~mBUn0twGUz80?W5Bb71KW);@-%Bba%xaD?fH)psy;uyg>c
zS77B3tUiUM7g)Z7<!@Mh1Jef!M_4$+@;j`4gT*(@9GJUc=@^#oVC^lKyI}r<g#)Z!
zgQYi^JS==+?t<leSUkbp1xshJ_6saLVC683hLvkDcfj%wEI+~eVX*uU3vXDuf~9{L
z9~MrqaDe3lSUn7@A7Ji>g$K-hm_K3efVm%*?qL3hr9W6X2XhxJeZc$$b3aTTR^Gzm
z2UZ`z+y_hlu=W}(AHvEBnE9}D05cyJp0Id@<yTnxhJ_C--NVXRn15mA4$R-M@&}e4
zbfFqx<shs)ft4FDf5FlVtQ>{88&*%j!WkBCFn7Y*YcTs@?N3;~faPCUc)<J#D<@#)
z!pad?yusSTFm<qe1+y2H?qK;57LKs;3+inKSbW0r5v-nur6X9rhs6)fJXpIGW*)3v
z1uOSp;R$muEc{^QIIJFomCLaF1&be8xesg4!{P<zK3IB(<xg0;g_R4ibPX%#VCrD*
zhnWkDPgpw(mhNHs8di?M%!Q>lSbBn$C$Mw?^A{|A!tx<3Twv~pwZ~xP8!X?z{120d
zwUc1}frUG)e1yrv+WD}40W9Ca<YDmyi&t3v28$0^JqNQN=6_iI1Iw2%b71)bmfm6Q
zGFW<tnGZ{+u=ESdm$3MQg%^wu%MY;dg1HBl4`AsPR*u5Vhs7f-d|>Ghmaky(1~VU)
zE@9~a7JjgD1eR`K?K+shVEGr8-(cl3tUiS0Us$-q(hbZUSo(w611kq$;SH;IVDSq}
zw=jRh+9xm?7Cta@VDSSBXPACiK8EFcSh|I!7g%_~@*^x>VC{RDzhLzQ%)PLD0E-8h
zK3IB&>4%jcF!eBZ!Q2mPm%!o!-F>ip2=f;#{9)xPEFNI?!tx(1U&6{oSUCdAA29#I
z>UCH=!tx)?9GE_sJ7MK6EPue<1*@N7;Q;e5j1LP3n7y$0fT@G!PguDCYv;l0dssSx
zg&!<jVBrjthm~WnaDnAVSo(t50}CIRe_{0oEPuhm8P;Bhr4yKcVD%@geuw1~n0sL9
z5td$I^#m;c!NMKpf0(~v^00IV%b&3Dhvg4gzK4|$u=WKk-@(dTSp37%3#{CNxepd@
zF#p2JTNn*9A4bE<Cs;a#`3n}muy}>V53D~3b3d&804on*<poR~tUiLd7uKGF#UIRH
zu=s`LBbd8j?uMmLn0{FLhS>|t|FH517T&P*0!x3e_=JTwEdRsO8B9Mco?+z`tUQL9
z2g^?|KCGOD<wsb0fcYDi9%11Di&t3v151an`~eFGm^)zoD473Y?uNxXEIwiGfu$2z
zeFlq1SUVRMelUN*!UHA_GY?jd!NL=ku3+whl_xO!VCfm=e^@-i>MvM6gY~Cj^(D+c
zSo;Uoj)UcASUCZ6FRUJfl~=HI53AQ;_Q2G`(le~QgM||;-eBbdEFHqq8;pj9KP=o~
z<qj;~VCf#_PFOn;<_}o<gQ<hH3t`~^3ny6qgXJ$+dVtl7uzU(jKQQ&M^b3n8n0>JF
z2Ig;AKLVB>Vd)IkZi0n3EPukvFBl(YJ}lqC(l;#pVE%^11I+)h^aU%AVeW#p+hFFy
z;t7^dVE%xmOPIZ|@*bA%VD%8J+=Z2&uyhD3mtp>g)$_1;grz&!I0h`dVEF}Bp229C
zJ7DD#%sf~)z`_ZZu3_;BOQ$gR!NLR9uYrXhOdb|au=E0J7r@dXEPP?%1k(=-KbSeN
z_985tVD&r9zp!|Rg%_-y3yU9E`h=Me^B2rKm^)zQA1oih%1N02Vet#=Pr%9rn0>Ht
zgQag+IK%t_YyZLA1q&ZoI)%9h7EiGFh4EqKJ1o9n`3aVfVD%ZSoQ1^~EWBa<g836x
xj=;(ZSo(sc3z+|5?t-OrSbW0DF_<~9c!Q+}So(maLs&frD|cb)VBrZ<2LMEI4j=#k

literal 0
HcmV?d00001

diff --git a/xbob/boosting/core/boosting.py b/xbob/boosting/core/boosting.py
index d5ec85f..47dfacc 100644
--- a/xbob/boosting/core/boosting.py
+++ b/xbob/boosting/core/boosting.py
@@ -89,11 +89,11 @@ class Boost:
 
 
 
-    def __init__(self, trainer_type):
+    def __init__(self, trainer_type, num_rnds = 20, num_entries = 256, loss_type = 'log', lut_selection = 'indep'):
         """ The function to initialize the boosting parameters. 
 
         The function set the default values for the following boosting parameters:
-        The number of rounds for boosting: 100
+        The number of rounds for boosting: 20
         The number of entries in LUT: 256 (For LBP type features)
         The loss function type: logit
         The LUT selection type: independent
@@ -101,13 +101,25 @@ class Boost:
         Inputs:
         trainer_type: The type of trainer for boosting.
                       Type: string
-                      Values: LutTrainer or StumpTrainer    
+                      Values: LutTrainer or StumpTrainer
+        num_rnds:     The number of rounds of boosting
+                      Type: int
+                      Values: 20 (Default)    
+        num_entries:  The number of entries for the lookup table
+                      Type: int
+                      Values: 256 (Default)
+        loss_type:    The loss function to be minimized
+                      Type: string
+                      Values: 'log' or 'exp' 
+        lut_selection: The selection type for the LUT based trainers
+                       Type: string
+                       Values: 'indep' or 'shared'   
                    
         """
-        self.num_rnds = 100
-        self.num_entries = 256
-        self.loss_type = 'log' 
-        self.lut_selection = 'indep'
+        self.num_rnds = num_rnds
+        self.num_entries = num_entries
+        self.loss_type = loss_type
+        self.lut_selection = lut_selection
         self.weak_trainer_type = trainer_type
 							
 	
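For reference, a minimal usage sketch of the new keyword-argument constructor. It assumes the package exposes the core modules the same way the new tests import them, and it only exercises the signature introduced above:

    import xbob.boosting

    # all keyword arguments fall back to the documented defaults:
    # 20 rounds, 256 LUT entries, logit loss, independent LUT selection
    booster = xbob.boosting.core.boosting.Boost('LutTrainer')

    # or override them explicitly
    booster = xbob.boosting.core.boosting.Boost('StumpTrainer', num_rnds=50, loss_type='exp')
    assert booster.num_rnds == 50 and booster.loss_type == 'exp'
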
diff --git a/xbob/boosting/core/losses.py b/xbob/boosting/core/losses.py
index b1936f3..f73d952 100644
--- a/xbob/boosting/core/losses.py
+++ b/xbob/boosting/core/losses.py
@@ -1,6 +1,5 @@
 import numpy
 import math
-from scipy import optimize
 
 
 
@@ -42,14 +41,15 @@ class ExpLossFunction():
         return loss_grad
         #return loss_grad
 
-    def loss_sum(self, *args):
+    #def loss_sum(self, *args):
+    def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
         This function is given as the input for the lbfgs optimization function. 
 
         Inputs: 
-        x: The current value of the alpha.
+        alpha: The current value of alpha.
            type: float
 
         targets: The targets for the samples
@@ -65,23 +65,24 @@ class ExpLossFunction():
         Return:
         sum_loss: The sum of the loss values for the current value of the alpha    
                  type: float"""
-
+        """
         # initialize the values
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
-
+        """
+        
         # compute the scores and loss for the current alpha
-        curr_scores_x = pred_scores + x*weak_scores
-        loss = self.update_loss(targets, curr_scores_x)
+        curr_scores = prediction_scores + alpha * weak_scores
+        loss = self.update_loss(targets, curr_scores)
 
         # compute the sum of the loss
         sum_loss = numpy.sum(loss,0)
         return sum_loss
         
 
-    def loss_grad_sum(self, *args):
+    def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the exponential loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -104,15 +105,18 @@ class ExpLossFunction():
         Return:
         sum_loss: The sum of the loss gradient values for the current value of the alpha    
                  type: float"""
+
+        """
         # initialize the values
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
+        """
 
         # compute the loss gradient for the updated score
-        curr_scores_x = pred_scores + x*weak_scores
-        loss_grad = self.update_loss_grad(targets, curr_scores_x)
+        curr_scores = prediction_scores + alpha * weak_scores
+        loss_grad = self.update_loss_grad(targets, curr_scores)
 
         # take the sum of the loss gradient values
         sum_grad = numpy.sum(loss_grad*weak_scores, 0)
@@ -149,7 +153,7 @@ class LogLossFunction():
                  type: numpy array (# number of samples x #number of outputs)
         
         scores: The current prediction scores for the samples.
-                type: numpy array (# number of samples) 
+                type: numpy array (# number of samples x # number of outputs) 
 
         Return:
         gradient: The loss gradient values for the samples     """
@@ -157,7 +161,7 @@ class LogLossFunction():
         denom = 1/(1 + e)
         return - targets* e* denom
 
-    def loss_sum(self, *args):
+    def loss_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the logit loss which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -170,10 +174,10 @@ class LogLossFunction():
         targets: The targets for the samples
                  type: numpy array (# number of samples x #number of outputs)
         
-        pred_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
+        prediction_scores: The cumulative prediction scores of the samples until the previous round of the boosting.
                  type: numpy array (# number of samples) 
 
-        curr_scores: The prediction scores of the samples for the current round of the boosting.
+        weak_scores: The prediction scores of the samples for the current round of the boosting.
                  type: numpy array (# number of samples) 
 
 
@@ -181,17 +185,18 @@ class LogLossFunction():
         sum_loss: The sum of the loss values for the current value of the alpha    
                  type: float"""
 
+        """
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
-        curr_scores_x = pred_scores + x*weak_scores
-        loss = self.update_loss(targets, curr_scores_x)
-        sum_l = numpy.sum(loss,0)
-        return sum_l
+        """
+        curr_scores = prediction_scores + alpha*weak_scores
+        loss = self.update_loss(targets, curr_scores)
+        sum_loss = numpy.sum(loss,0)
+        return sum_loss
         
-    #@abstractmethod
-    def loss_grad_sum(self, *args):
+    def loss_grad_sum(self, alpha, targets, prediction_scores, weak_scores):
         """The function computes the sum of the logit loss gradient which is used to find the optimized values of alpha (x).
          
         The functions computes sum of loss values which is required during the linesearch step for the optimization of the alpha.
@@ -214,14 +219,16 @@ class LogLossFunction():
         Return:
         sum_loss: The sum of the loss gradient values for the current value of the alpha    
                  type: float"""
+        """
         x = args[0]
         targets = args[1]
         pred_scores = args[2]
         weak_scores = args[3]
-        curr_scores_x = pred_scores + x*weak_scores
-        loss_grad = self.update_loss_grad( targets, curr_scores_x)
-        sum_g = numpy.sum(loss_grad*weak_scores, 0)
-        return sum_g
+        """
+        curr_scores = prediction_scores + alpha*weak_scores
+        loss_grad = self.update_loss_grad( targets, curr_scores)
+        sum_grad = numpy.sum(loss_grad*weak_scores, 0)
+        return sum_grad
 
 
     """def loss_sum(self, targets, scores):
diff --git a/xbob/boosting/core/trainers.py b/xbob/boosting/core/trainers.py
index 7c6bb2e..2599e5d 100644
--- a/xbob/boosting/core/trainers.py
+++ b/xbob/boosting/core/trainers.py
@@ -107,15 +107,16 @@ class StumpTrainer():
 
         # Find the corresponding threshold value
         threshold = 0.0
-        if(opt_id == num_samp-1):
+        if (opt_id == num_samp-1):
             threshold = fea[opt_id]
         else:
             threshold = (float(fea[opt_id]) + float(fea[opt_id+1]))*0.5
+
         # Find the polarity or the directionality of the current trainer
         if(gain_max == gain[opt_id]):
             polarity = -1
         else:
-            polarity = 1
+            polarity =  1
 
         return polarity, threshold, gain_max
 
@@ -131,7 +132,7 @@ class StumpTrainer():
         scores are either +1 or -1.
         Input: self: a weak stump trainer
                test_features: A matrix of the test features of dimension. 
-                              Num. of Test images x Num of features
+                              Num. of Test images x Num. of features
         Return: weak_scores: classification scores of the test features use the weak classifier self
                              Array of dimension =  Num. of samples 
         """
@@ -157,7 +158,7 @@ class LutTrainer():
  
 
     
-    def __init__(self, num_entries, selection_type, num_op):
+    def __init__(self, num_entries, selection_type, num_outputs):
         """ Function to initialize the parameters.
 
         Function to initialize the weak LutTrainer. Each weak Luttrainer is specified with a 
@@ -175,14 +176,14 @@ class LutTrainer():
                         and a single feature is used for all the outputs. See Cosmin's thesis for more details.
                        Type: string {'indep', 'shared'}
 
-        num_op: The number of outputs for the classification task. 
+        num_outputs: The number of outputs for the classification task. 
                     type: Integer
 
         """
         self.num_entries = num_entries
-        self.luts = numpy.ones((num_entries, num_op), dtype = numpy.int)
+        self.luts = numpy.ones((num_entries, num_outputs), dtype = numpy.int)
         self.selection_type = selection_type
-        self.selected_indices = numpy.zeros([num_op,1], 'int16')
+        self.selected_indices = numpy.zeros([num_outputs,1], 'int16')
     
 
 
@@ -208,12 +209,13 @@ class LutTrainer():
         """
 
         # Initializations
-        num_op = loss_grad.shape[1]
-        fea_grad = numpy.zeros([self.num_entries,num_op])
+        num_outputs = loss_grad.shape[1]
+        # fea_grad holds one gradient histogram (num_entries values) per output
+        fea_grad = numpy.zeros([self.num_entries,num_outputs])
 
         # Compute the sum of the gradient based on the feature values or the loss associated with each 
         # feature index
-        sum_loss = self.compute_fgrad(loss_grad, fea)
+        sum_loss = self.compute_grad_sum(loss_grad, fea)
 
 
         # Select the most discriminative index (or indices) for classification which minimizes the loss
@@ -226,9 +228,10 @@ class LutTrainer():
 
             selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]
 
-            for oi in range(num_op):
+            for oi in range(num_outputs):
                 curr_id = sum_loss[:,oi].argmin()
-                fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,curr_id])
+                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,curr_id])
+                # remember which feature index was selected for this output
                 self.selected_indices[oi] = curr_id
 
 
@@ -239,10 +242,10 @@ class LutTrainer():
 
             accum_loss = numpy.sum(sum_loss,1)
             selected_findex = accum_loss.argmin()
-            self.selected_indices = selected_findex*numpy.ones([num_op,1],'int16')
+            self.selected_indices = selected_findex*numpy.ones([num_outputs,1],'int16')
 
-            for oi in range(num_op):
-                fea_grad[:,oi] = self.compute_hgrad(loss_grad[:,oi],fea[:,selected_findex])
+            for oi in range(num_outputs):
+                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi],fea[:,selected_findex])
      
         # Assign the values to LookUp Table
         self.luts[fea_grad <= 0.0] = -1
@@ -252,7 +255,7 @@ class LutTrainer():
 
 
      
-    def compute_fgrad(self, loss_grad, fea):
+    def compute_grad_sum(self, loss_grad, fea):
         """ The function to compute the loss gradient for all the features.
 
         The function computes the loss for whole set of features. The loss refers to the sum of the loss gradient
@@ -269,13 +272,13 @@ class LutTrainer():
         # initialize values
         num_fea = len(fea[0])
         num_samp = len(fea)
-        num_op = len(loss_grad[0])
-        sum_loss = numpy.zeros([num_fea,num_op])
+        num_outputs = len(loss_grad[0])
+        sum_loss = numpy.zeros([num_fea,num_outputs])
        
         # Compute the loss for each feature
         for fi in range(num_fea):
-            for oi in range(num_op):
-                hist_grad = self.compute_hgrad(loss_grad[:,oi],fea[:,fi])
+            for oi in range(num_outputs):
+                hist_grad = self.compute_grad_hist(loss_grad[:,oi],fea[:,fi])
                 sum_loss[fi,oi] = - sum(abs(hist_grad))
 
 
@@ -285,7 +288,7 @@ class LutTrainer():
 
 
 
-    def compute_hgrad(self, loss_grado,fval):
+    def compute_grad_hist(self, loss_grado,fval):
         """ The function computes the loss for a single feature.
 
         Function computes sum of the loss gradient that have same feature values. 
@@ -318,9 +321,9 @@ class LutTrainer():
         return: 
         weak_scores: The classification scores of the features based on current weak classifier"""
         num_samp = len(fset)
-        num_op = len(self.luts[0])
-        weak_scores = numpy.zeros([num_samp,num_op])
-        for oi in range(num_op):
+        num_outputs = len(self.luts[0])
+        weak_scores = numpy.zeros([num_samp,num_outputs])
+        for oi in range(num_outputs):
             a = self.luts[fset[:,self.selected_indices[oi]],oi]
             weak_scores[:,oi] = numpy.transpose(self.luts[fset[:,self.selected_indices[oi]],oi])
         return weak_scores
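The renamed helpers keep their behaviour: compute_grad_hist accumulates the loss gradient into one bin per discrete feature value, so with a unit gradient it reduces to a histogram of the feature, which is what the new LUT test below verifies. A small sketch under the same assumptions (integer features in the range covered by num_entries):

    import numpy
    import xbob.boosting

    num_entries = 10                                   # features take integer values 0..9
    trainer = xbob.boosting.core.trainers.LutTrainer(num_entries, 'indep', 1)

    features  = numpy.array([0, 1, 1, 2, 2, 2, 9, 9, 9, 9])
    loss_grad = numpy.ones(len(features))              # unit gradient for every sample

    # one accumulated gradient per feature value; with unit gradients these are the value counts
    hist_grad = trainer.compute_grad_hist(loss_grad, features)
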
diff --git a/xbob/boosting/features/local_feature.py b/xbob/boosting/features/local_feature.py
index 7057b33..c451c3c 100644
--- a/xbob/boosting/features/local_feature.py
+++ b/xbob/boosting/features/local_feature.py
@@ -137,13 +137,13 @@ class lbp_feature():
         """
 
         feature_map = numpy.zeros([feature_map_dimy, feature_map_dimx])
-        num_neighbour = 8
+        num_neighbours = 8
 
         """ Compute the feature map for the tLBP features. """
         for ind in range(num_neighbours):
             
             """The comparison of pixel is done with the adjacent neighbours."""
-            comparing_img = block_sum[coord[(ind+1)%num_neighbour][0]:coord[(ind+1)%num_neighbour][0] + feature_map_dimy,coord[(ind+1)%num_neighbour][1]:coord[(ind+1)%num_neighbour][1] + feature_map_dimx]
+            comparing_img = block_sum[coord[(ind+1)%num_neighbours][0]:coord[(ind+1)%num_neighbours][0] + feature_map_dimy,coord[(ind+1)%num_neighbours][1]:coord[(ind+1)%num_neighbours][1] + feature_map_dimx]
             
             """ Compare the neighbours and increment the feature map. """
             feature_map = feature_map + (2**ind)*(block_sum[coord[ind][0]:coord[ind][0] + feature_map_dimy,coord[ind][1]:coord[ind][1] + feature_map_dimx]>= comparing_img)
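Each pass through the loop above contributes one bit, weighted by 2**ind, that records whether a block is at least as large as its next neighbour. A toy sketch of that weighting on a plain vector of comparison outcomes (the array here is purely illustrative, not the library's data):

    import numpy

    # eight boolean outcomes, one per neighbour comparison
    comparisons = numpy.array([1, 0, 1, 1, 0, 0, 1, 0])
    weights = 2 ** numpy.arange(8)                 # 2**ind for ind = 0..7
    tlbp_code = numpy.sum(weights * comparisons)   # an integer code in [0, 255]
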
diff --git a/xbob/boosting/tests/data1.hdf5 b/xbob/boosting/tests/data1.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..68c7b19ea89c102deae65a07ccf3a5087998a52c
GIT binary patch
literal 10144
zcmeD5aB<`1lHy_j0S*oZ76t(j3y%Lo!2)%N5S05L!ed}afHD}NbO)4P!31G2GJqfh
zg9L=jAP6-dU0q0!t1ANoBLmEQ7!B3NV88-laX_e1a)gC|hpS@%$jcERf`I{=iWnG9
zK+_pim?5#KD6x_Ol#(DK9uSIbl97Rp0i3qM1jt+suvv^u5P=jhkAVS_ffyLj`OFLg
zVEqCd3=EL8>j2il%D@2@XJUdF#K9l|<}))UK$I~!$b%I^2_dL%kgdXQLO~1+1yC9K
zQo(-yE}$F@btHoVGkxtJwQV#6MnhoegaEkEW?*1ogF0ChO1nU5J17k+*HxkXASkU2
zrD5s?p?sJ+PAFd=N{2(~U?{Bzr4^tw%pJT?z892++3ybJ!`e08P(I8YSh)wQcKo37
zuwqag$`6Io{!kibZ#0w-D#t-g9uUF6z~BU>)u6NylxBs}nowE<O2gb63+2P)wV`}i
zxL85?)=)YbO2haTP(DADwujPkP#WeAF(@BaDMdp0F#kJ3`Osz>1I%8S|7D@_u=-dF
z%7@9r?19B&AXFaa9$35@K*eG9!omlZK4AWbg%2#gVCe}K58P05VxTn4U2#x8Ed7{5
z`6^Hv7A`_iJ}f+8>R{mn3s0E4Vd=#Xs?QBd!_0;0Lr<45{V?-k`k>8m21%$nu=EEr
z50<WB@$3Ut2lIC#ln-gmFfhR4Ll`P<1Epc<O$W-ChSIS3hov7_{(_}DSU!i*u<(Mp
z8y2sy_=M$iSiXV9Crm%g9+<smP<O!6BTT;(R2=3GSh&N|A1r@Cl`(ii88Gu<_CZ={
z3=HN_d06@ggYseShoy5^JSjruVfMiE!_o;XUBluF=1)tgK3I7GOW&}31xrsb_rk&x
z79KDfmcC)`jE9;Fi(gkLAC`__;SWpiu<(SXLs-1?LG{7fxiI&@@-NIiu=s=d1J;{_
z>4%lyNl^1(<rXY{VCe`JZ_ZG4#!wm-KQQxP;SAFU%b&1v8&;nCLiH;_X&4_?e!{{P
zmY-nZ1Tzn24=g@l@dXPfSh|C$gXJSw{(<=e7S2#-Fu>vsW<M+(Y@rHZ`50DC$w0+n
z;R~ymVC5#P{(+@0SosKZC#?K|rEgex!o*?aCM?~++zl&NVCf20uE6R^n7?4*0t*M2
z`=g-lhLx`{ahN|spz<(#VC4j?UVym^R&K%gFn_`H!@>z>E-XG_{)5FgtUQHU%K&q~
z2~+@PKFr-Pcfibp#S^UjgoPt4-N5n*JJdW_K7*-)<qKFkgryVMI0?+1Fn7T6AuQj(
z+EuXdf|&=4Z&>`n(l4w$hlK;wSq!lB1hW@bp25t6)k`q{z}yK-7cl?8$`@Gq1}k4+
z;Q{jxtp0%IPguH#m7lQig5@7reubF}OK-6H9Ht-EeuC8luyBBt+pzouGY3|F!NL(1
zPOx+bOQ$e@!`uVQSFrei$-~kGtbBmE17;p9++q0@RxZKHF_=GL;R}loSbGN6ZiI=$
z@;NMh!16P!UWCO5tR95bKd^9tl~=HG0~VjK_<`j!n7d%@2UvRw7OyaSVC`{OeuUWz
zOGmJD0&5?_!UdLZVdlWn8?1c{OGhyCVBrYU53BEB>R{;rR<FRyAy|D1OE0i|2g~2E
z`Ua*C7LKrRhUIry{RWF~m^m<a!O}4--ND*hFn7WH2MY&Sy#`BfFnL(`!rTSR_po?^
zxeJ!gVC@%Jc)-eG7!51eVD5nBA6R~Z^}}HK9~R!QbOlTQFg`4tVBrAE2e5h=RzJYp
z4+{^N`7nRN+yQexEZxEU4@-Zrat`J$So(nZ3+8^9JgmHh#Sg4LfVmHr{$cGkSU!Z6
z6EO2(=>TRvEIeWH3d^ss^bHFiSh|OmvoQa{${m=$VdW1jJ?KI;z{){bc>*gpVE%%o
z7g#w8b2qG>f`v0I-eB&8wbx+w!P=j&d;!b9u<(HS6IM>Z%!QRBuy}*Dhhgeq`3hz)
zEZxEKBP<+Y<rmc346yiw<s(=<4NFI`d=HBsn0c^vE6hAty9!qB!NL>fURe0S%5hjd
z2rHLi`3n|5uyP;Po`=N?%zd!*4$GggbPFpNVCfoG&cW2d+z&Gs7N4+o7A)Pv@-?g+
zgP99UZ?N<PD^FnQ0Ol`P`h?{}Sh&F44{MLX$~RcPf%zXM4{ImE`~wSjSosK(hqd!z
z{Q_9NgUQ3<2^O!g`VAHzuzC(=Kg|EI`UjRTVdlW{1uVV8+GVix4l^H?PGRX6mM>xP
z2MaG4AC@0r;RSOKEFZwqE36!anGcIcSopxwA1q(N;tgg#EM3CV0WADr<p?a@z}j^%
zf5GxEEWg3ZWmtU(%fGO2hou{sIk5Bxvj<iVz``3=@4(_0mTqDGhP6*%G%S2z=D^|y
z7S1sJuzU>5_po#eOE0kSg5^h8yujM`Fn_`737C6f`2ZFVFnzG}4AT!QKVa%%?t-}=
z)-Hj?2fF)U`4HwWSop)rRaiX0?1kk&SiXdni?DJ8mOo(rh1Kh@c!cFYm^m<gFn7Yr
zU0D8rxeHc5!@>dPUl<=24lsLR@c~l@%b&1v0oKlg)%UP;1Peb{xWK|0CJ!seVBrGG
zkFfLwvj-MFF#p2p4Osqyg)^+Z4ofF6|G?@`Sp5#mCouQG(jzRr!s-cF{)2@(%>OWd
z!{lM<4wgS*;Sb9nuzU|IA7JeZSiXamx3Kt!r59Ma1#=%P++hBNmA5b&W<HFDl~1sA
z3iB5%eqr$niyv5j5axbZ`vF!Sz{(4lI#_)Kb1$qt1&cqJzhLnT%SSMG!Q2f?pD_Kf
z^bNBYmj7Yp5iGo6=>?YlVDSkHZ&?0^r8Ag*SUkhZD_D69GY^)ZV0>6P3(JqN^Z@fW
zEIq=)0~W8a`UjQ{Vfh0V4lsAX`cW|d!`uyvcUXMF+yhG|u=)%ZkFa(wEc{^pf`tc6
z9A+M@9D{`?EM39e1uIWr_QBFK%>S@>gw<cLd<N@J!|F?zeX#ZqtQ`l-&#-a==3ZDm
z2rI8(=^j?E!R&#lhoxs&c?Sz8SiHf?1z0+Sr8gK23x8O+!^$04yus2v%$=}yBFrDK
z^aoQ1YZt=80TxcM{0GZlu=D_{7h(AnmVRLBVd)nZPcZvn<qgc=uzmzAJ;Kr%tlb0)
zZ&?0>m0vJE%zRkBgQag+_`&=QiwBthVd)E29>d%PYq!D7hs6^tpTPV9OP4TvVdXt6
z-NEW1Sh));KVj(*RxZQ*53A>4@d!(IuyG7nc){`utUQC!Fn7SpCzyG#aDasqEM3Fm
z6P8Y4?t_H~tX~5QKbSl$oM7n%)-HghLs<C2!U?7y7Je{uVC_X%IKk?7n15mM4ht_>
zI~Nu|u=EKtALcKZc`$dt%0E~>fR&Rl|HI-J)}Mft3o!d&;RZ|JuyBU?1J?e7xeFFP
zuyhJ@4=kQw@eAX_%6C|N!SWL<AHnJ~SUC%eFIafP`~~wTtQ>)r6R`9JOBXQz!`uZ+
i=dk#Mm18h-VDSb^53uwBONX#}4p#2M)WO0NrVaq0r16OW

literal 0
HcmV?d00001

diff --git a/xbob/boosting/tests/test_loss_exp.py b/xbob/boosting/tests/test_loss_exp.py
index 9eba786..420f512 100644
--- a/xbob/boosting/tests/test_loss_exp.py
+++ b/xbob/boosting/tests/test_loss_exp.py
@@ -4,43 +4,86 @@ import xbob.boosting
 import numpy
 
 class TestExpLossFunctions(unittest.TestCase):
-    """Perform test on loss function """
+    """Perform test on exponential loss function """
 
-    def test_exp_loss(self):
+    def test_exp_positive_target(self):
 
-        exp_ = xbob.boosting.core.losses.ExpLossFunction()
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
         target = 1
-        score = numpy.random.rand()
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([1, 1, 1,1,1, 1,1,1,1,1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
         
         # check the loss values
-        l1 = exp_.update_loss(target, score) 
-        val1 = numpy.exp(- target * score)
-        self.assertEqual(l1,val1)
+        loss_value = loss_function.update_loss(target, score) 
+        val = numpy.exp(- target * score)
+        self.assertEqual(loss_value,val)
+        self.assertTrue(loss_value >= 0)
 
         # Check loss gradient
-        l2 = exp_.update_loss_grad( target, score)
+        loss_grad = loss_function.update_loss_grad( target, score)
+
+        temp = numpy.exp(-target * score)
+        val2 = -target * temp
+        self.assertEqual(loss_grad,val2)
+
+        # Check loss sum
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertEqual(val3, loss_sum_val)
+
+        # Check the gradient sum
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp
+        val4 = numpy.sum(grad * weak_scores,0)
+
+        self.assertEqual(val4, grad_sum_val)
+
+    def test_exp_negative_target(self):
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        target = -1
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([-1, -1, -1,-1,-1, -1,-1,-1,-1,-1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(target, score) 
+        val = numpy.exp(- target * score)
+        self.assertEqual(loss_value,val)
+        self.assertTrue(loss_value >= 0)
+
+        # Check loss gradient
+        loss_grad = loss_function.update_loss_grad( target, score)
+
         temp = numpy.exp(-target * score)
         val2 = -target * temp
-        self.assertEqual(l2,val2)
+        self.assertEqual(loss_grad,val2)
 
         # Check loss sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
-        val3 = sum(numpy.exp(-target * curr_scores))
-        self.assertEqual(val3, l3)
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertEqual(val3, loss_sum_val)
 
         # Check the gradient sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l4 = exp_.loss_grad_sum(x, target, prev_scores, weak_scores)
-        temp = numpy.exp(-target * curr_scores)
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
         grad = -target * temp
         val4 = numpy.sum(grad * weak_scores,0)
-        self.assertEqual(val4, l4)
+
+        self.assertEqual(val4, grad_sum_val)
             
    
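Because loss_sum and loss_grad_sum now take named arguments instead of *args, the same calls used in the tests above also work directly in an interpreter; a compact sketch with made-up score vectors:

    import numpy
    import xbob.boosting

    loss_function = xbob.boosting.core.losses.ExpLossFunction()

    alpha       = 0.5
    targets     = numpy.array([ 1.0, -1.0,  1.0])
    prev_scores = numpy.array([ 0.2,  0.1, -0.3])   # scores accumulated over previous rounds
    weak_scores = numpy.array([ 0.5, -0.4,  0.7])   # scores of the current weak learner

    # sum over the samples of exp(-target * (prev + alpha * weak))
    sum_loss = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
    # derivative of that sum with respect to alpha, used by the line search
    sum_grad = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
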
diff --git a/xbob/boosting/tests/test_loss_exp_multivariate.py b/xbob/boosting/tests/test_loss_exp_multivariate.py
new file mode 100644
index 0000000..9e97487
--- /dev/null
+++ b/xbob/boosting/tests/test_loss_exp_multivariate.py
@@ -0,0 +1,83 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestExpLossMulti(unittest.TestCase):
+
+    """ Test the loss function using multivariate data  """
+
+    def test_exp_multivariate_dimensions(self):
+
+        """ Check the loss function values for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss dimensions
+        loss_value = loss_function.update_loss(targets, score) 
+        self.assertTrue(loss_value.shape[0] == num_samples)
+        self.assertTrue(loss_value.shape[1] == num_dimension)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        self.assertTrue(grad_value.shape[0] == num_samples)
+        self.assertTrue(grad_value.shape[1] == num_dimension)
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(loss_sum.shape[0] == num_samples)
+
+
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(grad_sum.shape[0] == num_samples)
+
+
+
+    def test_exp_multivariate(self):
+
+        loss_function = xbob.boosting.core.losses.ExpLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(targets, score) 
+        val1 = numpy.exp(- targets * score)
+        self.assertTrue((loss_value == val1).all())
+
+        # Check loss gradient
+        loss_grad = loss_function.update_loss_grad( targets, score)
+
+        temp = numpy.exp(-targets * score)
+        val2 = -targets * temp
+        self.assertTrue((loss_grad == val2).all())
+
+        # Check loss sum
+        loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores
+        val3 = sum(numpy.exp(-targets * curr_scores))
+        self.assertTrue((val3 == loss_sum_val).all())
+
+        # Check the gradient sum
+        grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+
+        curr_scores = prev_scores + alpha*weak_scores        
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp
+        val4 = numpy.sum(grad * weak_scores,0)
+
+        self.assertTrue((val4 == grad_sum_val).all())
diff --git a/xbob/boosting/tests/test_loss_log.py b/xbob/boosting/tests/test_loss_log.py
index b334def..ec1cf28 100644
--- a/xbob/boosting/tests/test_loss_log.py
+++ b/xbob/boosting/tests/test_loss_log.py
@@ -6,42 +6,81 @@ import numpy
 class TestLogLossFunctions(unittest.TestCase):
     """Perform test on loss function """
             
-    def test_log_loss(self):
+    def test_log_positive_target(self):
+        """ Check the loss function value for positive targets """
 
-        exp_ = xbob.boosting.core.losses.LogLossFunction()
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
         target = 1
-        score = numpy.random.rand()
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([1, 1, 1,1,1, 1,1,1,1,1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
         
         # check the loss values
-        l1 = exp_.update_loss(target, score) 
+        loss_value = loss_function.update_loss(target, score) 
         val1 = numpy.log(1 + numpy.exp(- target * score))
-        self.assertEqual(l1,val1)
+        self.assertEqual(loss_value,val1)
 
         # Check loss gradient
-        l2 = exp_.update_loss_grad( target, score)
+        grad_value = loss_function.update_loss_grad( target, score)
         temp = numpy.exp(-target * score)
         val2 = -(target * temp* (1/(1 + temp)) )
-        self.assertEqual(l2,val2)
+        self.assertEqual(grad_value,val2)
 
         # Check loss sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_sum(x, target, prev_scores, weak_scores)
-        val3 = sum(numpy.log(1 + numpy.exp(-target * curr_scores)))
-        self.assertEqual(val3, l3)
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertEqual(val3, loss_sum)
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = numpy.sum(grad * weak_scores)
+        self.assertEqual(val4, grad_sum)
+
+    def test_log_negative_target(self):
+
+        """ Check the loss function value for negative targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        target = -1
+        score = 0.34
+        alpha = 0.5
+        targets = numpy.array([-1, -1, -1,-1,-1, -1,-1,-1,-1,-1])
+        weak_scores = numpy.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'float64')
+        prev_scores = numpy.array([0.53, 0.23, 0.63, 0.12, 1.34, 5.76, 3.21, 2.11, 1.21, 5.36], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(target, score) 
+        val1 = numpy.log(1 + numpy.exp(- target * score))
+        self.assertEqual(loss_value,val1)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( target, score)
+        temp = numpy.exp(-target * score)
+        val2 = -(target * temp* (1/(1 + temp)) )
+        self.assertEqual(grad_value,val2)
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertEqual(val3, loss_sum)
 
         # Check the gradient sum
-        weak_scores = numpy.random.rand(10)
-        prev_scores = numpy.random.rand(10)
-        x = numpy.random.rand(1)
-        curr_scores = prev_scores + x*weak_scores
-        l3 = exp_.loss_grad_sum(x, target, prev_scores, weak_scores)
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
         temp = numpy.exp(-target * curr_scores)
-        grad = -target * temp *(1/ (1 + temp))
-        val3 = numpy.sum(grad * weak_scores)
-        self.assertEqual(val3, l3)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = numpy.sum(grad * weak_scores)
+        self.assertEqual(val4, grad_sum)
+
 
              
 
diff --git a/xbob/boosting/tests/test_loss_log_multivariate.py b/xbob/boosting/tests/test_loss_log_multivariate.py
new file mode 100644
index 0000000..74fec89
--- /dev/null
+++ b/xbob/boosting/tests/test_loss_log_multivariate.py
@@ -0,0 +1,84 @@
+
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestLogLossMulti(unittest.TestCase):
+
+    """ Test the loss function using multivariate data  """
+
+    def test_log_multivariate_dimensions(self):
+
+        """ Check the loss function values for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        num_samples = 2
+        num_dimension = 2
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss dimensions
+        loss_value = loss_function.update_loss(targets, score) 
+        self.assertTrue(loss_value.shape[0] == num_samples)
+        self.assertTrue(loss_value.shape[1] == num_dimension)
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        self.assertTrue(grad_value.shape[0] == num_samples)
+        self.assertTrue(grad_value.shape[1] == num_dimension)
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(loss_sum.shape[0] == num_samples)
+
+
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        self.assertTrue(grad_sum.shape[0] == num_samples)
+
+
+
+    def test_log_multivariate(self):
+
+        """ Check the loss function values for multivariate targets """
+
+        loss_function = xbob.boosting.core.losses.LogLossFunction()
+        targets = numpy.array([[1, -1], [-1, 1]])
+        score = numpy.array([[0.5, 0.5], [0.5, 0.5]], 'float64')
+        alpha = 0.5
+        weak_scores = numpy.array([[0.2, 0.4], [0.5, 0.6]], 'float64')
+        prev_scores = numpy.array([[0.1, 0.2],[0.3, 0.4]], 'float64')
+        
+        # check the loss values
+        loss_value = loss_function.update_loss(targets, score) 
+        val1 = numpy.log(1 + numpy.exp(- targets * score))
+        self.assertTrue((loss_value == val1).all())
+
+        # Check loss gradient
+        grad_value = loss_function.update_loss_grad( targets, score)
+        temp = numpy.exp(-targets * score)
+        val2 = -(targets * temp* (1/(1 + temp)) )
+        self.assertTrue((grad_value == val2).all())
+
+        # Check loss sum
+        loss_sum = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        
+        val3 = sum(numpy.log(1 + numpy.exp(-targets * curr_scores)))
+        self.assertTrue((val3 == loss_sum).all())
+
+        # Check the gradient sum
+        grad_sum = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
+        curr_scores = prev_scores + alpha*weak_scores
+        temp = numpy.exp(-targets * curr_scores)
+        grad = -targets * temp *(1/ (1 + temp))
+        val4 = sum(grad * weak_scores)
+        self.assertTrue((val4 == grad_sum).all())
+
+
+
diff --git a/xbob/boosting/tests/test_trainer_lut.py b/xbob/boosting/tests/test_trainer_lut.py
new file mode 100644
index 0000000..3a4c519
--- /dev/null
+++ b/xbob/boosting/tests/test_trainer_lut.py
@@ -0,0 +1,41 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+import bob
+
+def get_single_feature():
+    num_feature = 100
+
+
+class TestLutTrainer(unittest.TestCase):
+    """Class to test the LUT trainer """
+
+    def test_hist_grad(self):
+
+        num_feature = 100
+        range_feature = 10
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature,'indep', 1)
+
+        features = numpy.array([2, 8, 4, 7, 1, 0, 6, 3, 6, 1, 7, 0, 6, 8, 3, 6, 8, 2, 6, 9, 4, 6,
+                                2, 0, 4, 9, 7, 4, 1, 3, 9, 9, 3, 3, 5, 2, 4, 0, 1, 3, 8, 8, 6, 7,
+                                3, 0, 6, 7, 4, 0, 6, 4, 1, 2, 4, 2, 1, 9, 3, 5, 5, 8, 8, 4, 7, 4,
+                                1, 5, 1, 8, 5, 4, 2, 4, 5, 3, 0, 0, 6, 2, 4, 7, 1, 4, 1, 4, 4, 4,
+                                1, 4, 7, 5, 6, 9, 7, 5, 3, 3, 6, 6])
+
+        loss_grad = numpy.ones(100)
+
+        hist_value, bins = numpy.histogram(features,range(range_feature +1))
+        sum_grad = trainer.compute_grad_hist(loss_grad,features)
+        self.assertEqual(sum_grad.shape[0],range_feature)
+        self.assertTrue((sum_grad == hist_value).all())
+
+
+
+
+
+
+
+
+
+    
diff --git a/xbob/boosting/tests/test_trainer_stump.py b/xbob/boosting/tests/test_trainer_stump.py
new file mode 100644
index 0000000..c7ea593
--- /dev/null
+++ b/xbob/boosting/tests/test_trainer_stump.py
@@ -0,0 +1,217 @@
+import unittest
+import random
+import xbob.boosting
+import numpy
+
+class TestStumpTrainer(unittest.TestCase):
+    """Perform test on stump weak trainer"""
+
+    def test_stump_limits(self):
+        # test the stump trainer and check the basic limits on stump parameters
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+
+        n_samples = 4
+        dim = 5
+        x_train1 = rand_matrix + 4
+        x_train2 = rand_matrix - 4
+        x_train = numpy.vstack((x_train1, x_train2))
+        y_train = numpy.hstack((numpy.ones(n_samples),-numpy.ones(n_samples)))
+
+        scores = numpy.zeros(2*n_samples)
+        t = y_train*scores
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertTrue(stump.threshold <= numpy.max(x_train))
+        self.assertTrue(stump.threshold >= numpy.min(x_train))
+        self.assertTrue(stump.selected_indices >= 0)
+        self.assertTrue(stump.selected_indices < dim)
+
+
+
+    def test_stump_index(self):
+        # test the stump trainer if the correct feature indices are selected
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+
+       
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        delta = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] - delta
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertEqual(stump.selected_indices, selected_index)
+
+    def test_stump_polarity(self):
+        # test the stump trainer if the polarity is reversed with change in targets sign
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        delta = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] - delta
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        t = y_train*scores
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        self.assertEqual(stump.selected_indices, selected_index)
+
+        polarity = stump.polarity
+
+        # test the check on polarity when the labels are reversed
+        y_train = - y_train
+        t = y_train*scores
+        loss = -y_train*(numpy.exp(y_train*scores))
+        
+        stump = trainer.compute_weak_trainer(x_train,loss)
+        polarity_rev = stump.polarity 
+        self.assertEqual(polarity, -polarity_rev)
+
+    def test_threshold(self):
+        # test to check the threshold value of the weak trainer
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        rand_matrix = numpy.array([[-1.57248569,  0.92857928,  0.97908357, -0.0758847 , -0.34067902],
+                                   [ 0.88562798,  1.82759883, -0.55953264,  0.82822718,  2.29955421],
+                                   [ 1.03220648,  0.20467357,  0.67769647,  0.57652722,  0.45538562],
+                                   [ 1.49901643,  1.34450249,  0.08667704,  0.33658217, -1.32629319]], 'float64')
+        num_samples = 4
+        dim = 5
+        selected_index = 2
+        x_train1 = rand_matrix + 0.1
+        x_train2 = rand_matrix - 0.1
+        delta1 = 4
+        delta2 = 2
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples,selected_index] = x_train[0:num_samples,selected_index] + delta1
+        x_train[num_samples:,selected_index] = x_train[num_samples:,selected_index] + delta2
+        y_train = numpy.hstack((numpy.ones(num_samples),-numpy.ones(num_samples)))
+
+        scores = numpy.zeros(2*num_samples)
+        loss = -y_train*(numpy.exp(y_train*scores))
+
+        stump = trainer.compute_weak_trainer(x_train,loss)
+
+        # the learned threshold should lie between the two class offsets
+
+        self.assertTrue(stump.threshold > delta2)
+        self.assertTrue(stump.threshold < delta1)
+
+
+    def test_compute_thresh(self):
+        # Test the threshold for a single feature 
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 1                          
+        # The value of the feature for class 2   
+        fea2 = 10
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1,fea1,fea1,fea1,fea2,fea2,fea2,fea2,fea2])
+        label = numpy.array([1,1,1,1,1,-1, -1, -1,-1,-1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        threshold = float(fea1 + fea2)/2
+        self.assertEqual(trained_threshold, threshold)
+
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+    def test_compute_thresh_rearrange(self):
+        # test the threshold for single feature using a different permutation
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 1                          
+        # The value of the feature for class 2   
+        fea2 = 10
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1, fea2, fea1, fea2, fea1, fea2, fea1, fea2, fea2])
+        label =     numpy.array([ 1,    1,   -1,   1,    -1,    1,   -1,   1,    -1,  -1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        threshold = float(fea1 + fea2)/2
+        self.assertEqual(trained_threshold, threshold)
+
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+    def test_compute_polarity(self):
+        # test the polarity for a single feature when the class feature values are swapped
+        trainer = xbob.boosting.core.trainers.StumpTrainer()
+
+        num_samples = 10
+        # The value of feature for class 1
+        fea1 = 10                          
+        # The value of the feature for class 2   
+        fea2 = 1
+        
+        # feature vector for 10 samples
+        features = numpy.array([fea1, fea1, fea2, fea1, fea2, fea1, fea2, fea1, fea2, fea2])
+        label =     numpy.array([ 1,    1,   -1,   1,    -1,    1,   -1,   1,    -1,  -1])
+
+        scores = numpy.zeros(num_samples)
+        loss = -label*(numpy.exp(label*scores))
+
+        trained_polarity, trained_threshold, trained_gain = trainer.compute_thresh(features, loss)
+
+        
+        if(fea1 < fea2):
+            polarity = 1
+        else:
+            polarity = -1
+
+        self.assertEqual(trained_polarity, polarity)
+
+
-- 
GitLab