Commit f20bd685
Authored 11 years ago by Rakesh MEHTA

Trainer tests added

Parent: 27d8ccae
Showing 3 changed files with 79 additions and 28 deletions:

  xbob/boosting/core/trainers.py           +26 −25
  xbob/boosting/tests/datafile.hdf5         +0  −0
  xbob/boosting/tests/test_trainer_lut.py  +53  −3
xbob/boosting/core/trainers.py  +26 −25

@@ -181,7 +181,7 @@ class LutMachine():
-    def get_weak_scores(self, fset):
+    def get_weak_scores(self, features):
         """
-        Function computes classification results according to the LUT machine
+        Function classifies the features based on a single LUT machine.
@@ -193,13 +193,13 @@ class LutMachine():
         weak_scores: The classification scores of the features based on current weak classifier
         """
         # Initialize
-        num_samp = len(fset)
+        num_samp = len(features)
         num_outputs = len(self.luts[0])
         weak_scores = numpy.zeros([num_samp, num_outputs])
         # Compute weak scores
-        for oi in range(num_outputs):
-            weak_scores[:,oi] = numpy.transpose(self.luts[fset[:,self.selected_indices[oi]],oi])
+        for output_index in range(num_outputs):
+            weak_scores[:,output_index] = numpy.transpose(self.luts[features[:,self.selected_indices[output_index]],output_index])
         return weak_scores
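An aside, not part of the commit: the weak classifier here is a lookup table addressed by one selected feature per output, so scoring reduces to an integer-indexed table read. A minimal sketch of the same idea in plain numpy (toy values and shapes chosen for illustration, not the project's API):

    import numpy

    luts = numpy.array([[1], [1], [-1], [-1], [1]])  # (num_entries, num_outputs)
    selected_indices = numpy.array([2])              # feature column used by output 0
    features = numpy.array([[7, 3, 0],
                            [1, 0, 4]])              # (num_samples, num_features), integer-valued

    # Score of each sample = LUT entry addressed by its selected feature value.
    weak_scores = luts[features[:, selected_indices[0]], 0]
    print(weak_scores)  # [1 1], i.e. luts[0] and luts[4]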
@@ -233,9 +233,7 @@ class LutTrainer():
         """
         self.num_entries = num_entries
         self.num_outputs = num_outputs
-        self.luts = numpy.ones((num_entries, num_outputs), dtype=numpy.int)
         self.selection_type = selection_type
-        self.selected_indices = numpy.zeros([num_outputs,1],'int16')
@@ -261,8 +259,8 @@ class LutTrainer():
         """
         # Initializations
-        num_outputs = loss_grad.shape[1]
-        fea_grad = numpy.zeros([self.num_entries, num_outputs])
+        # num_outputs = loss_grad.shape[1]
+        fea_grad = numpy.zeros([self.num_entries, self.num_outputs])
+        lut_machine = LutMachine(self.num_outputs, self.num_entries)
         # Compute the sum of the gradient based on the feature values or the loss associated with each
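Reading the two hunks together: the removals in the constructor above and the new LutMachine(...) line here appear to be one refactor. The LUT array and the selected feature indices now live on the LutMachine that compute_weak_trainer builds and returns, rather than on the trainer itself.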
@@ -270,6 +268,7 @@ class LutTrainer():
         sum_loss = self.compute_grad_sum(loss_grad, fea)
         # Select the most discriminative index (or indices) for classification which minimizes the loss
+        # and compute the sum of gradient for that index
@@ -280,10 +279,10 @@ class LutTrainer():
             selected_indices = [numpy.argmin(col) for col in numpy.transpose(sum_loss)]
-            for oi in range(num_outputs):
-                curr_id = sum_loss[:,oi].argmin()
-                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi], fea[:,curr_id])
-                lut_machine.selected_indices[oi] = curr_id
+            for output_index in range(self.num_outputs):
+                curr_id = sum_loss[:,output_index].argmin()
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index], fea[:,curr_id])
+                lut_machine.selected_indices[output_index] = curr_id
         elif self.selection_type == 'shared':
@@ -293,10 +292,11 @@ class LutTrainer():
             accum_loss = numpy.sum(sum_loss,1)
             selected_findex = accum_loss.argmin()
-            lut_machine.selected_indices = selected_findex*numpy.ones([num_outputs,1],'int16')
-            for oi in range(num_outputs):
-                fea_grad[:,oi] = self.compute_grad_hist(loss_grad[:,oi], fea[:,selected_findex])
+            lut_machine.selected_indices = selected_findex*numpy.ones([self.num_outputs,1],'int16')
+            for output_index in range(self.num_outputs):
+                fea_grad[:,output_index] = self.compute_grad_hist(loss_grad[:,output_index], fea[:,selected_findex])
         # Assign the values to LookUp Table
         lut_machine.luts[fea_grad <= 0.0] = -1
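An aside on the two branches, not part of the commit: sum_loss holds one (negative) score per candidate feature and output, so 'indep' minimizes each output's column independently while 'shared' minimizes the column sum and reuses a single feature for every output; the LUT entries are then set from the sign of fea_grad. A small sketch with made-up numbers:

    import numpy

    # Hypothetical sum_loss: 3 candidate features x 2 outputs (more negative = better).
    sum_loss = numpy.array([[-4.0, -1.0],
                            [-2.0, -3.5],
                            [-0.5, -0.2]])

    # 'indep': best feature per output column.
    print([col.argmin() for col in sum_loss.T])  # [0, 1]

    # 'shared': one feature minimizing the loss summed over outputs.
    print(numpy.sum(sum_loss, 1).argmin())       # 1  (row sums: -5.0, -5.5, -0.7)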
@@ -323,14 +323,14 @@ class LutTrainer():
         # initialize values
         num_fea = len(fea[0])
         num_samp = len(fea)
         num_outputs = len(loss_grad[0])
-        sum_loss = numpy.zeros([num_fea, num_outputs])
+        sum_loss = numpy.zeros([num_fea, self.num_outputs])
         # Compute the loss for each feature
-        for fi in range(num_fea):
-            for oi in range(num_outputs):
-                hist_grad = self.compute_grad_hist(loss_grad[:,oi], fea[:,fi])
-                sum_loss[fi,oi] = -sum(abs(hist_grad))
+        for feature_index in range(num_fea):
+            for output_index in range(self.num_outputs):
+                hist_grad = self.compute_grad_hist(loss_grad[:,output_index], fea[:,feature_index])
+                sum_loss[feature_index,output_index] = -sum(abs(hist_grad))
         return sum_loss
@@ -339,7 +339,7 @@ class LutTrainer():
-    def compute_grad_hist(self, loss_grado, fval):
+    def compute_grad_hist(self, loss_grado, features):
         """
-        The function computes the loss for a single feature.
+        Function computes sum of the loss gradient that have same feature values.
@@ -350,12 +350,13 @@ class LutTrainer():
         return: hist_grad: The sum of the loss gradient
         """
         # initialize the values
-        num_samp = len(fval)
+        num_samp = len(features)
         hist_grad = numpy.zeros([self.num_entries])
         # compute the sum of the gradient
-        for hi in range(self.num_entries):
-            hist_grad[hi] = sum(loss_grado[fval == hi])
+        for feature_value in range(self.num_entries):
+            hist_grad[feature_value] = sum(loss_grado[features == feature_value])
         return hist_grad
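An aside, not part of the commit: the loop in compute_grad_hist is a weighted histogram over integer feature values, which numpy can express in a single call. A sketch with toy values:

    import numpy

    num_entries = 4
    features = numpy.array([0, 2, 2, 3])              # integer feature value per sample
    loss_grado = numpy.array([0.5, -1.0, 0.25, 2.0])  # loss gradient per sample

    # Sum the gradients of all samples sharing each feature value.
    hist_grad = numpy.bincount(features, weights=loss_grado, minlength=num_entries)
    print(hist_grad)  # [ 0.5   0.   -0.75  2.  ]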
xbob/boosting/tests/datafile.hdf5  0 → 100644  (+0 −0)

File added (binary).
xbob/boosting/tests/test_trainer_lut.py  +53 −3

@@ -4,9 +4,6 @@ import xbob.boosting
 import numpy
 import bob
-def get_single_feature():
-    num_feature = 100
 class TestLutTrainer(unittest.TestCase):
     """Class to test the LUT trainer """
@@ -32,10 +29,63 @@ class TestLutTrainer(unittest.TestCase):
+    def test_lut_selected_index(self):
+
+        num_samples = 100
+        max_feature = 20
+        dimension_feature = 10
+        selected_index = 5
+        range_feature = max_feature
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature, 'indep', 1)
+
+        data_file = bob.io.File('xbob/boosting/tests/datafile.hdf5', 'r')
+        #features = bob.io.load('test_data.hdf5')
+        features = data_file.read()
+
+        x_train1 = numpy.copy(features)
+        x_train1[x_train1[:,selected_index] >= 10, selected_index] = 9
+        x_train2 = numpy.copy(features)
+        x_train2[x_train2[:,selected_index] < 10, selected_index] = 10
+        x_train = numpy.vstack((x_train1, x_train2))
+
+        y_train = numpy.vstack((numpy.ones([num_samples,1]), -numpy.ones([num_samples,1])))
+
+        scores = numpy.zeros([2*num_samples,1])
+        loss_grad = -y_train*(numpy.exp(y_train*scores))
+
+        machine = trainer.compute_weak_trainer(x_train, loss_grad)
+
+        self.assertTrue((machine.luts[0:9] == -1).all())  # The values of the LUT are negative of the classes sign
+        self.assertTrue((machine.luts[10:] == 1).all())
+
+    def test_lut_selected_index(self):
+
+        num_samples = 100
+        max_feature = 20
+        dimension_feature = 10
+        delta = 5
+        selected_index = 5
+        range_feature = max_feature + delta
+        trainer = xbob.boosting.core.trainers.LutTrainer(range_feature, 'indep', 1)
+
+        data_file = bob.io.File('xbob/boosting/tests/datafile.hdf5', 'r')
+        features = data_file.read()
+
+        x_train1 = numpy.copy(features)
+        x_train2 = numpy.copy(features)
+        x_train = numpy.vstack((x_train1, x_train2))
+        x_train[0:num_samples, selected_index] = x_train[0:num_samples, selected_index] + delta
+
+        y_train = numpy.vstack((numpy.ones([num_samples,1]), -numpy.ones([num_samples,1])))
+
+        scores = numpy.zeros([2*num_samples,1])
+        loss_grad = -y_train*(numpy.exp(y_train*scores))
+
+        machine = trainer.compute_weak_trainer(x_train, loss_grad)
+
+        self.assertEqual(machine.selected_indices[0], selected_index)
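A caveat worth noting about the added tests: both methods are named test_lut_selected_index, so the second def rebinds the class attribute and unittest collects only one of them; giving the first method a distinct name (say test_lut_values, a hypothetical rename) would let both run. The shadowing is easy to demonstrate:

    import unittest

    class Demo(unittest.TestCase):
        def test_a(self):        # shadowed: the next def rebinds the same name
            pass
        def test_a(self):
            self.assertTrue(True)

    # Only one test name is collected from the class.
    print(unittest.TestLoader().getTestCaseNames(Demo))  # ['test_a']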