Commit c8005632 authored by Manuel Günther
Browse files

Removed a skipped test.

parent a0d806ee
......@@ -40,44 +40,3 @@ class TestJesorskyLoss(unittest.TestCase):
self.assertTrue(grad_sum.shape[0] == num_outputs)
@unittest.skip("Implement me!")
def test02_negative_target(self):
loss_function = bob.learn.boosting.JesorskyLoss()
num_samples = 2
num_outputs = 4
targets = numpy.array([[10, 10, 10, 30], [12, 11, 13, 29]])
score = numpy.array([[8, 9, 7, 34], [11, 6, 16, 26]], 'float64')
alpha = 0.5
weak_scores = numpy.array([[0.2, 0.4, 0.5, 0.6], [0.5, 0.5, 0.5, 0.5]], 'float64')
prev_scores = numpy.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.5, 0.5, 0.5]], 'float64')
# TODO: implement this test properly
# check the loss values
loss_value = loss_function.loss(targets, score)
val1 = numpy.exp(- targets * score)
self.assertTrue((loss_value == val1).all())
# Check loss gradient
loss_grad = loss_function.loss_gradient( targets, score)
temp = numpy.exp(-targets * score)
val2 = -targets * temp
self.assertTrue((loss_grad == val2).all())
# Check loss sum
loss_sum_val = loss_function.loss_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
val3 = sum(numpy.exp(-targets * curr_scores))
self.assertTrue((val3 == loss_sum_val).all())
# Check the gradient sum
grad_sum_val = loss_function.loss_grad_sum(alpha, targets, prev_scores, weak_scores)
curr_scores = prev_scores + alpha*weak_scores
temp = numpy.exp(-targets * curr_scores)
grad = -targets * temp
val4 = numpy.sum(grad * weak_scores,0)
self.assertTrue((val4 == grad_sum_val).all())
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment