Fixed GIL issue

parent d8eb5517
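
The bug: these bindings are entered with the GIL already held, so wrapping the long-running C++ `eStep` in `PyGILState_Ensure()`/`PyGILState_Release()` merely re-acquires the lock and keeps it held for the whole computation, serializing every other Python thread. Replacing that pair with `PyEval_SaveThread()`/`PyEval_RestoreThread()` releases the GIL around the call instead. A minimal sketch of the pattern (the function `heavy_compute` is a made-up example, not part of these bindings):

```cpp
#include <Python.h>

// Sketch only: release the GIL around a long-running computation that
// touches no Python objects, then re-acquire it before returning.
static PyObject* heavy_compute(PyObject* /*self*/, PyObject* /*args*/) {
  // The GIL is held on entry: argument parsing and PyObject access are safe.
  PyThreadState* state = PyEval_SaveThread();  // release the GIL
  // ... pure C++ work; no PyObject may be touched in this region ...
  PyEval_RestoreThread(state);                 // re-acquire the GIL
  Py_RETURN_NONE;                              // safe to build Python results again
}
```

As a rule of thumb: `PyGILState_Ensure` is for acquiring the GIL from a thread unknown to Python, while `PyEval_SaveThread` is the call for temporarily giving the GIL up from a thread that already holds it.
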
@@ -407,9 +407,9 @@ static PyObject* PyBobLearnEMKMeansTrainer_e_step(PyBobLearnEMKMeansTrainerObjec
     return 0;
   }
-  auto gstate = PyGILState_Ensure();
+  auto state = PyEval_SaveThread();
   self->cxx->eStep(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-  PyGILState_Release(gstate);
+  PyEval_RestoreThread(state);
   BOB_CATCH_MEMBER("cannot perform the e_step method", 0)

@@ -374,8 +374,9 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_e_step(PyBobLearnEMMAPGMMTrainerObjec
     return 0;
   }
+  auto state = PyEval_SaveThread();
   self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+  PyEval_RestoreThread(state);
   BOB_CATCH_MEMBER("cannot perform the e_step method", 0)

@@ -258,9 +258,9 @@ static PyObject* PyBobLearnEMMLGMMTrainer_e_step(PyBobLearnEMMLGMMTrainerObject*
     return 0;
   }
-  auto gstate = PyGILState_Ensure();
+  auto state = PyEval_SaveThread();
   self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-  PyGILState_Release(gstate);
+  PyEval_RestoreThread(state);
   BOB_CATCH_MEMBER("cannot perform the e_step method", 0)

@@ -124,7 +124,7 @@ def train(trainer, machine, data, max_iterations=50, convergence_threshold=None,
     # create trainers for each process
     trainers = [trainer.__class__(trainer) for p in range(n_processes)]
-    # no need to copy the machines
-    machines = [machine for p in range(n_processes)]
+    # create one copy of the machine for each process
+    machines = [machine.__class__(machine) for p in range(n_processes)]
     # call the parallel processes
     pool.map(_parallel_e_step, zip(trainers, machines, split_data))
     # update the trainer with the data of the other trainers
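
On the Python side, the parallel driver previously handed the same machine object to every worker. Each process now gets its own copy built through the class's copy constructor, mirroring the per-process trainers created just above; after `pool.map` returns, the per-process statistics are merged back into the main trainer (the "update the trainer" step).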