diff --git a/doc/rcfs.pdf b/doc/rcfs.pdf
index f4f80d69003379d7d30abc81bf4db4dfb3f63ffd..dfc85ea92f1683a19f68595fbd48ea552398ce2a 100644
Binary files a/doc/rcfs.pdf and b/doc/rcfs.pdf differ
diff --git a/doc/rcfs.tex b/doc/rcfs.tex
index d40dc55bc54c8d6b496edc0d1671f4a22e8c5dda..e1f06259e99040adfe0ca70225e6e7eb443b1e9f 100644
--- a/doc/rcfs.tex
+++ b/doc/rcfs.tex
@@ -150,20 +150,32 @@ Similarly, the solution of a cost function composed of several quadratic terms
 	\bm{\hat{\mu}} = \arg\min_{\bm{x}} \sum_{k=1}^K {(\bm{x} - \bm{\mu}_k)}^\trsp \bm{W}_k (\bm{x} - \bm{\mu}_k)
 \label{eq:quad_costs_multiple}
 \end{equation}
-can be seen as a product of Gaussians $\prod_{k=1}^K\mathcal{N}(\bm{\mu}_k,\bm{W}_k^{-1})$, with centers $\bm{\mu}_k$ and covariance matrices $\bm{W}_k^{-1}$. The Gaussian $\mathcal{N}(\bm{\hat{\mu}},\bm{\hat{W}}^{-1})$ resulting from this product has parameters
+can be seen as a product of Gaussians $\prod_{k=1}^K\mathcal{N}(\bm{\mu}_k,\bm{W}_k^{-1})$, with centers $\bm{\mu}_k$ and covariance matrices $\bm{\Sigma}_k=\bm{W}_k^{-1}$. The Gaussian $\mathcal{N}(\bm{\hat{\mu}},\bm{\hat{W}}^{-1})$ resulting from this product has parameters
 \begin{equation*}
 	\bm{\hat{\mu}} = {\left(\sum_{k=1}^K \bm{W}_k\right)}^{\!-1} \left(\sum_{k=1}^K\bm{W}_k \bm{\mu}_k \right), \quad 
 	\bm{\hat{W}} = \sum_{k=1}^K \bm{W}_k.
 \end{equation*}
 
-$\bm{\hat{\mu}}$ and $\bm{\hat{W}}$ are the same as the solution of~\eqref{eq:quad_costs_multiple} and its Hessian, respectively. Viewing the quadratic cost probabilistically can capture more information about the cost function in the form of the covariance matrix $\bm{\hat{W}}^{-1}$. 
+$\bm{\hat{\mu}}$ and $\bm{\hat{W}}$ are the same as the solution of~\eqref{eq:quad_costs_multiple} and its Hessian, respectively. Viewing the quadratic cost probabilistically can capture more information about the cost function in the form of the covariance matrix $\bm{\hat{\Sigma}}= \bm{\hat{W}}^{-1}$. 
 
-In case of Gaussians close to singularity, to be numerically more robust when computing the product of two Gaussians $\mathcal{N}(\bm{\mu}_1, \bm{\Sigma}_1)$ and $\mathcal{N}(\bm{\mu}_2, \bm{\Sigma}_2)$, the Gaussian $\mathcal{N}(\bm{\hat{\mu}}, \bm{\hat{\Sigma}})$ resulting from the product can be alternatively computed with 
+There are also computational alternatives in the case of Gaussians close to singularity. To be numerically more robust when computing the product of two Gaussians $\mathcal{N}(\bm{\mu}_1, \bm{\Sigma}_1)$ and $\mathcal{N}(\bm{\mu}_2, \bm{\Sigma}_2)$, the Gaussian $\mathcal{N}(\bm{\hat{\mu}}, \bm{\hat{\Sigma}})$ resulting from the product can indeed be computed with 
 \begin{equation*}
 	\bm{\hat{\mu}} = \bm{\Sigma}_2 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\mu}_1 + 
 	\bm{\Sigma}_1 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\mu}_2 , \quad 
-	\bm{\hat{\Sigma}} = \bm{\Sigma}_1 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\Sigma}_2.
+	\bm{\hat{\Sigma}} = \bm{\Sigma}_1 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\Sigma}_2.
 \end{equation*}
+By exploiting the fact that $\bm{\Sigma}_1\bm{\Sigma}_1^{-1}\!=\!\bm{I}$ and $\bm{\Sigma}_2^{-1}\bm{\Sigma}_2\!=\!\bm{I}$, we can observe that 
+\begin{align*}
+	\bm{W}_1+\bm{W}_2 &= \bm{\Sigma}_2^{-1}+\bm{\Sigma}_1^{-1}\\
+	&= \bm{\Sigma}_2^{-1}\bm{\Sigma}_1\bm{\Sigma}_1^{-1}+\bm{\Sigma}_2^{-1}\bm{\Sigma}_2\bm{\Sigma}_1^{-1}\\
+	&=\bm{\Sigma}_2^{-1} (\bm{\Sigma}_1+\bm{\Sigma}_2) \bm{\Sigma}_1^{-1},
+\end{align*}
+so that 
+\begin{align*}
+\bm{\hat{\Sigma}} &= {(\bm{W}_1+\bm{W}_2)}^{-1} = \bm{\Sigma}_1 {(\bm{\Sigma}_1+\bm{\Sigma}_2)}^{-1} \bm{\Sigma}_2,\\
+\bm{\hat{\mu}} &= {(\bm{W}_1+\bm{W}_2)}^{-1} (\bm{\Sigma}_1^{-1}\bm{\mu}_1+\bm{\Sigma}_2^{-1}\bm{\mu}_2) 
+= \bm{\Sigma}_2 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\mu}_1 + \bm{\Sigma}_1 {\left(\bm{\Sigma}_1+\bm{\Sigma}_2\right)}^{-1} \bm{\mu}_2.
+\end{align*}
 
 \begin{figure}
 \centering
diff --git a/python/IK_manipulator.py b/python/IK_manipulator.py
index 7e955be7467228df7964c778af9ec52fb4a08107..46710ba811189ed21e0adabd41d8693af4a8ded3 100644
--- a/python/IK_manipulator.py
+++ b/python/IK_manipulator.py
@@ -71,9 +71,9 @@ plt.scatter(fh[0], fh[1], color='r', marker='.', s=10**2) #Plot target
 for t in range(param.nbData):
 	f = fkin(x, param) # Forward kinematics (for end-effector)
 	J = Jkin(x, param) # Jacobian (for end-effector)
-#	x += np.linalg.pinv(J) @ (fh - f) * 10 * param.dt # Update state 
-	x += np.linalg.pinv(J) @ logmap(fh, f) * 10 * param.dt # Update state 
-	
+	u = np.linalg.pinv(J) @ logmap(fh, f) * 0.1 / param.dt # Velocity command (u=delta_x/dt)
+	x += u * param.dt # Update state 
+
 	f_rob = fkin0(x, param) # Forward kinematics (for all articulations, including end-effector)
 	plt.plot(f_rob[0,:], f_rob[1,:], color=str(1-t/param.nbData), linewidth=2) # Plot robot