Commit 43e9f4e3 authored by Emmanuel PIGNAT's avatar Emmanuel PIGNAT
Browse files

fixing in notebooks

parent be3d5713
%% Cell type:code id: tags:
``` python
import numpy as np
import matplotlib.pyplot as plt
import pbdlib as pbd
import os
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
%% Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
%% Cell type:markdown id: tags:
### Read data from file
%% Cell type:code id: tags:
``` python
filename = os.path.dirname(pbd.__file__) + '/data/gui/' + 'test_conditioning_002.npy'
# This .npy stores a pickled dict wrapped in a 0-d object array; since
# NumPy 1.16.3 loading it requires allow_pickle=True, and [()] unwraps
# the 0-d array back into the dict.
data = np.load(filename, allow_pickle=True)[()]
# Concatenate all recorded segments and subsample every 3rd point.
x = np.concatenate(data['x'], axis=0)[::3]
```
%% Cell type:code id: tags:
``` python
# Scatter the demonstration points with equal axis scaling.
plt.plot(x[:, 0], x[:, 1], 'kx')
# plt.axes() with no arguments is deprecated (and later removed) in
# Matplotlib; plt.gca() fetches the current axes without creating new ones.
plt.gca().set_aspect('equal')
```
%% Output
%% Cell type:markdown id: tags:
### Learning a joint distribution
%% Cell type:code id: tags:
``` python
# Fit a frequentist GMM (fixed 6 components) and a variational Bayesian GMM
# (up to 15 components, unused ones pruned by the sparse prior) on the 2-D data.
gmm = pbd.GMM(nb_dim=2, nb_states=6)
gmm.em(x, reg=0.1);  # EM with diagonal regularization 0.1 on the covariances
# based on sklearn.mixture.BayesianGaussianMixture
bgmm = pbd.VBayesianGMM({'n_components':15, 'n_init':5, 'reg_covar': 0.1 ** 2,
'covariance_prior': 10. ** 2 * np.eye(2),'mean_precision_prior':1e-9})
bgmm.posterior(x);  # compute the variational posterior given the data
```
%% Cell type:code id: tags:
``` python
# Overlay both fitted models on the data: GMM in blue, Bayesian GMM in gold.
plt.plot(x[:, 0], x[:, 1], 'kx')
gmm.plot(color='steelblue')
bgmm.plot(color='gold')
# plt.axes() with no arguments is deprecated (and later removed) in
# Matplotlib; plt.gca() fetches the current axes without creating new ones.
plt.gca().set_aspect('equal')
```
%% Output
%% Cell type:code id: tags:
``` python
# Query inputs: 300 evenly spaced values over [-200, 200], shaped (300, 1).
x_in = np.linspace(-200, 200, 300)[:, None]
# Gaussian conditioning p(x_out | x_in): dimension 0 is the input slice,
# dimension 1 the output slice, for both the frequentist and Bayesian models.
mu, sigma = gmm.condition(x_in, slice(0, 1), slice(1, 2))
bmu, bsigma = bgmm.condition(x_in, slice(0, 1), slice(1, 2))
```
%% Cell type:code id: tags:
``` python
# Plot both conditional predictions with a +/- 1 std-dev band over the data.
plt.figure(figsize=(20, 5))
# NOTE(review): the original called gen.plot(...) here, but no `gen` object is
# defined anywhere in this notebook, so that line raised a NameError; removed.
gmm.plot(color='steelblue')
plt.plot(x_in, mu, label='predictive distribution - frequentist', color='steelblue')
plt.fill_between(x_in[:, 0],
                 mu[:, 0] - sigma[:, 0, 0]**0.5,
                 mu[:, 0] + sigma[:, 0, 0]**0.5,
                 alpha=0.3, color='steelblue')
plt.fill_between(x_in[:, 0],
                 bmu[:, 0] - bsigma[:, 0, 0]**0.5,
                 bmu[:, 0] + bsigma[:, 0, 0]**0.5,
                 alpha=0.3, color='gold')
plt.plot(x_in, bmu, label='posterior predictive distribution', color='gold')
plt.plot(x[:, 0], x[:, 1], 'kx')
plt.legend()
```
%% Output
<matplotlib.legend.Legend at 0x7fdb85579410>
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```
......
%% Cell type:markdown id: tags:
$
\newcommand{\Tau}{\mathcal{T}}
\newcommand{\bm}[1]{{\boldsymbol{#1}}}
\newcommand{\dt}[1]{{\frac{d#1}{dt}}}
%\newcommand{\bm}{\mathbf{#1}}
\newcommand{\trsp}{{\scriptscriptstyle\top}}$
%% Cell type:code id: tags:
``` python
import os
import numpy as np
import matplotlib.pyplot as plt
import pbdlib as pbd
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
%% Cell type:markdown id: tags:
# Hidden Markov Model and LQR
This is an example of learning a HMM over some trajectories demonstrations and reproducing it using optimal control.
More infos : http://calinon.ch/papers/Calinon-JIST2015.pdf, http://calinon.ch/papers/Calinon-HFR2016.pdf
%% Cell type:markdown id: tags:
## Learning Hidden Markov Model (HMM)
%% Cell type:code id: tags:
``` python
from scipy.io import loadmat # loading data from matlab
letter = 'C' # choose a letter in the alphabet
datapath = os.path.dirname(pbd.__file__) + '/data/2Dletters/'
data = loadmat(datapath + '%s.mat' % letter)
# data['demos'][0] holds one MATLAB struct per demonstration; .T presumably
# turns the stored (dim, T) arrays into (T, dim) trajectories — consistent
# with the p[:, 0], p[:, 1] indexing used by the plotting cells below.
demos_x = [d['pos'][0][0].T for d in data['demos'][0]] # Position data
demos_dx = [d['vel'][0][0].T for d in data['demos'][0]] # Velocity data
demos_xdx = [np.hstack([_x, _dx]) for _x ,_dx in zip(demos_x, demos_dx)] # Position-velocity
```
%% Cell type:code id: tags:
``` python
# Fit a 4-state HMM on the 4-D position-velocity demonstrations, then show
# the state Gaussians in position space, velocity space, and the transitions.
model = pbd.HMM(nb_states=4, nb_dim=4)
model.init_hmm_kbins(demos_xdx) # initializing model
# EM to train model
model.em(demos_xdx, reg=1e-3)
# plotting
fig, ax = plt.subplots(ncols=3)
fig.set_size_inches(12,3.5)
# position plotting
ax[0].set_title('pos')
for p in demos_x:
    ax[0].plot(p[:, 0], p[:, 1])
pbd.plot_gmm(model.mu, model.sigma, ax=ax[0], dim=[0, 1]);
# velocity plotting
ax[1].set_title('vel')
for p in demos_dx:
    ax[1].plot(p[:, 0], p[:, 1])
pbd.plot_gmm(model.mu, model.sigma, ax=ax[1], dim=[2, 3]);
# plotting transition matrix
# log scale (+1e-10 avoids log(0)) makes small transition probabilities visible
ax[2].set_title('transition')
ax[2].imshow(np.log(model.Trans+1e-10), interpolation='nearest', vmin=-5, cmap='viridis');
plt.tight_layout()
```
%% Output
EM converges
False
%% Cell type:markdown id: tags:
# Reproduction (LQR)
Using Product of Gaussian formulation with augmented transfer matrices see : http://calinon.ch/papers/Calinon-HFR2016.pdf
%% Cell type:markdown id: tags:
### Get sequence of states
%% Cell type:code id: tags:
``` python
demo_idx = 0  # which demonstration to reproduce
# Viterbi decoding: most likely HMM state sequence for the chosen demo.
sq = model.viterbi(demos_xdx[demo_idx])
plt.figure(figsize=(5, 1))
# plt.axis('off')
plt.plot(sq, lw=3);  # step plot of the active state over time
plt.xlabel('timestep');
```
%% Output
%% Cell type:markdown id: tags:
## Create and solve LQR
%% Cell type:code id: tags:
``` python
# Build a product-of-Gaussians LQR over the demo's horizon and solve it.
lqr = pbd.PoGLQR(nb_dim=2, dt=0.01, horizon=demos_xdx[demo_idx].shape[0])
# NOTE(review): the original had `lqr.mvn_xi = pbd.MVN(mu=, lmbda=)` here —
# a SyntaxError (empty keyword arguments) left over from editing, immediately
# overwritten by the line below; removed.
lqr.mvn_xi = model.concatenate_gaussian(sq)  # state cost: Gaussians along the Viterbi path
lqr.mvn_u = -4.  # scalar shorthand for an isotropic control cost — presumably a log-scale precision; confirm against pbd.PoGLQR
lqr.x0 = demos_xdx[demo_idx][0]  # initial state: first point of the demo
xi = lqr.seq_xi  # solve the LQR and retrieve the optimal state sequence
```
%% Cell type:markdown id: tags:
## Plotting reproduced trajectory (position and velocity)
%% Cell type:code id: tags:
``` python
# Plot the reproduced trajectory xi (bold blue) against the faded
# demonstrations and the HMM components, in position and velocity space.
fig, ax = plt.subplots(ncols=2)
fig.set_size_inches(8,3.5)
# position plotting
ax[0].set_title('position')
for p in demos_x:
    ax[0].plot(p[:, 0], p[:, 1], alpha=0.4)
ax[0].plot(xi[:, 0], xi[:, 1], 'b', lw=3)
pbd.plot_gmm(model.mu, model.sigma, ax=ax[0], dim=[0, 1]);
# velocity plotting
ax[1].set_title('velocity')
for p in demos_dx:
    ax[1].plot(p[:, 0], p[:, 1], alpha=0.4)
ax[1].plot(xi[:, 2], xi[:, 3], 'b', lw=3, label='repro')
plt.legend()
pbd.plot_gmm(model.mu, model.sigma, ax=ax[1], dim=[2, 3]);
```
%% Output
%% Cell type:code id: tags:
``` python
```
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment