Commit c3fbee75 authored by Emmanuel PIGNAT

various commits

parent bdbf6757
......@@ -9,7 +9,7 @@ from .mvn import *
from .plot import *
from .pylqr import *
from .poglqr import PoGLQR
from .mtmm import MTMM, VBayesianGMM
from .mtmm import MTMM, VBayesianGMM, VMBayesianGMM
from .lqr import LQR
from .multi_pogmm import MultiPoGHSMM, MultiPoGMM
......
......@@ -282,11 +282,17 @@ def multi_variate_t(x, nu, mu, sigma=None, log=True, gmm=False, lmbda=None):
dist = np.einsum('...j,...j', dx, np.einsum('...jk,...j->...k', lmbda_, dx))
# (nb_timestep, )
lik = gamma((nu + p)/2) * np.linalg.det(lmbda_) ** 0.5/\
(gamma(nu/2) * nu ** (p/2) * np.pi ** (p/2) ) * \
(1 + 1/nu * dist) ** (-(nu+p)/2)
if not log:
lik = gamma((nu + p)/2) * np.linalg.det(lmbda_) ** 0.5/\
(gamma(nu/2) * nu ** (p/2) * np.pi ** (p/2) ) * \
(1 + 1/nu * dist) ** (-(nu+p)/2)
return lik
else:
log_lik = np.log(gamma((nu + p)/2)) + 0.5 * np.linalg.slogdet(lmbda_)[1] - \
(np.log(gamma(nu / 2)) + (p / 2) * np.log(nu) + (p / 2) * np.log(np.pi)) + \
((-(nu + p) / 2) * np.log(1 + 1 / nu * dist))
return np.log(lik) if log else lik
return log_lik
else:
raise NotImplementedError
......
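The new log branch above evaluates the multivariate Student-t density directly in log space (slogdet plus log-gamma terms) instead of taking the log of the density afterwards, which avoids overflowing gamma() and underflowing the power term for large nu or high-dimensional data. A minimal self-contained sketch of the same quantity, assuming a single shared precision matrix lmbda and using scipy.special.gammaln in place of the gamma() call used in the hunk:

import numpy as np
from scipy.special import gammaln  # log-gamma, avoids overflow of gamma() for large arguments

def student_t_logpdf(x, nu, mu, lmbda):
    """Log-density of a multivariate Student-t with dof nu, mean mu and precision lmbda."""
    p = mu.shape[-1]
    dx = x - mu                                          # (nb_timestep, p)
    # squared Mahalanobis distance under the precision matrix
    dist = np.einsum('...j,...j', dx, np.einsum('jk,...j->...k', lmbda, dx))
    return (gammaln((nu + p) / 2.) - gammaln(nu / 2.)
            + 0.5 * np.linalg.slogdet(lmbda)[1]
            - (p / 2.) * (np.log(nu) + np.log(np.pi))
            - ((nu + p) / 2.) * np.log1p(dist / nu))

For an x of shape (nb_timestep, p) this returns one log-likelihood per timestep, matching the shape comment in the hunk.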
......@@ -221,7 +221,7 @@ class GMM(Model):
self.priors = np.ones(self.nb_states) / self.nb_states
def em(self, data, reg=1e-8, maxiter=100, minstepsize=1e-5, diag=False, reg_finish=False,
kmeans_init=False, random_init=True, dep_mask=None, verbose=False):
kmeans_init=False, random_init=True, dep_mask=None, verbose=False, only_scikit=False):
"""
:param data: [np.array([nb_timesteps, nb_dim])]
......@@ -259,6 +259,7 @@ class GMM(Model):
else:
self.init_params_scikit(data)
if only_scikit: return
data = data.T
LL = np.zeros(nb_max_steps)
......
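The new only_scikit flag makes GMM.em return right after the scikit-learn based initialisation (init_params_scikit) instead of running the custom EM loop. A hedged usage sketch; the import alias, the constructor arguments and the init branch reached with these flags are assumptions, not taken from the diff:

import numpy as np
import pbdlib as pbd  # assumed import alias for this library

data = np.random.randn(200, 4)        # [nb_timesteps, nb_dim], dummy demonstrations
gmm = pbd.GMM(nb_states=5, nb_dim=4)  # assumed constructor signature
# initialise with scikit-learn only and skip the custom EM iterations
gmm.em(data, kmeans_init=False, random_init=False, only_scikit=True)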
#!/usr/bin/env python2.7
import numpy as np
import os
import matplotlib.pyplot as plt
......
......@@ -181,7 +181,6 @@ class HMM(GMM):
B, _ = self.obs_likelihood(demo, dep, marginal, sample_size)
# if table is not None:
# B *= table[:, [n]]
self._B = B
# forward variable alpha (rescaled)
......@@ -228,8 +227,24 @@ class HMM(GMM):
self.init_priors = np.ones(self.nb_states) / self.nb_states
self.Trans = np.ones((self.nb_states, self.nb_states))/self.nb_states
def init_loop(self, demos):
self.Trans = 0.98 * np.eye(self.nb_states)
for i in range(self.nb_states-1):
self.Trans[i, i + 1] = 0.02
self.Trans[-1, 0] = 0.02
data = np.concatenate(demos, axis=0)
_mu = np.mean(data, axis=0)
_cov = np.cov(data.T)
self.mu = np.array([_mu for i in range(self.nb_states)])
self.sigma = np.array([_cov for i in range(self.nb_states)])
self.init_priors = np.array([1.] + [0. for i in range(self.nb_states-1)])
def em(self, demos, dep=None, reg=1e-8, table=None, end_cov=False, cov_type='full', dep_mask=None,
reg_finish=None, left_to_right=False):
reg_finish=None, left_to_right=False, nb_max_steps=40, loop=False):
"""
:param demos: [list of np.array([nb_timestep, nb_dim])]
......@@ -254,8 +269,7 @@ class HMM(GMM):
if reg_finish is not None: end_cov = True
nb_min_steps = 5 # min num iterations
nb_max_steps = 50 # max iterations
nb_min_steps = 2 # min num iterations
max_diff_ll = 1e-4 # max log-likelihood increase
nb_samples = len(demos)
......@@ -272,10 +286,12 @@ class HMM(GMM):
self.reg = reg
# create regularization matrix
if left_to_right:
if left_to_right or loop:
mask = np.eye(self.Trans.shape[0])
for i in range(self.Trans.shape[0] - 1):
mask[i, i + 1] = 1.
if loop:
mask[-1, 0] = 1.
if dep_mask is not None:
self.sigma *= dep_mask
......@@ -319,7 +335,7 @@ class HMM(GMM):
self.Trans = np.sum(zeta, axis=2) / (np.sum(gamma_trk, axis=1) + realmin)
if left_to_right:
if left_to_right or loop:
self.Trans *= mask
self.Trans /= np.sum(self.Trans, axis=0, keepdims=True)
......
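For reference, the topology produced by the new init_loop and enforced by the loop mask is a cyclic left-to-right chain: strong self-transitions, a small probability of moving to the next state, and the last state wrapping back to the first. A standalone sketch of the same construction for a small model:

import numpy as np

nb_states = 4

# initial transition matrix, as built in HMM.init_loop
trans = 0.98 * np.eye(nb_states)
for i in range(nb_states - 1):
    trans[i, i + 1] = 0.02
trans[-1, 0] = 0.02

# mask applied to the re-estimated transitions during EM when loop=True
mask = np.eye(nb_states)
for i in range(nb_states - 1):
    mask[i, i + 1] = 1.
mask[-1, 0] = 1.

print(trans)
# [[0.98 0.02 0.   0.  ]
#  [0.   0.98 0.02 0.  ]
#  [0.   0.   0.98 0.02]
#  [0.02 0.   0.   0.98]]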
......@@ -164,6 +164,15 @@ class MTMM(GMM):
class VBayesianGMM(MTMM):
def __init__(self, sk_parameters, *args, **kwargs):
"""
self.model = tff.VBayesianGMM(
{'n_components':5, 'n_init':4, 'reg_covar': 0.006 ** 2,
'covariance_prior': 0.02 ** 2 * np.eye(12),'mean_precision_prior':1e-9})
:param sk_parameters:
:param args:
:param kwargs:
"""
MTMM.__init__(self, *args, **kwargs)
from sklearn import mixture
......@@ -197,7 +206,7 @@ class VBayesianGMM(MTMM):
_gmm.priors = self.priors
self._posterior_samples += [_gmm]
def posterior(self, data, dims=slice(0, 7)):
def posterior(self, data, dims=slice(0, 7), mean_scale=10., cov=None, dp=True):
self.nb_dim = data.shape[1]
......@@ -206,7 +215,6 @@ class VBayesianGMM(MTMM):
states = np.where(self._sk_model.weights_ > 5e-2)[0]
self.nb_states = states.shape[0]
# see [1] K. P. Murphy, 'Conjugate Bayesian analysis of the Gaussian distribution,' vol. 0, no. 7, 2007. par 9.4
# or [1] E. Fox, 'Bayesian nonparametric learning of complex dynamical phenomena,' 2009, p 55
self.priors = np.copy(self._sk_model.weights_[states])
......@@ -217,6 +225,23 @@ class VBayesianGMM(MTMM):
self.k[:, None, None] + 1) * self.nu[:, None, None] \
/ (self.k[:, None, None] * (self.nu[:, None, None] - self.nb_dim + 1))
# add new state, base measure TODO make not heuristic
if dp:
self.priors = np.concatenate([self.priors, 0.02 * np.ones((1,))], 0)
self.priors /= np.sum(self.priors)
self.mu = np.concatenate([self.mu, np.zeros((1, self.nb_dim))], axis=0)
if cov is None:
cov = mean_scale ** 2 * np.eye(self.nb_dim)
self.sigma = np.concatenate([self.sigma, cov[None]], axis=0)
self.k = np.concatenate([self.k, np.ones((1, ))], axis=0)
self.nu = np.concatenate([self.nu, np.ones((1, ))], axis=0)
self.nb_states = states.shape[0] + 1
def condition(self, *args, **kwargs):
if not kwargs.get('samples', False):
return MTMM.condition(self, *args, **kwargs)
......@@ -237,3 +262,53 @@ class VBayesianGMM(MTMM):
return mu, sigma
class VMBayesianGMM(VBayesianGMM):
def __init__(self, n, sk_parameters, *args, **kwargs):
"""
Multimodal posterior approximation using several independently trained VBayesianGMM models
self.model = tff.VMBayesianGMM(
{'n_components':5, 'n_init':4, 'reg_covar': 0.006 ** 2,
'covariance_prior': 0.02 ** 2 * np.eye(12),'mean_precision_prior':1e-9})
:param n: number of independently trained models
:param sk_parameters:
:param args:
:param kwargs:
"""
self.models = [VBayesianGMM(sk_parameters, *args, **kwargs) for i in range(n)]
self.n = n
self._training_data = None
def posterior(self, data, *args, **kwargs):
for model in self.models:
model.posterior(data, *args, **kwargs)
def condition(self, *args, **kwargs):
params = []
for model in self.models:
params += [model.condition(*args, **kwargs)]
# TODO check how to compute priors in a good way
params = list(zip(*params))  # list() so params[0]/params[1] also work under Python 3
mu, sigma = gaussian_moment_matching(np.array(params[0]),
np.array(params[1]))
return mu, sigma
@property
def nb_states(self):
return [model.nb_states for model in self.models]
def plot(self, *args, **kwargs):
import matplotlib
cmap = kwargs.pop('cmap', 'viridis')
colors = matplotlib.cm.get_cmap(cmap, self.n)
for i, model in enumerate(self.models):
color = colors(i)
kwargs['color'] = [color[i] for i in range(3)]
model.plot(*args, **kwargs)
\ No newline at end of file
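VMBayesianGMM trains n independent variational posteriors on the same data and merges their conditional predictions by moment matching (see condition above). A usage sketch under stated assumptions: the pbd import alias, the 12-dimensional dummy data matching the covariance_prior quoted in the docstring, and calling posterior with its defaults are assumptions, not part of the diff:

import numpy as np
import pbdlib as pbd  # assumed import alias; the docstring above uses "tff"

# four independently trained posteriors, reusing the parameters quoted in the docstring
model = pbd.VMBayesianGMM(
    4, {'n_components': 5, 'n_init': 4, 'reg_covar': 0.006 ** 2,
        'covariance_prior': 0.02 ** 2 * np.eye(12), 'mean_precision_prior': 1e-9})

data = np.random.randn(200, 12)   # [nb_samples, nb_dim], dummy demonstrations
model.posterior(data)             # fit each VBayesianGMM on the same data
print(model.nb_states)            # number of retained states in each of the 4 models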
......@@ -86,15 +86,15 @@ class MultiPoGMM(PoGMM):
obj_exp = [_e for _e in self.idx['exp'][obj] if _e != 'orientation']
assert all([exp in obj_exp for exp in exps]), \
"Some experts are not available for object %s" % id
# assert all([exp in obj_exp for exp in exps]), \
# "Some experts are not available for object %s" % id
if feat is None:
mus_ = [self.mus[(obj, exp)] for exp in exps]
lmbdas_ = [self.lmbdas[(obj, exp)] for exp in exps]
mus_ = [self.mus[(obj, exp)] for exp in exps if exp in obj_exp]
lmbdas_ = [self.lmbdas[(obj, exp)] for exp in exps if exp in obj_exp]
else:
mus_ = [self.mus[(obj, exp, feat)] for exp in exps]
lmbdas_ = [self.lmbdas[(obj, exp, feat)] for exp in exps]
mus_ = [self.mus[(obj, exp, feat)] for exp in exps if exp in obj_exp]
lmbdas_ = [self.lmbdas[(obj, exp, feat)] for exp in exps if exp in obj_exp]
mu = np.array(mus_).swapaxes(0, 1) # nb_states x nb_experts x nb_dim
lmbda = np.array(lmbdas_).swapaxes(0, 1)
......
......@@ -269,6 +269,36 @@ def plot_linear_system(K, b=None, name=None, nb_sub=10, ax0=None, xlim=[-1, 1],
return [strm]
def plot_function_map(f, nb_sub=10, ax0=None, xlim=[-1, 1], ylim=[-1, 1], opp=False):
"""
:param f: [function]
A function to plot that can take an array((N, nb_dim)) as input
:param nb_sub:
:param ax0:
:param xlim:
:param ylim:
:return:
"""
x = np.linspace(*xlim)
y = np.linspace(*ylim)
xx, yy = np.meshgrid(x, y)
# Y, X = np.mgrid[ylim[0]:ylim[1]:complex(nb_sub), xlim[0]:xlim[1]:complex(nb_sub)]
mesh_data = np.concatenate([np.atleast_2d(xx.ravel()), np.atleast_2d(yy.ravel())]).T
try:
zz = f(mesh_data)
except: # if function cannot take a vector as input
zz = np.array([f(_x) for _x in mesh_data])
z = zz.reshape(xx.shape)
CS = plt.contour(xx, yy, z, cmap='viridis')
plt.clabel(CS, inline=1, fontsize=10)
if opp: z = -z
plt.imshow(np.exp(z), interpolation='bilinear', origin='lower', extent=xlim + ylim,
alpha=0.5, cmap='viridis')
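plot_function_map evaluates f on a dense grid over xlim by ylim, draws labelled contour lines and overlays exp(f) as a heat map, falling back to a per-point loop when f cannot handle a batched input. A small usage sketch; the import path of the plotting module is an assumption:

import numpy as np
import matplotlib.pyplot as plt
from pbdlib.plot import plot_function_map  # assumed module path

def log_density(x):                        # x: array((N, 2))
    # unnormalised Gaussian log-density centred at the origin
    return -0.5 * np.sum(x ** 2, axis=-1)

plot_function_map(log_density, xlim=[-2, 2], ylim=[-2, 2])
plt.show()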
def plot_mixture_linear_system(model, mode='glob', nb_sub=20, gmm=True, min_alpha=0.,
cmap=plt.cm.jet, A=None,b=None, gmr=False, return_strm=False,
**kwargs):
......@@ -351,7 +381,7 @@ def plot_mixture_linear_system(model, mode='glob', nb_sub=20, gmm=True, min_alph
def plot_gmm(Mu, Sigma, dim=None, color=[1, 0, 0], alpha=0.5, linewidth=1, markersize=6,
ax=None, empty=False, edgecolor=None, edgealpha=None,
ax=None, empty=False, edgecolor=None, edgealpha=None, priors=None,
border=False, nb=1, swap=True, center=True):
''' This function displays the parameters of a Gaussian Mixture Model (GMM).
......@@ -393,6 +423,9 @@ def plot_gmm(Mu, Sigma, dim=None, color=[1, 0, 0], alpha=0.5, linewidth=1, marke
sl = np.ix_(dim, dim)
Sigma = Sigma[sl]
if priors is not None:
priors /= np.max(priors)
priors = np.clip(priors, 0.1, 1.)
nbDrawingSeg = 35;
t = np.linspace(-np.pi, np.pi, nbDrawingSeg);
......@@ -423,6 +456,8 @@ def plot_gmm(Mu, Sigma, dim=None, color=[1, 0, 0], alpha=0.5, linewidth=1, marke
if edgecolor is None:
edgecolor = c
if priors is not None: a *= priors[i]
polygon = plt.Polygon(points.transpose().tolist(), facecolor=c, alpha=a,
linewidth=linewidth, zorder=20, edgecolor=edgecolor)
......@@ -507,6 +542,74 @@ def plot_gaussian(mu, sigma, dim=None, color='r', alpha=0.5, lw=1, markersize=6,
return center, line
def plot_y_gaussian(x, mu, sigma, dim=0, alpha=1., alpha_fill=None, color='r', lw=1.):
"""
:param x: [n_timestep]
:param mu: [n_timestep, n_dim]
:param sigma: [n_timestep, n_dim, n_dim]
:param dim:
:return:
"""
if x.ndim == 2:
x = x[:, 0]
if alpha_fill is None:
alpha_fill = 0.4 * alpha
plt.plot(x, mu[:, dim], alpha=alpha, color=color)
plt.fill_between(x,
mu[:, dim] - sigma[:, dim, dim] ** 0.5,
mu[:, dim] + sigma[:, dim, dim] ** 0.5,
alpha=alpha_fill, color=color)
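plot_y_gaussian draws the mean of one output dimension over x together with a band of plus/minus one standard deviation. A small usage sketch; the import path is an assumption:

import numpy as np
import matplotlib.pyplot as plt
from pbdlib.plot import plot_y_gaussian  # assumed module path

x = np.linspace(0., 1., 100)                                       # time axis, [n_timestep]
mu = np.stack([np.sin(2 * np.pi * x), np.cos(2 * np.pi * x)], 1)   # [n_timestep, n_dim]
sigma = 0.05 ** 2 * np.tile(np.eye(2), (100, 1, 1))                # [n_timestep, n_dim, n_dim]

plot_y_gaussian(x, mu, sigma, dim=0, color='b')  # mean and +/- one std of dimension 0
plt.show()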
def plot_dynamic_system(f, nb_sub=10, ax=None, xlim=[-1, 1], ylim=[-1, 1], scale=0.01,
name=None, equal=False, **kwargs):
"""
Plot a dynamical system dx = f(x)
:param f: a function that takes as input x as [N,2] and return dx [N, 2]
:param nb_sub:
:param ax0:
:param xlim:
:param ylim:
:param scale:
:param kwargs:
:return:
"""
Y, X = np.mgrid[ylim[0]:ylim[1]:complex(nb_sub), xlim[0]:xlim[1]:complex(nb_sub)]
mesh_data = np.vstack([X.ravel(), Y.ravel()])
field = f(mesh_data.T).T
U = field[0]
V = field[1]
U = U.reshape(nb_sub, nb_sub)
V = V.reshape(nb_sub, nb_sub)
speed = np.sqrt(U * U + V * V)
if name is not None:
plt.suptitle(name)
if ax is not None:
strm = ax.streamplot(X, Y, U, V, linewidth=scale* speed,**kwargs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if equal:
ax.set_aspect('equal')
else:
strm = plt.streamplot(X, Y, U, V, linewidth=scale* speed, **kwargs)
plt.xlim(xlim)
plt.ylim(ylim)
if equal:
plt.axes().set_aspect('equal')
return [strm]
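plot_dynamic_system visualises a vector field dx = f(x) as a streamline plot whose line width scales with the local speed. A small usage sketch with stable linear dynamics; the import path is an assumption:

import numpy as np
import matplotlib.pyplot as plt
from pbdlib.plot import plot_dynamic_system  # assumed module path

A = np.array([[-1., -2.],
              [ 2., -1.]])   # spiral sink: trajectories converge to the origin

def f(x):                    # x: [N, 2] -> dx: [N, 2]
    return x.dot(A.T)

plot_dynamic_system(f, nb_sub=20, xlim=[-1, 1], ylim=[-1, 1], scale=0.5)
plt.show()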
def plot_trajdist(td, ix=0, iy=1, covScale=1, color=[1, 0, 0], alpha=0.1, linewidth=0.1):
'''Plot 2D representation of a trajectory distribution'''
......
import numpy as np
def gaussian_moment_matching(mus, sigmas, h):
def gaussian_moment_matching(mus, sigmas, h=None):
"""
:param mu: [np.array([nb_states, nb_timestep, nb_dim])]
......@@ -10,6 +10,10 @@ def gaussian_moment_matching(mus, sigmas, h):
:param h: [np.array([nb_timestep, nb_states])]
:return:
"""
if h is None:
h = np.ones((mus.shape[1], mus.shape[0]))/ mus.shape[0]
if h.ndim == 1:
h = h[None]
......
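With the new default, omitting h collapses the mixture with uniform weights. The computation is the standard moment matching of a Gaussian mixture: mu = sum_k h_k mu_k and sigma = sum_k h_k (sigma_k + (mu_k - mu)(mu_k - mu)^T). A pure-numpy sketch of that formula, written as a hedged re-implementation rather than the library code itself:

import numpy as np

def moment_match(mus, sigmas, h=None):
    """Collapse a Gaussian mixture into a single Gaussian per timestep.

    mus:    [nb_states, nb_timestep, nb_dim]
    sigmas: [nb_states, nb_timestep, nb_dim, nb_dim]
    h:      [nb_timestep, nb_states] weights, uniform over states if omitted
    """
    if h is None:
        h = np.ones((mus.shape[1], mus.shape[0])) / mus.shape[0]
    mu = np.einsum('ts,sti->ti', h, mus)                    # weighted mean
    dmu = mus - mu[None]                                    # deviation of each component mean
    sigma = np.einsum('ts,stij->tij', h, sigmas) \
            + np.einsum('ts,sti,stj->tij', h, dmu, dmu)     # within + between covariance
    return mu, sigma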
......@@ -100,7 +100,7 @@ def gu_pinv(A, rcond=1e-15):
return np.array([[np.linalg.pinv(A[i, j]) for j in range(J)] for i in range(I)])
def _create_relative_time(q, start=-1.):
def create_relative_time(q, start=-1.):
"""
:param q: [list of int]
List of state indicator.
......@@ -135,7 +135,7 @@ def align_trajectories_hsmm(data, nb_states=5):
qs = [model.viterbi(d) for d in data_vectorized]
time, sqs = zip(*[_create_relative_time(q) for q in qs])
time, sqs = zip(*[create_relative_time(q) for q in qs])
start_idx = [np.array((np.nonzero(np.diff(q))[0] + 1).tolist()) for q in qs]
......