Commit 790e3725 authored by Hakan GIRGIN

updating the library

parent d853a5a6
......@@ -10,11 +10,12 @@ from .plot import *
from .poglqr import PoGLQR, LQR, GMMLQR
from .mtmm import MTMM, VBayesianGMM, VMBayesianGMM, VBayesianHMM
from .dmp import DMP
from .vhmm import BayesianMarkovianGaussianMixture
try:
import gui
except ImportError as e:
print("Could not import gui: {0}".format(e.message))
print("Could not import gui: {0}".format(e.msg))
print("run : sudo apt-get install tkinter")
except:
print("Unexpected error:", sys.exc_info()[0])
......
This diff is collapsed.
......@@ -2,13 +2,13 @@ import numpy as np
from .model import *
from .functions import multi_variate_normal
from scipy.linalg import block_diag
from scipy.special import logsumexp
from termcolor import colored
from .mvn import MVN
class GMM(Model):
def __init__(self, nb_states=1, nb_dim=None, init_zeros=False, mu=None, lmbda=None, sigma=None, priors=None):
def __init__(self, nb_states=1, nb_dim=None, init_zeros=False, mu=None, lmbda=None, sigma=None, priors=None, log_priors=None):
if mu is not None:
nb_states = mu.shape[0]
nb_dim = mu.shape[-1]
......@@ -22,6 +22,9 @@ class GMM(Model):
self._lmbda = lmbda
self._sigma = sigma
self._priors = priors
self._log_priors = log_priors
if init_zeros:
self.init_zeros()
......@@ -41,7 +44,7 @@ class GMM(Model):
# print priors, self.priors
mus, sigmas = self.moment_matching(priors)
mvn = MVN(nb_dim=self.nb_dim, mu=mus[0], sigma=sigmas[0])
mvn = MVN(nb_dim=self.nb_dim, mu=mus, sigma=sigmas)
return mvn
......@@ -55,10 +58,16 @@ class GMM(Model):
if h.ndim == 1:
h = h[None]
mus = np.einsum('ak,ki->ai', h, self.mu)
dmus = self.mu[None] - mus[:, None] # nb_timesteps, nb_states, nb_dim
sigmas = np.einsum('ak,kij->aij', h, self.sigma) + \
np.einsum('ak,akij->aij', h, np.einsum('aki,akj->akij', dmus, dmus))
if self.mu.ndim == 2:
mus = np.einsum('ak,ki->ai', h, self.mu)
dmus = self.mu[None] - mus[:, None] # nb_timesteps, nb_states, nb_dim
sigmas = np.einsum('ak,kij->aij', h, self.sigma) + \
np.einsum('ak,akij->aij', h, np.einsum('aki,akj->akij', dmus, dmus))
else:
mus = np.einsum('ak,aki->ai', h, self.mu)
dmus = self.mu - mus[:, None] # nb_timesteps, nb_states, nb_dim
sigmas = np.einsum('ak,akij->aij', h, self.sigma) + \
np.einsum('ak,akij->aij', h, np.einsum('aki,akj->akij', dmus, dmus))
return mus, sigmas
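For reference, both branches above implement standard Gaussian moment matching, mu_hat = sum_k h_k mu_k and sigma_hat = sum_k h_k (sigma_k + dmu_k dmu_k^T); the new else-branch merely admits a per-timestep mean of shape (nb_timesteps, nb_states, nb_dim). A self-contained sanity check of the einsum indices (all names, shapes and the seed below are illustrative):

import numpy as np

rng = np.random.default_rng(0)
T, K, D = 3, 4, 2
h = rng.random((T, K))
h /= h.sum(-1, keepdims=True)                        # responsibilities
mu = rng.normal(size=(T, K, D))                      # per-timestep means (mu.ndim == 3)
A = rng.normal(size=(K, D, D))
sigma = np.einsum('kij,klj->kil', A, A) + np.eye(D)  # SPD covariances
sigma = np.broadcast_to(sigma, (T, K, D, D))

# einsum path, exactly as in the else-branch
mus = np.einsum('ak,aki->ai', h, mu)
dmus = mu - mus[:, None]
sigmas = np.einsum('ak,akij->aij', h, sigma) + \
         np.einsum('ak,akij->aij', h, np.einsum('aki,akj->akij', dmus, dmus))

# brute-force reference: sigma_hat_t = sum_k h_tk * (sigma_tk + outer(dmu_tk, dmu_tk))
for t in range(T):
    ref = sum(h[t, k] * (sigma[t, k] + np.outer(dmus[t, k], dmus[t, k])) for k in range(K))
    assert np.allclose(sigmas[t], ref)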
......@@ -104,6 +113,7 @@ class GMM(Model):
gmm.priors /= np.sum(gmm.priors)
else:
# component wise
gmm = GMM(nb_dim=self.nb_dim, nb_states=self.nb_states)
gmm.priors = self.priors
gmm.mu = np.einsum('aij,aj->ai', self.lmbda, self.mu) + \
......@@ -291,6 +301,8 @@ class GMM(Model):
Composed of 0 and 1. Mask given the dependencies in the covariance matrices
:return:
"""
if self.nb_dim is None:
self.nb_dim = data.shape[-1]
self.reg = reg
......@@ -457,18 +469,28 @@ class GMM(Model):
:return: np.array([nb_states, nb_samples])
log mvn
"""
# if len(x.shape) > 1: # TODO implement mvn for multiple xs
# raise NotImplementedError
mu, lmbda_, sigma_chol_ = self.mu, self.lmbda, self.sigma_chol
if x.ndim > 1:
if x.ndim > 1 and mu.ndim == 2:
dx = mu[None] - x[:, None] # nb_timesteps, nb_states, nb_dim
eins_idx = ('baj,baj->ba', 'ajk,baj->bak')
elif x.ndim > 1 and mu.ndim == 3:
dx = mu - x[:, None] # nb_timesteps, nb_states, nb_dim
eins_idx = ('baj,baj->ba', 'bajk,baj->bak')
else:
dx = mu - x
eins_idx = ('aj,aj->a', 'ajk,aj->ak')
eins_idx = ('baj,baj->ba', 'ajk,baj->bak') if x.ndim > 1 else (
'aj,aj->a', 'ajk,aj->ak')
if lmbda_.ndim == 4:
cov_part = np.sum(np.log(sigma_chol_.diagonal(axis1=2, axis2=3)), axis=-1)
else:
cov_part = np.sum(np.log(sigma_chol_.diagonal(axis1=1, axis2=2)), axis=1)
return -0.5 * np.einsum(eins_idx[0], dx, np.einsum(eins_idx[1], lmbda_, dx)) \
- mu.shape[1] / 2. * np.log(2 * np.pi) - np.sum(
np.log(sigma_chol_.diagonal(axis1=1, axis2=2)), axis=1)
- (mu.shape[-1] / 2.) * np.log(2 * np.pi) - cov_part
def log_prob(self, x):
return logsumexp(self.log_priors + self.mvn_pdf(x), -1)
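The value returned by mvn_pdf is the usual Cholesky form of the Gaussian log-density, -0.5 dx^T lmbda dx - (D/2) log(2 pi) - sum(log(diag(chol(sigma)))), which works because log det(sigma) = 2 sum(log(diag(chol(sigma)))); log_prob then mixes the components with log-sum-exp. A small check of both identities against scipy (all numbers are arbitrary):

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal

x = np.array([0.5, 1.0])
mu, sigma = np.array([0., 0.]), np.array([[2., .3], [.3, 1.]])

# single component, Cholesky normalizer
dx = x - mu
chol = np.linalg.cholesky(sigma)
lp = -0.5 * dx @ np.linalg.inv(sigma) @ dx \
     - (len(x) / 2.) * np.log(2 * np.pi) - np.sum(np.log(np.diag(chol)))
assert np.isclose(lp, multivariate_normal(mu, sigma).logpdf(x))

# mixture: log p(x) = logsumexp_k(log pi_k + log N(x | mu_k, sigma_k))
log_priors = np.log(np.array([0.3, 0.7]))
comps = np.array([lp, multivariate_normal([2., 2.], np.eye(2)).logpdf(x)])
assert np.isclose(logsumexp(log_priors + comps),
                  np.log(np.exp(log_priors + comps).sum()))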
......@@ -201,7 +201,6 @@ class HMM(GMM):
sample_size = demo.shape[0]
elif isinstance(demo, dict):
sample_size = demo['x'].shape[0]
B, _ = self.obs_likelihood(demo, dep, marginal, sample_size)
# if table is not None:
# B *= table[:, [n]]
......@@ -256,11 +255,14 @@ class HMM(GMM):
"""
mu = np.mean(data, axis=0)
sigma = np.cov(data.T)
if sigma.ndim == 0:
sigma = np.ones((1, 1)) * sigma
if left_to_right:
self.mu = np.array([mu for i in range(self.nb_states)])
else:
self.mu = np.array([np.random.multivariate_normal(mu, sigma)
self.mu = np.array([np.random.multivariate_normal(mu * 1, sigma)
for i in range(self.nb_states)])
self.sigma = np.array([sigma + self.reg for i in range(self.nb_states)])
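The new ndim == 0 guard covers single-dimensional demonstrations, where np.cov collapses to a 0-d array that np.random.multivariate_normal rejects. A minimal reproduction:

import numpy as np

data = np.random.randn(100)      # 1-D demonstrations
sigma = np.cov(data.T)
assert sigma.ndim == 0           # np.cov returns a 0-d array here
sigma = np.ones((1, 1)) * sigma  # promote to a valid 1x1 covariance
np.random.multivariate_normal(np.atleast_1d(data.mean()), sigma)  # now accepted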
......
......@@ -79,6 +79,7 @@ class Model(object):
elif isinstance(value, np.ndarray):
self._reg = value
elif isinstance(value, float):
print(value)
self._reg = value ** 2 * np.eye(self.nb_dim)
else:
raise ValueError('Regularization should be of type float, ndarray or list')
......@@ -96,6 +97,17 @@ class Model(object):
def priors(self, value):
self._priors = value
@property
def log_priors(self):
if self._log_priors is None:
self._log_priors = np.log(self._priors + 1e-40)
return self._log_priors
@log_priors.setter
def log_priors(self, value):
self._log_priors = value
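log_priors is computed lazily and cached; the 1e-40 keeps empty components from producing -inf. One caveat: the priors setter above only assigns _priors, so a cached _log_priors can go stale if priors are reassigned after log_priors has been read. A defensive variant (a minimal sketch, not the library's Model class) would invalidate the cache in the setter:

import numpy as np

class PriorCache:
    def __init__(self, priors):
        self._priors, self._log_priors = priors, None

    @property
    def priors(self):
        return self._priors

    @priors.setter
    def priors(self, value):
        self._priors = value
        self._log_priors = None          # invalidate; recomputed on next access

    @property
    def log_priors(self):
        if self._log_priors is None:
            self._log_priors = np.log(self._priors + 1e-40)
        return self._log_priors

m = PriorCache(np.array([0.5, 0.5]))
_ = m.log_priors                         # fill the cache
m.priors = np.array([0.9, 0.1])
assert np.allclose(m.log_priors, np.log(m.priors + 1e-40))   # no stale value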
@property
def mu(self):
"""
......
import numpy as np
import numpy
from .gmm import GMM, MVN
from .hmm import HMM
from scipy.special import logsumexp
......@@ -6,6 +6,7 @@ from sklearn import mixture
from scipy.stats import wishart
from .model import *
from .utils import gaussian_moment_matching
from .utils.math_utils import log_normalize
class MTMM(Model):
......@@ -151,7 +152,7 @@ class MTMM(Model):
axis=2) # [nb_states, nb_samples]
log_norm = self.log_normalization[:, None]
return log_norm + (-(self.nu + self.nb_dim) / 2)[:, None] * np.log(1 + s / self.nu[:, None])
return log_norm + (-(self.nu + self.nb_dim) / 2)[:, None] * numpy.log(1 + s / self.nu[:, None])
def obs_likelihood(self, demo=None, dep=None, marginal=None, *args, **kwargs):
B = self.log_prob_components(demo)
......@@ -184,7 +185,6 @@ class MTMM(Model):
Overrides marginal probability of states given input dimensions
:return:
"""
if data_in.ndim == 1:
data_in = data_in[None]
was_not_batch = True
......@@ -192,7 +192,6 @@ class MTMM(Model):
was_not_batch = False
sample_size = data_in.shape[0]
if tmp and hasattr(self, '_tmp_slices') and not self._tmp_slices == (dim_in, dim_out):
del self._tmp_inv_sigma_out_in, self._tmp_inv_sigma_in_in, self._tmp_slices, self._tmp_marginal_model
......@@ -208,7 +207,11 @@ class MTMM(Model):
if h is None:
h = marginal_model.log_prob_components(data_in)
# self._resp = h
h += np.log(self.priors)[:, None]
self._resp = h
# h = log_normalize(h, axis=0)
# h = np.exp(h).T
h = np.exp(h).T
h /= np.sum(h, axis=1, keepdims=True)
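Here h holds log p(k) + log p(x | k), and the exp-then-normalize step can underflow to 0/0 when every component log-probability is very negative; the commented-out log_normalize call hints at the safer log-domain route. A sketch of that alternative, with toy numbers chosen to break the naive path:

import numpy as np
from scipy.special import logsumexp

log_h = np.log([0.3, 0.7])[:, None] + np.array([[-800., -2.],
                                                [-801., -1.]])  # [nb_states, nb_samples]
h_naive = np.exp(log_h).T
assert np.all(h_naive[0] == 0.0)    # first sample underflows; normalizing would give nan

h = np.exp(log_h - logsumexp(log_h, axis=0, keepdims=True)).T   # normalize in log domain
assert np.allclose(h.sum(axis=1), 1.0)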
......@@ -271,6 +274,8 @@ class MTMM(Model):
nu = self.nu + mu_in.shape[1]
# the conditional distribution is still a mixture
# normalize h:
if was_not_batch:
mu_est = mu_est[:, 0]
sigma_est = sigma_est[:, 0]
......@@ -289,18 +294,21 @@ class MTMM(Model):
gmm = GMM(nb_states=self.nb_states)
gmm.mu = mu_est
gmm.priors = h
gmm.sigma = sigma_est * (nu / (nu - 2.))[:, None, None, None]
return gmm
elif return_linear:
As = inv_sigma_out_in
bs = mu_out - np.matmul(inv_sigma_out_in, mu_in[:, :, None])[:, :, 0]
A = np.einsum('ak,kij->aij', h, As)
b = np.einsum('ak,ki->ai', h, bs)
if was_not_batch:
return A[0], b[0], \
gaussian_moment_matching(mu_est, sigma_est * (nu / (nu - 2.))[:, None, None, None], h)[1][0]
A = np.einsum('k,kij->ij', h, As)
b = np.einsum('k,ki->i', h, bs)
return A, b, \
gaussian_moment_matching(mu_est, sigma_est * (nu / (nu - 2.))[:, None, None], h)[1]
else:
A = np.einsum('ak,kij->aij', h, As)
b = np.einsum('ak,ki->ai', h, bs)
return A, b, gaussian_moment_matching(mu_est, sigma_est * (nu / (nu - 2.))[:, None, None, None], h)[1]
elif moment_matching:
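The recurring (nu / (nu - 2.)) factor converts a Student-t scale matrix into an actual covariance (finite only for nu > 2), which is what gaussian_moment_matching expects. A Monte-Carlo check of that identity (dimension, sample size and seed are arbitrary):

import numpy as np

rng = np.random.default_rng(2)
nu, D, N = 5.0, 2, 400000
scale = np.array([[1.5, .4], [.4, 1.]])

# multivariate-t draw: x = z / sqrt(g / nu), z ~ N(0, scale), g ~ chi2(nu)
z = rng.multivariate_normal(np.zeros(D), scale, size=N)
g = rng.chisquare(nu, size=(N, 1))
x = z / np.sqrt(g / nu)

assert np.allclose(np.cov(x.T), scale * nu / (nu - 2.), atol=1e-1)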
......@@ -483,6 +491,11 @@ class MTMM(Model):
size=num_comp_in_X)
return X
def truncate(self):
keep = self.priors > 1E-6
return MTMM(mu=self.mu[keep, :], lmbda=self.lmbda[keep, :],
sigma=self.sigma[keep, :], nu=self.nu[keep], priors=self.priors[keep])
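truncate prunes the near-empty components that variational inference tends to leave behind, returning a smaller MTMM via plain boolean masking:

import numpy as np

priors = np.array([0.62, 0.38, 1e-9])   # illustrative: one dead component
keep = priors > 1e-6                    # -> [True, True, False]
assert keep.sum() == 2                  # mu[keep], sigma[keep], ... shrink to 2 components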
class VBayesianGMM(MTMM):
def __init__(self, sk_parameters, *args, **kwargs):
......@@ -562,8 +575,14 @@ class VBayesianGMM(MTMM):
self.nu_prior = m.degrees_of_freedom_prior_
# print(m.degrees_of_freedom_prior_)
if m.covariances_.ndim == 2:
covs = np.zeros((self.mu.shape[0], self.nb_dim, self.nb_dim))
for j in range(self.mu.shape[0]):
covs[j] = np.diag(m.covariances_[j])
w_k = np.linalg.inv(covs * m.degrees_of_freedom_[:, None, None])
w_k = np.linalg.inv(m.covariances_ * m.degrees_of_freedom_[:, None, None])
else:
w_k = np.linalg.inv(m.covariances_ * m.degrees_of_freedom_[:, None, None])
l_k = ((m.degrees_of_freedom_[:, None, None] + 1 - self.nb_dim) * m.mean_precision_[:, None, None]) / \
(1. + m.mean_precision_[:, None, None]) * w_k
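When sklearn's BayesianGaussianMixture is fit with a diagonal covariance type, covariances_ has shape (nb_states, nb_dim) rather than (nb_states, nb_dim, nb_dim); the new branch expands it to full matrices before inverting. The loop can also be written vectorized, a sketch assuming the same (K, D) layout:

import numpy as np

diag_covs = np.array([[1., 2.], [3., 4.]])      # stands in for m.covariances_, shape (K, D)
covs = diag_covs[:, None, :] * np.eye(2)[None]  # (K, D, D), one diagonal matrix per state
assert np.allclose(covs[0], np.diag(diag_covs[0]))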
......
import numpy as np
prec_min = 1e-15
import sys
from .utils.gaussian_utils import gaussian_conditioning
from .functions import mvn_pdf
from .functions import multi_variate_normal
import pbdlib as pbd
from scipy.linalg import sqrtm
class MVN(object):
def __init__(self, mu=None, sigma=None, lmbda=None, lmbda_ns=None, sigma_cv=None, nb_dim=2):
"""
Multivariate Normal Distribution
def __init__(self, mu=None, sigma=None, lmbda=None, lmbda_ns=None, sigma_cv=None, nb_dim=2):
"""
Multivariate Normal Distribution with batch dimensions B1, B2, ...
:param mu: np.array([nb_dim])
:param mu: np.array([B1, B2, ... , nb_dim])
Mean vector
:param sigma: np.array([nb_dim, nb_dim])
:param sigma: np.array([B1, B2, ... , nb_dim, nb_dim])
Covariance matrix
:param lmbda: np.array([nb_dim, nb_dim])
:param lmbda: np.array([B1, B2, ... , nb_dim, nb_dim])
Precision matrix
:param lmbda_ns:
:param sigma_cv:
"""
self._mu = mu
self._sigma = sigma
self._lmbda = lmbda
self._sigma_chol = None
self._eta = None
#
self.lmbda_ns = lmbda_ns
self.sigma_cv = sigma_cv
self._lmbdaT = None
self._muT = None
if mu is not None:
self.nb_dim = mu.shape[0]
elif sigma is not None:
self.nb_dim = sigma.shape[0]
elif lmbda is not None:
self.nb_dim = lmbda.shape[0]
else:
self.nb_dim = nb_dim
@property
def eta(self):
"""
self._mu = mu
self._sigma = sigma
self._lmbda = lmbda
self._sigma_chol = None
self._eta = None
#
self.lmbda_ns = lmbda_ns
self.sigma_cv = sigma_cv
self._lmbdaT = None
self._muT = None
if mu is not None:
self.nb_dim = mu.shape[-1]
self._batch = True if mu.ndim > 1 else False
elif sigma is not None:
self.nb_dim = sigma.shape[-1]
self._batch = True if sigma.ndim > 2 else False
elif lmbda is not None:
self.nb_dim = lmbda.shape[-1]
self._batch = True if lmbda.ndim > 2 else False
else:
self.nb_dim = nb_dim
self._batch = None
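The batched constructor infers dimensionality from the trailing axis and sets _batch from the number of leading axes. A usage sketch of that convention (shapes are illustrative, and it assumes MVN is importable from the package's top level):

import numpy as np
from pbdlib import MVN   # assumption: the package re-exports MVN

mu = np.zeros((5, 3))                    # B1 = 5 means of dimension 3
sigma = np.tile(np.eye(3), (5, 1, 1))    # matching (5, 3, 3) covariances
batched = MVN(mu=mu, sigma=sigma)        # nb_dim == 3, _batch == True  (mu.ndim > 1)
single = MVN(mu=np.zeros(3), sigma=np.eye(3))  # nb_dim == 3, _batch == False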
@property
def eta(self):
"""
Natural parameters eta = lambda.dot(mu)
:return:
"""
if self._eta is None:
self._eta = self.lmbda.dot(self.mu)
return self._eta
@property
def mu(self):
if self._mu is None:
self._mu = np.zeros(self.nb_dim)
return self._mu
@mu.setter
def mu(self, value):
self.nb_dim = value.shape[0]
self._mu = value
self._eta = None
@property
def sigma(self):
if self._sigma is None and not self._lmbda is None:
try:
self._sigma = np.linalg.inv(self._lmbda)
# except np.linalg.LinAlgError:
except np.linalg.LinAlgError:
self._sigma = np.linalg.inv(self._lmbda + prec_min * np.eye(self._lmbda.shape[0]))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
return self._sigma
@sigma.setter
def sigma(self, value):
self.nb_dim = value.shape[0]
self._lmbda = None
self._sigma_chol = None
self._sigma = value
self._eta = None
def plot(self, *args, **kwargs):
pbd.plot_gaussian(self.mu, self.sigma, *args, **kwargs)
@property
def muT(self):
"""
if self._eta is None:
self._eta = self.lmbda.dot(self.mu)
return self._eta
@property
def mu(self):
if self._mu is None:
self._mu = np.zeros(self.nb_dim)
return self._mu
@mu.setter
def mu(self, value):
self.nb_dim = value.shape[-1]
self._mu = value
self._eta = None
@property
def sigma(self):
if self._sigma is None and not self._lmbda is None:
try:
self._sigma = np.linalg.inv(self._lmbda)
# except np.linalg.LinAlgError:
except np.linalg.LinAlgError:
self._sigma = np.linalg.inv(self._lmbda + prec_min * np.eye(self._lmbda.shape[0]))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
return self._sigma
@sigma.setter
def sigma(self, value):
self.nb_dim = value.shape[-1]
self._lmbda = None
self._sigma_chol = None
self._sigma = value
self._eta = None
def plot(self, *args, **kwargs):
"""
only works in the non-batched case
:param args:
:param kwargs:
:return:
"""
pbd.plot_gaussian(self.mu, self.sigma, *args, **kwargs)
@property
def muT(self):
"""
Returns muT-b
:return:
"""
if self._muT is not None:
return self._muT
else:
return self.mu
@property
def lmbdaT(self):
"""
if self._muT is not None:
return self._muT
else:
return self.mu
@property
def lmbdaT(self):
"""
Returns A^T.dot(lmbda)
:return:
"""
if self._lmbdaT is not None:
return self._lmbdaT
else:
return self.lmbda
@property
def lmbda(self):
if self._lmbda is None and not self._sigma is None:
self._lmbda = np.linalg.inv(self._sigma)
return self._lmbda
@lmbda.setter
def lmbda(self, value):
self._sigma = None # reset sigma
self._sigma_chol = None
self._lmbda = value
self._eta = None
@property
def sigma_chol(self):
if self.sigma is None:
return None
else:
if self._sigma_chol is None:
self._sigma_chol = np.linalg.cholesky(self.sigma)
return self._sigma_chol
def ml(self, data):
self.mu = np.mean(data, axis=0)
self.sigma = np.cov(data.T)
self.lmbda = np.linalg.inv(self.sigma)
def log_prob(self, x, marginal=None, reg=None):
"""
if self._lmbdaT is not None:
return self._lmbdaT
else:
return self.lmbda
@property
def lmbda(self):
if self._lmbda is None and not self._sigma is None:
self._lmbda = np.linalg.inv(self._sigma)
return self._lmbda
@lmbda.setter
def lmbda(self, value):
self._sigma = None # reset sigma
self._sigma_chol = None
self._lmbda = value
self._eta = None
@property
def sigma_chol(self):
if self.sigma is None:
return None
else:
if self._sigma_chol is None:
self._sigma_chol = np.linalg.cholesky(self.sigma)
return self._sigma_chol
def ml(self, data):
self.mu = np.mean(data, axis=-1)
self.sigma = np.cov(data.T)
self.lmbda = np.linalg.inv(self.sigma)
def log_prob(self, x, marginal=None, reg=None):
"""
:param x:
:param marginal:
:type marginal: slice
:return:
"""
if marginal is not None:
_mu = self.mu[marginal]
_sigma = self.sigma[marginal, marginal]
if reg is not None:
_sigma += np.eye(marginal.stop - marginal.start) * reg
return multi_variate_normal(x, _mu, _sigma)
return multi_variate_normal(x, self.mu, self.sigma)
def transform(self, A, b=None, dA=None, db=None):
if b is None: b = np.zeros(A.shape[0])
if dA is None:
return type(self)(mu=A.dot(self.mu) + b, sigma=A.dot(self.sigma).dot(A.T))
else:
return self.transform_uncertainty(A, b, dA=None, db=None)
def inv_transform(self, A, b):
"""
if marginal is not None:
if self._batch:
_mu = self.mu[..., marginal]
_sigma = self.sigma[..., marginal, marginal]
else:
_mu = self.mu[marginal]
_sigma = self.sigma[marginal, marginal]
if reg is not None:
_sigma += np.eye(marginal.stop - marginal.start) * reg
return multi_variate_normal(x, _mu, _sigma)
return multi_variate_normal(x, self.mu, self.sigma)
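sigma[..., marginal, marginal] extracts the marginal block only because marginal is a slice: two slices index independently and return the contiguous sub-matrix, whereas integer arrays would pair up element-wise. A quick illustration:

import numpy as np

sigma = np.arange(16.).reshape(4, 4)
marginal = slice(0, 2)
block = sigma[marginal, marginal]          # leading 2x2 block, shape (2, 2)
assert block.shape == (2, 2)

pairs = sigma[np.arange(2), np.arange(2)]  # integer arrays: just the diagonal, shape (2,)
assert pairs.shape == (2,)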
def transform(self, A, b=None, dA=None, db=None):
if b is None: b = np.zeros(A.shape[0])
if dA is None:
return type(self)(mu=A.dot(self.mu) + b, sigma=A.dot(self.sigma).dot(A.T))
else:
return self.transform_uncertainty(A, b, dA=None, db=None)
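transform applies the closed form for an affine map of a Gaussian: if x ~ N(mu, sigma) and y = A x + b, then y ~ N(A mu + b, A sigma A^T). A Monte-Carlo sanity check of that identity (all values, sizes and tolerances are arbitrary):

import numpy as np

rng = np.random.default_rng(1)
mu, sigma = np.array([1., 2.]), np.array([[2., .5], [.5, 1.]])
A, b = np.array([[1., 1.], [0., 2.]]), np.array([0., 1.])

x = rng.multivariate_normal(mu, sigma, size=200000)
y = x @ A.T + b                                      # apply the affine map per sample

assert np.allclose(y.mean(0), A @ mu + b, atol=5e-2)
assert np.allclose(np.cov(y.T), A @ sigma @ A.T, atol=1e-1)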
def inv_transform(self, A, b):
"""