function model = EM_tensorHDGMM(Data, model)
% Training of a task-parameterized high-dimensional GMM (HDGMM) with an expectation-maximization (EM) algorithm.
%
% Writing code takes time. Polishing it and making it available to others takes longer!
% If some parts of the code were useful for your research or for a better understanding
% of the algorithms, please reward the authors by citing the related publications,
% and consider making your own research available in this way.
%
% @article{Calinon15,
% author="Calinon, S.",
% title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
% journal="Intelligent Service Robotics",
% year="2015"
% }
%
% Copyright (c) 2015 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
%
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
%
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
%
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
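%
% Example usage (a minimal sketch, not part of the original file: the Data
% dimensions follow how Data is indexed below, and the initialization call
% 'init_tensorGMM_kmeans' is an assumed PbDlib-style initializer that sets
% model.Priors, model.Mu and model.Sigma before EM refinement; substitute
% your own initialization if needed):
%
%   % Data: array of size nbVar x nbFrames x nbData (datapoints observed from each candidate frame)
%   model.nbStates = 3; %Number of Gaussian components
%   model.nbFrames = 2; %Number of candidate frames
%   model.nbVar = 3;    %Dimension of a datapoint in one frame
%   model.nbFA = 1;     %Number of retained principal directions (subspace dimension)
%   model = init_tensorGMM_kmeans(Data, model); %Assumed initialization step
%   model = EM_tensorHDGMM(Data, model);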
%Parameters of the EM algorithm
nbMinSteps = 5; %Minimum number of iterations allowed
nbMaxSteps = 100; %Maximum number of iterations allowed
maxDiffLL = 1E-5; %Likelihood increase threshold to stop the algorithm
nbData = size(Data,3);
%diagRegularizationFactor = 1E-2; %Optional regularization term
diagRegularizationFactor = 1E-10; %Optional regularization term
%EM loop
for nbIter=1:nbMaxSteps
fprintf('.');
%E-step
[Lik, GAMMA] = computeGamma(Data, model); %See 'computeGamma' function below
GAMMA2 = GAMMA ./ repmat(sum(GAMMA,2),1,nbData);
model.Pix = GAMMA2;
%M-step
for i=1:model.nbStates
%Update Priors
model.Priors(i) = sum(sum(GAMMA(i,:))) / nbData;
for m=1:model.nbFrames
%Matricization/flattening of tensor
DataMat(:,:) = Data(:,m,:);
%Update Mu
model.Mu(:,m,i) = DataMat * GAMMA2(i,:)';
%Compute covariance
DataTmp = DataMat - repmat(model.Mu(:,m,i),1,nbData);
S(:,:,m,i) = DataTmp * diag(GAMMA2(i,:)) * DataTmp' + eye(model.nbVar)*diagRegularizationFactor;
%HDGMM update: keep the nbFA leading eigencomponents of S and replace the remaining eigenvalues by their average (subspace + isotropic residual noise)
[V,D] = eig(S(:,:,m,i));
[~,id] = sort(diag(D),'descend');
d = diag(D);
model.D(:,:,m,i) = diag([d(id(1:model.nbFA)); repmat(mean(d(id(model.nbFA+1:end))), model.nbVar-model.nbFA, 1)]);
model.V(:,:,m,i) = V(:,id);
%Reconstruct Sigma
model.Sigma(:,:,m,i) = model.V(:,:,m,i) * model.D(:,:,m,i) * model.V(:,:,m,i)' + eye(model.nbVar) * diagRegularizationFactor;
end
end
%Compute average log-likelihood
LL(nbIter) = sum(log(sum(Lik,1))) / size(Lik,2);
%Stop the algorithm if EM converged (small change of LL)
if nbIter>nbMinSteps
	if LL(nbIter)-LL(nbIter-1) < maxDiffLL
		disp(['EM converged after ' num2str(nbIter) ' iterations.']);
		return;
	end
end
end %for nbIter
disp(['The maximum number of ' num2str(nbMaxSteps) ' EM iterations has been reached.']);
end
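
%The E-step above calls a local 'computeGamma' function that is not included in
%this excerpt. The helper below is a minimal sketch consistent with how Lik and
%GAMMA are used above: each component likelihood is the product of the per-frame
%Gaussian likelihoods (product of experts) weighted by the component prior, and
%GAMMA contains the normalized responsibilities.
function [Lik, GAMMA] = computeGamma(Data, model)
nbData = size(Data,3);
Lik = ones(model.nbStates, nbData);
for i=1:model.nbStates
	for m=1:model.nbFrames
		DataMat(:,:) = Data(:,m,:); %Matricization/flattening of tensor
		%Per-frame Gaussian likelihood of each datapoint
		Lik(i,:) = Lik(i,:) .* gaussPDF(DataMat, model.Mu(:,m,i), model.Sigma(:,:,m,i))';
	end
	Lik(i,:) = Lik(i,:) * model.Priors(i);
end
%Normalized responsibilities (realmin avoids division by zero)
GAMMA = Lik ./ repmat(sum(Lik,1)+realmin, model.nbStates, 1);
end

%Likelihood of datapoints (columns of Data) under a multivariate Gaussian.
%PbDlib provides this helper as a separate gaussPDF.m file; a local copy is
%kept here only so that the sketch above is self-contained.
function prob = gaussPDF(Data, Mu, Sigma)
[nbVar, nbData] = size(Data);
Data = Data' - repmat(Mu', nbData, 1);
prob = sum((Data/Sigma).*Data, 2);
prob = exp(-0.5*prob) / sqrt((2*pi)^nbVar * abs(det(Sigma)) + realmin);
end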