function model = EM_tensorMPPCA(Data, model)
% Training of a task-parameterized mixture of probabilistic principal component analyzers (TP-MPPCA), 
% with an expectation-maximization (EM) algorithm.
%
% Writing code takes time. Polishing it and making it available to others takes longer! 
% If some parts of the code were useful for your research or for a better understanding 
% of the algorithms, please reward the authors by citing the related publications, 
% and consider making your own research available in this way.
%
% @article{Calinon15,
%   author="Calinon, S.",
%   title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
%   journal="Intelligent Service Robotics",
%   year="2015"
% }
%
% Copyright (c) 2015 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
% 
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
% 
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
% 
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
% 
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
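%
% Example usage (a minimal sketch): the 'model' struct is assumed to be pre-initialized,
% e.g. with init_tensorGMM_kmeans or init_tensorGMM_timeBased if available in your PbDlib
% version, and Data is assumed to be organized as a nbVar x nbFrames x nbData tensor.
%   model.nbStates = 3; %Number of mixture components
%   model.nbFrames = 2; %Number of candidate frames (task parameters)
%   model.nbVar = 3;    %Dimension of the datapoints in each frame
%   model.nbFA = 1;     %Number of factor analyzers (subspace dimension) per component
%   model = init_tensorGMM_kmeans(Data, model); %Initialization of Priors, Mu and Sigma
%   model = EM_tensorMPPCA(Data, model);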


%Parameters of the EM algorithm
nbMinSteps = 5; %Minimum number of iterations allowed
nbMaxSteps = 100; %Maximum number of iterations allowed
maxDiffLL = 1E-5; %Log-likelihood increase threshold to stop the algorithm
nbData = size(Data,3);

%diagRegularizationFactor = 1E-2; %Optional regularization term
diagRegularizationFactor = 1E-10; %Optional regularization term (added to the diagonal of the covariance estimates)
	
%Initialization of the MPPCA parameters from eigendecomposition
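%(for each frame m and component i, the noise variance sigma^2 is initialized as
% trace(Sigma)/nbVar, and the loading matrix L from the nbFA leading eigencomponents
% of Sigma - sigma^2*I, scaled by the square roots of their eigenvalues, so that
% Sigma is approximated by L*L' + sigma^2*I)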
for i=1:model.nbStates
	for m=1:model.nbFrames
		model.o(m,i) = trace(model.Sigma(:,:,m,i)) / model.nbVar;
		[V,D] = eig(model.Sigma(:,:,m,i)-eye(model.nbVar)*model.o(m,i)); 
		[~,id] = sort(diag(D),'descend');
		V = V(:,id)*D(id,id).^.5;
		model.L(:,:,m,i) = V(:,1:model.nbFA);
	end
end

%EM loop
for nbIter=1:nbMaxSteps
	fprintf('.');
	
	%E-step
	[Lik, GAMMA] = computeGamma(Data, model); %See 'computeGamma' function below
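	%GAMMA(i,t) is the responsibility of component i for datapoint t;
	%GAMMA2 rescales each row of GAMMA to sum to one (weights reused in the M-step)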
	GAMMA2 = GAMMA ./ repmat(sum(GAMMA,2),1,nbData);
	model.Pix = GAMMA2;
	
	%M-step
	for i=1:model.nbStates
		
		%Update Priors
		model.Priors(i) = sum(sum(GAMMA(i,:))) / nbData;
		
		for m=1:model.nbFrames
			%Matricization/flattening of tensor
			DataMat(:,:) = Data(:,m,:);
			
			%Update Mu
			model.Mu(:,m,i) = DataMat * GAMMA2(i,:)';
			
			%Compute covariance
			DataTmp = DataMat - repmat(model.Mu(:,m,i),1,nbData);
			S(:,:,m,i) = DataTmp * diag(GAMMA2(i,:)) * DataTmp' + eye(model.nbVar)*diagRegularizationFactor;
			
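			%The updates below follow the standard EM updates for a mixture of probabilistic
			%principal component analyzers (see Tipping and Bishop, 1999), applied independently
			%for each frame m and component i, with Lambda stored in model.L and the isotropic
			%noise variance sigma^2 stored in model.o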
			%Update M 
			M = eye(model.nbFA)*model.o(m,i) + model.L(:,:,m,i)' * model.L(:,:,m,i);
			%Update Lambda 
			Lnew =  S(:,:,m,i) * model.L(:,:,m,i) / (eye(model.nbFA)*model.o(m,i) + M \ model.L(:,:,m,i)' * S(:,:,m,i) * model.L(:,:,m,i));
			%Update sigma^2 
			model.o(m,i) = trace(S(:,:,m,i) - S(:,:,m,i) * model.L(:,:,m,i) / M * Lnew') / model.nbVar;
			model.L(:,:,m,i) = Lnew;
			%Update Psi 
			model.P(:,:,m,i) = eye(model.nbVar) * model.o(m,i);
		
			%Reconstruct Sigma
			model.Sigma(:,:,m,i) = model.L(:,:,m,i) * model.L(:,:,m,i)' + model.P(:,:,m,i);
		end
	end
	
	%Compute average log-likelihood
	LL(nbIter) = sum(log(sum(Lik,1))) / size(Lik,2);
	%Stop the algorithm if EM converged (small change of LL)
	if nbIter>nbMinSteps
		if LL(nbIter)-LL(nbIter-1)<maxDiffLL || nbIter==nbMaxSteps-1
			disp(['EM converged after ' num2str(nbIter) ' iterations.']);
			return;
		end
	end
end
disp(['The maximum number of ' num2str(nbMaxSteps) ' EM iterations has been reached.']);
end


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [Lik, GAMMA, GAMMA0] = computeGamma(Data, model)
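%Responsibilities: for each component, the per-frame Gaussian likelihoods are
%multiplied over the frames and weighted by the corresponding prior, then
%normalized over the components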
nbData = size(Data, 3);
Lik = ones(model.nbStates, nbData);
GAMMA0 = zeros(model.nbStates, model.nbFrames, nbData);
for i=1:model.nbStates
	for m=1:model.nbFrames
		DataMat(:,:) = Data(:,m,:); %Matricization/flattening of tensor
		GAMMA0(i,m,:) = gaussPDF(DataMat, model.Mu(:,m,i), model.Sigma(:,:,m,i));
		Lik(i,:) = Lik(i,:) .* squeeze(GAMMA0(i,m,:))';
	end
	Lik(i,:) = Lik(i,:) * model.Priors(i);
end
GAMMA = Lik ./ repmat(sum(Lik,1)+realmin, size(Lik,1), 1);
end