function [model, GAMMA2] = EM_MFA(Data, model)
% EM for a mixture of factor analyzers (implementation based on "Parsimonious Gaussian 
% Mixture Models" by McNicholas and Murphy, Appendix 8, p.17, UUU version).
%
% Writing code takes time. Polishing it and making it available to others takes longer! 
% If some parts of the code were useful for your research or for a better understanding 
% of the algorithms, please reward the authors by citing the related publications, 
% and consider making your own research available in this way.
%
% @article{Calinon15,
%   author="Calinon, S.",
%   title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
%   journal="Intelligent Service Robotics",
%   year="2015"
% }
%
% Copyright (c) 2015 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
% 
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
% 
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
% 
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
% 
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
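%
% Example usage (sketch): Data is a nbVar x nbData matrix; Priors, Mu and Sigma
% are assumed to be initialized beforehand (e.g. with PbDlib's init_GMM_kmeans
% or any other GMM initialization):
%   model.nbStates = 3;                   %Number of mixture components
%   model.nbFA = 2;                       %Number of factors per component
%   model.nbVar = size(Data,1);           %Dimension of the datapoints
%   model = init_GMM_kmeans(Data, model); %Initial Priors, Mu, Sigma
%   [model, GAMMA2] = EM_MFA(Data, model);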


%Parameters of the EM iterations
nbMinSteps = 5; %Minimum number of iterations allowed
nbMaxSteps = 100; %Maximum number of iterations allowed
maxDiffLL = 1E-4; %Likelihood increase threshold to stop the algorithm
nbData = size(Data,2);

diagRegularizationFactor = 1E-6; %Small regularization term added to the covariance diagonals for numerical stability

% %Circular initialization of the MFA parameters
% Itmp = eye(model.nbVar)*1E-2;
% model.P = repmat(Itmp, [1 1 model.nbStates]);
% model.L = repmat(Itmp(:,1:model.nbFA), [1 1 model.nbStates]);

%Initialization of the MFA parameters from eigendecomposition estimate
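%Each covariance is decomposed as Sigma = L*L' + P, with P the diagonal (noise)
%part of Sigma and L built from the nbFA leading eigencomponents of Sigma-P,
%scaled by the square roots of the associated eigenvalues.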
for i=1:model.nbStates
	model.P(:,:,i) = diag(diag(model.Sigma(:,:,i))); %Dimension-wise variance
	[V,D] = eig(model.Sigma(:,:,i)-model.P(:,:,i)); 
	[~,id] = sort(diag(D),'descend');
	V = V(:,id)*D(id,id).^.5;
	model.L(:,:,i) = V(:,1:model.nbFA); %-> Sigma=LL'+P
end
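
%Refine the initial estimates of L and P by iterating the factor analyzer
%update equations on the current model.Sigma (fixed-point refinement so that
%L*L'+P better reproduces each model.Sigma(:,:,i))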
for nbIter=1:nbMaxSteps
	for i=1:model.nbStates
		%Update B,L,P
		B(:,:,i) = model.L(:,:,i)' / (model.L(:,:,i) * model.L(:,:,i)' + model.P(:,:,i));
		model.L(:,:,i) = model.Sigma(:,:,i) * B(:,:,i)' / (eye(model.nbFA) - B(:,:,i) * model.L(:,:,i) + B(:,:,i) * model.Sigma(:,:,i) * B(:,:,i)');
		model.P(:,:,i) = diag(diag(model.Sigma(:,:,i) - model.L(:,:,i) * B(:,:,i) * model.Sigma(:,:,i)));
	end
end

%EM loop
for nbIter=1:nbMaxSteps
	fprintf('.');
	%E-step
	[Lik, GAMMA] = computeGamma(Data, model); %See 'computeGamma' function below
	GAMMA2 = GAMMA ./ repmat(sum(GAMMA,2),1,nbData);
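	%GAMMA2(i,:) are the responsibilities of state i normalized over the
	%datapoints, used as weights in the M-step updates below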
	
	%M-step
	%Update Priors
	model.Priors = sum(GAMMA,2) / nbData;
	
	%Update Mu
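	%(responsibility-weighted mean of the datapoints for each state)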
	model.Mu = Data * GAMMA2';
	
	%Update the factor analyzer parameters of each state
	for i=1:model.nbStates
		%Compute covariance
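		%(responsibility-weighted sample covariance of state i, with a small
		%diagonal regularization for numerical stability)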
		DataTmp = Data - repmat(model.Mu(:,i),1,nbData);
		S(:,:,i) = DataTmp * diag(GAMMA2(i,:)) * DataTmp' + eye(model.nbVar) * diagRegularizationFactor;

		%Update B
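		%(B = L'*(L*L'+P)^-1 maps observations to the expected factor scores E[z|x])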
		B(:,:,i) = model.L(:,:,i)' / (model.L(:,:,i) * model.L(:,:,i)' + model.P(:,:,i));
		%Update Lambda
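		%(new factor loading matrix Lambda, following the M-step update of the referenced UUU model)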
		model.L(:,:,i) = S(:,:,i) * B(:,:,i)' / (eye(model.nbFA) - B(:,:,i) * model.L(:,:,i) + B(:,:,i) * S(:,:,i) * B(:,:,i)');
		%Update Psi
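		%(diagonal noise covariance, kept diagonal by construction and regularized)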
		model.P(:,:,i) = diag(diag(S(:,:,i) - model.L(:,:,i) * B(:,:,i) * S(:,:,i))) + eye(model.nbVar) * diagRegularizationFactor;

		%Reconstruct Sigma
		model.Sigma(:,:,i) = model.L(:,:,i) * model.L(:,:,i)' + model.P(:,:,i);
	end
	%Compute average log-likelihood
	LL(nbIter) = sum(log(sum(Lik,1))) / nbData;
	%Stop the algorithm if EM converged (small change of LL)
	if nbIter>nbMinSteps
		if LL(nbIter)-LL(nbIter-1)<maxDiffLL || nbIter==nbMaxSteps-1
			disp(['EM converged after ' num2str(nbIter) ' iterations.']);
			return;
		end
	end
end
disp(['The maximum number of ' num2str(nbMaxSteps) ' EM iterations has been reached.']);
end


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [Lik, GAMMA] = computeGamma(Data, model)
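%Lik(i,t) = Priors(i) * N(Data(:,t) | Mu(:,i), Sigma(:,:,i)), computed with
%PbDlib's gaussPDF(); GAMMA contains the posterior responsibilities, normalized
%over the states (realmin avoids division by zero)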
Lik = zeros(model.nbStates,size(Data,2));
for i=1:model.nbStates
	Lik(i,:) = model.Priors(i) * gaussPDF(Data, model.Mu(:,i), model.Sigma(:,:,i));
end
GAMMA = Lik ./ repmat(sum(Lik,1)+realmin, model.nbStates, 1);
end