Commit dbd1195f by Sylvain CALINON

demo_Gaussian0*.m examples added

parent d937bee4
@@ -152,6 +152,9 @@ All the examples are located in the main folder, and the functions are located i
| [demo_DPMeans_Online01](./demos/demo_DPMeans_Online01.m) | [[6]](#ref-6) | Online clustering with DP-means algorithm |
| [demo_DSGMR01](./demos/demo_DSGMR01.m) | [[1]](#ref-1) | Gaussian mixture model (GMM), with Gaussian mixture regression (GMR) and dynamical systems used for reproduction, with decay variable used as input (as in DMP) |
| [demo_DTW01](./demos/demo_DTW01.m) | [[1]](#ref-1) | Trajectory realignment through dynamic time warping (DTW) |
| [demo_Gaussian01](./demos/demo_Gaussian01.m) | [[1]](#ref-1) | Use of Chi-square values to determine the percentage of data within the contour of a multivariate normal distribution |
| [demo_Gaussian02](./demos/demo_Gaussian02.m) | [[1]](#ref-1) | Conditional probability with a multivariate normal distribution |
| [demo_Gaussian03](./demos/demo_Gaussian03.m) | [[1]](#ref-1) | Gaussian conditioning with uncertain inputs |
| [demo_GMM01](./demos/demo_GMM01.m) | [[1]](#ref-1) | Gaussian mixture model (GMM) parameters estimation |
| [demo_GMM02](./demos/demo_GMM02.m) | [[1]](#ref-1) | GMM with different covariance structures |
| [demo_GMR01](./demos/demo_GMR01.m) | [[1]](#ref-1) | GMM and time-based Gaussian mixture regression (GMR) used for reproduction |
demos/demo_Gaussian01.m
function demo_Gaussian01
% Use of Chi-square values to determine the percentage of data within the contour of a multivariate normal distribution.
%
% Writing code takes time. Polishing it and making it available to others takes longer!
% If some parts of the code were useful for your research or for a better understanding
% of the algorithms, please reward the authors by citing the related publications,
% and consider making your own research available in this way.
%
% @article{Calinon16JIST,
% author="Calinon, S.",
% title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
% journal="Intelligent Service Robotics",
% publisher="Springer Berlin Heidelberg",
% doi="10.1007/s11370-015-0187-9",
% year="2016",
% volume="9",
% number="1",
% pages="1--29"
% }
%
% Copyright (c) 2015 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
%
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
%
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
%
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
addpath('./m_fcts/');
%% Parameters
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.nbVar = 2; %Number of variables [x1,x2]
nbStd = 3; %Number of standard deviations to consider
%For the normal distribution with D=1, values within one standard deviation of the mean account for 68.27% of the data,
%values within two standard deviations for 95.45%, and values within three standard deviations for 99.73%.
%For D>1:
%https://en.wikipedia.org/wiki/Chi-squared_distribution
%http://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/
%ChiSq = [4.605 5.991 9.210]; %90% 95% 99% intervals for D=2
ChiSq = [2.41 3.22 4.60]; %70% 80% 90% intervals for D=2
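%A sanity check (optional sketch): for D=2 the chi-square quantile has the closed form
%-2*log(1-p), so the values above can be recovered without any toolbox:
%ChiSq = -2*log(1-[.70 .80 .90]); %-> [2.408 3.219 4.605] (rounded above)
%or, assuming the Statistics and Machine Learning Toolbox is available, for any D:
%ChiSq = chi2inv([.70 .80 .90], model.nbVar);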
% %% Generate random normally distributed data
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% nbData = 1000;
% Data = [];
% r = rand(model.nbVar,10)-0.5;
% [V,D] = eigs(cov(r'));
% %D(model.nbFA+1:end,model.nbFA+1:end) = 0;
% R = real(V*D.^.5);
% b = (rand(model.nbVar,1)-0.5) * 2;
% Data = R * randn(model.nbVar,nbData) + repmat(b,1,nbData); % + randn(model.nbVar,nbData)*1E-3;
%% Load data
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
load('data/faithful.mat');
Data = faithful';
nbData = size(Data,2);
% %% Load handwriting data
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% nbData = 100;
% nbSamples = 10;
% demos=[];
% load('data/2Dletters/P.mat');
% %nbSamples = length(demos);
% Data=[];
% for n=1:nbSamples
% s(n).Data = spline(1:size(demos{n}.pos,2), demos{n}.pos, linspace(1,size(demos{n}.pos,2),nbData)); %Resampling
% Data = [Data s(n).Data];
% end
% nbData = size(Data,2); %Total number of datapoints
%% Parameters estimation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.Mu = mean(Data,2);
model.Sigma = cov(Data');
[V,D] = eigs(model.Sigma);
%Compute threshold
for n=1:nbStd
thrStd(n) = gaussPDF(model.Mu+V(:,1)*(ChiSq(n)*D(1,1))^.5, model.Mu, model.Sigma);
end
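%Equivalently, the threshold is the density on the boundary of the ellipse at squared
%Mahalanobis distance ChiSq(n), which has the closed form
%thrStd(n) = exp(-ChiSq(n)/2) / sqrt((2*pi)^model.nbVar * det(model.Sigma));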
%Estimate number of points within n standard deviations
H = gaussPDF(Data, model.Mu, model.Sigma);
for n=1:nbStd
id(:,n) = H>=thrStd(n);
ratio(n) = sum(id(:,n)) / nbData;
end
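%For data that is exactly Gaussian, ratio should be close to the nominal coverage levels
%[.70 .80 .90]; the faithful dataset is bimodal, so some deviation is expected here.
%disp(ratio);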
%% Plots
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
figure('PaperPosition',[0 0 10 4],'position',[10,10,1300,500]);
clrmap = min(lines(nbStd)+0.6,1);
mrg = (max(Data') - min(Data')) * 0.52;
for nn=1:nbStd
subplot(1,nbStd,nn); hold on; axis off; title(['Contour of ' num2str(ChiSq(nn),'%.2f') ' \Sigma'],'fontsize',16);
for n=nn:nn %-1:1
plotGMM(model.Mu, model.Sigma*ChiSq(n), clrmap(n,:),1);
end
plot(Data(1,1:200), Data(2,1:200), 'o','markersize',4,'color',[.5 .5 .5]);
for n=nn:nn %-1:1
hg = plot(Data(1,id(1:200,n)), Data(2,id(1:200,n)), '.','markersize',16,'color',max(clrmap(n,:)-0.2,0));
end
hl = legend(hg, sprintf('%s\n%s',[num2str(sum(id(:,nn))) ' datapoints'], ['(' num2str(ratio(nn)*100,'%.0f') '% of total number)']));
set(hl,'Box','off','Location','SouthOutside','fontsize',16);
axis([min(Data(1,:))-mrg(1) max(Data(1,:))+mrg(1) min(Data(2,:))-mrg(2) max(Data(2,:))+mrg(2)]);
%axis equal; set(gca,'Xtick',[]); set(gca,'Ytick',[]);
end
%print('-dpng','graphs/demo_Gaussian01.png');
%pause;
%close all;
function demo_Gaussian02
% Conditional probability with a multivariate normal distribution.
%
% Writing code takes time. Polishing it and making it available to others takes longer!
% If some parts of the code were useful for your research or for a better understanding
% of the algorithms, please reward the authors by citing the related publications,
% and consider making your own research available in this way.
%
% @article{Calinon16JIST,
% author="Calinon, S.",
% title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
% journal="Intelligent Service Robotics",
% publisher="Springer Berlin Heidelberg",
% doi="10.1007/s11370-015-0187-9",
% year="2016",
% volume="9",
% number="1",
% pages="1--29"
% }
%
% Copyright (c) 2015 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
%
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
%
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
%
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
addpath('./m_fcts/');
%% Parameters
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.nbVar = 2; %Number of variables [x1,x2]
%% Load data
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
load('data/faithful.mat');
Data = faithful';
Data(1,:) = Data(1,:)*1E1;
%% Gaussian conditioning
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.Mu = mean(Data,2);
model.Sigma = cov(Data');
in=1; out=2;
DataIn = 50;
%See for example section "2.3.1 Conditional Gaussian distributions" in Bishop's book,
%or the "Conditional distribution" section of the multivariate normal distribution Wikipedia page
MuOut = model.Mu(out) + model.Sigma(out,in)/model.Sigma(in,in) * (DataIn-model.Mu(in));
SigmaOut = model.Sigma(out,out) - model.Sigma(out,in)/model.Sigma(in,in) * model.Sigma(in,out);
slope = model.Sigma(out,in)/model.Sigma(in,in);
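% %Optional numerical check (sketch, assuming the Statistics and Machine Learning Toolbox
% %is available): sample from the joint distribution, keep the samples whose input lies
% %close to DataIn, and compare their statistics with MuOut and SigmaOut
% X = mvnrnd(model.Mu', model.Sigma, 1E5)';
% idTmp = abs(X(in,:) - DataIn) < 1;
% [mean(X(out,idTmp)) MuOut; var(X(out,idTmp)) SigmaOut]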
%% Plot
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
mrg = [10 10];
limAxes = [min(Data(1,:))-mrg(1) max(Data(1,:))+mrg(1) min(Data(2,:))-mrg(2) max(Data(2,:))+mrg(2)];
figure('PaperPosition',[0 0 4 3],'position',[10,50,1600,1200]); hold on; %axis off;
plot(DataIn+[-50,50], MuOut+slope*[-50,50], ':','linewidth',1,'color',[.7 .3 .3]);
plot([model.Mu(1) model.Mu(1)], [limAxes(3) model.Mu(2)], ':','linewidth',1,'color',[.7 .3 .3]);
plot([limAxes(1) model.Mu(1)], [model.Mu(2) model.Mu(2)], ':','linewidth',1,'color',[.7 .3 .3]);
%Plot joint distribution
plotGMM(model.Mu, model.Sigma, [.8 0 0], .4);
%Plot marginal distribution horizontally
plotGaussian1D(model.Mu(1), model.Sigma(1,1), [limAxes(1) limAxes(3) limAxes(2)-limAxes(1) 10], [.8 0 0], .4, 'h');
%Plot marginal distribution vertically
plotGaussian1D(model.Mu(2), model.Sigma(2,2), [limAxes(1) limAxes(3) 10 limAxes(4)-limAxes(3)], [.8 0 0], .4, 'v');
%Plot conditional distribution vertically
plotGaussian1D(MuOut, SigmaOut, [limAxes(1) limAxes(3) 10 limAxes(4)-limAxes(3)], [0 0 .8], .4, 'v');
plot(DataIn,MuOut,'.','markersize',12,'color',[.7 .3 .3]);
plot(DataIn,limAxes(3),'.','markersize',26,'color',[0 0 .8]);
plot([DataIn DataIn], [limAxes(3) MuOut], ':','linewidth',1,'color',[.7 .3 .3]);
plot([limAxes(1) DataIn], [MuOut MuOut], ':','linewidth',1,'color',[.7 .3 .3]);
%plot(Data(1,1:200), Data(2,1:200), 'o','markersize',4,'color',[.5 .5 .5]);
axis(limAxes);
set(gca,'Xtick',[]); set(gca,'Ytick',[]);
%axis equal;
xlabel('$x_1$','fontsize',16,'interpreter','latex');
ylabel('$x_2$','fontsize',16,'interpreter','latex');
%print('-dpng','-r600','graphs/demo_Gaussian02.png');
%pause;
%close all;
function demo_Gaussian03
% Gaussian conditioning with uncertain inputs
%
% Writing code takes time. Polishing it and making it available to others takes longer!
% If some parts of the code were useful for your research or for a better understanding
% of the algorithms, please reward the authors by citing the related publications,
% and consider making your own research available in this way.
%
% @article{Calinon16JIST,
% author="Calinon, S.",
% title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
% journal="Intelligent Service Robotics",
% publisher="Springer Berlin Heidelberg",
% doi="10.1007/s11370-015-0187-9",
% year="2016",
% volume="9",
% number="1",
% pages="1--29"
% }
%
% Copyright (c) 2017 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
%
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
%
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
%
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
addpath('./m_fcts/');
%% Parameters
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.nbVar = 2; %Number of variables [x1,x2]
nbSamples = 10000; %Number of stochastic samples
%% Load data
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
load('data/faithful.mat');
Data = faithful';
Data(1,:) = Data(1,:)*1E1;
%% Gaussian conditioning with uncertain inputs
%% (see for example section "2.3.1 Conditional Gaussian distributions" in Bishop's book,
%% or the "Conditional distribution" section of the multivariate normal distribution Wikipedia page)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
model.Mu = mean(Data,2);
model.Sigma = cov(Data');
in=1; out=2;
DataIn = 50;
SigmaIn = 1E1;
MuOut = model.Mu(out) + model.Sigma(out,in) / (model.Sigma(in,in) + SigmaIn) * (DataIn - model.Mu(in));
SigmaOut = model.Sigma(out,out) - model.Sigma(out,in) / (model.Sigma(in,in) + SigmaIn) * model.Sigma(in,out);
slope = model.Sigma(out,in) / (model.Sigma(in,in) + SigmaIn);
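%The uncertain input x_in ~ N(DataIn, SigmaIn) simply inflates the input block of the joint
%covariance, so the standard conditioning formulas are reused with model.Sigma(in,in)
%replaced by model.Sigma(in,in)+SigmaIn.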
%% Gaussian conditioning generated by a Gaussian product followed by stochastically sampled data on input and output
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Compute product of Gaussians between N(xIn,SigmaIn) and N(Mu(in),Sigma(in,in)) and store it in N(xP,SigmaP)
SigmaTmp = inv(SigmaIn) + inv(model.Sigma(in,in));
MuTmp = SigmaIn \ DataIn + model.Sigma(in,in) \ model.Mu(in);
SigmaP = inv(SigmaTmp);
xP = SigmaP * MuTmp;
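%For a scalar input this product has the closed form
%SigmaP = SigmaIn*model.Sigma(in,in) / (SigmaIn+model.Sigma(in,in)),
%xP = (model.Sigma(in,in)*DataIn + SigmaIn*model.Mu(in)) / (SigmaIn+model.Sigma(in,in))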
%Stochastic sampling on the input
[V,D] = eig(SigmaP);
xIn = repmat(xP,1,nbSamples) + V*D^.5* randn(1,nbSamples);
%Regression
xMuOut = repmat(model.Mu(out),1,nbSamples) + model.Sigma(out,in) / model.Sigma(in,in) * (xIn - repmat(model.Mu(in),1,nbSamples));
xSigmaOut = model.Sigma(out,out) - model.Sigma(out,in) / model.Sigma(in,in) * model.Sigma(in,out);
%Stochastic sampling on the output
[V,D] = eig(xSigmaOut);
xOut = xMuOut + V * D^.5 * randn(1,nbSamples);
%Gaussian fit on the stochastic samples
MuOut2 = mean(xOut,2);
SigmaOut2 = cov(xOut');
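%As nbSamples grows, MuOut2 and SigmaOut2 should converge to the analytic values MuOut and
%SigmaOut computed above (the green and blue vertical profiles in the plot then overlap).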
%% Plot
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
mrg = [10 10];
limAxes = [min(Data(1,:))-mrg(1) max(Data(1,:))+mrg(1) min(Data(2,:))-mrg(2) max(Data(2,:))+mrg(2)];
figure('PaperPosition',[0 0 4 3],'position',[10,50,1600,1200]); hold on; %axis off;
plot(DataIn+[-50,50], MuOut+slope*[-50,50], ':','linewidth',1,'color',[.7 .3 .3]);
plot([model.Mu(1) model.Mu(1)], [limAxes(3) model.Mu(2)], ':','linewidth',1,'color',[.7 .3 .3]);
plot([limAxes(1) model.Mu(1)], [model.Mu(2) model.Mu(2)], ':','linewidth',1,'color',[.7 .3 .3]);
%Plot joint distribution
plotGMM(model.Mu, model.Sigma, [.8 0 0], .4);
%Plot marginal distribution horizontally
plotGaussian1D(model.Mu(1), model.Sigma(1,1), [limAxes(1) limAxes(3) limAxes(2)-limAxes(1) 10], [.8 0 0], .4, 'h');
%Plot marginal distribution vertically
plotGaussian1D(model.Mu(2), model.Sigma(2,2), [limAxes(1) limAxes(3) 10 limAxes(4)-limAxes(3)], [.8 0 0], .4, 'v');
%Plot input distribution horizontally
plotGaussian1D(DataIn, SigmaIn, [limAxes(1) limAxes(3) limAxes(2)-limAxes(1) 10], [0 0 .8], .4, 'h');
%Plot conditional distribution vertically
plotGaussian1D(MuOut, SigmaOut, [limAxes(1) limAxes(3) 10 limAxes(4)-limAxes(3)], [0 0 .8], .4, 'v');
%Plot estimated output distribution from stochastic sampling
plotGaussian1D(MuOut2, SigmaOut2, [limAxes(1) limAxes(3) 10 limAxes(4)-limAxes(3)], [0 .8 0], .4, 'v');
plot(DataIn,MuOut,'.','markersize',12,'color',[.7 .3 .3]);
plot(DataIn,limAxes(3),'.','markersize',12,'color',[.7 .3 .3]);
plot([DataIn DataIn], [limAxes(3) MuOut], ':','linewidth',1,'color',[.7 .3 .3]);
plot([limAxes(1) DataIn], [MuOut MuOut], ':','linewidth',1,'color',[.7 .3 .3]);
axis(limAxes);
set(gca,'Xtick',[]); set(gca,'Ytick',[]);
%axis equal;
%plot(Data(1,1:200), Data(2,1:200), 'o','markersize',4,'color',[.5 .5 .5]);
xlabel('$x_1$','fontsize',16,'interpreter','latex');
ylabel('$x_2$','fontsize',16,'interpreter','latex');
%print('-dpng','-r600','graphs/demo_Gaussian03.png');
%pause;
%close all;
function plotGaussian1D(Mu, Sigma, boundbox, color, valAlpha, orient_option)
% Plot Gaussian profile horizontally or vertically
%
% Writing code takes time. Polishing it and making it available to others takes longer!
% If some parts of the code were useful for your research or for a better understanding
% of the algorithms, please reward the authors by citing the related publications,
% and consider making your own research available in this way.
%
% @article{Calinon16JIST,
% author="Calinon, S.",
% title="A Tutorial on Task-Parameterized Movement Learning and Retrieval",
% journal="Intelligent Service Robotics",
% publisher="Springer Berlin Heidelberg",
% doi="10.1007/s11370-015-0187-9",
% year="2016",
% volume="9",
% number="1",
% pages="1--29"
% }
%
% Copyright (c) 2017 Idiap Research Institute, http://idiap.ch/
% Written by Sylvain Calinon, http://calinon.ch/
%
% This file is part of PbDlib, http://www.idiap.ch/software/pbdlib/
%
% PbDlib is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License version 3 as
% published by the Free Software Foundation.
%
% PbDlib is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with PbDlib. If not, see <http://www.gnu.org/licenses/>.
if nargin<3
boundbox = [-1,1,0,1];
end
if nargin<4
color = [0,0,0];
end
if nargin<5
valAlpha = .4;
end
if nargin<6
orient_option = 'h';
end
nbData = 100;
darkcolor = color * 0.5;
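%The density profile is rescaled so that its peak spans the width/height given in boundbox,
%i.e. only the shape of the Gaussian is displayed, not its absolute density value.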
if orient_option=='h'
Xin(1,:) = linspace(boundbox(1), boundbox(1)+boundbox(3), nbData);
X = [Xin; gaussPDF(Xin, Mu(1), Sigma(1,1))'];
Xout = sum(reshape(X(2,:),nbData,1),2)';
Xout = Xout / max(Xout);
P = [Xin; boundbox(2)+boundbox(4)*Xout];
patch([P(1,1) P(1,:) P(1,end)], [boundbox(2) P(2,:) boundbox(2)], ...
color, 'lineWidth', 1, 'EdgeColor', darkcolor, 'facealpha', valAlpha,'edgealpha', valAlpha);
plot([Mu(1) Mu(1)], [boundbox(2) max(P(2,:))], '-','linewidth',1,'color',min(darkcolor+0.3,1));
else
Xin(1,:) = linspace(boundbox(2), boundbox(2)+boundbox(4), nbData);
X = [gaussPDF(Xin, Mu(1), Sigma(1,1))'; Xin];
Xout = sum(reshape(X(1,:),nbData,1),2)';
Xout = Xout / max(Xout);
P = [boundbox(1)+boundbox(3)*Xout; Xin];
patch([boundbox(1) P(1,:) boundbox(1)], [P(2,1) P(2,:) P(2,end)], ...
color, 'lineWidth', 1, 'EdgeColor', darkcolor, 'facealpha', valAlpha,'edgealpha', valAlpha);
plot([boundbox(1) max(P(1,:))], [Mu(1) Mu(1)], '-','linewidth',1,'color',min(darkcolor+0.3,1));
end