% // ====================================================================
% // This file is part of the Endmember Induction Algorithms Toolbox for MATLAB
% // Copyright (C) Grupo de Inteligencia Computacional, Universidad del
% // País Vasco (UPV/EHU), Spain, released under the terms of the GNU
% // General Public License.
% //
% // Endmember Induction Algorithms Toolbox is free software: you can redistribute
% // it and/or modify it under the terms of the GNU General Public License
% // as published by the Free Software Foundation, either version 3 of the
% // License, or (at your option) any later version.
% //
% // Endmember Induction Algorithms Toolbox is distributed in the hope that it will
% // be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
% // of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
% // General Public License for more details.
% //
% // You should have received a copy of the GNU General Public License
% // along with Endmember Induction Algorithms Toolbox.
% // If not, see <http://www.gnu.org/licenses/>.
% // ====================================================================
%
%% [E,C] = EIA_ILSIA(data,alpha)
%
% Manuel Grana
% Miguel Angel Veganzones
% Grupo de Inteligencia Computacional (GIC), Universidad del Pais Vasco /
% Euskal Herriko Unibertsitatea (UPV/EHU)
% http://www.ehu.es/computationalintelligence
%
% Copyright (2011) Grupo de Inteligencia Computacional @ Universidad del Pais Vasco, Spain.
%
% Incremental lattice Source Induction Algorithm (ILSIA) endmembers induction algorithm.
% ------------------------------------------------------------------------------
% Input: data : column data matrix [nvariables x nsamples]
% alpha : Chebyshev-best approximation tolerance threshold (>= 0). Default = 0.
%
% Output: E : set of induced endmembers [nvariables x p]
% C : induced endmembers indexes vector [nsamples] with {0,1} values, where '1' indicates that the corresponding sample has been identified as an endmember.
%
% Bibliographical references:
% [1] M. Graña, D. Chyzhyk, M. García-Sebastián, y C. Hernández, “Lattice independent component analysis for functional magnetic resonance imaging”, Information Sciences, vol. 181, nº. 10, págs. 1910-1928, May. 2011.
function [E,C] = EIA_ILSIA(data,alpha)
% EIA_ILSIA Incremental Lattice Source Induction Algorithm (ILSIA).
%   Induces a set of lattice-independent sources (endmembers) from the
%   column data matrix DATA [nvariables x nsamples]. ALPHA (>= 0) is the
%   Chebyshev-best-approximation tolerance; with ALPHA = 0 (default) the
%   exact lattice-dependence test is used instead.
%   E   : induced endmembers [nvariables x p], stored in the ORIGINAL
%         (unnormalized) data scale.
%   C   : {0,1} vector [1 x nsamples]; 1 marks samples selected as endmembers.
%   Relies on the toolbox helpers EIA_LAM (lattice auto-associative memory)
%   and EIA_CHEBYSHEV (Chebyshev distance), defined elsewhere in the toolbox.
%% Parameters
if (nargin == 0)
error('Insufficient parameters');
end
% Short-circuit || guarantees alpha is never read when nargin < 2.
if (nargin < 2 || alpha < 0)
alpha = 0;
end
%% data size
[nvariables,nsamples] = size(data);
%% data normalization (per-variable z-score; the algorithm works on data_Z,
%% but endmembers are reported from the raw data)
data_Z = zeros(nvariables,nsamples);
mean_data = mean(data,2);
std_data = zeros(nvariables,1);
for i=1:nvariables
std_data(i) = std(data(i,:));
end
% NOTE(review): a constant variable (std = 0) makes this division produce
% NaN/Inf -- presumably inputs are non-degenerate; confirm upstream.
for i=1:nsamples
data_Z(:,i) = (data(:,i) - mean_data) ./ std_data;
end
%% Algorithm Initialization
% Initialize endmembers set and index vector
E = [];
C = zeros(1,nsamples);
% Initial LIS: seeded with one sample drawn uniformly at random
LIS = []; % lattice independent Sources (z-scored columns)
idx = floor(rand*nsamples) + 1;
p = 1; % number of current endmembers
C(idx) = 1;
f = data_Z(:,idx);
LIS(:,p) = f;
new_LIS = true; % forces the first LAM computation inside the loop
signs(:,p) = sign(f); % per-component sign pattern of each stored source
E(:,p) = data(:,idx);
IDX(p) = idx; % indicates which pixel is identified as an endmember
%% Algorithm
% Run over each sample
for i=1:nsamples
% Check for LAAM recalculation (only when the LIS changed last iteration)
if (new_LIS)
% Recalculate LAAM from the current set of sources
Wxx = EIA_LAM(LIS);
new_LIS = false;
end
% Sample (z-scored); all-zero samples are skipped entirely
f = data_Z(:,i);
if sum(abs(f)) > 0
if (alpha <= 0)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% A
% Check if the pixel is lattice dependent:
% y = max-plus recall of f through the memory Wxx
y = zeros(nvariables,1);
for j=1:nvariables
y(j) = max(Wxx(j,:)' + f);
end
% MATLAB evaluates a vector 'if' as true only when ALL elements
% are nonzero, so this is a component-wise |f - y| < 1e-6 test.
if (abs(f - y) < 10^(-6))
% f is lattice dependent: find the most similar stored source
% (by matching sign pattern) and check the norms
sum_signs = 0; selected_e = 1;
for e=1:p
sum_signs_e = sum(signs(:,e)==sign(f));
if (sum_signs_e > sum_signs)
sum_signs = sum_signs_e;
selected_e = e;
end
end
if (norm(LIS(:,selected_e)) < norm(f))
% substitute LIS: f replaces the weaker (smaller-norm) source
new_LIS = true;
C(i) = 1;
C(IDX(selected_e)) = 0; % un-flag the replaced sample
IDX(selected_e) = i;
LIS(:,selected_e) = f;
signs(:,selected_e) = sign(f);
E(:,selected_e) = data(:,i);
end
continue; % dependent sample: skip the max-min dominance test
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
else
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% B
% Chebyshev-Best approximation (tolerance alpha > 0)
x_sharp = zeros(nvariables,1);
Wxx_conjugate = Wxx*(-1);
for j=1:nvariables
x_sharp(j) = min(Wxx_conjugate(:,j) + f);
% NOTE(review): mu is overwritten on every pass, so only the
% value from the last iteration (j = nvariables) survives the
% loop -- verify against the Chebyshev-best-approximation
% formula in ref. [1] of the header.
mu = max(Wxx(:,j) + x_sharp);
mu = max(mu + f)/2;
end
c1 = zeros(nvariables,1);
for j=1:nvariables
c1(j) = max(Wxx(:,j) + mu + x_sharp);
end
% If the reconstruction c1 is within alpha of f (Chebyshev
% distance), treat f as lattice dependent: same most-similar /
% larger-norm substitution logic as branch A above.
if (EIA_CHEBYSHEV(c1, f) < alpha)
% find the most similar and check the norms
sum_signs = 0; selected_e = 1;
for e=1:p
sum_signs_e = sum(signs(:,e)==sign(f));
if (sum_signs_e > sum_signs)
sum_signs = sum_signs_e;
selected_e = e;
end
end
if (norm(LIS(:,selected_e)) < norm(f))
% substitute LIS: f replaces the weaker source
new_LIS = true;
C(i) = 1;
C(IDX(selected_e)) = 0;
IDX(selected_e) = i;
LIS(:,selected_e) = f;
signs(:,selected_e) = sign(f);
E(:,selected_e) = data(:,i);
end
continue; % dependent sample: skip the dominance test
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% C
% Max-Min dominance test over the p stored sources plus the
% candidate f (index p+1 below stands for f). f is accepted as a
% new endmember only if the augmented set passes the test.
mu1 = 0; mu2 = 0;
for j=1:p+1
s1 = zeros(nvariables,1);
s2 = zeros(nvariables,1);
for k=1:p+1
if j~=k
if (j==(p+1))
vi = f;
else
vi = LIS(:,j);
end
if (k ==(p+1))
vk = f;
else
vk = LIS(:,k);
end
d = vi - vk;
m1 = max(d);
m2 = min(d);
s1 = s1 + (d == m1); % count rows where vi-vk attains its max
s2 = s2 + (d == m2); % count rows where vi-vk attains its min
end
end
% vector j dominates when some row attains the extreme against
% all other p vectors
mu1 = mu1 + (max(s1) == p);
mu2 = mu2 + (max(s2) == p);
end
% Dominance must hold for every one of the p+1 vectors
if (mu1==(p+1) || mu2==(p+1))
% new LIS member: append f as endmember p+1
new_LIS = true;
p = p + 1;
C(i) = 1;
IDX(p) = i;
LIS(:,p) = f;
signs(:,p) = sign(f);
E(:,p) = data(:,i);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end %% if norm(i) > 0
end %% for each sample