// ====================================================================
// This file is part of the Endmember Induction Algorithms Toolbox for SCILAB
// Copyright (C) Grupo de Inteligencia Computacional, Universidad del
// País Vasco (UPV/EHU), Spain, released under the terms of the GNU
// General Public License.
//
// Endmember Induction Algorithms Toolbox is free software: you can redistribute
// it and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// Endmember Induction Algorithms Toolbox is distributed in the hope that it will
// be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Endmember Induction Algorithms Toolbox.
// If not, see <http://www.gnu.org/licenses/>.
// ====================================================================
function [E,C] = EIA_EIHA(data,alpha)
    //// [E,C] = EIA_EIHA(data,alpha)
    //
    // Manuel Grana
    // Miguel Angel Veganzones
    // Grupo de Inteligencia Computacional (GIC), Universidad del Pais Vasco /
    // Euskal Herriko Unibertsitatea (UPV/EHU)
    // http://www.ehu.es/computationalintelligence
    //
    // Copyright (2011) Grupo de Inteligencia Computacional @ Universidad del Pais Vasco, Spain.
    // Copyright (2007) GRNPS group @ University of Extremadura, Spain.
    //
    // EIHA endmembers induction algorithm.
    // ------------------------------------------------------------------------------
    // Input:   data  : column data matrix [nvariables x nsamples]
    //          alpha : perturbation tolerance. Default = 2.
    //
    // Output:  E : set of induced endmembers [nvariables x p]
    //          C : induced endmembers indexes vector [nsamples] with {0,1} values,
    //              where '1' indicates that the corresponding sample has been
    //              identified as an endmember.
    //
    // Bibliographical references:
    // [1] M. Grana, I. Villaverde, J. O. Maldonado, and C. Hernandez, "Two lattice
    //     computing approaches for the unsupervised segmentation of hyperspectral
    //     images", Neurocomputing, vol. 72, no. 10-12, pp. 2111-2120, 2009.
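    //
    // Usage sketch (illustrative only, with synthetic data; the variable names
    // below are examples, not part of the toolbox):
    //   data = rand(5, 200);           // random [nvariables x nsamples] matrix
    //   [E, C] = EIA_EIHA(data, 2);    // induce endmembers with alpha = 2
    //   endmember_idx = find(C == 1);  // sample indexes selected as endmembers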
    //// Parameters
    // Use the documented default (alpha = 2) when alpha is missing or negative
    if argn(2) < 2 then
        alpha = 2;
    elseif alpha < 0 then
        alpha = 2;
    end
    //// data size
    [nvariables,nsamples] = size(data);
    //// data normalization (zero mean, unit variance per variable)
    data_Z = zeros(nvariables,nsamples);
    mean_data = mean(data,'c');
    std_data = stdev(data,'c');
    for i=1:nsamples
        data_Z(:,i) = (data(:,i) - mean_data) ./ std_data;
    end
    //// Initialization: take a random sample as the first extreme point
    extrems = [];
    signs = [];
    idx = floor(rand()*nsamples) + 1;
    p = 1;
    f = data(:,idx);
    extrems(:,p) = f;
    signs(:,p) = sign(f - mean_data);
    IDX(p) = idx;
    //// Algorithm
    for i=1:nsamples
        f = data(:,i);
        if sum(abs(f)) > 0 then
            // Perturbations
            fplus = f + alpha*std_data;
            fminus = f - alpha*std_data;
            // flag for pixel extremeness
            new_extreme = %t;
            // Erosive and dilative independence flags
            erosive_indep = ones(p,1);
            dilative_indep = ones(p,1);
            // Check if f lies in the same quadrant as any of the already selected endmembers
            for e=1:p
                if and(signs(:,e) == sign(f - mean_data)) then
                    new_extreme = %f;
                    // Keep whichever sample is farther away from the mean
                    if norm(f - mean_data) > norm(extrems(:,e) - mean_data) then
                        extrems(:,e) = f;
                        IDX(e) = i;
                    end
                    break
                end
            end
            // If f is not in the same quadrant as any already selected
            // endmember (new_extreme == %t), check the perturbations
            for k=1:p
                // Discard f if it is only a perturbation of an already selected endmember
                if and((f - extrems(:,k)).^2 < alpha*std_data) then
                    new_extreme = %f;
                    break
                end
                if and(extrems(:,k) > fminus) then
                    dilative_indep(k) = 0;
                elseif and(extrems(:,k) < fplus) then
                    erosive_indep(k) = 0;
                end
            end
            // check if f is erosively or dilatively independent of all selected endmembers
            independence = %f;
            if and(dilative_indep == 1) then
                independence = %t;
            elseif and(erosive_indep == 1) then
                independence = %t;
            end
            // Check if a new extreme point has been found
            if new_extreme & independence then
                p = p + 1;
                extrems(:,p) = f;
                signs(:,p) = sign(f - mean_data);
                IDX(p) = i;
            end
        end
    end
    //// Output: endmember signatures and {0,1} indicator vector
    E = zeros(nvariables,p);
    C = zeros(1,nsamples);
    for i=1:p
        E(:,i) = data(:,IDX(i));
        C(IDX(i)) = 1;
    end
endfunction