當前位置:首頁 > IT技術 > 系統服務 > 正文

【UWB】ELM,Extreme Learning Machine
2021-10-20 10:40:41


例子 1

【UWB】ELM,Extreme Learning Machine_python

% Task 2: ELM (Extreme Learning Machine) network
% Date: 2021-10-15
% Author: Zhao-Jichao
clear
clc

%% Training
% Training data
data = [760,4550,4550,6300]; % input sample (row vector)
label = [500, 500, 880]; % target (teacher) outputs

[N,n] = size(data); % N samples, n input features

L = 7; % number of hidden-layer neurons
m = 3; % number of output classes

% Randomly initialise the input weights and hidden biases
W = rand(n,L)*2-1; % rand gives (0,1); *2-1 rescales to (-1,1)
b_1 = rand(1,L); % hidden-layer bias row vector
b = b_1(ones(N,1),:); % replicate bias row to an N*L matrix for the vectorised add
H = G(data*W+b); % hidden-layer output matrix

beta = pinv(H)*label; % least-squares (Moore-Penrose) solution for the output weights

output = H * beta; % network output on the training data

%% Validation
validataData = [4580 6000 1290 3960];
G(validataData*W+b) * beta

%% Activation function
function out = G(in)
% G  Element-wise logistic sigmoid: maps any real input into (0,1).
e = exp(-in);
out = 1 ./ (1 + e);
end



例子 2

%% I. Clear the workspace
clear
clc

%% II. Build training / test sets
% 1. Load data (NIR spectra and octane ratings)
load spectra_data.mat

% 2. Randomly split the samples into training and test sets
temp = randperm(size(NIR,1)); % randperm: random permutation of sample indices

% Training set -- 50 samples
P_train = NIR(temp(1:50),:)'; % 401x50
T_train = octane(temp(1:50),:)'; % 1x50

% Test set -- 10 samples
P_test = NIR(temp(51:end),:)';
T_test = octane(temp(51:end),:)';

% The random split gives a fairer evaluation than a fixed split

%% III. Data normalisation
% 1. Inputs
[Pn_train,inputps] = mapminmax(P_train);
Pn_test = mapminmax('apply',P_test,inputps);

% 2. Outputs
[Tn_train,outputps] = mapminmax(T_train);
Tn_test = mapminmax('apply',T_test,outputps);

%% IV. Create / train the ELM (300 hidden neurons, sigmoid, regression)
[IW,B,LW,TF,TYPE] = elmtrain(Pn_train,Tn_train,300,'sig',0);

%% V. Simulate the ELM on the test set
tn_sim = elmpredict(Pn_test,IW,B,LW,TF,TYPE);
% 1. Undo the output normalisation
T_sim = mapminmax('reverse',tn_sim,outputps);

%% VI. Compare results
result = [T_test' T_sim'];

% 1. Mean squared error
E = mse(T_sim - T_test);

% 2. Coefficient of determination (R^2)
N = length(T_test);
R2 = (N*sum(T_sim.*T_test) - sum(T_sim)*sum(T_test))^2 / ...
     ((N*sum(T_sim.^2) - sum(T_sim)^2) * (N*sum(T_test.^2) - sum(T_test)^2));

%% VII. Plot
figure(1)
plot(1:N,T_test,'r-*',1:N,T_sim,'b:o')
grid on
legend('真實值','預測值')
xlabel('樣本編號')
ylabel('辛烷值')
string = {'測試集辛烷值含量預測結果對比(ELM)';['(mse = ' num2str(E) ' R^2 = ' num2str(R2) ')']};
title(string)


%%
function [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% ELMTRAIN Create and Train an Extreme Learning Machine
% Syntax
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% Description
% Input
% P - Input Matrix of Training Set (R*Q)
% T - Output Matrix of Training Set (S*Q)
% N - Number of Hidden Neurons (default = Q)
% TF - Transfer Function:
% 'sig' for Sigmoidal function (default)
% 'sin' for Sine function
% 'hardlim' for Hardlim function
% TYPE - Regression (0,default) or Classification (1)
% Output
% IW - Input Weight Matrix (N*R)
% B - Bias Matrix (N*1)
% LW - Layer Weight Matrix (N*S)
% Example
% Regression:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',0)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% Classification:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',1)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% See also ELMPREDICT
% Yu Lei,11-7-2010
% Copyright www.matlabsky.com
% $Revision:1.0 $
if nargin < 2
error('ELM:Arguments','Not enough input arguments.');
end
if nargin < 3
N = size(P,2); % default: one hidden neuron per training sample
end
if nargin < 4
TF = 'sig';
end
if nargin < 5
TYPE = 0;
end
if size(P,2) ~= size(T,2)
error('ELM:Arguments','The columns of P and T must be same.');
end
[R,Q] = size(P);
if TYPE == 1
% Classification: expand class labels into one-of-S indicator vectors
T = ind2vec(T);
end
[S,Q] = size(T);
% Randomly Generate the Input Weight Matrix in (-1,1)
IW = rand(N,R) * 2 - 1;
% Randomly Generate the Bias Matrix
B = rand(N,1);
BiasMatrix = repmat(B,1,Q); % replicate bias column across all Q samples
% Calculate the Layer Output Matrix H
tempH = IW * P + BiasMatrix;
switch TF
case 'sig'
H = 1 ./ (1 + exp(-tempH));
case 'sin'
H = sin(tempH);
case 'hardlim'
H = hardlim(tempH);
otherwise
% Fail fast: without this, an unknown TF leaves H undefined and
% the pinv call below dies with a confusing error.
error('ELM:Arguments','Unknown transfer function ''%s''.',TF);
end
% Calculate the Output Weight Matrix via the Moore-Penrose pseudoinverse
LW = pinv(H') * T';
end



本文摘自 :https://blog.51cto.com/u

開通會員,享受整站包年服務立即開通 >