File:SG RLS LMS chan inv.png

SG_RLS_LMS_chan_inv.png (561 × 420 pixels, file size: 15 KB, MIME type: image/png)

Summary

Description
English: Developed according to TU Ilmenau teaching materials.

MATLAB/Octave source code:
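Note that the script relies on convmtx, which is typically provided by MATLAB's Signal Processing Toolbox; under GNU Octave it is presumably available from the Octave-Forge signal package (loaded with pkg load signal).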

clear all; close all; clc 

%% Initialization

% channel parameters
sigmaS = 1; %signal power
sigmaN = 0.01; %noise power
% CSI (channel state information):
channel = [0.722-1j*0.779; -0.257-1j*0.722; -0.789-1j*1.862]; 

M = 5; % filter order

% step sizes
mu_LMS = [0.01,0.07];
mu_SG = [0.01,0.07];

NS = 1000; %number of symbols
NEnsembles = 1000; %number of ensembles

%% Compute Rxx and p

%the maximum index of channel taps (l=0,1...L):
L = length(channel) - 1;  
H = convmtx(channel, M-L); %channel matrix (Toeplitz structure)
Rnn = sigmaN*eye(M); %the noise covariance matrix

%the received signal covariance matrix:
Rxx = sigmaS*(H*H') + Rnn;
%the cross-correlation vector 
%between the tap-input vector and the desired response: 
p = sigmaS*H(:,1); 

% An inline function to calculate MSE(w) for a weight vector w
calc_MSE = @(w) real(w'*Rxx*w - w'*p - p'*w + sigmaS);
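% Since e(n) = d(n) - w'*x(n), the MSE is the quadratic form
%   J(w) = w'*Rxx*w - w'*p - p'*w + sigmaS,
% which is minimized by the Wiener solution wopt = Rxx\p (computed below for
% the reference curve); its minimum is the MMSE floor sigmaS - p'*(Rxx\p).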

%% Adaptive Equalization
N_test = 2;
MSE_LMS = zeros(NEnsembles, NS, N_test);
MSE_SG = zeros(NEnsembles, NS, N_test);
MSE_RLS = zeros(NEnsembles, NS, N_test);

for nEnsemble = 1:NEnsembles
	%transmitted training symbols (BPSK):
	symbols = sigmaS*sign(randn(1,NS));
	%received noisy symbols:
	X = H*hankel(symbols(1:M-L),[symbols(M-L:end),zeros(1,M-L-1)]) + ...
		sqrt(sigmaN)*(randn(M,NS)+1j*randn(M,NS))/sqrt(2); 
	for n_mu = 1:N_test
		w_LMS = zeros(M,1);
		w_SG = zeros(M,1);
		p_SG = zeros(M,1);
		R_SG = zeros(M);
		for n = 1:NS
			%% LMS - Least Mean Square
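			% LMS replaces Rxx and p by their instantaneous (single-snapshot)
			% estimates, i.e. a stochastic-gradient step along x(n)*conj(e(n)):
			%   w(n+1) = w(n) + mu*x(n)*conj(e(n)), with e(n) = d(n) - w(n)'*x(n)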
			e = symbols(n) - w_LMS'*X(:,n);
			w_LMS = w_LMS + mu_LMS(n_mu)*X(:,n)*conj(e);
			MSE_LMS(nEnsemble,n,n_mu)= calc_MSE(w_LMS);
			
			%% SG - Stochastic gradient
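			% This SG variant averages the snapshot estimates over the first n
			% samples (R_SG approximates Rxx, p_SG approximates p) and then takes
			% a deterministic gradient step mu*(p_SG - R_SG*w_SG), so the gradient
			% noise shrinks as n grows.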
			R_SG = 1/n*((n-1)*R_SG + X(:,n)*X(:,n)');
			p_SG = 1/n*((n-1)*p_SG + X(:,n)*conj(symbols(n)));
			w_SG = w_SG + mu_SG(n_mu)*(p_SG - R_SG*w_SG);
			MSE_SG(nEnsemble,n,n_mu)= calc_MSE(w_SG);
		end
	end
	
	%RLS - Recursive Least Squares
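	% RLS minimizes the exponentially weighted least-squares cost
	%   sum_{i=1..n} lambda^(n-i) * |d(i) - w'*x(i)|^2,
	% propagating P (the inverse of the weighted sample correlation matrix) via
	% the matrix inversion lemma and applying the gain vector K to the a priori
	% error; lambda = 1 corresponds to ordinary growing-window least squares.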
	lambda_RLS = [0.8; 1]; %forgetting factors
	for n_lambda=1:length(lambda_RLS)
		%Initialize the weight vectors for RLS
		delta = 1; %regularization constant for initializing P
		w_RLS = zeros(M,1);
		P = eye(M)/delta; %inverse (weighted) correlation matrix estimate, P(0) = I/delta
		PI = zeros(M,1); %intermediate vector P(n-1)*x(n)
		K = zeros(M,1); %gain vector
		for n=1:NS
			% the recursive process of RLS
			PI = P*X(:,n);
			K = PI/(lambda_RLS(n_lambda)+X(:,n)'*PI);
			ee = symbols(n) - w_RLS'*X(:,n);
			w_RLS = w_RLS + K*conj(ee);
			MSE_RLS(nEnsemble,n,n_lambda)= calc_MSE(w_RLS);
			P = P/lambda_RLS(n_lambda) - K/lambda_RLS(n_lambda)*X(:,n)'*P;
		end
	end
end

MSE_LMS_1 = mean(MSE_LMS(:,:,1));
MSE_LMS_2 = mean(MSE_LMS(:,:,2));
MSE_SG_1 = mean(MSE_SG(:,:,1));
MSE_SG_2 = mean(MSE_SG(:,:,2));
MSE_RLS_1 = mean(MSE_RLS(:,:,1));
MSE_RLS_2 = mean(MSE_RLS(:,:,2));

n = 1:NS;
m = [1 3 6 10 30 60 100 300 600 1000];

figure(1)
loglog(m, MSE_LMS_1(m),'x','linewidth',2, 'color','blue');
hold all;
loglog(m, MSE_LMS_2(m),'o','linewidth',2, 'color','blue');
loglog(m, MSE_SG_1(m),'x','linewidth',2, 'color','red');
loglog(m, MSE_SG_2(m),'o','linewidth',2, 'color','red');
loglog(m, MSE_RLS_1(m),'x','linewidth',2, 'color','green');
loglog(m, MSE_RLS_2(m),'o','linewidth',2, 'color','green');

wopt = Rxx\p;
MSEopt = calc_MSE(wopt);
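% MSEopt = calc_MSE(wopt) = sigmaS - p'*(Rxx\p) is the Wiener (MMSE) floor;
% it is plotted below as the flat black reference line that the adaptive
% algorithms should approach.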

loglog(n, MSE_LMS_1(n),'linewidth',2, 'color','blue');
loglog(n, MSE_LMS_2(n),'linewidth',2, 'color','blue');
loglog(n, MSE_SG_1(n),'linewidth',2, 'color','red');
loglog(n, MSE_SG_2(n),'linewidth',2, 'color','red');
loglog(n, MSE_RLS_1(n),'linewidth',2, 'color','green');
loglog(n, MSE_RLS_2(n),'linewidth',2, 'color','green');

loglog(n, MSEopt*ones(size(n)), 'color','black','linewidth',2);
grid on
xlabel('Ns');
ylabel('Mean-Square Error');
title('LMS, SG, RLS')
legend(['LMS, \mu=' num2str(mu_LMS(1))],['LMS, \mu=' num2str(mu_LMS(2))],...
['SG, \mu=' num2str(mu_SG(1))],['SG, \mu=' num2str(mu_SG(2))],...
['RLS, \lambda=' num2str(lambda_RLS(1))],['RLS, \lambda=' num2str(lambda_RLS(2))],...
'Wiener solution')
Date: 2 March 2019
Source: Own work
Author: Kirlf

Licensing

I, the copyright holder of this work, hereby publish it under the following license:
This file is licensed under the Creative Commons Attribution-Share Alike 4.0 International license.
You are free:
  • to share – to copy, distribute and transmit the work
  • to remix – to adapt the work
Under the following conditions:
  • attribution – You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
  • share alike – If you remix, transform, or build upon the material, you must distribute your contributions under the same or compatible license as the original.

Captions

The mean-square error performance of the least mean squares (LMS) filter, stochastic gradient (SG) descent, and the recursive least squares (RLS) filter as a function of the number of training symbols.

File history

  • current: 18:42, 15 July 2019 – 561 × 420 (15 KB) – Kirlf – Noise power was wrong in signal modeling.
  • 15:00, 2 March 2019 – 561 × 420 (15 KB) – Kirlf – User created page with UploadWizard