
2) GOLDEN SEARCH

% -------------------------------------------------------------------
% METHOD: Golden Search
% Golden Line Search
% -------------------------------------------------------------------
function [s,Xs] = LSGolden(Fnome, X, d, g)

tol = 1e-2;           % Tolerance
GR  = (sqrt(5)+1)/2;  % Golden ratio
tl  = (sqrt(5)-1)/2;  % Factor related to the golden ratio
SV  = 0.0005;         % Initial step value

s   = SV;               % Alpha
Xs  = X + s*d;          % X(s)
fXs = feval(Fnome,Xs);  % f(Xs)
f0  = feval(Fnome,X);   % f(0)

% Shrink the initial step until it produces a decrease
while fXs > f0
    SV  = SV/1000;
    s   = SV;
    Xs  = X + s*d;
    fXs = feval(Fnome,Xs);
end

% Expand the step by golden-ratio powers until f starts to increase
fXso = fXs;
i = 0;
while (fXso >= fXs)
    i = i + 1;
    fXso = fXs;
    so = s;
    s = so + SV*GR^i;
    Xs = X + s*d;
    fXs = feval(Fnome,Xs);
    sp(i) = s;
end

% Interval of uncertainty [si, su]
if i > 2
    su = sp(i);
    si = sp(i-2);
else
    su = sp(i);
    si = so;
end

% Golden-section reduction of the interval
I = su - si;
cont = 0;
while (I >= tol)
    sb  = su - (1 - tl)*I;
    sa  = su - tl*I;
    Xsb = X + sb*d;
    Xsa = X + sa*d;
    fsa = feval(Fnome, Xsa);
    fsb = feval(Fnome, Xsb);
    if fsa > fsb
        si = sa;
    elseif fsb > fsa
        su = sb;
    else
        si = sa;
        su = sb;
    end
    I = su - si;
    cont = cont + 1;
end

s   = (su + si)/2;      % Alpha
Xs  = X + s*d;          % X(alpha)
fXs = feval(Fnome,Xs);  % f(alpha)
g0  = g;                % f'(0)

% Descent check from condition (9.4) in Arora's book
% gfd = g0'*d;
% if gfd > 0
%     error('not a descent direction')
% end
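To exercise the routine in isolation, a minimal driver like the one below can be used. The quadratic objective, starting point, and direction here are assumptions for illustration only; they are not part of the original assignment.

% Minimal test sketch for LSGolden (assumed data):
Fq = @(X) X(1)^2 + 4*X(2)^2;    % hypothetical objective
X0 = [2; 1];                    % hypothetical starting point
g  = [2*X0(1); 8*X0(2)];        % gradient of Fq at X0
d  = -g;                        % steepest-descent direction
[s, Xs] = LSGolden(Fq, X0, d, g);
fprintf('step s = %g, f(Xs) = %g\n', s, Fq(Xs));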

QUADRATIC INTERPOLATION
% -------------------------------------------------------------------
% METHOD: Golden Search + Quadratic
% Golden + Quadratic Line Search
% -------------------------------------------------------------------
function [s,Xs] = LSQuadra(Fnome, X, d, g)

tol = 1e-1;           % Tolerance
GR  = (sqrt(5)+1)/2;  % Golden ratio
tl  = (sqrt(5)-1)/2;  % Factor related to the golden ratio
SV  = 0.05;           % Initial step value

s   = SV;               % Alpha
Xs  = X + s*d;          % X(s)
fXs = feval(Fnome,Xs);  % f(Xs)
f0  = feval(Fnome,X);   % f(0)

% Shrink the initial step until it produces a decrease
while fXs > f0
    SV  = SV/100;
    s   = SV;
    Xs  = X + s*d;
    fXs = feval(Fnome,Xs);
end

% Expand the step by golden-ratio powers until f starts to increase
fXso = fXs;
i = 0;
while (fXso >= fXs)
    i = i + 1;
    fXso = fXs;
    so = s;
    s = so + SV*GR^i;
    Xs = X + s*d;
    fXs = feval(Fnome,Xs);
    sp(i) = s;
end

% Three points s1 < si < su bracketing the minimizer
if i > 2
    s1 = sp(i-2);
    su = sp(i);
    si = sp(i-1);
else
    s1 = so;
    su = sp(i);
    si = (s1+su)/2;
end

% Quadratic interpolation iterations
I = 100;
h = 0;
while I > tol
    X1 = X + s1*d;  fX1 = feval(Fnome,X1);
    Xi = X + si*d;  fXi = feval(Fnome,Xi);
    Xu = X + su*d;  fXu = feval(Fnome,Xu);

    % Coefficients of the quadratic q(s) = a0 + a1*s + a2*s^2 through
    % the three points, and its vertex sbarra
    a2 = (1/(su-si))*((fXu-fX1)/(su-s1) - (fXi-fX1)/(si-s1));
    a1 = (fXi-fX1)/(si-s1) - a2*(s1+si);
    a0 = fX1 - a1*s1 - a2*s1^2;
    sbarra = -(a1/(2*a2));
    Xp  = X + sbarra*d;
    fXp = feval(Fnome,Xp);

    % Update the bracket around the best point
    if fXp >= fXi
        if sbarra > si
            su = sbarra;
            s0 = si;
        elseif sbarra < si
            s1 = sbarra;
            s0 = si;
        end
    elseif fXp < fXi
        if sbarra > si
            s1 = sbarra;
            s0 = sbarra;
        elseif sbarra < si
            su = si;
            si = sbarra;
            s0 = sbarra;
        end
    end
    h = h + 1;
    I = abs(fXp) - abs(fXi);
end

s   = s0;               % Alpha
Xs  = X + s*d;          % X(alpha)
fXs = feval(Fnome,Xs);  % f(alpha)
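The three-point quadratic fit above can be checked independently: the coefficients reproduce the sampled values, and the vertex -a1/(2*a2) is the minimizer of the fitted parabola. A small sketch with made-up sample points (not from the assignment):

% Verify the fit formulas on assumed points (s1, si, su):
s1 = 0.1; si = 0.4; su = 0.9;
f  = @(s) (s - 0.5).^2 + 2;            % hypothetical 1-D profile f(s)
f1 = f(s1); fi = f(si); fu = f(su);
a2 = (1/(su-si))*((fu-f1)/(su-s1) - (fi-f1)/(si-s1));
a1 = (fi-f1)/(si-s1) - a2*(s1+si);
a0 = f1 - a1*s1 - a2*s1^2;
q  = @(s) a0 + a1*s + a2*s.^2;
fprintf('fit errors: %g %g %g\n', q(s1)-f1, q(si)-fi, q(su)-fu);  % all zero
fprintf('vertex = %g (exact minimizer is 0.5)\n', -a1/(2*a2));

Because the profile here is itself quadratic, the fit is exact and the vertex lands on the true minimizer in a single step.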

Questions: 3 and 4.
% -------------------------------------------------------------------
% [X,f,g] = Newton (Fnome, Gnome, Hnome, X0, Type)
% X     = Optimal solution
% f     = Optimal value of objective function
% g     = Optimal value of gradient of objective function
% Fnome = name of objective function              f = Fnome(X0)
% Gnome = name of gradient of objective function  g = Gnome(X0)
% Hnome = name of Hessian of objective function   h = Hnome(X0)
% X0    = Initial point
% Type  = Type of algorithm: 0=Gradient; 1=Newton; 2=Quasi Newton
% -------------------------------------------------------------------
function [X,f1,g1] = Newton (Fnome, Gnome, Hnome, X0, Type)

% Maximum iteration number
NIter = 100;

% Gradient of cost function at X0
g0 = feval(Gnome, X0);

% Convergence test at X0
Epsilon = 1.e-6 * norm(g0);
if (norm(g0) < Epsilon)
    return;
end

% Initialization
X   = X0;
dim = size(X,1);
H   = eye(dim,dim);
g   = feval(Gnome, X);
count = 0;

% Iterations
for i = 1:NIter

    % Compute descent direction
    d = -(H\g);
    Beta  = 1/norm(H);
    theta = norm(inv(H))*norm(H);
    mi    = 1.2;
    cont  = 0;

    % 3) Item B: modify the Hessian (Bk = H + mi*I, factored by LDL)
    % until d satisfies the descent condition
    while g'*d > -theta*norm(g)*norm(d) & mi <= 100
        Bk = H + mi*eye(dim,dim);
        [Lb,Db] = ldl(Bk);
        z = Lb\(-g);
        d = (Db*Lb')\z;
        mi = max(2*mi,10);
        cont = cont + 1;
    end

    % 3) Item C: if the modification fails, regularize H directly
    Ep = 0.1;
    if mi > 100 & g'*d > -theta*norm(g)*norm(d)
        H = H + Ep*eye(dim,dim);
        d = -(H\g);
    end

    % 3) Item A: safeguard against a direction that is too short
    if norm(d) < Beta*norm(g)
        d = Beta*(norm(g)/norm(d))*d;
    end

    % Line Search
    [s,X1] = LSGolden (Fnome, X, d, g);
    % [s,X1] = LSQuadra (Fnome, X, d, g);
    % [s,X1] = LSArmijo (Fnome, X, d, g);
    % [alphaI,xI,gI] = LSPolynomial(func, gfunc, x0, d)

    % Convergence verification
    f1 = feval(Fnome, X1);
    g1 = feval(Gnome, X1);
    norma = (norm(g1) / norm(g0));
    if (norma < Epsilon)
        disp('Convergence OK!');
        return
    end

    % Plotting and printing
    dx = [X(1) X1(1)];
    dy = [X(2) X1(2)];
    plot(dx,dy,'-ro');
    disp(sprintf('\n Iteration number %d', i));
    disp(sprintf(' f = %g   Step = %d', f1, s));
    disp(sprintf(' X = '));
    disp(sprintf(' %d ', X1));
    disp(sprintf(' g/g0 = '));
    disp(sprintf(' %d ', norma));

    % Variables updating
    switch(Type)
        case 1, % Newton
            H = feval(Hnome, X1);
        case 2, % Quasi-Newton (BFGS-style update)
            y = g1 - g;
            D = (y*y')/dot(y,(s*d));
            E = (g*g')/dot(g,d);
            H = H + D + E;
        case 3, % BFGS_2
    end
    X = X1;
    g = g1;
end
error('ERROR: NO CONVERGENCE')
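A minimal driver for this routine might look like the following; the quadratic test problem and starting point are assumptions for illustration, not part of the original solution.

% Hypothetical driver for Newton (assumed test problem):
Fq = @(X) X(1)^2 + 10*X(2)^2;    % objective
Gq = @(X) [2*X(1); 20*X(2)];     % gradient
Hq = @(X) [2 0; 0 20];           % Hessian
figure; hold on;                 % the routine plots each iterate
[X, f, g] = Newton(Fq, Gq, Hq, [10; 1], 1);   % Type 1 = Newton

With the exact Hessian and an accurate line search, a single Newton step should already drive the gradient below the tolerance on this quadratic.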

4) Quasi-Newton
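The quasi-Newton variant reuses the Newton routine above: per its header comment, passing Type = 2 selects the case 2 branch, which replaces the exact Hessian with two rank-one corrections built from the gradient change y = g1 - g. A call sketch with the same assumed test problem as before:

% Quasi-Newton run: same hypothetical problem, Hessian updated internally
[X, f, g] = Newton(Fq, Gq, Hq, [10; 1], 2);   % Type 2 = Quasi-Newton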

4) CONJUGATE GRADIENT
% -------------------------------------------------------------------
% [X,f,g] = ConjGrad (Fnome, Gnome, Hnome, X0, Type)
% X     = Optimal solution
% f     = Optimal value of objective function
% g     = Optimal value of gradient of objective function
% Fnome = name of objective function              f = Fnome(X0)
% Gnome = name of gradient of objective function  g = Gnome(X0)
% Hnome = name of Hessian of objective function   h = Hnome(X0)
% X0    = Initial point
% Type  = Type of algorithm: 0=Gradient; 1=Newton; 2=Quasi Newton
% -------------------------------------------------------------------
function [X,f1,g1] = ConjGrad (Fnome, Gnome, Hnome, X0, Type)

% Maximum iteration number
NIter = 100;

% Gradient of cost function at X0
g0 = feval(Gnome, X0);

% Convergence test at X0
Epsilon = 1.e-6 * norm(g0);
if (norm(g0) < Epsilon)
    return;
end

% Initialization
X   = X0;
dim = size(X,1);
H   = eye(dim,dim);
g   = feval(Gnome, X);
count = 0;
d0  = -g0;

% Iterations
for i = 1:NIter

    % Compute descent direction (Fletcher-Reeves conjugate gradient)
    Beta = (norm(g)/norm(g0))^2;
    d = -g + Beta*d0;

    % Line Search
    [s,X1] = LSGolden (Fnome, X, d, g);
    % [s,X1] = LSQuadra (Fnome, X, d, g);
    % [s,X1] = LSArmijo (Fnome, X, d, g);
    % [alphaI,xI,gI] = LSPolynomial(func, gfunc, x0, d)

    % Convergence verification
    f1 = feval(Fnome, X1);
    g1 = feval(Gnome, X1);
    norma = (norm(g1) / norm(g0));
    if (norm(g1) < Epsilon)
        disp('Convergence OK!');
        return
    end

    % Plotting and printing
    dx = [X(1) X1(1)];
    dy = [X(2) X1(2)];
    plot(dx,dy,'-ro');
    disp(sprintf('\n Iteration number %d', i));
    disp(sprintf(' f = %g   Step = %d', f1, s));
    disp(sprintf(' X = '));
    disp(sprintf(' %d ', X1));
    disp(sprintf(' g/g0 = '));
    disp(sprintf(' %d ', norma));

    % Variables updating
    switch(Type)
        case 1, % Newton
            H = feval(Hnome, X1);
            d0 = d;
            g0 = g;
        case 2, % Quasi-Newton (BFGS-style update)
            y = g1 - g;
            D = (y*y')/dot(y,(s*d));
            E = (g*g')/dot(g,d);
            H = H + D + E;
            d0 = d;
            g0 = g;
        case 3, % BFGS_2
    end
    X = X1;
    g = g1;
end
error('ERROR: NO CONVERGENCE')
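As with the Newton routine, a small driver can exercise ConjGrad; the convex quadratic below is assumed for illustration. On a strictly convex quadratic in two variables, conjugate gradient with an accurate line search is expected to converge in about two iterations.

% Hypothetical driver for ConjGrad (assumed test problem):
Fq = @(X) 0.5*(3*X(1)^2 + 2*X(1)*X(2) + 3*X(2)^2);  % convex quadratic
Gq = @(X) [3*X(1) + X(2); X(1) + 3*X(2)];           % gradient
Hq = @(X) [3 1; 1 3];                               % Hessian (not used by the CG step)
figure; hold on;
[X, f, g] = ConjGrad(Fq, Gq, Hq, [2; -1], 1);       % Type 1 updates d0 and g0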
