% Jasmine Roberts - MIT Fab Lab
            %% (15.1) (a) Rosenbrock function
            % Plot the Rosenbrock "banana" function as a surface and as a
            % filled contour map over the square [-2,2] x [-2,2].
            f = @(x1,x2) 100*(x2 - x1.^2).^2 + (1 - x1).^2;
            [X1, X2] = meshgrid(-2:0.1:2, -2:0.1:2);
            RR = f(X1,X2);

            % Tall, narrow figure occupying the left strip of the screen.
            % (The previous get(0,'ScreenSize') call discarded its result and
            % has been removed as dead code.)
            figure('Units','normalized','Position',[0 0 .3 1])
            h1 = subplot(211);
            surf(X1,X2,RR,'EdgeColor',[.1 .1 .1])
            title('Rosenbrock function')
            h2 = subplot(212);
            contourf(X1,X2,RR,30,'EdgeColor',[.1 .1 .1])
            colorbar %('location','westoutside')
            set([h1 h2], 'LooseInset',[0 0 0 0])
            colormap(flipud(colormap))

            %% (15.1)-(b) the downhill simplex method
            % Nelder-Mead downhill simplex minimization of the Rosenbrock
            % function (after Mathews & Fink, "Numerical Methods Using MATLAB",
            % program nelder.m). The main while loop below reconstructs the
            % reflection / expansion / contraction / shrink logic that was
            % garbled in the previous version of this file.

            clc

            fn = @(X) 100*(X(2) - X(1).^2).^2 + (1 - X(1)).^2 ; %   fn        name of the vector function
            V=[-1 -1;-0.9 -0.8;-0.8 -0.9];%   V         starting simplex
            min1=1; %   min1      minimum number of iterations
            max1=10000; %   max1      maximum number of iterations
            epsilon=0.001;%   epsilon   convergence tolerance

            %   V0        vertex V0 for the minimum
            %   y0        function value  Fn(V0)
            %   dV        size of the final simplex
            %   dy        error bound for the minimum
            %   P         matrix containing the vertices in the iterations
            %   Q         array containing iterations for  F(P)

            [mm n] = size(V);
            for j = 1:n+1
              Z = V(j,1:n);
              Y(j) = feval(fn,Z);
            end
            [mm lo] = min(Y);            % Order the vertices:
            [mm hi] = max(Y);
            li = hi;
            ho = lo;
            for j = 1:n+1
              if (j~=lo && j~=hi && Y(j)<=Y(li)), li=j; end
              if (j~=hi && j~=lo && Y(j)>=Y(ho)), ho=j; end
            end                          % End of Order.
            cnt = 0;
            while (Y(hi)>Y(lo)+epsilon && cnt<max1) || cnt<min1
              S = zeros(1,n);            % Centroid of the face opposite the worst vertex
              for j = 1:n+1
                S = S + V(j,1:n);
              end
              M = (S - V(hi,1:n))/n;
              R = 2*M - V(hi,1:n);       % Reflect the worst vertex through the centroid
              yR = feval(fn,R);
              if (yR < Y(ho))
                if (Y(li) < yR)          % Reflection is good enough: accept it
                  V(hi,1:n) = R;
                  Y(hi) = yR;
                else                     % Reflection is the new best: try expansion
                  E = 2*R - M;
                  yE = feval(fn,E);
                  if (yE < Y(li))
                    V(hi,1:n) = E;
                    Y(hi) = yE;
                  else
                    V(hi,1:n) = R;
                    Y(hi) = yR;
                  end
                end
              else
                if (yR < Y(hi))
                  V(hi,1:n) = R;
                  Y(hi) = yR;
                end
                C = (V(hi,1:n) + M)/2;   % Contraction: try both sides of the centroid
                yC = feval(fn,C);
                C2 = (M + R)/2;
                yC2 = feval(fn,C2);
                if (yC2 < yC)
                  C = C2;
                  yC = yC2;
                end
                if (yC < Y(hi))
                  V(hi,1:n) = C;
                  Y(hi) = yC;
                else                     % Contraction failed: shrink toward the best vertex
                  for j = 1:n+1
                    if (j~=lo)
                      V(j,1:n) = (V(j,1:n) + V(lo,1:n))/2;
                      Z = V(j,1:n);
                      Y(j) = feval(fn,Z);
                    end
                  end
                end
              end
              [mm lo] = min(Y);          % Re-order the vertices:
              [mm hi] = max(Y);
              li = hi;
              ho = lo;
              for j = 1:n+1
                if (j~=lo && j~=hi && Y(j)<=Y(li)), li=j; end
                if (j~=hi && j~=lo && Y(j)>=Y(ho)), ho=j; end
              end                        % End of Order.
              cnt = cnt+1;
              P(cnt,:) = V(lo,:);
              Q(cnt) = Y(lo);
            end                          % End of the main while loop.
            snorm = 0;                   % Determine the size of the simplex:
            for j = 1:n+1
              % compare whole vertices (rows), not single matrix entries
              s = norm(V(j,1:n)-V(lo,1:n));
              if (s>=snorm), snorm = s; end
            end
            figure (1)
            hold on ;
            contourf(X1,X2,RR,30,'EdgeColor',[.1 .1 .1])
            plot(P(:,1),P(:,2),'r');
            title('Rosenbrock function')

            Q0=Q';
            V0 = V(lo,1:n);              % vertex V0 for the minimum
            y0 = Y(lo);                  % function value fn(V0)
            dV = snorm;                  % size of the final simplex
            dy = abs(Y(hi)-Y(lo));       % error bound for the minimum

            figure (2)
            imshow(imread('DOWNHILL_SIMPLEX_computing_time.png'));
            title('DOWNHILL SIMPLEX computing time');

            %% (15.1)-(c) Direct Set Methode
            % Powell's direction-set method on the Rosenbrock function
            % (fn as defined in part (b)): cycles of n unidimensional
            % minimizations along the columns of H, plus one minimization
            % along the new conjugate direction, which then replaces the
            % oldest direction in H.
            %
            % Fixes relative to the previous version:
            %  - the obsolete fmin() is replaced by fminbnd();
            %  - the line-search objective is the 1-D restriction
            %    fn(x + alpha*s), not fn evaluated at alpha itself;
            %  - the 'global x s' declaration is no longer needed because the
            %    objective is captured in an anonymous function.

            itmax = 2;   % itmax = maximum number of iterations (cycles)
            N = 2;       % N = number of design variables

            % initial guess for vector of design variables
            x0 = [-1 -1]';
            x = x0;

            % initial set of search directions grouped into a
            % matrix (each column is a search direction). For
            % this method this matrix is the identity matrix
            H = eye(N);
            n = N;

            % main iteration loop (n minimizations plus minimization
            % along new conjugate direction)
            for iter = 1:itmax
               % perform n unidimensional minimizations
               s_new = zeros(n,1);
               for i = 1:n
                  s = H(:,i);
                  % line search: minimize fn along the ray x + alpha*s
                  alpha_star = fminbnd(@(a) fn(x + a*s), -100, 100);
                  F = fn(x + alpha_star*s);
                  x = x + alpha_star*s;
                  s_new = s_new + alpha_star*s;
               end
               % generate new search direction, and minimize along it
               s = s_new;
               alpha_star = fminbnd(@(a) fn(x + a*s), -100, 100);
               F = fn(x + alpha_star*s);
               x = x + alpha_star*s;
               % substitute one of the initial directions with the new one
               for ic = 1:n-1
                  H(:,ic) = H(:,ic+1);
               end
               H(:,n) = s_new;
            end

            %% (15.1)-(d) Conjugate Gradients method
            % Nonlinear conjugate gradients (Fletcher-Reeves beta) on the
            % Rosenbrock function, with an Armijo backtracking line search
            % (external function armijo.m -- must be on the path), followed
            % by a 3-D plot of the iterate path over the surface.
            %
            % Fixes relative to the previous version:
            %  - f must be a symbolic expression: diff() cannot differentiate
            %    an anonymous function handle;
            %  - the 500-step cap is now part of the while condition ("cond, k<500;"
            %    made it a stray no-op statement, risking an infinite loop).

            f = str2sym('100*(y - x^2)^2 + (1 - x)^2');  % symbolic in x and y
            x0 = -1;
            y0 = -1;
            tol = 10^-4;               % convergence tolerance on ||grad f||
            x(1) = x0; y(1) = y0;      % matlab starts vectors at index 1
            gradf = [diff(f,sym('x')); diff(f,sym('y'))];   % the gradient of f
            p(:,1) = double(-subs(subs(gradf,'x',x0),'y',y0));  % p0 = -gradf(x0)
            k = 1;                     % matlab starts counting at 1
            finalX = x(1);             % initialize the iterate history
            finalY = y(1);
            finalZ = double(subs(subs(f,'x',x(1)),'y',y(1)));
            while double(norm(subs(subs(gradf,'x',x(k)),'y',y(k)))) > tol && k < 500
                rho = 0.5; c = 0.1;
                % alp is updated using the external Armijo line-search function
                alp = armijo(f,rho,c,x(k),y(k),gradf,p(:,k));
                x(k+1) = x(k) + alp*p(1,k);   % p(1,k) means row 1, col k
                y(k+1) = y(k) + alp*p(2,k);   % p(2,k) means row 2, col k
                % Fletcher-Reeves coefficient: ||g_{k+1}||^2 / ||g_k||^2
                gk1 = subs(subs(gradf,'x',x(k+1)),'y',y(k+1));
                gk  = subs(subs(gradf,'x',x(k)),'y',y(k));
                bet(k+1) = double((gk1'*gk1)/(gk'*gk));
                p(:,k+1) = double(-gk1 + bet(k+1)*p(:,k));  % append a new column
                finalX = [finalX; x(k+1)];    % each iterate is appended
                finalY = [finalY; y(k+1)];
                finalZ = [finalZ; double(subs(subs(f,'x',x(k+1)),'y',y(k+1)))];
                k = k+1;
            end

            fmin = double(subs(subs(f,'x',x(k)),'y',y(k)));
            xmin = x(k);
            ymin = y(k);
            finalX = [finalX; xmin];   % each calculated value is appended
            finalY = [finalY; ymin];
            finalZ = [finalZ; fmin];   % vertical descent down along the Z direction

            plot3(finalX,finalY,finalZ);  % path of the iterates to the solution
            hold on
            [X,Y] = meshgrid(x(k)-5:1:x(k)+5, y(k)-5:1:y(k)+5);
            IJ = size(X);              % grid dimensions for the mesh
            Z = zeros(size(X));        % initializing Z
            for i = 1:IJ(1)
                for j = 1:IJ(2)
                    % evaluate the symbolic f on the numeric grid
                    Z(i,j) = double(subs(subs(f,'x',X(i,j)),'y',Y(i,j)));
                end
            end
            mesh(X,Y,Z)                % plotting the surface
            title('Subrats Pics'),xlabel('x'),ylabel('y')

            figure
            imshow(imread('conjugategradient.png'));
            title('Conjugate Gradient Methode');

            %% (15.1)-(e)

            %% Alpha=0.1
            % Simulated annealing for a 1-D spin chain with Gaussian couplings
            % J, ramping the inverse temperature as beta = alpha*i per cycle.
            % The Metropolis accept/reject conditional below reconstructs the
            % logic that was garbled in the previous version of this file.
            clc;
            clear;
            close all;

            % Start CONFIGURATION
            L=100 ; %Spins number
            x=randn(1,L);
            J = (x-mean(x))./std(x);% creating the centred  Gaussian distributed variable

            % random initial spin configuration (last spin tied to the first)
            Spin=[-1 1];
            SpinConfig0=zeros(1,L);
            for i=1:L
                SpinConfig0(i)=Spin(randi([1 2]));
            end
            SpinConfig0(L)=SpinConfig0(1);
            % NOTE(review): f here must be an external energy function f.m on
            % the path (the anonymous f from part (a) was wiped by 'clear') -- confirm
            E0=f(SpinConfig0,J);
            Emin_Bound=-sum(abs(J));   % lower bound on the ground-state energy
            alpha=0.1;                 % inverse-temperature ramp rate

            % Simulated Annealing
            x_start=SpinConfig0;
            n = 50;                    % number of cycles
            m = 50;                    % number of trials per cycle
            na = 0.0;                  % number of accepted solutions
            p1 = 0.7;                  % probability of accepting worse solution at the start
            p50 = 0.001;               % probability of accepting worse solution at the end
            beta0 = 0;                 % initial inverse temperature
            beta50 = 50*alpha;         % final inverse temperature
            frac = (beta0/beta50)^(1.0/(n-1.0));  % fractional reduction every cycle
            % Initialize x (row i holds the best configuration after cycle i-1)
            x = zeros(n+1,L);
            x(1,:) = x_start;
            xi = x_start;
            % Current best results so far
            xc = x(1,:);
            fc = f(xi,J);
            fs = zeros(n+1,1);
            fs(1,:) = fc;
            beta = beta0;              % current inverse temperature
            for i=1:n
                disp(['Cycle: ',num2str(i),' with boltzman coeff: ',num2str(beta)])
                for j=1:m
                    % Generate new trial Spin configuration
                    Spin=[-1 1];
                    SpinConfigi=zeros(1,L);
                    for ii=1:L
                        SpinConfigi(ii)=Spin(randi([1 2]));
                    end
                    SpinConfigi(L)=SpinConfigi(1);
                    xi=SpinConfigi;
                    DeltaE = abs(f(xi,J)-fc);
                    if (f(xi,J)>fc)
                        % objective function is worse: accept with
                        % Metropolis probability exp(-DeltaE*beta)
                        p = exp(-DeltaE*beta);
                        if (rand() < p)
                            accept = true;
                        else
                            accept = false;
                        end
                    elseif f(xi,J) > Emin_Bound
                        % Energy is lower, automatically accept
                        accept = true;
                    else
                        continue
                    end
                    if (accept==true)
                        % update currently accepted solution
                        xc(1:end)=xi(1:end);
                        fc = f(xc,J);
                    end
                end
                % Record the best x values at the end of every cycle
                x(i+1,:) = xc;
                fs(i+1) = fc;
                % Raise beta (lower the temperature) for the next cycle
                beta=alpha*i;
            end
            % print solution
            disp(['Best solution: ',num2str(xc)])
            disp(['Best objective: ',num2str(fc)])

            figure(2);
            plot(fs,'r.-')
            legend('Objective')

            %% alpha=0.01
            % Same simulated-annealing experiment with a slower inverse-
            % temperature ramp (alpha = 0.01). The Metropolis accept/reject
            % conditional reconstructs the garbled logic of the previous version.
            clc;
            clear;
            close all;

            % Start CONFIGURATION
            L=100 ; %Spins number
            x=randn(1,L);
            J = (x-mean(x))./std(x);% creating the centred  Gaussian distributed variable

            % random initial spin configuration (last spin tied to the first)
            Spin=[-1 1];
            SpinConfig0=zeros(1,L);
            for i=1:L
                SpinConfig0(i)=Spin(randi([1 2]));
            end
            SpinConfig0(L)=SpinConfig0(1);
            % NOTE(review): f here must be an external energy function f.m on
            % the path (the anonymous f from part (a) was wiped by 'clear') -- confirm
            E0=f(SpinConfig0,J);
            Emin_Bound=-sum(abs(J));   % lower bound on the ground-state energy
            alpha=0.01;                % inverse-temperature ramp rate

            % Simulated Annealing
            x_start=SpinConfig0;
            n = 50;                    % number of cycles
            m = 50;                    % number of trials per cycle
            na = 0.0;                  % number of accepted solutions
            p1 = 0.7;                  % probability of accepting worse solution at the start
            p50 = 0.001;               % probability of accepting worse solution at the end
            beta0 = 0;                 % initial inverse temperature
            beta50 = 50*alpha;         % final inverse temperature
            frac = (beta0/beta50)^(1.0/(n-1.0));  % fractional reduction every cycle
            % Initialize x (row i holds the best configuration after cycle i-1)
            x = zeros(n+1,L);
            x(1,:) = x_start;
            xi = x_start;
            % Current best results so far
            xc = x(1,:);
            fc = f(xi,J);
            fs = zeros(n+1,1);
            fs(1,:) = fc;
            beta = beta0;              % current inverse temperature
            for i=1:n
                disp(['Cycle: ',num2str(i),' with boltzman coeff: ',num2str(beta)])
                for j=1:m
                    % Generate new trial Spin configuration
                    Spin=[-1 1];
                    SpinConfigi=zeros(1,L);
                    for ii=1:L
                        SpinConfigi(ii)=Spin(randi([1 2]));
                    end
                    SpinConfigi(L)=SpinConfigi(1);
                    xi=SpinConfigi;
                    DeltaE = abs(f(xi,J)-fc);
                    if (f(xi,J)>fc)
                        % objective function is worse: accept with
                        % Metropolis probability exp(-DeltaE*beta)
                        p = exp(-DeltaE*beta);
                        if (rand() < p)
                            accept = true;
                        else
                            accept = false;
                        end
                    elseif f(xi,J) > Emin_Bound
                        % Energy is lower, automatically accept
                        accept = true;
                    else
                        continue
                    end
                    if (accept==true)
                        % update currently accepted solution
                        xc(1:end)=xi(1:end);
                        fc = f(xc,J);
                    end
                end
                % Record the best x values at the end of every cycle
                x(i+1,:) = xc;
                fs(i+1) = fc;
                % Raise beta (lower the temperature) for the next cycle
                beta=alpha*i;
            end
            % print solution
            disp(['Best solution: ',num2str(xc)])
            disp(['Best objective: ',num2str(fc)])

            figure(2);
            plot(fs,'r.-')
            legend('Objective')

            %% Alpha=0.001
            % Same simulated-annealing experiment with the slowest ramp.
            % Fixes relative to the previous version: alpha is now 0.001 as
            % the section title states (it was left at 0.1 by a copy-paste
            % error), and the garbled Metropolis conditional is reconstructed.
            clc;
            clear;
            close all;

            % Start CONFIGURATION
            L=100 ; %Spins number
            x=randn(1,L);
            J = (x-mean(x))./std(x);% creating the centred  Gaussian distributed variable

            % random initial spin configuration (last spin tied to the first)
            Spin=[-1 1];
            SpinConfig0=zeros(1,L);
            for i=1:L
                SpinConfig0(i)=Spin(randi([1 2]));
            end
            SpinConfig0(L)=SpinConfig0(1);
            % NOTE(review): f here must be an external energy function f.m on
            % the path (the anonymous f from part (a) was wiped by 'clear') -- confirm
            E0=f(SpinConfig0,J);
            Emin_Bound=-sum(abs(J));   % lower bound on the ground-state energy
            alpha=0.001;               % inverse-temperature ramp rate (per section title)

            % Simulated Annealing
            x_start=SpinConfig0;
            n = 50;                    % number of cycles
            m = 50;                    % number of trials per cycle
            na = 0.0;                  % number of accepted solutions
            p1 = 0.7;                  % probability of accepting worse solution at the start
            p50 = 0.001;               % probability of accepting worse solution at the end
            beta0 = 0;                 % initial inverse temperature
            beta50 = 50*alpha;         % final inverse temperature
            frac = (beta0/beta50)^(1.0/(n-1.0));  % fractional reduction every cycle
            % Initialize x (row i holds the best configuration after cycle i-1)
            x = zeros(n+1,L);
            x(1,:) = x_start;
            xi = x_start;
            % Current best results so far
            xc = x(1,:);
            fc = f(xi,J);
            fs = zeros(n+1,1);
            fs(1,:) = fc;
            beta = beta0;              % current inverse temperature
            for i=1:n
                disp(['Cycle: ',num2str(i),' with boltzman coeff: ',num2str(beta)])
                for j=1:m
                    % Generate new trial Spin configuration
                    Spin=[-1 1];
                    SpinConfigi=zeros(1,L);
                    for ii=1:L
                        SpinConfigi(ii)=Spin(randi([1 2]));
                    end
                    SpinConfigi(L)=SpinConfigi(1);
                    xi=SpinConfigi;
                    DeltaE = abs(f(xi,J)-fc);
                    if (f(xi,J)>fc)
                        % objective function is worse: accept with
                        % Metropolis probability exp(-DeltaE*beta)
                        p = exp(-DeltaE*beta);
                        if (rand() < p)
                            accept = true;
                        else
                            accept = false;
                        end
                    elseif f(xi,J) > Emin_Bound
                        % Energy is lower, automatically accept
                        accept = true;
                    else
                        continue
                    end
                    if (accept==true)
                        % update currently accepted solution
                        xc(1:end)=xi(1:end);
                        fc = f(xc,J);
                    end
                end
                % Record the best x values at the end of every cycle
                x(i+1,:) = xc;
                fs(i+1) = fc;
                % Raise beta (lower the temperature) for the next cycle
                beta=alpha*i;
            end
            % print solution
            disp(['Best solution: ',num2str(xc)])
            disp(['Best objective: ',num2str(fc)])

            figure(2);
            plot(fs,'r.-')
            legend('Objective')