# Many functions below require the `norm` function from LinearAlgebra
using LinearAlgebra

# the Newton method
"""
    newton(F, JF, X0; kwargs...)

Find one solution of F(X)=0 for a vector-valued function `F` with Jacobian
matrix `JF` using Newton's iteration with initial approximation `X0`.
Returns a named tuple (X = X, n = n).
Keyword arguments are `tol = 1e-8` and `maxit = 100`.

# Examples
```julia-repl
julia> newton(x -> x^2 - 2, x -> 2x, 1.4).X
1.4142135623730951
```
"""
function newton(F, JF, X0; tol = 1e-8, maxit = 100)
    # set two variables which will be used (also) outside the for loop
    X = X0
    n = 1
    # track convergence explicitly so that a solution found exactly at
    # iteration `maxit` does not trigger a spurious warning
    converged = false
    # first evaluation of F
    Y = F(X)
    for outer n = 1:maxit
        # execute one step of Newton's iteration
        X = X0 - JF(X0)\Y
        Y = F(X)
        # check if the result is within prescribed tolerance
        if norm(X - X0) + norm(Y) < tol
            converged = true
            break
        end
        # otherwise repeat
        X0 = X
    end
    # a warning if the iteration did not converge within maxit steps
    if !converged
        @warn "no convergence after $maxit iterations"
    end
    # let's return a named tuple
    return (X = X, n = n)
end

# discretized Newton's method
"""
    dnewton(F, X0; kwargs...)

Find one solution of F(X)=0 for a vector-valued function `F` using
discretized Newton's iteration (the Jacobian is approximated by forward
finite differences) with initial approximation `X0`.
Returns a named tuple (X = X, n = n).
Keyword arguments are `tol = 1e-8` and `maxit = 100`.

# Examples
```julia-repl
julia> dnewton(x -> x.^2 .- 2, [1.4]).X
1.4142135623732486
```
"""
function dnewton(F, X0; tol = 1e-8, maxit = 100)
    # size of X0
    n = length(X0)
    # standard basis vectors of R^n
    e = Matrix{Float64}(I, n, n)
    # reserve memory for the approximation to JF
    ajac = zeros(Float64, n, n)
    # pick a 'step' size for the finite differences
    h = sqrt(tol)
    # start (and initialize) the iteration
    X = X0
    k = 1
    converged = false
    for outer k = 1:maxit
        F0 = F(X0)
        # evaluate the approximation to JF (one column per basis direction)...
        for j = 1:n
            ajac[:, j] = (F(X0 + h*e[:, j]) - F0)/h
        end
        #... and do one step of Newton's iteration.
        X = X0 - ajac\F0
        # check if the result is within prescribed tolerance
        if norm(X - X0) < tol
            converged = true
            break
        end
        # otherwise repeat
        X0 = X
    end
    # a warning if the iteration did not converge within maxit steps
    if !converged
        @warn "no convergence after $maxit iterations"
    end
    # let's return a named tuple
    return (X = X, n = k)
end

# the multidimensional secant method
"""
    secant(F, X0; kwargs...)

Find one solution of F(X)=0 for a vector-valued function `F` using the
multidimensional secant method with initial approximations stored in a
matrix `X0` (one approximation per column; for n equations supply n+1
columns). Returns a named tuple (X = X, n = n).
Keyword arguments are `tol = 1e-8` and `maxit = 100`.

NOTE(review): the matrix of initial approximations is updated in place,
so the caller's array is modified — pass a copy if that matters.

# Examples
```julia-repl
julia> secant(x -> x.^2 .- 2, [1.4 1.5]).X
1.4142135642135643
```
"""
function secant(F, X; tol = 1e-8, maxit = 100)
    # save the size and the number of initial approximations (should be m = n+1)
    (n, m) = size(X)
    # prepare the first basis vector of R^(n+1)
    e1 = [1.0; zeros(Float64, n, 1)]
    # We prepare the initial Z matrix.
    # (Later we will only change one column of Z at each step of the iteration.)
    Z = zeros(Float64, m, m)
    for j = 1:m
        Z[:, j] = [1.0; F(X[:, j])]
    end
    # start (and initialize) the iteration
    k = 1
    x = X[end]
    converged = false
    for outer k = 1:maxit
        # the new approximation
        x = X*(Z\e1)
        Fx = F(x) # Note that this is the single evaluation of F (per iteration step).
        # check if the result is within prescribed tolerance
        if norm(Fx) < tol
            converged = true
            break
        end
        # cyclically replace the oldest approximation (column) by the new one
        X[:, mod(k, m) == 0 ? m : mod(k, m)] = x        # add a new approximation to X, discard the first one
        Z[:, mod(k, m) == 0 ? m : mod(k, m)] = [1.0; Fx] # add a new column to Z, discard the first one
    end
    # a warning if the iteration did not converge within maxit steps
    if !converged
        @warn "no convergence after $maxit iterations"
    end
    # let's return a named tuple
    return (X = x, n = k)
end

# Broyden's method
"""
    broyden(F, X0; kwargs...)

Find one solution of F(X)=0 for a vector-valued function `F` using
Broyden's method (a rank-1 quasi-Newton update of the Jacobian
approximation) with initial approximation `X0`.
Returns a named tuple (X = X, n = n).
Keyword arguments are `tol = 1e-8` and `maxit = 100`.

# Examples
```julia-repl
julia> broyden(x -> x.^2 .- 2, [1.4]).X
1.414213562373095
```
"""
function broyden(F, X0; tol = 1e-8, maxit = 100)
    # size of X0
    n = length(X0)
    # standard basis vectors of R^n
    e = Matrix{Float64}(I, n, n)
    # default step size for the initial finite-difference Jacobian
    h = sqrt(tol)
    # reserve memory for the approximation to JF
    J = zeros(Float64, n, n)
    # evaluate the first approximation to JF (finite differences)
    FX0 = F(X0)
    for j = 1:n
        J[:, j] = (F(X0 + h*e[:, j]) - FX0)/h
    end
    # start (and initialize) the iteration
    X = X0
    k = 1
    converged = false
    for outer k = 1:maxit
        d = -J\FX0
        X = X0 + d
        # check if the result is within prescribed tolerance
        if norm(FX0) < tol
            converged = true
            break
        end
        # Broyden rank 1 update
        FX = F(X) # Note that this is the single evaluation of F (per iteration step).
        J = J + (FX - FX0 - J*d)*d'/norm(d)^2
        # new X0 and FX0
        X0 = X
        FX0 = FX
    end
    # a warning if the iteration did not converge within maxit steps
    if !converged
        @warn "no convergence after $maxit iterations"
    end
    return (X = X, n = k)
end

# 4th order Runge-Kutta method
"""
    rk4(f, Y0, (t0, tk), h)

Solve the initial value problem Y' = f(t, Y), Y(t0) = Y0 on the interval
[t0, tk] with the classical 4th-order Runge-Kutta method using a fixed
step size `h`. Returns a tuple `(t, Y)` where `t` is the range of time
steps and `Y` is the vector of corresponding approximations.
"""
function rk4(f, Y0, (t0, tk), h)
    # set time steps
    t = t0:h:tk
    # initial Y value
    Y = [Y0]
    for ti in t[1:end-1]
        # the four classical RK4 stage evaluations
        k1 = h*f(ti, Y[end])
        k2 = h*f(ti + h/2, Y[end] + k1/2)
        k3 = h*f(ti + h/2, Y[end] + k2/2)
        k4 = h*f(ti + h, Y[end] + k3)
        # add the next approximation to Y (weighted average of the stages)
        push!(Y, Y[end] + (k1 + 2*k2 + 2*k3 + k4)/6)
    end
    return (t, Y)
end