# Module NumericalMethodsAndAnalysis

# Module *NumericalMethodsAndAnalysis*

The following cell is the content of file `NumericalMethodsAndAnalysis.jl`, used to define the module `NumericalMethodsAndAnalysis`.

This file just exists so that the module’s definition can be displayed in the eponymous Jupyter Book.

```
module NumericalMethodsAndAnalysis
export newton, secant, rowReduce, print_matrix, simple_sum
```

```
"""
    newton(f, Df, x0, errorTolerance; maxIterations=100, demoMode=false)

Solve f(x) = 0 by Newton's method, starting from the initial guess `x0`.

Basic usage is:

    (rootApproximation, errorEstimate, iterations) = newton(f, Df, x0, errorTolerance)

The optional keyword `demoMode` controls whether to
- println intermediate results (for "study" purposes), or to
- work silently (for "production" use).
The default is silence.
"""
function newton(f, Df, x0, errorTolerance; maxIterations=100, demoMode=false)
    if demoMode
        println("Solving by Newton's Method.")
        println("maxIterations=", maxIterations)
        println("errorTolerance=", errorTolerance)
    end
    x = x0
    # Fix: the original declared `global errorEstimate`, which creates shared,
    # type-unstable module-level state. A local initialized before the loop is
    # visible after the loop and keeps each call independent.
    errorEstimate = Inf
    for iteration in 1:maxIterations
        fx = f(x)
        Dfx = Df(x)
        # Note: a careful, robust code would check for the possibility of division by zero here,
        # but for now I just want a simple presentation of the basic mathematical idea.
        dx = fx / Dfx
        x -= dx  # Aside: this is shorthand for "x = x - dx"
        errorEstimate = abs(dx)
        if demoMode
            println("At iteration ", iteration, " x = ", x, " with estimated error ", errorEstimate, " and backward error ", abs(f(x)))
        end
        if errorEstimate <= errorTolerance
            if demoMode
                println("Done!")
            end
            return (x, errorEstimate, iteration)
        end
    end
    # If we get here (no "return" above), maxIterations iterations completed without
    # satisfying the accuracy target, but we still return the information that we have.
    return (x, errorEstimate, maxIterations)
end;
```

```
"""
    secant(f, a, b; errorTolerance=1e-15, maxIterations=15, demoMode=false)

Solve f(x) = 0 in the interval [a, b] by the Secant Method.

Returns a 1×2 row `[rootApproximation errorEstimate]`.
The optional keyword `demoMode` turns on printing of intermediate results.
"""
function secant(f, a, b; errorTolerance=1e-15, maxIterations=15, demoMode=false)
    # Bug fix: the default was `False` (Python spelling), an undefined name in
    # Julia, so every call that omitted demoMode raised UndefVarError.
    if demoMode
        print("Solving by the Secant Method.")
    end
    # Some more descriptive names
    x_older = a
    x_more_recent = b
    f_x_older = f(x_older)
    f_x_more_recent = f(x_more_recent)
    # Locals initialized before the loop replace the original
    # `global x_new, errorEstimate`, which leaked module-level state.
    x_new = x_more_recent
    errorEstimate = abs(x_more_recent - x_older)
    for iteration in 1:maxIterations
        if demoMode
            println("\nIteration $(iteration):")
        end
        x_new = (x_older * f_x_more_recent - f_x_older * x_more_recent) / (f_x_more_recent - f_x_older)
        f_x_new = f(x_new)
        (x_older, x_more_recent) = (x_more_recent, x_new)
        (f_x_older, f_x_more_recent) = (f_x_more_recent, f_x_new)
        errorEstimate = abs(x_older - x_more_recent)
        if demoMode
            println("The latest pair of approximations are $(x_older) and $(x_more_recent),")
            println("where the function's values are $(f_x_older) and $(f_x_more_recent) respectively.")
            println("The new approximation is $(x_new) with estimated error $(errorEstimate), backward error $(abs(f_x_new))")
        end
        if errorEstimate < errorTolerance
            break
        end
    end
    # Whether we got here due to accuracy or running out of iterations,
    # return the information we have, including an error estimate:
    return [x_new errorEstimate]
end;  # function
```

```
function rowReduce(A, b)
    # Naive Gaussian elimination (no pivoting): reduce the system A x = b to
    # the upper-triangular system U x = c, returning (U, c).
    # copy() is essential here: plain assignment like "U = A" would make U an
    # alias for the caller's array, so elimination would clobber the input.
    U = copy(A)
    c = copy(b)
    n = length(b)
    L = zeros(n, n)
    for pivot in 1:n-1
        for row in pivot+1:n
            # Multiplier that eliminates entry (row, pivot).
            # Beware: this divides by U[pivot, pivot], which could be zero.
            L[row, pivot] = U[row, pivot] / U[pivot, pivot]
            for col in pivot+1:n
                U[row, col] -= L[row, pivot] * U[pivot, col]
            end
            # Explicitly zero the eliminated entry. Backward substitution never
            # reads it, but clean zeros help when displaying results and when
            # checking residuals.
            U[row, pivot] = 0.
            c[row] -= L[row, pivot] * c[pivot]
        end
    end
    # One more sweep over the strictly lower triangle (kept to match the
    # original behavior exactly, though the loop above already zeroed it).
    for row in 2:n, col in 1:row-1
        U[row, col] = 0.
    end
    return (U, c)
end;
```

```
function rowReduceOffset(A, b)
    # NOTE(review): this body is currently identical to rowReduce — the name
    # suggests an offset-indexing variant was intended; confirm with the author.
    # Reduce A x = b to upper-triangular U x = c by naive Gaussian elimination
    # (no pivoting), without modifying the inputs.
    # copy() matters: "U = A" would alias the caller's data.
    U = copy(A)
    c = copy(b)
    n = length(b)
    L = zeros(n, n)
    for k in 1:n-1, i in k+1:n
        # Elimination multiplier for entry (i, k); divides by U[k, k],
        # which could be zero.
        L[i, k] = U[i, k] / U[k, k]
        for j in k+1:n
            U[i, j] -= L[i, k] * U[k, j]
        end
        # Write an explicit zero below the diagonal: not needed by backward
        # substitution, but tidier for display and residual checks.
        U[i, k] = 0.
        c[i] -= L[i, k] * c[k]
    end
    # Redundant second zeroing pass, preserved to match the original exactly.
    for i in 2:n
        for j in 1:i-1
            U[i, j] = 0.
        end
    end
    return (U, c)
end;
```

```
"""
    backwardSubstitution(U, c; demoMode=false)

Solve the upper-triangular system U x = c for x by backward substitution.
The optional keyword `demoMode` prints each component as it is computed.
"""
function backwardSubstitution(U, c; demoMode=false)
    n = length(c)
    x = zeros(n)
    x[n] = c[n] / U[n, n]
    if demoMode
        println("x_$(n) = $(x[n])")
    end
    for i in n-1:-1:1
        if demoMode
            println("i=$(i)")
        end
        x[i] = (c[i] - sum(U[i, i+1:n] .* x[i+1:n])) / U[i, i]
        if demoMode
            # Fix: this was `print`, unlike the `println` used for x_n above,
            # so demo output for x_1 .. x_{n-1} ran together on one line.
            println("x_$(i) = $(x[i])")
        end
    end
    return x
end;
```

```
function forwardSubstitution(L, b)
    # Solve L c = b for c by forward substitution.
    # NOTE(review): no division by L[i,i] is performed, so this presumably
    # assumes a unit lower-triangular L (ones on the diagonal), as produced by
    # the Doolittle LU factorization — confirm with callers.
    n = length(b)
    c = zeros(n)
    c[1] = b[1]
    for i in 2:n
        # Subtract the contributions of the already-computed components.
        # (The original summed over j = 1:i via a matrix product; the j = i
        # term was harmless because c[i] is still zero at this point.)
        c[i] = b[i] - sum(L[i, j] * c[j] for j in 1:i-1)
    end
    return c
end;
```

Some “helpers” for presentation in notebooks:

```
function print_matrix(A)
    # "Pretty print" a matrix: an opening bracket line, one line per row with
    # elements separated by single spaces, and a closing bracket line.
    println("[")
    for row in eachrow(A)
        for element in row
            print(element, " ")
        end
        println()
    end
    println("]")
end;
```

And finally, some simple ones, for demonstrating the use of modules:

```
# Return a + b; a minimal exported function for demonstrating module usage.
simple_sum(a, b) = a + b;
# Return a - b. Not exported, so it must be accessed by its fully qualified
# name "NumericalMethodsAndAnalysis.simple_diff", or imported by name.
simple_diff(a, b) = a - b;
theAnswer = 42 # Also not exported.
end
```