import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Jupyter/IPython magic: render matplotlib figures inline in the notebook
%matplotlib inline
sns.set()
sns.set_style("whitegrid")
# default figure size (inches) for every plot below
plt.rcParams['figure.figsize'] = 12, 6
For a simple harmonic oscillator, we know that $\ddot{y} = -y$. So we will run Euler's method to approximate both $y$ and $\dot{y}$ iteratively, with the update steps being:
$$ y_t = y_{t-1} + h \dot{y}_{t-1} \\ \dot{y}_t = \dot{y}_{t-1} - h y_{t-1} $$
def euler_method(y0, dy0, step, max_step):
    """Integrate the harmonic oscillator y'' = -y with forward Euler.

    Parameters
    ----------
    y0, dy0 : float
        Initial position and velocity.
    step : float
        Fixed time increment h.
    max_step : float
        Final time; samples are taken while t <= max_step.

    Returns
    -------
    np.ndarray
        The y samples, starting with y0.
    """
    position, velocity = y0, dy0
    trajectory = [position]
    t = step
    while t <= max_step:
        # simultaneous update: the velocity step must use the *old* position
        position, velocity = (position + step * velocity,
                              velocity - step * position)
        trajectory.append(position)
        t += step
    return np.array(trajectory)
# just test that it works
# NOTE(review): forward Euler on an oscillator typically gains energy, so
# expect a slowly growing amplitude over the ~50 periods plotted here — confirm
y = euler_method(1, 0, 0.001, 100 * np.pi)
plt.plot(y)
plt.show()
# Classical 4th-order Runge-Kutta: one step for the coupled system
# y' = dy, dy' = -y (so each k has a y-component and a dy-component).
def runge_kutta_step(step, y, dy):
    """Advance (y, dy) by one RK4 step of size `step` for y'' = -y.

    Returns the pair (new_y, new_dy).
    """
    k1y = step * dy
    k1d = -step * y
    k2y = step * (dy + k1d / 2.0)
    k2d = -step * (y + k1y / 2.0)
    k3y = step * (dy + k2d / 2.0)
    k3d = -step * (y + k2y / 2.0)
    k4y = step * (dy + k3d)
    k4d = -step * (y + k3y)
    # standard RK4 weighted average of the four slopes
    next_y = y + (k1y + 2 * k2y + 2 * k3y + k4y) / 6
    next_dy = dy + (k1d + 2 * k2d + 2 * k3d + k4d) / 6
    return (next_y, next_dy)
def runge_kutta(y0, dy0, step, max_step):
    """Fixed-step RK4 integration of y'' = -y; returns the y samples.

    Same sampling convention as `euler_method`: the initial y is included,
    then one sample per step while t <= max_step.
    """
    state = (y0, dy0)
    ys = [y0]
    t = step
    while t <= max_step:
        state = runge_kutta_step(step, *state)
        ys.append(state[0])
        t += step
    return np.array(ys)
# test that it works (same initial conditions, step, and duration as the
# Euler sanity check above, for a side-by-side visual comparison)
y = runge_kutta(1, 0, 0.001, 100 * np.pi)
plt.plot(y)
plt.show()
To calculate error statistics, we need the true function values. Since we know that $y_0 = 1$ and $\dot{y}_0 = 0$, the exact solution is
$$ y = \cos(t) $$
def ground_truth(step, max_step):
    """Sample the exact solution cos(t) at t = 0, step, 2*step, ...

    Builds t by repeated addition (the same accumulation the solvers use),
    so the returned array lines up element-for-element with their output.
    """
    sample_times = []
    t = 0
    while t <= max_step:
        sample_times.append(t)
        t += step
    return np.cos(np.array(sample_times))
# and test that it works (same step/duration as the solver checks above)
y = ground_truth(0.001, 100 * np.pi)
plt.plot(y)
plt.show()
# now compute average error and final value error:
step_sizes = [1, 0.1, 0.01, 0.001, 0.0001]
max_step = 100 * np.pi
euler_avg_error = []
rk_avg_error = []
euler_final_error = []
rk_final_error = []
for step_size in step_sizes:
    # NOTE(review): ground_truth and both solvers accumulate t by the same
    # repeated addition, so the arrays should have matching lengths — confirm
    y_truth = ground_truth(step_size, max_step)
    # Euler's
    y_euler = euler_method(1, 0, step_size, max_step)
    average_error = np.mean(np.abs(y_euler - y_truth))
    euler_avg_error.append(average_error)
    final_error = np.abs(y_euler[-1] - y_truth[-1])
    euler_final_error.append(final_error)
    # Runge Kutta
    y_rk = runge_kutta(1, 0, step_size, max_step)
    average_error = np.mean(np.abs(y_rk - y_truth))
    rk_avg_error.append(average_error)
    final_error = np.abs(y_rk[-1] - y_truth[-1])
    rk_final_error.append(final_error)
# Plot the log of the average error for both methods against step size.
plt.plot(np.log(euler_avg_error))
plt.plot(np.log(rk_avg_error))
plt.xlim(-1, len(step_sizes))
plt.xlabel("Step Size")
plt.xticks(np.arange(len(step_sizes)), step_sizes)
plt.ylabel("Log(Average Error)")
# fixed typos: duplicated "vs vs." and "Runge-Kutte" -> "Runge-Kutta"
plt.title("Euler's vs. Runge-Kutta - Average Error per Step Size", y=1.05)
plt.legend(["Euler's", "Runge-Kutta"])
plt.show()
# Raw error values
# (in a notebook only the last bare expression of a cell is echoed)
euler_avg_error
rk_avg_error
# Plot the log of the final-value error for both methods against step size.
plt.plot(np.log(euler_final_error))
plt.plot(np.log(rk_final_error))
plt.xlim(-1, len(step_sizes))
plt.xlabel("Step Size")
plt.xticks(np.arange(len(step_sizes)), step_sizes)
plt.ylabel("Log(Final Error)")
# fixed typos: duplicated "vs vs." and "Runge-Kutte" -> "Runge-Kutta",
# consistent with the legend below
plt.title("Euler's vs. Runge-Kutta - Final Error per Step Size", y=1.05)
plt.legend(["Euler's", "Runge-Kutta"])
plt.show()
# raw final-error values (only the last bare expression is echoed)
euler_final_error
rk_final_error
For an adaptive Runge-Kutta method, we reuse the step function above and adapt the step size at each iteration based on a local error estimate.
# returns the approximation of y, the time for each y, and the avg step size
def adaptive_runge_kutta(y0, dy0, inital_step, max_step, err_upper_thr, err_lower_thr):
    """Adaptive-step RK4 for y'' = -y using step doubling.

    Each iteration compares one full RK4 step against two half steps; the
    absolute difference in the resulting y is the local error estimate.

    Parameters
    ----------
    y0, dy0 : float
        Initial position and velocity.
    inital_step : float
        Starting step size (parameter name kept as-is, typo included, so
        keyword callers are unaffected).
    max_step : float
        Final integration time.
    err_upper_thr : float
        Error estimate above this triggers a step-size decrease.
    err_lower_thr : float
        Error estimate below this triggers a step-size increase.

    Returns
    -------
    tuple
        (y samples as np.ndarray, their times as np.ndarray,
         average step size).  Prints every step-size change as it goes.
    """
    y = y0
    dy = dy0
    result = [y]
    xs = [0]
    step = inital_step
    num_steps = 0
    t = 0
    while t <= max_step:
        # candidate advances: one full step vs. two half steps
        y_full, dy_full = runge_kutta_step(step, y, dy)
        y_half1, dy_half1 = runge_kutta_step(step/2, y, dy)
        y_half2, dy_half2 = runge_kutta_step(step/2, y_half1, dy_half1)
        # first check the step size is not too small already
        # second, if our two half steps error is too big, time to slow down
        d = np.abs(y_full - y_half2)
        if step > 1e-6 and d > err_upper_thr:
            # move half a step (accept only the first half-step result)
            y = y_half1
            dy = dy_half1
            t += step/2
            # decrease by a non rational number, so we can visit a lot of different step sizes
            step /= np.sqrt(2)
            print(t, "New step (-):", step)
        else:
            # move the whole step
            y = y_full
            dy = dy_full
            t += step
            if d < err_lower_thr:
                # increase by a non rational number
                step *= np.sqrt(3)
                print(t, "New step (+):", step)
        # one sample recorded per iteration, whichever branch was taken
        result.append(y)
        xs.append(t)
        num_steps += 1
    # NOTE(review): the average divides by the final t, which can slightly
    # exceed max_step after the last accepted step — confirm this is intended
    return (np.array(result), np.array(xs), t / num_steps)
# sanity check
# moderate thresholds (1e-4 upper, 1e-7 lower); the prints inside the
# function show every step-size change as integration proceeds
y, x, avg_step = adaptive_runge_kutta(1, 0, 0.1, 100 * np.pi, 1e-4, 1e-7)
plt.plot(x, y)
plt.show()
print("Average Step:", avg_step)
# Sweep the upper error threshold from 1 down to 1e-10 (the lower threshold
# is kept two orders of magnitude below it) and record the average step size
# the adaptive integrator settles on for each threshold.
desired_errors = []
desired_error = 1
initial_step = 0.1
avg_steps = []
while desired_error > 1e-10:
    desired_errors.append(desired_error)
    y, x, avg_step = adaptive_runge_kutta(1, 0, initial_step, 100 * np.pi, desired_error, desired_error/100)
    avg_steps.append(avg_step)
    desired_error /= 10
plt.plot(avg_steps)
plt.xlim(-1, len(desired_errors))
plt.xlabel("Desired Error (Upper Threshold)")
# label each x position with its threshold in scientific notation
plt.xticks(np.arange(len(desired_errors)), ['%.0E' % de for de in desired_errors])
plt.ylabel("Average Step Size")
plt.title("Average Step Sizes for Various Desired Error Thresholds", y=1.05)
plt.show()