import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import ifft, idct, dct
%matplotlib inline
# np.random.seed(0)
# Time axis: 3 seconds sampled every 0.01 s (100 Hz sample rate, 300 points).
x = np.arange(0.0, 3.0, 0.01)
two_pi = 2 * np.pi
# Two pure sine tones evaluated at every sample time.
# NOTE(review): 697 Hz and 1209 Hz look like DTMF tone frequencies; at a
# 100 Hz sample rate they are far above Nyquist and will alias — confirm
# this is intentional for the demo.
t_697 = np.sin(two_pi * 697 * x)
t_1209 = np.sin(two_pi * 1209 * x)
# Superposition of the two tones.
sum_t = t_697 + t_1209
sum_t.shape  # bare expression: notebook display only, shows (300,)
# Figure 1: three stacked panels — each tone on its own axis, then their sum.
plt.figure(1)
plt.subplot(311)  # top: 697 Hz tone
plt.plot(x, t_697)
plt.subplot(312)  # middle: 1209 Hz tone
plt.plot(x, t_1209)
plt.subplot(313)  # bottom: superposition of the two
plt.plot(x, sum_t)
from scipy.fftpack import ifft, idct, dct
# DCT-II definition (scipy.fftpack.dct docs):
#                     N-1
#   y[k] = scaling * 2*sum x[n]*cos(pi*k*(2n+1)/(2N)),  0 <= k < N
#                     n=0
# Forward DCT of the summed signal; with norm='ortho' the transform is
# orthonormal, so idct below is its exact inverse.
f_i = dct(sum_t, type=2, norm='ortho')
plt.plot(x, f_i)
# Round trip: inverse transform should reconstruct the original samples.
f_i_inverse = idct(f_i, type=2, norm='ortho')
# Reconstruction error — numerically ~0 everywhere.
diff_t = sum_t - f_i_inverse
plt.plot(x, diff_t)
# Randomly keep M of the 300 samples; every discarded sample becomes NaN.
np.random.seed(0)
M = 100
sum_t_sample = np.full(len(sum_t), np.nan)
sum_t_sample_index = np.random.choice(len(sum_t), M, replace=False)
sum_t_sample[sum_t_sample_index] = sum_t[sum_t_sample_index]
# Kept samples (markers) overlaid on the full signal (line).
plt.plot(x, sum_t_sample, linestyle="", marker="o", color='blue')
plt.plot(x, sum_t, color='blue')
sum_t_sample.shape  # bare expression: notebook display only
# Compress: drop the NaN placeholders, keeping the M values in time order.
sum_t_sample_new = sum_t_sample[np.isfinite(sum_t_sample)]
plt.plot(range(M), sum_t_sample_new, color='orange')
plt.plot(range(M), sum_t_sample_new, color='orange', marker="o")
# DCT of the length-M compressed signal, plus its round-trip reconstruction.
f_i_dash = dct(sum_t_sample_new, type=2, norm='ortho')
plt.plot(range(M), sum_t_sample_new, color='orange')
plt.plot(range(M), idct(f_i_dash, type=2, norm='ortho'), marker="o", linestyle="", color='blue')
# Gradient descent: recover DCT coefficients f such that idct(f) matches the
# sampled signal (no regularization).
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
f_i_optim = np.ones(M)  # initial coefficient guess (all ones, not random)
cost_list = []  # data-fit cost recorded every 100 iterations
for i in range(iterations):
    # Reconstruct the time-domain signal from the current coefficients.
    hypothesis = idct(f_i_optim, type=2, norm='ortho')
    cost = np.sum((hypothesis - sum_t_sample_new) ** 2)
    if i % 100 == 0:
        cost_list.append(cost)
    # Gradient of ||idct(f) - y||^2 w.r.t. f is the ADJOINT of idct applied
    # to twice the residual; with norm='ortho' the transform is orthonormal,
    # so the adjoint of idct is the forward dct.
    # Fix: the original applied idct to the residual — that iteration shares
    # the same fixed point (residual = 0) but is not the true gradient.
    gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho')
    f_i_optim = f_i_optim - alpha * gradient
# Compare the optimized coefficients against the direct DCT (f_i_dash).
plt.plot(range(M), f_i_dash)
plt.plot(range(M), f_i_optim)
plt.figure(1)
plt.subplot(121)
plt.plot(range(M), f_i_dash, color='orange')
plt.plot(range(M), f_i_optim, linestyle="", marker="o", color='blue')
plt.subplot(122)
plt.plot(cost_list)
print('min cost:', min(cost_list))
# Gradient descent with an L2 (ridge) penalty on the DCT coefficients.
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
f_i_optim = np.ones(M)  # initial coefficient guess (all ones)
cost_list = []
for i in range(iterations):
    hypothesis = idct(f_i_optim, type=2, norm='ortho')
    # Monitored cost is the data fit only; the L2 penalty applied in the
    # gradient below is NOT included in this number.
    cost = np.sum((hypothesis - sum_t_sample_new) ** 2)
    if i % 100 == 0:
        cost_list.append(cost)
    # Ridge gradient: d/df [ ||idct(f) - y||^2 + ||f||^2 ].
    # Fix: the adjoint of idct under norm='ortho' is the forward dct, so the
    # data term uses dct (the original applied idct, which has the same
    # fixed point but is not the true gradient).
    gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho') + 2 * f_i_optim
    f_i_optim = f_i_optim - alpha * gradient
# Compare against the direct DCT of the sampled signal.
plt.plot(range(M), f_i_dash)
plt.plot(range(M), f_i_optim)
plt.figure(1)
plt.subplot(121)
plt.plot(range(M), f_i_dash, color='orange')
plt.plot(range(M), f_i_optim, linestyle="", marker="o", color='blue')
plt.subplot(122)
plt.plot(cost_list)
print('min cost:', min(cost_list))
# Gradient descent with an L1 (lasso-style) penalty on the DCT coefficients.
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
f_i_optim = np.ones(M)  # initial coefficient guess (all ones)
cost_list = []
for i in range(iterations):
    hypothesis = idct(f_i_optim, type=2, norm='ortho')
    # Monitored cost is the data fit only; the L1 penalty applied in the
    # gradient below is NOT included in this number.
    cost = np.sum((hypothesis - sum_t_sample_new) ** 2)
    if i % 100 == 0:
        cost_list.append(cost)
    # L1 (sub)gradient: d/df [ ||idct(f) - y||^2 + sum|f| ].
    # Fix 1: the adjoint of idct under norm='ortho' is the forward dct, so
    #        the data term uses dct, not idct.
    # Fix 2: np.sign replaces abs(f)/f, which produced NaN (0/0) whenever a
    #        coefficient was exactly zero.
    gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho') + np.sign(f_i_optim)
    f_i_optim = f_i_optim - alpha * gradient
# Compare against the direct DCT of the sampled signal.
plt.plot(range(M), f_i_dash)
plt.plot(range(M), f_i_optim)
plt.figure(1)
plt.subplot(121)
plt.plot(range(M), f_i_dash, color='orange')
plt.plot(range(M), f_i_optim, linestyle="", marker="o", color='blue')
plt.subplot(122)
plt.plot(cost_list)
print('min cost:', min(cost_list))
from scipy.fftpack import ifft, idct, dct
# Sweep the number of retained samples M and record the converged data-fit
# cost of unregularized gradient descent for each M.
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
# Dense "ground truth" signal: 3 seconds at 10 kHz.
x = np.arange(0.0, 3.0, 0.0001)
t_697 = np.sin(2 * np.pi * 697 * x)
t_1209 = np.sin(2 * np.pi * 1209 * x)
sum_t = t_697 + t_1209
cost_list_orig_min = []
list_M = np.arange(0.1, 5, 0.1) * 1000  # sample counts 100, 200, ..., 4900
for M in list_M:
    M = int(M)
    cost_list = []
    # Keep M randomly chosen samples; NaN marks the discarded ones.
    sum_t_sample = np.full(len(sum_t), np.nan)
    sum_t_sample_index = np.random.choice(len(sum_t), M, replace=False)
    sum_t_sample[sum_t_sample_index] = sum_t[sum_t_sample_index]
    sum_t_sample_new = sum_t_sample[np.isfinite(sum_t_sample)]
    f_i_dash = dct(sum_t_sample_new, type=2, norm='ortho')  # direct DCT (reference)
    f_i_optim = np.ones(M)  # initial coefficient guess
    for i in range(iterations):
        hypothesis = idct(f_i_optim, type=2, norm='ortho')
        cost_orig = np.sum((hypothesis - sum_t_sample_new) ** 2)
        if i % 100 == 0:
            cost_list.append(cost_orig)
        # Fix: true gradient uses the adjoint of idct, which under
        # norm='ortho' is the forward dct (original applied idct).
        gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho')
        f_i_optim = f_i_optim - alpha * gradient
    if M % 500 == 0:
        print('M:', M, 'min cost:', np.mean(cost_list[-5:]))
    # Record the mean of the last 5 sampled costs as the converged cost.
    cost_list_orig_min.append(np.mean(cost_list[-5:]))
plt.plot(list_M, cost_list_orig_min)
from scipy.fftpack import ifft, idct, dct
# Sweep M and record the converged cost for L1-regularized gradient descent.
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
# Dense "ground truth" signal: 3 seconds at 10 kHz.
x = np.arange(0.0, 3.0, 0.0001)
t_697 = np.sin(2 * np.pi * 697 * x)
t_1209 = np.sin(2 * np.pi * 1209 * x)
sum_t = t_697 + t_1209
cost_list_L1_min = []
list_M = np.arange(0.1, 5, 0.1) * 1000  # sample counts 100, 200, ..., 4900
for M in list_M:
    M = int(M)
    cost_list = []
    # Keep M randomly chosen samples; NaN marks the discarded ones.
    sum_t_sample = np.full(len(sum_t), np.nan)
    sum_t_sample_index = np.random.choice(len(sum_t), M, replace=False)
    sum_t_sample[sum_t_sample_index] = sum_t[sum_t_sample_index]
    sum_t_sample_new = sum_t_sample[np.isfinite(sum_t_sample)]
    f_i_dash = dct(sum_t_sample_new, type=2, norm='ortho')  # direct DCT (reference)
    f_i_optim = np.ones(M)  # initial coefficient guess
    for i in range(iterations):
        hypothesis = idct(f_i_optim, type=2, norm='ortho')
        # NOTE(review): this monitored cost penalizes |hypothesis| while the
        # gradient below penalizes the coefficients f_i_optim; an orthonormal
        # transform preserves L2 norms but not L1 norms, so these differ —
        # confirm which quantity was meant to be regularized.
        cost = np.sum((hypothesis - sum_t_sample_new) ** 2 + abs(hypothesis))
        if i % 100 == 0:
            cost_list.append(cost)
        # Fix 1: the data-term gradient uses dct (adjoint of idct under
        #        norm='ortho'), not idct.
        # Fix 2: np.sign replaces abs(f)/f, which is NaN (0/0) whenever a
        #        coefficient is exactly zero.
        gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho') + np.sign(f_i_optim)
        f_i_optim = f_i_optim - alpha * gradient
    if M % 500 == 0:
        print('M:', M, 'min cost:', np.mean(cost_list[-5:]))
    # Record the mean of the last 5 sampled costs as the converged cost.
    cost_list_L1_min.append(np.mean(cost_list[-5:]))
plt.plot(list_M, cost_list_L1_min)
from scipy.fftpack import ifft, idct, dct
# Sweep M and record the converged cost for L2-regularized gradient descent.
np.random.seed(0)
alpha = 0.0005  # learning rate
iterations = 10000
# Dense "ground truth" signal: 3 seconds at 10 kHz.
x = np.arange(0.0, 3.0, 0.0001)
t_697 = np.sin(2 * np.pi * 697 * x)
t_1209 = np.sin(2 * np.pi * 1209 * x)
sum_t = t_697 + t_1209
cost_list_L2_min = []
list_M = np.arange(0.1, 5, 0.1) * 1000  # sample counts 100, 200, ..., 4900
for M in list_M:
    M = int(M)
    cost_list = []
    # Keep M randomly chosen samples; NaN marks the discarded ones.
    sum_t_sample = np.full(len(sum_t), np.nan)
    sum_t_sample_index = np.random.choice(len(sum_t), M, replace=False)
    sum_t_sample[sum_t_sample_index] = sum_t[sum_t_sample_index]
    sum_t_sample_new = sum_t_sample[np.isfinite(sum_t_sample)]
    f_i_dash = dct(sum_t_sample_new, type=2, norm='ortho')  # direct DCT (reference)
    f_i_optim = np.ones(M)  # initial coefficient guess
    for i in range(iterations):
        hypothesis = idct(f_i_optim, type=2, norm='ortho')
        # Monitored cost includes the ridge penalty; since norm='ortho' makes
        # the transform orthonormal, sum(hypothesis**2) == sum(f_i_optim**2),
        # so this penalty is consistent with the +2*f term in the gradient.
        cost = np.sum((hypothesis - sum_t_sample_new) ** 2 + hypothesis ** 2)
        if i % 100 == 0:
            cost_list.append(cost)
        # Fix: the data-term gradient uses dct (adjoint of idct under
        # norm='ortho'), not idct (same fixed point, but not the true
        # gradient).
        gradient = dct(2 * (hypothesis - sum_t_sample_new), type=2, norm='ortho') + 2 * f_i_optim
        f_i_optim = f_i_optim - alpha * gradient
    if M % 500 == 0:
        print('M:', M, 'min cost:', np.mean(cost_list[-5:]))
    # Record the mean of the last 5 sampled costs as the converged cost.
    cost_list_L2_min.append(np.mean(cost_list[-5:]))
plt.plot(list_M, cost_list_L2_min)
# Overlay the three M-sweep cost curves for comparison:
# unregularized (blue), L1-penalized (orange), L2-penalized (red).
plt.plot(list_M, cost_list_orig_min, color='blue')
plt.plot(list_M, cost_list_L1_min, color='orange')
plt.plot(list_M, cost_list_L2_min, color='red')