Reconstructing a DTMF tone from random samples via its DCT coefficients (a compressed-sensing demo)
import numpy as np
import matplotlib.pyplot as plt
from pydrake.all import (
    Evaluate, Variable
)  # imported but not used below
plot the DTMF tone for digit 1 (697 Hz row tone + 1209 Hz column tone)
N = 300
pi = np.pi
startTime = 0
stopTime = 0.01
t = np.linspace(startTime,stopTime, num=N)
freq1 = 697 #Hz
freq2 = 1209 #Hz
toneInTime = (np.sin(2*pi*freq1*t)+np.sin(2*pi*freq2*t))
plt.plot(t, toneInTime)
plt.show()
calculate and plot the DCT coefficients of the tone (orthonormal DCT-II)
def Dij(i, j, N):
    # entry (i, j) of the N x N orthonormal DCT-II matrix
    if i == 0:
        return np.sqrt(1/N)
    else:
        return np.sqrt(2/N)*np.cos((pi*(2*j+1)*i)/(2*N))

DijMatrix = np.zeros((N, N))
for i in range(N):
    for j in range(N):
        DijMatrix[i][j] = Dij(i, j, N)
f = np.dot(DijMatrix, toneInTime)
plt.plot(f)
plt.show()
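optional cross-check of the hand-built DCT matrix (a sketch, assuming SciPy is available; scipy.fft.dct with norm='ortho' implements the same orthonormal DCT-II)
from scipy.fft import dct
print(np.allclose(f, dct(toneInTime, type=2, norm='ortho')))  # expected: True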
plot the inverse DCT (reconstruct the time series from the coefficients)
# the DCT matrix is orthonormal, so its transpose is its inverse
DijMatrixT = DijMatrix.transpose()
ti = np.dot(DijMatrixT, f)
plt.plot(t, ti)
plt.show()
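quick numerical check of orthonormality and of the round trip
print(np.allclose(np.dot(DijMatrix, DijMatrixT), np.eye(N)))  # D D^T = I
print(np.allclose(ti, toneInTime))                            # inverse DCT recovers the tone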
randomly sample and plot M points of the time series
M = 100
randomIndex = np.random.randint(0,N,M)
#randomTime = stopTime * np.random.rand(M)
randomTime = t[randomIndex]
randomSampleVals = (np.sin(2*pi*freq1*randomTime)+np.sin(2*pi*freq2*randomTime))
randomSampleVals = randomSampleVals.reshape(M,1)
#print(randomTime)
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, toneInTime)
plt.show()
start from a random guess for the DCT coefficients and use gradient descent to minimize the squared error at the sample points
initialGuessRand = np.random.rand(N)
initialGuessRand = initialGuessRand.reshape(N,1)
rate = 0.01
DijMatrixM = np.zeros((M, M))   # M-point DCT matrix; built here but not used in the update below
for i in range(M):
    for j in range(M):
        DijMatrixM[i][j] = Dij(i, j, M)
DijMatrixMT = DijMatrixM.transpose()
def f_prime_to_t(x, randomIndex):
    # map DCT coefficients x to the time domain and keep only the sampled points
    time_series = np.dot(DijMatrixT, x)
    return time_series[randomIndex]

def f_prime_to_t_l(x, randomIndex):
    # same, but zero-padded back to a length-N column vector
    vals = f_prime_to_t(x, randomIndex)
    res = np.zeros((N, 1))
    res[randomIndex] = vals
    return res

randomSampleValMat = np.zeros((N, 1))
randomSampleValMat[randomIndex] = randomSampleVals

def loss(x):
    # squared reconstruction error at the sampled points
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2)
    return res
def gradient_descent(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early if the fit at the sample points is already good.
        if loss(x) <= 0.001:
            break
        # Update the parameters using the update rule.
        x = update_rule(x, rate)
    return x, loss(x)
def approximated_gradient(x, rate):
    """
    Update rule. Receive theta and update it with the next theta.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # Gradient step for loss(x) = ||y - (D^T x)|_samples||^2.
    # The zero-padded residual is multiplied by DijMatrix (not its transpose),
    # since d(D^T x)/dx = D^T and transposing gives D.
    x = x + 2*rate*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex))))
    return x
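sanity check of the analytic step above against a central finite difference of loss (a sketch; numerical_gradient, x_test and the tolerance are illustrative choices)
def numerical_gradient(x, eps=1e-6):
    # central finite-difference approximation of d(loss)/dx
    g = np.zeros_like(x)
    for k in range(N):
        e = np.zeros_like(x)
        e[k] = eps
        g[k] = (loss(x + e) - loss(x - e)) / (2*eps)
    return g

x_test = np.random.rand(N, 1)
analytic = -2*np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x_test, randomIndex)))
print(np.allclose(analytic, numerical_gradient(x_test), atol=1e-4))  # expected: True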
f_opt, loss_f = gradient_descent(rate,approximated_gradient,initialGuessRand,1000)
re_constructed_time = np.dot(DijMatrixT,f_opt)
print(loss_f)
plt.plot(t, re_constructed_time)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-5,5)
plt.show()
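with M samples and N unknown coefficients (M < N) the unregularized fit is underdetermined; as a point of comparison, np.linalg.lstsq gives the minimum-norm interpolating coefficients (a sketch; A and x_lstsq are illustrative names)
A = DijMatrixT[randomIndex, :]   # M x N: maps DCT coefficients to the sampled times
x_lstsq, *_ = np.linalg.lstsq(A, randomSampleVals, rcond=None)
plt.plot(t, np.dot(DijMatrixT, x_lstsq))
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime, randomSampleVals, color='red')
plt.show()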
L2 norm: add a ridge (sum-of-squares) penalty on the DCT coefficients
def lossL1(x):
    # squared error at the sample points plus an L2 (ridge) penalty on the coefficients
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2) + np.sum(x**2)
    return res
def gradient_descent_L1(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        if lossL1(x) <= 0.001:
            break
        # Update the parameters using the update rule.
        x = update_rule(x, rate)
    # report the unpenalized data-term loss so runs are comparable
    return x, loss(x)
def L1_gradient(x, rate):
    """
    Update rule. Receive theta and update it with the next theta.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # gradient of lossL1: data-term gradient plus 2x from the ridge penalty
    x = x - rate*(-2*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex)))) + 2*x)
    return x
f_opt_L1, loss_f_L1 = gradient_descent_L1(rate,L1_gradient,initialGuessRand,1000)
re_constructed_time_L1 = np.dot(DijMatrixT,f_opt_L1)
print(loss_f_L1)
plt.plot(t, re_constructed_time_L1)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-4,4)
plt.show()
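the ridge-penalized objective is quadratic, so it also has a closed-form solution that can serve as a baseline for the gradient-descent result (a sketch; A and x_ridge are illustrative names, and the unit penalty weight matches lossL1)
A = DijMatrixT[randomIndex, :]   # M x N sampling of the inverse DCT
x_ridge = np.linalg.solve(A.T @ A + np.eye(N), A.T @ randomSampleVals)
plt.plot(t, np.dot(DijMatrixT, x_ridge))
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime, randomSampleVals, color='red')
plt.show()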
L1: add a sparsity-promoting L1 (absolute value) penalty on the DCT coefficients
def lossL2(x):
    # squared error at the sample points plus an L1 (absolute value) penalty on the coefficients
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2) + np.sum(np.abs(x))
    return res
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        if lossL2(x) <= 0.001:
            break
        # Update the parameters using the update rule.
        x = update_rule(x, rate)
    # report the unpenalized data-term loss so runs are comparable
    return x, loss(x)
def L2_gradient(x, rate):
    """
    Update rule. Receive theta and update it with the next theta.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # subgradient of the L1-penalized loss: data-term gradient plus sign(x)
    # (the factor of 2 on sign(x) acts as a heavier penalty weight)
    x = x - rate*(-2*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex)))) + 2*np.sign(x))
    return x
f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)
plt.plot(t, re_constructed_time_L2)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-4,4)
plt.show()
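the sign(x) step above is a subgradient step; a standard alternative for the L1 penalty is proximal gradient descent (ISTA), which soft-thresholds the coefficients after each data-term gradient step (a sketch; soft_threshold and x_ista are illustrative names, and the step size / threshold are untuned assumptions)
def soft_threshold(v, thresh):
    # proximal operator of thresh * ||.||_1
    return np.sign(v)*np.maximum(np.abs(v) - thresh, 0.0)

x_ista = np.random.rand(N, 1)
for _ in range(1000):
    grad = -2*np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x_ista, randomIndex)))
    x_ista = soft_threshold(x_ista - rate*grad, rate)
plt.plot(t, np.dot(DijMatrixT, x_ista))
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime, randomSampleVals, color='red')
plt.show()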
m = [25,50,100,150,200]
f_prime = []
re_constructed_times=[]
randomTimes = []
randomSamples = []
losses = []
for n in range(5):
    M = m[n]
    # generate random samples
    randomIndex = np.random.randint(0, N, M)
    randomTime = t[randomIndex]
    randomSampleVals = (np.sin(2*pi*freq1*randomTime)+np.sin(2*pi*freq2*randomTime))
    randomSampleVals = randomSampleVals.reshape(M, 1)
    randomSampleValMat = np.zeros((N, 1))
    randomSampleValMat[randomIndex] = randomSampleVals
    # generate a random initial guess
    initialGuessRand = np.random.rand(N)
    initialGuessRand = initialGuessRand.reshape(N, 1)
    # run L1-penalized gradient descent
    f_opt_L2, loss_f_L2 = gradient_descent_L2(rate, L2_gradient, initialGuessRand, 1000)
    re_constructed_time_L2 = np.dot(DijMatrixT, f_opt_L2)
    # log results
    f_prime.append(f_opt_L2)
    re_constructed_times.append(re_constructed_time_L2)
    randomTimes.append(randomTime)
    randomSamples.append(randomSampleVals)
    losses.append(loss_f_L2)
fig, axs = plt.subplots(5, 1, figsize=(15, 15))
for i in range(5):
    axs[i].plot(t, re_constructed_times[i])
    axs[i].plot(t, np.dot(DijMatrixT, f), color='gray', linestyle='dashed')
    axs[i].scatter(randomTimes[i], randomSamples[i], color='red')
    axs[i].title.set_text("M = {}, loss = {}".format(m[i], round(losses[i], 3)))
fig.suptitle('L1 norm gradient descent time reconstruction')
plt.show()
fig, axs = plt.subplots(5, 1, figsize=(15, 15))
for i in range(5):
    axs[i].plot(f, color='black', linestyle='dashed')
    axs[i].plot(f_prime[i])
    axs[i].title.set_text("M = {}, loss = {}".format(m[i], round(losses[i], 3)))
fig.suptitle('L1 norm estimated coefficients')
plt.show()
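the DTMF tone is compressible in the DCT basis, which is why the L1 penalty helps: the true coefficients f are concentrated around the two tone frequencies (the threshold of 1 below is an arbitrary illustrative choice)
print("coefficients with |f| > 1:", np.sum(np.abs(f) > 1), "out of", N)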
# Nyquist: the highest DTMF component is 1209 Hz, so uniform sampling would need at least twice that rate
print("Nyquist rate = ", 2*freq2, "Hz")
print("Samples required in the 0.01 s window = ", 2*freq2*0.01)
# trying something bad lol: estimate the gradient from a single random perturbation instead of the analytic form
def L2_gradient_approx(x, rate):
    w = 0.001*np.random.normal(0, (0.25)**0.5, (N, 1))   # random perturbation, shaped to match x
    x = x - rate*(((lossL2(x+w) - lossL2(x))*w) + 2*np.sign(x))
    return x
f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient_approx,initialGuessRand,100)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)
plt.plot(t, re_constructed_time_L2)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-4,4)
plt.show()