# IMPORTS
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
import time
%config InlineBackend.figure_format = 'svg'
# PLOT DTMF TONE
pi = np.pi
def toneInTime(t, freq1=697, freq2=1209):
    """Generate a time-series tone as the sum of two sinusoids:
    freq1 in Hz, default = 697
    freq2 in Hz, default = 1209
    t: array of sample times
    returns np.array"""
    return np.sin(2*pi*freq1*t) + np.sin(2*pi*freq2*t)
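# For reference, each DTMF key combines one row frequency and one column
# frequency; the defaults above (697 Hz, 1209 Hz) are the digit "1". This
# lookup table is an addition for context, not part of the original analysis.
DTMF = {
    '1': (697, 1209), '2': (697, 1336), '3': (697, 1477),
    '4': (770, 1209), '5': (770, 1336), '6': (770, 1477),
    '7': (852, 1209), '8': (852, 1336), '9': (852, 1477),
    '*': (941, 1209), '0': (941, 1336), '#': (941, 1477),
}
# e.g. toneInTime(t, *DTMF['5']) would synthesize the "5" key.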
startTime = 0
stopTime = 0.25
N = 2500
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
plt.plot(t, tone_t)
plt.title("N=2500, t=0.25s")
plt.show()
# Let's have a listen.
f_s = N/stopTime # Sampling frequency: samples/total duration
# extend the tone by repeating the 0.25 s chunk -- this method introduces
# clicking artifacts at the chunk boundaries.
duration = 2  # seconds
iters = int(duration/stopTime)
tone_list = [tone_t]
for i in range(iters - 1):  # the list already holds one chunk
    tone_list.append(tone_t)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(tone_long, f_s)
time.sleep(1.0)  # let it play for 1 s before stopping
sd.stop()
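# A click-free alternative (an added sketch, not the original approach):
# synthesize the tone over the full duration in one call, so the sinusoids
# stay continuous and there are no chunk boundaries to click at.
t_full = np.linspace(0, duration, num=int(f_s*duration))
tone_smooth = toneInTime(t_full)
sd.play(tone_smooth, f_s)
time.sleep(1.0)
sd.stop()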
# Let's try to do a better job of dropping the artifacts
f_s = N/stopTime # Sampling frequency: samples/total duration
# extend the tone by repeating a trimmed chunk -- slightly less clicking
duration = 2 #seconds
tone_interval = tone_t[0:(N-19)]  # trim so the chunk ends near a zero crossing
print(1/697)  # period of the 697 Hz component in seconds (~14 samples at f_s)
tone_list = [tone_interval]
for i in range(3):  # four chunks ~= 1 s of audio
    tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(tone_long,f_s)
time.sleep(1.0)
sd.stop()
# Let's zoom and enhance lmao
startTime = 0
stopTime = 0.01
N = 500
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
plt.plot(t, tone_t)
plt.title("N=750, t=0.01s")
plt.show()
# Let's zoom in w/ the prior sampling rate, for clarity
startTime = 0
stopTime = 0.025
N = 250
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
plt.plot(t, tone_t)
plt.title("N=250, t=0.025s")
plt.show()
# DCT
def Dij(i, j, N):
    """Entry (i, j) of the orthonormal DCT-II matrix."""
    if i == 0:
        return np.sqrt(1/N)
    else:
        return np.sqrt(2/N)*np.cos((pi*(2*j+1)*i)/(2*N))
startTime = 0
stopTime = 0.25
N = 2500
t = np.linspace(startTime, stopTime, num=N)
tone_t = toneInTime(t)
# build the DCT matrix only after N matches the signal length
# (building it before resetting N would leave a stale 250x250 matrix)
DijMatrix = np.zeros((N,N))
for i in range(N):
    for j in range(N):
        DijMatrix[i][j] = Dij(i, j, N)
f = np.dot(DijMatrix, tone_t)
# and there's the tones
plt.title("DCT coeffs")
plt.plot(f)
plt.show()
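# A quick sanity check (an addition, assuming the usual DCT-II index-to-frequency
# mapping freq = k/(2*T) for a signal of duration T): the largest-magnitude
# coefficients should cluster near 697 Hz and 1209 Hz.
# (DijMatrix is orthonormal, so np.allclose(DijMatrix @ DijMatrix.T, np.eye(N)) holds.)
peaks = np.argsort(np.abs(f))[-4:]
print(sorted(peaks / (2*stopTime)))  # expect values near 697 and 1209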
# INVERSE DCT
# DijMatrix is orthonormal, so its transpose is its inverse; no need to rebuild it
DijMatrixT = DijMatrix.transpose()
ti = np.dot(DijMatrixT, f)
plt.title("Reconstructed signal, 0.25 second duration")
plt.plot(t, ti)
plt.show()
plt.title("Reconstructed signal, 0.025 second duration")
plt.plot(t[:(int(N/10))], ti[:(int(N/10))])
plt.show()
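# An added check: since the DCT matrix is orthonormal, the inverse transform
# should reproduce the original signal to machine precision.
print(np.max(np.abs(ti - tone_t)))  # expect something tiny, ~1e-12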
# RANDOM SAMPLING OF 125 POINTS
M = 125
i_rand = np.random.choice(np.arange(N), M, replace=False)  # M distinct sample times
randomTime = t[i_rand]
randomSampleVals = toneInTime(randomTime)
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, tone_t)
plt.show()
# GRADIENT DESCENT
def getFrequencies(tone_t, N):
    """Analysis: build the DCT matrix and return the coefficients of tone_t."""
    DijMatrix = np.zeros((N,N))
    for i in range(N):
        for j in range(N):
            DijMatrix[i][j] = Dij(i, j, N)
    f = np.dot(DijMatrix, tone_t)
    return f, DijMatrix

def getTimes(f):
    """Synthesis: map DCT coefficients back to the time domain."""
    time_series = np.dot(DijMatrix.T, f)
    return time_series
initialGuess = np.random.rand(N)
rate = 0.01

def loss(x):
    # squared error at the sampled time points only (the full reconstruction
    # and the M samples have different lengths, so compare at i_rand)
    l = np.sum((randomSampleVals - getTimes(x)[i_rand])**2)
    return l

def approx_gradient(x, rate):
    # scatter the residual at the sampled points back to full length, then map
    # it to the coefficient domain; DijMatrix (not its transpose) is the
    # Jacobian of the synthesis map getTimes(x) = DijMatrix.T @ x
    residual = np.zeros(N)
    residual[i_rand] = randomSampleVals - getTimes(x)[i_rand]
    x = x + 2*rate*np.dot(DijMatrix, residual)
    return x
def gradient_descent(rate, update_rule, initial_x, iter=100):
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early once the loss is small; otherwise apply the update rule.
        if loss(x) <= 0.001:
            break
        x = update_rule(x, rate)
    return x, loss(x)
f_opt, loss_f = gradient_descent(rate, approx_gradient, initialGuess)
print(loss_f)
re_constructed_time = np.dot(DijMatrixT,f_opt)
print(re_constructed_time.shape)
plt.plot(t, np.dot(DijMatrixT, f), 'k--',label='true')
plt.scatter(randomTime,randomSampleVals, color='red',label='random points')
plt.plot(t, re_constructed_time,label='grad descent')
plt.ylim(-5,5)
plt.legend()
plt.title("oops, looks cool")
plt.show()
# redo the sampling with consistent (column-vector) shapes
randomIndex = np.random.randint(0, N, M)
freq1 = 697
freq2 = 1209
randomTime = t[randomIndex]
randomSampleVals = (np.sin(2*pi*freq1*randomTime) + np.sin(2*pi*freq2*randomTime))
randomSampleVals = randomSampleVals.reshape(M, 1)
initialGuessRand = np.random.rand(N)
initialGuessRand = initialGuessRand.reshape(N, 1)
rate = 0.01
def f_prime_to_t(x, randomIndex):
    """Synthesize the time series from coefficients x, sampled at randomIndex."""
    time_series = np.dot(DijMatrixT, x)
    return time_series[randomIndex]

def f_prime_to_t_l(x, randomIndex):
    """Like f_prime_to_t, but scattered back into a full-length (N,1) vector
    that is zero away from the sampled indices."""
    vals = f_prime_to_t(x, randomIndex)
    res = np.zeros(N)
    res = res.reshape(N, 1)
    res[randomIndex] = vals
    return res
randomSampleValMat = np.zeros((N,1))
randomSampleValMat[randomIndex] = randomSampleVals

def loss(x):
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2)
    return res
def gradient_descent(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early once the loss is small; otherwise apply the update rule.
        if loss(x) <= 0.001:
            break
        x = update_rule(x, rate)
    return x, loss(x)
def approximated_gradient(x, rate):
    """
    Update rule. Receive x and update it with the next x.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # gradient of the masked squared error; DijMatrix (not its transpose) is
    # the Jacobian of the synthesis map ti = DijMatrixT @ x
    x = x + 2*rate*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex))))
    return x
f_opt, loss_f = gradient_descent(rate,approximated_gradient,initialGuessRand,1000)
re_constructed_time = np.dot(DijMatrixT,f_opt)
print(loss_f)
plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed',label='true')
plt.scatter(randomTime,randomSampleVals, color='red',label='random')
plt.plot(t, re_constructed_time,label='grad')
plt.legend()
plt.title("grad descent")
plt.ylim(-5,5)
plt.show()
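# For reference (an addition, not in the original): with M=125 samples and
# N=2500 unknowns the unregularized fit is underdetermined, so np.linalg.lstsq
# on the sampled synthesis matrix returns the minimum-norm coefficient vector.
A = DijMatrixT[randomIndex, :]  # (M, N) sampled rows of the synthesis map
f_ls, *_ = np.linalg.lstsq(A, randomSampleVals, rcond=None)
plt.plot(t, np.dot(DijMatrixT, f_ls), label='lstsq (min-norm)')
plt.plot(t, np.dot(DijMatrixT, f), 'k--', label='true')
plt.legend()
plt.show()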
#yikes! want to hear it?
tone_interval = re_constructed_time[:, 0]  # flatten the (N,1) column for playback
sd.play(tone_interval, f_s)
time.sleep(1.0)
sd.stop()
#no you didn't <3
def lossL1(x):
    # data misfit at the sampled points plus a penalty term
    # (note: despite the name, sum(x**2) is an L2/ridge penalty, not L1)
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2) + np.sum(x**2)
    return res
def gradient_descent_L1(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early once the loss is small; otherwise apply the update rule.
        if lossL1(x) <= 0.001:
            break
        x = update_rule(x, rate)
    return x, lossL1(x)
def L1_gradient(x, rate):
    """
    Update rule. Receive x and update it with the next x.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # gradient of lossL1: data-term gradient plus 2*x from the ridge penalty
    x = x - rate*(-2*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex)))) + 2*x)
    return x
f_opt_L1, loss_f_L1 = gradient_descent_L1(rate,L1_gradient,initialGuessRand,1000)
re_constructed_time_L1 = np.dot(DijMatrixT,f_opt_L1)
print(loss_f_L1)
plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, re_constructed_time_L1)
plt.ylim(-4,4)
plt.show()
sd.play(re_constructed_time_L1,f_s)
time.sleep(1.0)
sd.stop()
rate = 0.01

def lossL2(x):
    # data misfit at the sampled points plus a penalty term
    # (note: despite the name, sum(|x|) is an L1/lasso penalty, not L2)
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2) + np.sum(np.abs(x))
    return res
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early once the loss is small (note: this checks lossL2, not lossL1)
        if lossL2(x) <= 0.001:
            break
        x = update_rule(x, rate)
    return x, lossL2(x)
def L2_gradient(x, rate):
    """
    Update rule. Receive x and update it with the next x.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # subgradient of lossL2: data-term gradient plus sign(x) from the |x| penalty
    x = x - rate*(-2*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex)))) + np.sign(x))
    return x
f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)
plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, re_constructed_time_L2)
plt.ylim(-4,4)
plt.show()
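# A standard alternative for the non-smooth |x| penalty (an added sketch, not
# the original method): proximal gradient descent (ISTA), which replaces the
# sign() subgradient with a soft-thresholding step. lam is a hypothetical
# penalty weight introduced here for illustration.
def soft_threshold(z, lam):
    return np.sign(z) * np.maximum(np.abs(z) - lam, 0.0)

def ista_step(x, rate, lam=1.0):
    # gradient step on the data term, then proximal step on lam*||x||_1
    residual = randomSampleValMat - f_prime_to_t_l(x, randomIndex)
    x = x + 2*rate*np.dot(DijMatrix, residual)
    return soft_threshold(x, rate*lam)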
tone_interval = re_constructed_time_L2[0:(N-19), 0]  # trimmed chunk, flattened for playback
tone_list = [tone_interval]
for i in range(3):  # four chunks ~= 1 s of audio
    tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
sd.play(tone_long, f_s)
time.sleep(1.0)
sd.stop()
# recap at a smaller size: rebuild everything with N=300 samples over 0.01 s
N = 300
pi = np.pi
startTime = 0
stopTime = 0.01
t = np.linspace(startTime, stopTime, num=N)
freq1 = 697   # Hz
freq2 = 1209  # Hz
tone_t = np.sin(2*pi*freq1*t) + np.sin(2*pi*freq2*t)  # renamed so it doesn't shadow toneInTime()
def Dij(i, j, N):
    if i == 0:
        return np.sqrt(1/N)
    else:
        return np.sqrt(2/N)*np.cos((pi*(2*j+1)*i)/(2*N))

DijMatrix = np.zeros((N,N))
for i in range(N):
    for j in range(N):
        DijMatrix[i][j] = Dij(i, j, N)
f = np.dot(DijMatrix, tone_t)
DijMatrixT = DijMatrix.transpose()  # orthonormal, so the transpose is the inverse
ti = np.dot(DijMatrixT, f)
M = 100
randomIndex = np.random.randint(0, N, M)
randomTime = t[randomIndex]
randomSampleVals = (np.sin(2*pi*freq1*randomTime) + np.sin(2*pi*freq2*randomTime))
randomSampleVals = randomSampleVals.reshape(M, 1)
initialGuessRand = np.random.rand(N)
initialGuessRand = initialGuessRand.reshape(N, 1)
rate = 0.01
# rebuild the scattered sample vector at the new size (the earlier one was (2500,1))
randomSampleValMat = np.zeros((N,1))
randomSampleValMat[randomIndex] = randomSampleVals
def lossL2(x):
    # data misfit at the sampled points plus an L1/lasso penalty
    res = np.sum((randomSampleVals - f_prime_to_t(x, randomIndex))**2) + np.sum(np.abs(x))
    return res
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
    """gradient descent algorithm
    @params:
    - rate (float): eta variable of gradient descent.
    - update_rule: a function with a signature update_rule(x, rate).
    - initial_x: initial position for gradient descent.
    - iter: number of iterations to run gradient descent for.
    """
    x = initial_x
    # Loop through with gradient descent.
    for i in range(iter):
        # Stop early once the loss is small (note: this checks lossL2, not lossL1)
        if lossL2(x) <= 0.001:
            break
        x = update_rule(x, rate)
    return x, lossL2(x)
def L2_gradient(x, rate):
    """
    Update rule. Receive x and update it with the next x.
    @params
    - x: input variable x.
    - rate: rate of descent, variable "eta".
    @returns:
    - updated variable x.
    """
    # subgradient of lossL2: data-term gradient plus sign(x) from the |x| penalty
    x = x - rate*(-2*(np.dot(DijMatrix, (randomSampleValMat - f_prime_to_t_l(x, randomIndex)))) + np.sign(x))
    return x
f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)
plt.plot(t, re_constructed_time_L2)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-4,4)
plt.show()
# WAVELET (referencing Numerical Recipes, chapter 13: wavelet transforms)
import numpy as np
import matplotlib.pyplot as plt
L = 2**12
x = np.zeros(L)
x[4] = 1
x[29] = 1
# Daub4 coeffs
c0 = (1+np.sqrt(3))/(4*np.sqrt(2))
c1 = (3+np.sqrt(3))/(4*np.sqrt(2))
c2 = (3-np.sqrt(3))/(4*np.sqrt(2))
c3 = (1-np.sqrt(3))/(4*np.sqrt(2))
# inverse wavelet transform: undo the pyramid from the coarsest level up to the
# full length, one inverse Daub4 step per level (completed here following
# Numerical Recipes' daub4 with isign < 0; the wrap-around indexing is theirs)
for i in range(12):
    n = 2**(i+1)  # length of the sub-block being inverted at this level
    if n < 4:
        continue  # the Daub4 step needs at least 4 points
    nh = n // 2
    S = x[:nh].copy()   # smooth coefficients (the first 'half' of the block)
    d = x[nh:n].copy()  # detail coefficients (everything else at this level)
    x_new = np.zeros(n)
    # wrap-around pair: the first two outputs use the last S and d values
    x_new[0] = c2*S[nh-1] + c1*d[nh-1] + c0*S[0] + c3*d[0]
    x_new[1] = c3*S[nh-1] - c0*d[nh-1] + c1*S[0] - c2*d[0]
    for j in range(1, nh):
        x_new[2*j]   = c2*S[j-1] + c1*d[j-1] + c0*S[j] + c3*d[j]
        x_new[2*j+1] = c3*S[j-1] - c0*d[j-1] + c1*S[j] - c2*d[j]
    x[:n] = x_new
plt.plot(x)
plt.show()
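# An added round-trip check (a sketch based on NR's forward daub4 step,
# isign > 0, so any transcription slip is mine): one forward step applied at
# full length should undo the final inverse step above.
def daub4_forward_step(a):
    n = len(a)
    nh = n // 2
    out = np.zeros(n)
    for j in range(nh - 1):
        out[j]      = c0*a[2*j] + c1*a[2*j+1] + c2*a[2*j+2] + c3*a[2*j+3]
        out[j + nh] = c3*a[2*j] - c2*a[2*j+1] + c1*a[2*j+2] - c0*a[2*j+3]
    # wrap-around pair uses the last two samples and the first two
    out[nh - 1] = c0*a[-2] + c1*a[-1] + c2*a[0] + c3*a[1]
    out[-1]     = c3*a[-2] - c2*a[-1] + c1*a[0] - c0*a[1]
    return out
x_fwd = daub4_forward_step(x)  # one analysis step back toward the coefficient domain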
# ICA
# generate random pairs s1,s2 within [0,1]
N = 500
s = np.random.uniform(size=(N, 2))
plt.plot(s[:,0], s[:,1], 'ko')
plt.show()
A = np.array([[1,2],[3,1]])  # mixing matrix
mixed = A.dot(s.T)  # (2, N) mixed observations
plt.plot(mixed[0], mixed[1], 'ko')
plt.show()
mixed_m = mixed - np.mean(mixed, axis=1, keepdims=True)  # center each channel separately
covariance = np.cov(mixed_m)
l, v = np.linalg.eigh(covariance)  # eigh: the covariance is symmetric
new = np.dot((v/np.sqrt(l)).T, mixed_m)  # whiten: D^(-1/2) E^T x
print(new.shape)
plt.plot(new[0,:], new[1,:], 'ko')
plt.show()
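# An added check: after whitening, the covariance should be close to the identity.
print(np.cov(new))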
from sympy import symbols, log, cosh, tanh, Derivative, lambdify, simplify
x = symbols('x')
f = log(cosh(x))
f
dfdx = Derivative(f, x).doit()
dfdx
df2 = Derivative(dfdx, x).doit()
df2
lam_f = lambdify(x, f)
lam_df = lambdify(x,dfdx)
lam_df2 = lambdify(x,df2)
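# A quick symbolic check (an addition): d/dx log(cosh(x)) = tanh(x).
print(simplify(dfdx - tanh(x)))  # expect 0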
w = np.random.random((2,1))
iters = 500
for i in range(iters):
    # FastICA one-unit update: w <- E[x g(w^T x)] - E[g'(w^T x)] w, renormalized;
    # the means need axis=1 so each of the two components is averaged over samples
    wx = np.dot(w.T, new)  # (1, N) projections
    w_new = np.mean(new*lam_df(wx), axis=1, keepdims=True) - w*np.mean(lam_df2(wx))
    w = w_new/np.linalg.norm(w_new)
plt.plot(new[0,:], new[1,:], 'ko', label='whitened')
plt.plot(mixed[0], mixed[1], 'ro', label='mixed')
plt.plot(s[:,0], s[:,1], 'bo', label='sources')
print(w)
# draw the recovered direction and its orthogonal complement
plt.plot([0, w[0,0]], [0, w[1,0]], 'g-')
plt.plot([0, -w[1,0]], [0, w[0,0]], 'g-')
plt.legend()
plt.show()