import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
from scipy import fftpack as fft
Equation 13.8 gives us that the DFT can be expressed as:
$$ X_f=\mathbf{M}\cdot\vec x $$Where $\mathbf{M}$ is the $N\times N$ matrix with entries: $$ M_{fn}=\frac{1}{\sqrt{N}}e^{2\pi ifn/N},\qquad f,n=0,\dots,N-1 $$
To simplify, let us say $\omega=e^{2\pi i/N}$, and thus $M_{fn}=\frac{1}{\sqrt{N}}\omega^{fn}$.
In matrix form:
$$ \begin{align} \mathbf{M}=\frac{1}{\sqrt{N}} \begin{bmatrix} \omega^0&\omega^0&\omega^0&\cdots&\omega^0\\ \omega^0&\omega^1&\omega^2&\cdots&\omega^{N-1}\\ \omega^0&\omega^2&\omega^4&\cdots&\omega^{2(N-1)}\\ \vdots&\vdots&\vdots&\ddots&\vdots\\ \omega^0&\omega^{N-1}&\omega^{2(N-1)}&\cdots&\omega^{(N-1)(N-1)} \end{bmatrix} \end{align} $$If $\mathbf{M}$ is unitary, then its conjugate transpose is equal to its inverse.
$$ \begin{align} \mathbf{M}^\dagger=&\frac{1}{\sqrt{N}} \begin{bmatrix} \bar\omega^0&\bar\omega^0&\bar\omega^0&\cdots&\bar\omega^0\\ \bar\omega^0&\bar\omega^1&\bar\omega^2&\cdots&\bar\omega^{N-1}\\ \bar\omega^0&\bar\omega^2&\bar\omega^4&\cdots&\bar\omega^{2(N-1)}\\ \vdots&\vdots&\vdots&\ddots&\vdots\\ \bar\omega^0&\bar\omega^{N-1}&\bar\omega^{2(N-1)}&\cdots&\bar\omega^{(N-1)(N-1)} \end{bmatrix} \end{align} $$Where $\bar\omega=\omega^{-1}$ denotes the complex conjugate. Multiplying the two, entry $(f,g)$ of the product is $\frac{1}{N}\sum_{n=0}^{N-1}\bar\omega^{fn}\omega^{gn}=\frac{1}{N}\sum_{n=0}^{N-1}\omega^{(g-f)n}$. Before doing the multiplication, let us remember that $\bar\omega^{fn}\omega^{fn}=1$ and $\sum_{n=0}^{N-1}1=N$:
$$ \begin{align} \mathbf{M}^\dagger\mathbf{M}=&\frac{1}{N} \begin{bmatrix} N&\sum\omega^n&\sum\omega^{2n}&\cdots&\sum\omega^{(N-1)n}\\ \sum\bar\omega^n&N&\sum\omega^n&\cdots&\sum\omega^{(N-2)n}\\ \sum\bar\omega^{2n}&\sum\bar\omega^n&N&\cdots&\sum\omega^{(N-3)n}\\ \vdots&\vdots&\vdots&\ddots&\vdots\\ \sum\bar\omega^{(N-1)n}&\sum\bar\omega^{(N-2)n}&\sum\bar\omega^{(N-3)n}&\cdots&N \end{bmatrix} \end{align} $$Along the diagonal, each sum is $\sum_n\omega^0=N$. Off the diagonal, each entry is a geometric series that sums to zero: $\sum_{n=0}^{N-1}\omega^{kn}=\frac{\omega^{kN}-1}{\omega^k-1}=0$ for $k\neq0$, since $\omega^N=1$. Thus:
$$ \begin{align} =&\frac{1}{N} \begin{bmatrix} N&0&0&\cdots&0\\ 0&N&0&\cdots&0\\ 0&0&N&\cdots&0\\ \vdots&\vdots&\vdots&\ddots&\vdots\\ 0&0&0&\cdots&N \end{bmatrix}= \begin{bmatrix} 1&0&0&\cdots&0\\ 0&1&0&\cdots&0\\ 0&0&1&\cdots&0\\ \vdots&\vdots&\vdots&\ddots&\vdots\\ 0&0&0&\cdots&1 \end{bmatrix} \blacksquare \end{align} $$
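As a quick numerical check (an added sketch; the small size and variable names are my own, not from the text above), we can build $\mathbf{M}$ directly and confirm that $\mathbf{M}^\dagger\mathbf{M}$ is the identity:
# Sketch: verify that the normalized DFT matrix is unitary for a small N
Nsmall = 8
n = np.arange(Nsmall)
M = np.exp(2j*np.pi*np.outer(n, n)/Nsmall)/np.sqrt(Nsmall)  # M[f,n] = omega^(f*n)/sqrt(N)
np.allclose(np.matmul(M.conj().T, M), np.eye(Nsmall))       # True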
X = np.linspace(0,.25,2500)
# 697 Hz and 1209 Hz are the two DTMF tones that together encode the digit "1"
y1 = np.sin(697*X*2*np.pi)
y2 = np.sin(1209*X*2*np.pi)
plt.plot(X,y1+y2)
plt.xlim(0,.01)   # zoom in on the first 10 ms
plt.plot(X,y1+y2)
plt.xlim(0,.25)   # the full quarter second
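Since `scipy.fftpack` is already imported as `fft`, a quick added sketch (not part of the original steps) confirms the two tones by looking at the FFT magnitude spectrum, which should peak at 697 Hz and 1209 Hz:
# Sketch: magnitude spectrum of the summed tones
spec = np.abs(fft.fft(y1+y2))
freqs = fft.fftfreq(2500, d=X[1]-X[0])
plt.plot(freqs[:1250], spec[:1250])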
t_j = y1+y2
# Build the orthonormal DCT-II matrix row by row: row 0 is the constant
# sqrt(1/N); row i is sqrt(2/N)*cos(pi*(2j+1)*i/(2N))
D = np.tile(np.sqrt(1/2500),2500).reshape((1,2500))
for i in range(1,2500):
    j = np.arange(0,2500)
    row = np.sqrt(2/2500)*np.cos((np.pi*(2*j+1)*i)/(2*2500))
    row = np.reshape(row,(1,2500))
    D = np.concatenate((D,row),axis=0)
f_i = np.matmul(D,t_j)   # forward transform
plt.plot(X,f_i)
plt.plot(X,np.matmul(D.transpose(),f_i))   # the inverse transform recovers the signal
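As an added cross-check (a sketch, not part of the original), the hand-built matrix should agree with SciPy's orthonormal type-II DCT:
# Sketch: compare against scipy.fftpack's orthonormal DCT-II
np.allclose(f_i, fft.dct(t_j, norm='ortho'))   # True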
np.random.seed(15217)
samp = np.random.choice(2500,size=125,replace=False)   # keep 125 of the 2500 samples (5%)
t_k = t_j[samp]      # the subsampled signal
D_ik = D[:, samp]    # the matching columns of the DCT matrix
#Using Neil's conjugate gradient algorithm from last week...
from code_search_cg import cg
import math
import copy as cp
evals = 0
def f(x):
    global evals, D_ik, t_k
    evals += 1
    return np.sum((t_k-np.matmul(D_ik.transpose(),x))**2)

def df(x):
    global evals, D_ik, t_k
    evals += 1
    # gradient of ||t_k - D_ik^T x||^2 is -2*D_ik*(t_k - D_ik^T x)
    return -2*np.matmul(D_ik, t_k-np.matmul(D_ik.transpose(),x))
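As an added sanity check (my own sketch; the test coordinates are arbitrary), the analytic gradient can be compared against central finite differences:
# Sketch: spot-check df against finite differences at a few coordinates
x0 = np.random.normal(size=2500)
g = df(x0)
eps = 1e-6
for k in (0, 100, 2000):
    e = np.zeros(2500)
    e[k] = eps
    print(g[k], (f(x0+e)-f(x0-e))/(2*eps))   # the two columns should agree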
dim = 2500
start = np.zeros(dim)
startstep = 0.1
tolerance = 1e-6
#cg Requires (f,df,start,startstep,tolerance)
pts = cg(f, df, start,startstep,tolerance)
plt.plot(X,pts[1]['pt'])
Something seems... off?
To invert the wavelet transform, we must construct the matrix:
$$ \begin{bmatrix} c_0&c_3&&&&&&c_2&c_1\\ c_1&-c_2&&&&&&c_3&-c_0\\ c_2&c_1&c_0&c_3\\ c_3&-c_0&c_1&-c_2\\ &&c_2&c_1&c_0&c_3\\ &&c_3&-c_0&c_1&-c_2\\ &&&&&&\ddots\\ &&&&c_2&c_1&c_0&c_3\\ &&&&c_3&-c_0&c_1&-c_2\\ &&&&&&c_2&c_1&c_0&c_3\\ &&&&&&c_3&-c_0&c_1&-c_2\\ \end{bmatrix} $$Where:
$$ c_0=\frac{1+\sqrt3}{4\sqrt2},\quad c_1=\frac{3+\sqrt3}{4\sqrt2},\quad c_2=\frac{3-\sqrt3}{4\sqrt2},\quad c_3=\frac{1-\sqrt3}{4\sqrt2} $$
#Construct the matrix:
# c_0 = 0.1
# c_1 = 1
# c_2 = 2
# c_3 = 3
c_0=(1+np.sqrt(3))/(4*np.sqrt(2))
c_1=(3+np.sqrt(3))/(4*np.sqrt(2))
c_2=(3-np.sqrt(3))/(4*np.sqrt(2))
c_3=(1-np.sqrt(3))/(4*np.sqrt(2))
c=np.array([[c_0,c_1,c_2,c_3],[c_3,-c_2,c_1,-c_0]])
N = 2**12
coefs = np.zeros((N,N),float)
# Fill the banded DAUB4 matrix: even rows are [c0 c1 c2 c3], odd rows are
# [c3 -c2 c1 -c0], with each pair of rows shifted right by two columns.
np.fill_diagonal(coefs,[c_0,-c_2])
np.fill_diagonal(coefs[1:],[c_3,0])
np.fill_diagonal(coefs[:,1:], [c_1,c_1])
np.fill_diagonal(coefs[:,2:], [c_2,-c_0])
np.fill_diagonal(coefs[:,3:], [c_3,0])
# The last two rows wrap around into the first two columns:
coefs[N-2,0]=c_2
coefs[N-2,1]=c_3
coefs[N-1,0]=c_1
coefs[N-1,1]=-c_0
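As an added check (a sketch of my own), the rows of the DAUB4 matrix should be orthonormal, so any block of rows times its own transpose gives an identity block:
# Sketch: the first 100 rows should be mutually orthonormal
block = coefs[:100]
np.allclose(np.matmul(block, block.transpose()), np.eye(100))   # True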
#Construct the vector:
vec = np.zeros(2**12)
vec[4] = 1    # a unit spike in coefficient 4
vec[29] = 1   # and another in coefficient 29
transform = np.dot(coefs.transpose(),vec)   # apply the inverse transform
plt.plot(np.arange(40),transform[0:40])
For a covariance matrix $\mathbf{C}_{ij}$, the covariance is $cov_{ij}=\mathbb{E}[(X_i-\mathbb{E}[X_i])(X_j-\mathbb{E}[X_j])]$. Now, because the expectation operator is linear, this can be rewritten as:
$$ cov_{ij}=\mathbb{E}[X_iX_j-\mathbb{E}[X_i]X_j-X_i\mathbb{E}[X_j]+\mathbb{E}[X_i]\mathbb{E}[X_j]]\\ =\mathbb{E}[X_iX_j]-\mathbb{E}[\mathbb{E}[X_i]X_j]-\mathbb{E}[X_i\mathbb{E}[X_j]]+\mathbb{E}[\mathbb{E}[X_i]\mathbb{E}[X_j]] $$Recall that, by definition, $\mathbb{E}[X]=0$ in this case; thus $cov_{ij}=\mathbb{E}[X_iX_j]$ off the diagonal. When $i=j$, this reduces to the variance, $\sigma^2$, known to be 1 in this case.
Thus, $cov_{11}=cov_{22}=1$. Since $x_1$ and $x_2$ are IID, we also know that $cov_{12}=cov_{21}=0$, since $\mathbb{E}[X_iX_j]=\mathbb{E}[X_i]\mathbb{E}[X_j]$ for independent variables. This leaves us with the following matrix:
$$ \begin{bmatrix} 1&0&?\\ 0&1&?\\ ?&?&? \end{bmatrix} $$Because the matrix is symmetric, working along the 3rd row entry by entry will allow us to figure this out.
Consider that $x_3=x_1+x_2$:
$$ cov_{13}=\mathbb{E}[x_1(x_1+x_2)]-\mathbb{E}[\mathbb{E}[x_1](x_1+x_2)]-\mathbb{E}[x_1\mathbb{E}[x_1+x_2]]+\mathbb{E}[\mathbb{E}[x_1]\mathbb{E}[x_1+x_2]] $$It is easy to see that the 2nd term is equal to zero, since it contains the factor $\mathbb{E}[x_1]=0$. Recall that $\mathbb{E}$ is linear, so the 3rd and 4th terms are also equal to 0, leaving us with: $\mathbb{E}[x_1^2+x_1x_2]=\mathbb{E}[x_1^2]+\mathbb{E}[x_1x_2]=\mathbb{E}[x_1^2]$, where $\mathbb{E}[x_1x_2]=\mathbb{E}[x_1]\mathbb{E}[x_2]=0$ by independence. What is $\mathbb{E}[x_1^2]$? Recall that $\sigma^2=\mathbb{E}[x^2]-\mathbb{E}[x]^2$. In the present case, that means that $\mathbb{E}[x_1^2]=\sigma^2=1$.
Since $x_1$ is statistically identical to $x_2$, $cov_{13}=cov_{23}=cov_{31}=cov_{32}$. That leaves the last entry, $cov_{33}$:
$$ cov_{33}=\mathbb{E}[x_3^2]-\mathbb{E}[x_3]^2\\ =\mathbb{E}[(x_1+x_2)^2]-\mathbb{E}[x_1+x_2]^2\\ =\mathbb{E}[x_1^2+x_2^2+2x_1x_2]\\ =\mathbb{E}[x_1^2]+\mathbb{E}[x_2^2]+2\mathbb{E}[x_1x_2]\\ cov_{33}=1+1+0=2 $$Where the second equation comes from substitution (the subtracted term vanishes because $\mathbb{E}[x_1+x_2]=0$), the third from expanding the square, and the fourth from linearity of the expectation; $\mathbb{E}[x_1x_2]=0$ because $x_1$ and $x_2$ are IID. Thus, the covariance matrix is:
$$ \begin{bmatrix} 1&0&1\\ 0&1&1\\ 1&1&2 \end{bmatrix} $$The eigenvalues for the above matrix can be found:
$$ \begin{align} \det \begin{bmatrix} 1-\lambda&0&1\\ 0&1-\lambda&1\\ 1&1&2-\lambda \end{bmatrix} =(1-\lambda)((1-\lambda)(2-\lambda)-1)+\\1(-(1-\lambda)) \end{align} $$Where multiplications by 0 have been omitted.
$$ (1-\lambda)((1-\lambda)(2-\lambda)-1)+1(-(1-\lambda))\\ =(1-\lambda)(2-2\lambda-\lambda+\lambda^2-1)+1(\lambda-1)\\ =(1-\lambda)(\lambda^2-3\lambda+1)+\lambda-1\\ =\lambda^2-3\lambda+1-\lambda^3+3\lambda^2-\lambda+\lambda-1\\ =-\lambda^3+4\lambda^2-3\lambda\\ =-\lambda(\lambda^2-4\lambda+3)\\ =-\lambda(\lambda-3)(\lambda-1) $$Thus, the eigenvalues are $\lambda=0$, $\lambda=3$, and $\lambda=1$.
# 1000 samples each of two IID standard normals
x1 = np.random.normal(size=1000)
x2 = np.random.normal(size=1000)
x3 = x1+x2
x = np.column_stack([x1,x2,x3])
cov = np.cov(x,rowvar=False)
cov
val, vec = np.linalg.eig(cov)
val
y = np.dot(np.dot(vec.transpose(),cov),vec)   # diagonalize: V^T C V
valy, vecy = np.linalg.eig(y)
valy   # should match the eigenvalues of cov
np.random.seed(15217)
s1 = np.random.uniform(size=1000)
s2 = np.random.uniform(size=1000)
s = np.column_stack([s1,s2])
plt.scatter(s1,s2)
A = np.array([[1,2],[3,1]])   # mixing matrix
x = np.matmul(A,s.transpose()).transpose()   # mixed signals, one per column
x[:,0] = (x[:,0]-np.mean(x[:,0]))/np.std(x[:,0])   # center and scale to unit variance
x[:,1] = (x[:,1]-np.mean(x[:,1]))/np.std(x[:,1])
xcov = np.cov(x,rowvar=False)
val, vec = np.linalg.eig(xcov)
x = np.dot(x,vec)   # rotate into the eigenbasis (columns of vec)
plt.scatter(x[:,0],x[:,1])
x.shape
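As an added check (my own sketch), the covariance of the rotated data should now be approximately diagonal, with the eigenvalues of `xcov` on the diagonal:
# Sketch: off-diagonal entries should be ~0 after the rotation
np.cov(x, rowvar=False)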
def ica(x):
    return np.log(np.cosh(x))

def df_ica(x):
    # d/dx of log(cosh(x)) = sinh(x)/cosh(x) = tanh(x)
    return np.tanh(x)
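As a final added sanity check (a sketch; the test points are arbitrary), df_ica should match a finite-difference derivative of ica:
# Sketch: verify df_ica is the derivative of ica
pts_test = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
eps = 1e-6
np.allclose((ica(pts_test+eps)-ica(pts_test-eps))/(2*eps), df_ica(pts_test))   # True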