Transforms pset

compressed sensing

In [35]:
# IMPORTS
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
import time
In [22]:
%config InlineBackend.figure_format = 'svg'

a

PLOT DTMF TONE

In [430]:
pi = np.pi
def toneInTime(t, freq1=697,freq2=1209):
    """plot time series tone with two frequencies:
    freq 1 in Hz, default=697
    freq2 in Hz, default = 1209
    t: times
    returns np.array"""
    return((np.sin(2*pi*freq1*t)+np.sin(2*pi*freq2*t)))
In [431]:
startTime = 0
stopTime = 0.25
N = 2500
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
In [432]:
plt.plot(t, tone_t)
plt.title("N=2500, t=0.25s")
plt.show()
2023-04-20T13:53:39.426718 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [397]:
# Let's have a listen. 

f_s = N/stopTime # Sampling frequency: samples/total duration

# extend the tone -- this method introduces artifacts where the sound is chunked on.
duration = 2 #seconds
tone_list = [tone_t]
iters = int(duration/stopTime)
for i in range(iters):
    tone_list.append(tone_t)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(tone_long,f_s)

time.sleep(1.0)

sd.stop()
[0.         1.11311041 1.76703328 ... 0.69094714 1.63043029 2.        ]
In [433]:
# Let's try to do a better job of dropping the artifacts

f_s = N/stopTime # Sampling frequency: samples/total duration

# extend the tone -- slightly less clicking
duration = 2 #seconds
tone_interval = tone_t[0:(N-19)]
print(1/697)
tone_list = [tone_interval]
iters = int(duration/stopTime)
for i in range(3):
    tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(tone_long,f_s)

time.sleep(1.0)

sd.stop()
0.0014347202295552368
[ 0.          1.11311041  1.76703328 ... -1.94364225 -1.66161234
 -0.75019496]
In [399]:
# Let's zoom and enhance lmao
startTime = 0
stopTime = 0.01
N = 500
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
plt.plot(t, tone_t)
plt.title("N=750, t=0.01s")
plt.show()
2023-04-20T01:18:56.826209 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [400]:
# Let's zoom in w/ the prior sampling rate, for clarity
startTime = 0
stopTime = 0.025
N = 250
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)
plt.plot(t, tone_t)
plt.title("N=250, t=0.025s")
plt.show()
2023-04-20T01:18:58.877231 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/

b

DCT

In [403]:
def Dij(i,j,N):
    if i==0:
        return np.sqrt(1/N)
    else:
        return np.sqrt(2/N)*np.cos((pi*(2*j+1)*i)/(2*N))

DijMatrix = np.zeros((N,N))

for i in range(N):
    for j in range(N):
        DijMatrix[i][j]=Dij(i,j,N)

startTime = 0
stopTime = 0.25
N = 2500
t = np.linspace(startTime,stopTime, num=N)
tone_t = toneInTime(t)        

f = np.dot(DijMatrix, tone_t)
In [404]:
# and there's the tones
plt.title("DCT coeffs")
plt.plot(f)
plt.show()
2023-04-20T01:19:32.573453 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/

C

inverse DCT

In [405]:
DijMatrix = np.zeros((N,N))
for i in range(N):
    for j in range(N):
        DijMatrix[i][j]=Dij(i,j,N)

DijMatrixT = DijMatrix.transpose()

ti = np.dot(DijMatrixT, f)
In [406]:
plt.title("Reconstructed signal, 0.25 second duration")
plt.plot(t, ti)
plt.show()
2023-04-20T01:19:49.253736 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [163]:
plt.title("Reconstructed signal, 0.025 second duration")
plt.plot(t[:(int(N/10))], ti[:(int(N/10))])
plt.show()
2023-04-19T18:34:48.153061 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/

d

random sampling of 125 points

In [407]:
M = 125

i_rand = np.random.choice(np.arange(N),M, replace=True)
randomTime = t[i_rand]
randomSampleVals = toneInTime(randomTime)
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, tone_t)
plt.show()
2023-04-20T01:19:49.400940 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/

e

gradient descent

In [408]:
def getFrequencies(tone_t,N):
    DijMatrix = np.zeros((N,N))
    for i in range(N):
        for j in range(N):
            DijMatrix[i][j]=Dij(i,j,N)
    f = np.dot(DijMatrix, tone_t)
    return f, DijMatrix
def getTimes(f):
    time_series = np.dot(DijMatrix.T,f)
    return time_series
In [300]:
initialGuess = np.random.rand(N)
rate = 0.01
# print(DijMatrix.shape)
def loss(x):
    l = np.sum((randomSampleVals-getTimes(x))**2)
    return l

def approx_gradient(x, rate):
    x = x +2*rate*(np.dot(DijMatrixT,(randomSampleVals-getTimes(x))))
    return x

def gradient_descent(rate, update_rule, initial_x, iter=100):
    x = initial_x
    # Loop through with gradient descent. 
    for i in range(iter):
        # Update the parameters using update rule.
        if loss(x)<=0.001:
            break
        x = update_rule(x, rate)
    return x, loss(x)

f_opt, loss_f = gradient_descent(rate,approx_gradient,initialGuessRand)
print(loss_f)
13199.317637152426
In [302]:
re_constructed_time = np.dot(DijMatrixT,f_opt)
print(re_constructed_time.shape)
# plt.plot(t, np.dot(DijMatrixT, f), 'k--',label='true')
# plt.scatter(randomTime,randomSampleVals, color='red',label='random points')
# plt.plot(t, re_constructed_time,label='grad descent')
# plt.ylim(-5,5)
# # plt.legend()
# # plt.title("oops,looks cool")
# plt.show()
(2500, 125)
In [290]:
re_constructed_time = np.dot(DijMatrixT,f_opt)
plt.plot(t, np.dot(DijMatrixT, f), 'k--',label='true')
plt.scatter(randomTime,randomSampleVals, color='red',label='random points')
plt.plot(t, re_constructed_time,label='grad descent')
plt.ylim(-5,5)
# plt.legend()
plt.title("oops,looks cool")
plt.show()
2023-04-20T00:53:49.007995 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [409]:
randomIndex = np.random.randint(0,N,M)
freq1=697
freq2=1209
#randomTime = stopTime * np.random.rand(M)
randomTime = t[randomIndex]
randomSampleVals = (np.sin(2*pi*freq1*randomTime)+np.sin(2*pi*freq2*randomTime))
randomSampleVals = randomSampleVals.reshape(M,1)
initialGuessRand = np.random.rand(N)
initialGuessRand = initialGuessRand.reshape(N,1)
rate = 0.01

DijMatrixM = np.zeros((M,M))
for i in range(M):
    for j in range(M):
        DijMatrixM[i][j]=Dij(i,j,M)
DijMatrixMT = DijMatrixM.transpose()
#print(DijMatrixMT.shape)
#print(((randomSampleVals-f_prime_to_t(initialGuessRand,randomIndex))).shape)


def f_prime_to_t(x, randomIndex):
    time_series = np.dot(DijMatrixT,x)
    return time_series[randomIndex]

def f_prime_to_t_l(x, randomIndex):
    vals = f_prime_to_t(x, randomIndex)
    #print(vals.shape)
    res = np.zeros(N)
    res = res.reshape(N,1)
    res[randomIndex]=vals
    return res
randomSampleValMat = np.zeros((N,1))
randomSampleValMat[randomIndex] = randomSampleVals

def loss(x):
    res = np.sum((randomSampleVals-f_prime_to_t(x,randomIndex))**2)
    return res

#iter was 1000 
def gradient_descent(rate, update_rule, initial_x, iter=1000):
  """gradient descent algorithm 
  @params: 
  - rate (float): eta variable of gradient descent.
  - update_rule: a function with a signature update_rule(x, rate). 
  - initial_x: initial position for gradient descent. 
  - iter: number of iterations to run gradient descent for.
  """
  x = initial_x
  x_list = []
  #x_list.append([x, loss(x)])
  #x_list.append(x)
  # Loop through with gradient descent. 
  for i in range(iter):
    # Update the parameters using update rule.
    if loss(x)<=0.001:
        break
    x = update_rule(x, rate)
    #x -= -2*rate*
    #x_list.append(x)
  #return np.array(x_list)
  return x, loss(x)

def approximated_gradient(x, rate):
  """
  Update rule. Receive theta and update it with the next theta.
  @params
  - x: input variable x.
  - rate: rate of descent, variable "eta". 
  @returns:
  - updated varaible x. 
  """
  #w = np.random.normal(0, (0.25)**0.5, N)
  #w = 0.8*np.random.rand(1)*np.ones(N)
  #print(w.shape)
  #x = x - rate*(loss(x+w)-loss(x))*w
  #x = x -2*rate*(np.dot(DijMatrixM,(randomSampleVals-f_prime_to_t(x,randomIndex))))
  x = x +2*rate*(np.dot(DijMatrixT,(randomSampleValMat-f_prime_to_t_l(x,randomIndex))))
  #tempM = randomSampleVals-f_prime_to_t(x,randomIndex)
  #x = x - 2*rate*(randomSampleVals-f_prime_to_t(x,randomIndex))
  #x = x - rate*(loss(x+w)-(0.5*np.ones(N)))*w
  return x

f_opt, loss_f = gradient_descent(rate,approximated_gradient,initialGuessRand,1000)
re_constructed_time = np.dot(DijMatrixT,f_opt)
print(loss_f)
#print(f_opt.shape)
#print(f_opt[-1][1])
#print(f_opt[1000][1])
#plt.plot(f_opt[-1][0])
# plt.plot(t, re_constructed_time)
plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed',label='true')
plt.scatter(randomTime,randomSampleVals, color='red',label='random')
plt.plot(t, re_constructed_time,label='grad')
plt.legend()
plt.title("grad descent")
plt.ylim(-5,5)
plt.show()
0.00099199904061381
2023-04-20T01:19:51.283990 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [438]:
#yikes! want to hear it?
duration = 2 #seconds
tone_interval = re_constructed_time[:]
print(1/697)
tone_list = [tone_interval]
iters = int(duration/stopTime)
# for i in range(1):
#     tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
# print(tone_long)
sd.play(tone_long,f_s)

time.sleep(1.0)

sd.stop()
#no you didn't <3
0.0014347202295552368

L2 Norm

In [415]:
def lossL1(x):
    res = np.sum((randomSampleVals-np.sum(np.dot(DijMatrixT, x)))**2)+np.sum(x**2)
    return res
    
#iter was 1000 
def gradient_descent_L1(rate, update_rule, initial_x, iter=1000):
  """gradient descent algorithm 
  @params: 
  - rate (float): eta variable of gradient descent.
  - update_rule: a function with a signature update_rule(x, rate). 
  - initial_x: initial position for gradient descent. 
  - iter: number of iterations to run gradient descent for.
  """
  x = initial_x
  # Loop through with gradient descent. 
  for i in range(iter):
    # Update the parameters using update rule.
    if lossL1(x)<=0.001:
        break
    x = update_rule(x, rate)

  return x, loss(x)

def L1_gradient(x, rate):
  """
  Update rule. Receive theta and update it with the next theta.
  @params
  - x: input variable x.
  - rate: rate of descent, variable "eta". 
  @returns:
  - updated varaible x. 
  """
  x = x -rate*(-2*(np.dot(DijMatrixT,(randomSampleValMat-f_prime_to_t_l(x,randomIndex)))) + 2*x)
  return x

f_opt_L1, loss_f_L1 = gradient_descent_L1(rate,L1_gradient,initialGuessRand,1000)
re_constructed_time_L1 = np.dot(DijMatrixT,f_opt_L1)
print(loss_f_L1)

# plt.plot(t, re_constructed_time_L1)
plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, re_constructed_time_L1)
plt.ylim(-4,4)
plt.show()
29.183962573807257
2023-04-20T01:20:35.255527 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [414]:
sd.play(re_constructed_time_L1,f_s)

time.sleep(1.0)

sd.stop()

L1

In [429]:
rate=0.01
def lossL2(x):
    res = np.sum((randomSampleVals-np.sum(np.dot(DijMatrixT, x)))**2)+np.sum(np.abs(x))
    return res
    
#iter was 1000 
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
  """gradient descent algorithm 
  @params: 
  - rate (float): eta variable of gradient descent.
  - update_rule: a function with a signature update_rule(x, rate). 
  - initial_x: initial position for gradient descent. 
  - iter: number of iterations to run gradient descent for.
  """
  x = initial_x
  # Loop through with gradient descent. 
  for i in range(iter):
    # Update the parameters using update rule.
    if lossL1(x)<=0.001:
        break
    x = update_rule(x, rate)

  return x, loss(x)

def L2_gradient(x, rate):
  """
  Update rule. Receive theta and update it with the next theta.
  @params
  - x: input variable x.
  - rate: rate of descent, variable "eta". 
  @returns:
  - updated varaible x. 
  """
  x = x -rate*(-2*(np.dot(DijMatrixT,(randomSampleValMat-f_prime_to_t_l(x,randomIndex)))) + 2*np.sign(x))
  return x

f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)


plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, re_constructed_time_L2)
plt.ylim(-4,4)
plt.show()
92.33568190096946
2023-04-20T01:22:53.003342 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [428]:
duration = 2 #seconds
tone_interval = re_constructed_time_L2[0:(N-19)]
print(1/697)
tone_list = [tone_interval]
iters = int(duration/stopTime)
for i in range(3):
    tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(tone_long,f_s)

time.sleep(1.0)

sd.stop()
0.0014347202295552368
[[-0.07031869]
 [ 0.03194066]
 [ 0.08155162]
 ...
 [-0.02625613]
 [-0.0394443 ]
 [ 0.00558535]]
In [377]:
def lossL2(x):
    res = np.sum((randomSampleVals-np.sum(np.dot(DijMatrixT, x)))**2)+np.sum(np.abs(x))
    return res
    
#iter was 1000 
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
  """gradient descent algorithm 
  @params: 
  - rate (float): eta variable of gradient descent.
  - update_rule: a function with a signature update_rule(x, rate). 
  - initial_x: initial position for gradient descent. 
  - iter: number of iterations to run gradient descent for.
  """
  x = initial_x
  # Loop through with gradient descent. 
  for i in range(iter):
    # Update the parameters using update rule.
    if lossL1(x)<=0.001:
        break
    x = update_rule(x, rate)

  return x, loss(x)

def L2_gradient(x, rate):
  """
  Update rule. Receive theta and update it with the next theta.
  @params
  - x: input variable x.
  - rate: rate of descent, variable "eta". 
  @returns:
  - updated varaible x. 
  """
  x = x -rate*(-2*(np.dot(DijMatrixT,(randomSampleValMat-f_prime_to_t_l(x,randomIndex)))) + 2*np.sign(x))
  return x

f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)


plt.plot(t, np.dot(DijMatrixT, f), color='black', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.plot(t, re_constructed_time_L2)
plt.ylim(-4,4)
plt.show()
46.04959768186768
2023-04-20T01:16:47.382416 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [392]:
duration = 2 #seconds
tone_interval = re_constructed_time_L2[0:(N-19)]
print(1/697)
tone_list = [tone_interval]
iters = int(duration/stopTime)
for i in range(3):
    tone_list.append(tone_interval)
tone_long = np.concatenate(tone_list)
print(tone_long)
sd.play(2*tone_long,f_s)

time.sleep(1.0)

sd.stop()
0.0014347202295552368
[[0.57318085]
 [0.59310424]
 [0.58492223]
 ...
 [1.0403446 ]
 [1.0422879 ]
 [1.04007756]]
In [393]:
N = 300
pi = np.pi
startTime = 0
stopTime = 0.01
t = np.linspace(startTime,stopTime, num=N)
freq1 = 697 #Hz
freq2 = 1209 #Hz

toneInTime = (np.sin(2*pi*freq1*t)+np.sin(2*pi*freq2*t))

def Dij(i,j,N):
    if i==0:
        return np.sqrt(1/N)
    else:
        return np.sqrt(2/N)*np.cos((pi*(2*j+1)*i)/(2*N))

DijMatrix = np.zeros((N,N))

for i in range(N):
    for j in range(N):
        DijMatrix[i][j]=Dij(i,j,N)


f = np.dot(DijMatrix, toneInTime)

DijMatrix = np.zeros((N,N))
for i in range(N):
    for j in range(N):
        DijMatrix[i][j]=Dij(i,j,N)

DijMatrixT = DijMatrix.transpose()

ti = np.dot(DijMatrixT, f)

M = 100

randomIndex = np.random.randint(0,N,M)
#randomTime = stopTime * np.random.rand(M)
randomTime = t[randomIndex]
randomSampleVals = (np.sin(2*pi*freq1*randomTime)+np.sin(2*pi*freq2*randomTime))
randomSampleVals = randomSampleVals.reshape(M,1)

initialGuessRand = np.random.rand(N)
initialGuessRand = initialGuessRand.reshape(N,1)
rate = 0.01

DijMatrixM = np.zeros((M,M))
for i in range(M):
    for j in range(M):
        DijMatrixM[i][j]=Dij(i,j,M)
DijMatrixMT = DijMatrixM.transpose()

def lossL2(x):
    res = np.sum((randomSampleVals-np.sum(np.dot(DijMatrixT, x)))**2)+np.sum(np.abs(x))
    return res
    
#iter was 1000 
def gradient_descent_L2(rate, update_rule, initial_x, iter=1000):
  """gradient descent algorithm 
  @params: 
  - rate (float): eta variable of gradient descent.
  - update_rule: a function with a signature update_rule(x, rate). 
  - initial_x: initial position for gradient descent. 
  - iter: number of iterations to run gradient descent for.
  """
  x = initial_x
  # Loop through with gradient descent. 
  for i in range(iter):
    # Update the parameters using update rule.
    if lossL1(x)<=0.001:
        break
    x = update_rule(x, rate)

  return x, loss(x)

def L2_gradient(x, rate):
  """
  Update rule. Receive theta and update it with the next theta.
  @params
  - x: input variable x.
  - rate: rate of descent, variable "eta". 
  @returns:
  - updated varaible x. 
  """
  x = x -rate*(-2*(np.dot(DijMatrixT,(randomSampleValMat-f_prime_to_t_l(x,randomIndex)))) + 2*np.sign(x))
  return x


f_opt_L2, loss_f_L2 = gradient_descent_L2(rate,L2_gradient,initialGuessRand,1000)
re_constructed_time_L2 = np.dot(DijMatrixT,f_opt_L2)
print(loss_f_L2)

plt.plot(t, re_constructed_time_L2)
plt.plot(t, np.dot(DijMatrixT, f), color='blue', linestyle='dashed')
plt.scatter(randomTime,randomSampleVals, color='red')
plt.ylim(-4,4)
plt.show()
62.95212229353828
2023-04-20T01:17:18.722469 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/

Daubechies

wavelet. referenced numerical recipes chapter 13 wavelet transforms

In [170]:
import numpy as np
import matplotlib.pyplot as plt
In [250]:
from scipy.linalg import circulant
In [251]:
L = 2**12
x = np.zeros(L)
x[4] = 1
x[29] = 1

# Daub4 coeffs
c0 = (1+np.sqrt(3))/(4*np.sqrt(2))
c1 = (3+np.sqrt(3))/(4*np.sqrt(2))
c2 = (3-np.sqrt(3))/(4*np.sqrt(2))
c3 = (1-np.sqrt(3))/(4*np.sqrt(2))

for i in range(12):
    N = 2**(i+1) #stepping though starting from the final column, with only 2 S.
    x_new = np.zeros(L)
    
    # apply inverse matrix
    
    # permute
    temp_x = np.copy(x)
    S = temp_x[:int(N/2)] #the first 'two' values.
    d = temp_x[int(N/2):N] #everything else has already been mixed/cleaned/whatever
    
#     for j in range(1,N,2):
#         x_new[j] = 
[[ 0.48296291 -0.12940952  0.22414387  0.8365163 ]
 [ 0.8365163   0.48296291 -0.12940952  0.22414387]
 [ 0.22414387  0.8365163   0.48296291 -0.12940952]
 [-0.12940952  0.22414387  0.8365163   0.48296291]]

12.4 random pairs

In [175]:
# generate random pairs s1,s2 within [0,1]
N = 100
s = np.random.uniform(size=(500,2))
# print(s)
plt.plot(s[:,0],s[:,1],'ko')
plt.show()
2023-04-19T23:28:32.349029 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [180]:
A = np.array([[1,2],[3,1]])
# print(A)
mixed = A.dot(s.T)
plt.plot(mixed[0],mixed[1],'ko')
plt.show()
2023-04-19T23:30:11.198261 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [231]:
mixed_m = mixed - np.mean(mixed,keepdims=True)
# print(mixed_m)
covariance = np.cov(mixed_m)
l,v = np.linalg.eig(covariance) 
new = np.dot((v/np.sqrt(l)),mixed_m) # apply transform
print(new.shape)
# print(new[:,0])
plt.plot(new[0,:],new[1,:],'ko')
plt.show()
(2, 500)
2023-04-19T23:55:06.014458 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/
In [233]:
from sympy import *
In [234]:
x = symbols('x')
f = log(cosh(x))
f
Out[234]:
$\displaystyle \log{\left(\cosh{\left(x \right)} \right)}$
In [243]:
dfdx = Derivative(f,x).doit()
# dfdx.doit()
dfdx
Out[243]:
$\displaystyle \frac{\sinh{\left(x \right)}}{\cosh{\left(x \right)}}$
In [244]:
df2 = Derivative(dfdx,x).doit()
# df2.doit()
df2
Out[244]:
$\displaystyle - \frac{\sinh^{2}{\left(x \right)}}{\cosh^{2}{\left(x \right)}} + 1$
In [241]:
lam_f = lambdify(x, f)
lam_df = lambdify(x,dfdx)
lam_df2 = lambdify(x,df2)
In [249]:
w = np.random.random((2,1))
# lam_f(w)
iters = 500

for i in range(iters):
    w_new = np.zeros((2,1))
    w_new = np.mean(new*lam_df(np.dot(w.T,new)),keepdims=True) - w*np.mean(lam_df2(np.dot(w.T,new)), keepdims=True)
    w = w_new/np.linalg.norm(w_new)
# w
plt.plot(new[0,:],new[1,:],'ko')
plt.plot(mixed[0],mixed[1],'ro')
plt.plot(s[:,0],s[:,1],'bo')
print(w)
plt.plot([0,w[0,0]],[0,w[1,0]],'g-')
plt.plot([0,-w[1,0]],[0,w[0,0]],'g-')
plt.show()
[[-7.04199538e-04]
 [-9.99999752e-01]]
Out[249]:
[<matplotlib.lines.Line2D at 0x11febe0b0>]
2023-04-19T23:59:56.442040 image/svg+xml Matplotlib v3.6.3, https://matplotlib.org/