import numpy as np
import matplotlib.pyplot as plt
%config InlineBackend.figure_format = 'svg'
x = np.random.rand(100)
zeta = np.random.normal(0.0,0.5,100)
y = 2 + 3*x + zeta
plt.plot(x,y,'ro')
plt.title('the random dataset')
plt.show()
A = np.stack((np.ones_like(x),x),axis=1)
u,s,vh = np.linalg.svd(A,full_matrices=False)
print("answer is: ")
(vh.T*(1/s)@(u.T)).dot(y)
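# Sanity check (added): np.linalg.lstsq solves the same least-squares problem
# directly, so it should reproduce the pseudoinverse result above.
coef, *_ = np.linalg.lstsq(A, y, rcond=None)
print("lstsq check:", coef)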
# repackage into a function
def getSol(x, y):
    """Least-squares fit of y ≈ a*x + b via the SVD pseudoinverse."""
    A = np.stack((np.ones_like(x), x), axis=1)
    u, s, vh = np.linalg.svd(A, full_matrices=False)
    b, a = (vh.T * (1/s)) @ u.T @ y  # columns of A are [1, x], so intercept first
    return a, b, u, s, vh
# ok function works lol
a,b,u,s,vh = getSol(x,y)
x_s = np.linspace(0,1,100)
y_s = a*x_s + b
plt.plot(x,y,'ro',label="dataset")
plt.plot(x_s,y_s,'k--',label="SVD solution")
plt.legend()
plt.show()
# now estimate the parameter uncertainties
# part 1: known error model
print("known error model")
print("a, b:")
print(a, b)
sigma = 0.5  # the known noise level used to generate the data
# standard errors are the sqrt of the diagonal of Cov = sigma^2 * V S^-2 V^T;
# the order matches the columns of A: intercept b first, then slope a
print("b_error, a_error:")
print(sigma * np.sqrt(np.sum((vh.T / s)**2, axis=1)))
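# Cross-check (a sketch, not in the original): build the covariance matrix
# Cov = sigma^2 * V S^-2 V^T explicitly and read the errors off its diagonal;
# this should match the line above.
cov = sigma**2 * (vh.T / s**2) @ vh
print("cross-check:", np.sqrt(np.diag(cov)))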
# part 2, bootstrap
bootstrap_params = np.zeros((100, 2))
for i in range(100):
    # resample the dataset with replacement and refit
    i_rand = np.random.choice(np.arange(100), 100, replace=True)
    x_new = x[i_rand]
    y_new = y[i_rand]
    a, b, u, s, vh = getSol(x_new, y_new)
    bootstrap_params[i, 0] = a
    bootstrap_params[i, 1] = b
print("a error:")
print(np.std(bootstrap_params[:, 0]))
print("b error:")
print(np.std(bootstrap_params[:, 1]))
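# Alternative (a sketch): a 68% bootstrap percentile interval, which should be
# consistent with the std-based errors above when the bootstrap distribution
# is roughly Gaussian.
print("a 68% interval:", np.percentile(bootstrap_params[:, 0], [16, 84]))
print("b 68% interval:", np.percentile(bootstrap_params[:, 1], [16, 84]))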
# part 3, 100 ensembles
params = np.zeros((100, 2))
for i in range(100):
    # draw a fresh synthetic dataset from the same model and refit
    x = np.random.rand(100)
    zeta = np.random.normal(0.0, 0.5, 100)
    y = 2 + 3*x + zeta
    a, b, u, s, vh = getSol(x, y)
    params[i, 0] = a
    params[i, 1] = b
print("a error:")
print(np.std(params[:, 0]))
print("b error:")
print(np.std(params[:, 1]))
boot_a = np.mean(bootstrap_params[:,0])
boot_b = np.mean(bootstrap_params[:,1])
boot_y = boot_a*x_s+boot_b
ens_a = np.mean(params[:,0])
ens_b = np.mean(params[:,1])
ens_y = ens_a*x_s+ens_b
plt.plot(x,y,'ro',label="dataset")
plt.plot(x_s,boot_y,'k',label="boot")
plt.plot(x_s, ens_y, 'g',label="ensemble")
plt.plot(x_s,y_s,'b',label="SVD solution")
plt.legend()
plt.show()
x = np.random.rand(100)
zeta = np.random.normal(0.0,0.1,100)
y = np.sin(2+3*x) + zeta
plt.plot(x,y,'o')
plt.title("our new data set")
plt.show()
def LevMar(x, y, a, b, lamba, adaptive=False):
    """Levenberg-Marquardt fit of y ≈ sin(a*x + b).

    `lamba` (spelled to dodge the `lambda` keyword) is the damping parameter;
    with adaptive=True it grows/shrinks by sqrt(2) depending on progress.
    """
    maxIter = 5000
    eps = 1.1  # target SSE; roughly N*sigma^2 = 100*0.1^2 = 1 for this data
    i = 0
    M = np.zeros((2, 2))
    H = np.zeros((2, 2))
    gr = np.zeros((2,))
    err = np.sum((y - np.sin(a*x + b))**2)
    while (err > eps) and (i < maxIter):
        # gradient of the SSE; index 0 is d/db, index 1 is d/da
        gr[0] = -2*np.sum((y - np.sin(a*x + b))*np.cos(a*x + b))
        gr[1] = -2*np.sum((y - np.sin(a*x + b))*x*np.cos(a*x + b))
        # Gauss-Newton Hessian of the SSE (argument is a*x + b throughout)
        H[0, 0] = 2*np.sum(np.cos(a*x + b)**2)
        H[1, 1] = 2*np.sum(x**2 * np.cos(a*x + b)**2)
        H[0, 1] = 2*np.sum(x * np.cos(a*x + b)**2)
        H[1, 0] = H[0, 1]
        # damped system: diagonal inflated by (1 + lamba)
        M[0, 0] = 1/2 * H[0, 0] * (1 + lamba)
        M[1, 1] = 1/2 * H[1, 1] * (1 + lamba)
        M[0, 1] = 1/2 * H[0, 1]
        M[1, 0] = M[0, 1]
        # solve for the step and update (ordered like gr: [db, da])
        D = -np.linalg.solve(M, gr)
        b += D[0]
        a += D[1]
        new_err = np.sum((y - np.sin(a*x + b))**2)
        # adjust the damping: grow it if the error went up, shrink it otherwise
        if adaptive:
            if new_err > err:
                lamba = lamba * np.sqrt(2)
            elif new_err < err:
                lamba = lamba / np.sqrt(2)
        err = new_err
        i += 1
    return a, b, i, err
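# Quick check (added, hedged): compare the analytic gradient used inside LevMar
# against a central finite difference at a test point; this catches sign and
# argument-order slips in the derivatives.
def sse(a, b):
    return np.sum((y - np.sin(a*x + b))**2)

a0, b0, h = 1.0, 1.0, 1e-6
g_analytic = np.array([
    -2*np.sum((y - np.sin(a0*x + b0))*np.cos(a0*x + b0)),    # dE/db
    -2*np.sum((y - np.sin(a0*x + b0))*x*np.cos(a0*x + b0)),  # dE/da
])
g_numeric = np.array([
    (sse(a0, b0 + h) - sse(a0, b0 - h)) / (2*h),
    (sse(a0 + h, b0) - sse(a0 - h, b0)) / (2*h),
])
print("gradient check:", g_analytic, "vs", g_numeric)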
# Are things working?
a = 1
b = 1
lamba = 1
a,b,i,err = LevMar(x,y,a,b,lamba)
print(a,b,i,err)
# Yes
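# Cross-check (a sketch, assuming scipy is available): curve_fit should land on
# essentially the same parameters for this model and starting point.
from scipy.optimize import curve_fit
popt, pcov = curve_fit(lambda t, a, b: np.sin(a*t + b), x, y, p0=[1.0, 1.0])
print("curve_fit a, b:", popt)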
x_s = np.linspace(0,1,100)
y_s = np.sin(b+a*x_s)
plt.plot(x,y,'o')
plt.plot(x_s,y_s)
plt.show()
lambdas = [0.1,1.0,10,100]
iters = []
errs = []
colors = plt.cm.plasma(np.linspace(0,1,len(lambdas)))
for j in range(len(lambdas)):
    lamba = lambdas[j]
    a = 1
    b = 1
    a, b, i, err = LevMar(x, y, a, b, lamba)
    iters.append(i)
    errs.append(err)
    x_s = np.linspace(0, 1, 100)
    y_s = np.sin(a*x_s + b)
    plt.plot(x_s, y_s, color=colors[j])
# plt.plot(x,y,'o')
plt.title("fixed lambda")
plt.show()
print(errs)
print(iters)
lambdas = [0.1,1.0,10,100]
iters = []
errs = []
colors = plt.cm.plasma(np.linspace(0,1,len(lambdas)))
for j in range(len(lambdas)):
    lamba = lambdas[j]
    a = 1
    b = 1
    a, b, i, err = LevMar(x, y, a, b, lamba, adaptive=True)
    iters.append(i)
    errs.append(err)
    x_s = np.linspace(0, 1, 100)
    y_s = np.sin(a*x_s + b)
    plt.plot(x_s, y_s, color=colors[j], label=lamba)
# plt.plot(x,y,'o')
plt.title("adaptive lambda (initial lambda sweep)")
plt.legend()
plt.show()
print(errs)
print(iters)
lambdas = [10,100,1000,10000]
iters = []
errs = []
colors = plt.cm.plasma(np.linspace(0,1,len(lambdas)))
for j in range(len(lambdas)):
    lamba = lambdas[j]
    a = 1
    b = 1
    a, b, i, err = LevMar(x, y, a, b, lamba, adaptive=True)
    iters.append(i)
    errs.append(err)
    x_s = np.linspace(0, 1, 100)
    y_s = np.sin(a*x_s + b)
    plt.plot(x_s, y_s, color=colors[j], label=lamba)
# plt.plot(x,y,'o')
plt.title("adaptive lambda (larger initial lambdas)")
plt.legend()
plt.show()
print(errs)
print(iters)
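# Summary sketch (added): how the iteration count depends on the initial lambda
# for the adaptive runs just finished.
plt.semilogx(lambdas, iters, 'o-')
plt.xlabel("initial lambda")
plt.ylabel("iterations")
plt.title("adaptive scheme: cost of a bad initial lambda")
plt.show()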
from sympy import *

x = Symbol('x')                     # the random variable x
p = Function('p')(x)                # probability distribution
n = symbols('n', integer=True)      # number of constraints
i = Idx('i', n)                     # constraint index
lambda_i = IndexedBase('lambda_i')  # Lagrange multipliers
f_i = IndexedBase('f_i')            # constraint functions (a little wrong: they should also depend on x)
lambda0 = Symbol('lambda0')         # Lagrange multiplier for normalization
S = -integrate(p*log(p), (x, -oo, oo))  # entropy
L = S + lambda0*(integrate(p, (x, -oo, oo)) - 1) + Sum(lambda_i[i]*integrate(p*f_i[i], (x, -oo, oo)), i)
# since L has no p'(x) term, the functional derivative is just d/dp of the integrand:
# https://math.stackexchange.com/questions/1137134/derivative-of-an-integral-with-respect-to-a-function
L_ni = -p*log(p) + lambda0*p + Sum(lambda_i[i]*p*f_i[i], i)
dLdp = diff(L_ni, p)  # diff(L, p) does not evaluate the way I expect, so use L_ni
# (derivatives wrt the Lagrange multipliers just give back the constraints, so skip them)
sol = solveset(dLdp, p)
sol
# no luck -- try a discrete version instead
n = symbols('n', integer=True)      # number of outcomes / constraints
x = Idx('x', n)                     # the random variable, now a discrete index
p = IndexedBase('p')                # probability distribution
i = Idx('i', n)                     # constraint index
lambda_i = IndexedBase('lambda_i')  # Lagrange multipliers
f_i = IndexedBase('f_i')            # constraint functions
lambda0 = Symbol('lambda0')         # Lagrange multiplier for normalization
S = -Sum(p[x]*log(p[x]), x)  # entropy
L = S + lambda0*(Sum(p[x], x) - 1) + Sum(lambda_i[i]*Sum(p[x]*f_i[i, x], x), i)
dLdp = diff(L, p[x])  # still not evaluating the way I expect
# (again, derivatives wrt the Lagrange multipliers just give back the constraints)
dLdp.doit()
# no good
# simplify: indefinite integrals and only the normalization constraint
x = Symbol('x')              # the random variable x
p = Function('p')(x)         # probability distribution
lambda0 = Symbol('lambda0')  # Lagrange multiplier for normalization
S = -integrate(p*log(p), x)  # entropy
L = S + lambda0*(integrate(p, x) - 1)
dLdp = diff(L, p)  # still not evaluating the way I expect
sol = solveset(dLdp, p)
L
# try again with p left unapplied, differentiating wrt p(x)
x = Symbol('x')                     # the random variable x
p = Function('p')                   # probability distribution (unapplied)
n = symbols('n', integer=True)      # number of constraints
i = Idx('i', n)                     # constraint index
lambda_i = IndexedBase('lambda_i')  # Lagrange multipliers
f_i = IndexedBase('f_i')            # constraint functions
lambda0 = Symbol('lambda0')         # Lagrange multiplier for normalization
S = -integrate(p(x)*log(p(x)), (x, -oo, oo))  # entropy
L = S + lambda0*(integrate(p(x), (x, -oo, oo)) - 1) + Sum(lambda_i[i]*integrate(p(x)*f_i[i], (x, -oo, oo)), i)
dLdp = diff(L, p(x))  # still not evaluating the way I expect
dLdp
# one more try: keep the derivative unevaluated, then force it with doit()
x = Symbol('x')                     # the random variable x
p = Function('p')(x)                # probability distribution
n = symbols('n', integer=True)      # number of constraints
i = Idx('i', n)                     # constraint index
lambda_i = IndexedBase('lambda_i')  # Lagrange multipliers
f_i = IndexedBase('f_i')            # constraint functions
lambda0 = Symbol('lambda0')         # Lagrange multiplier for normalization
S = -integrate(p*log(p), (x, -oo, oo))  # entropy
L = S + lambda0*(integrate(p, (x, -oo, oo)) - 1) + Sum(lambda_i[i]*integrate(p*f_i[i], (x, -oo, oo)), i)
# drop the integrals, as before:
# https://math.stackexchange.com/questions/1137134/derivative-of-an-integral-with-respect-to-a-function
L_ni = -p*log(p) + lambda0*p + Sum(lambda_i[i]*p*f_i[i], i)
dLdp = Derivative(L_ni, p)
sol = solveset(dLdp, p)
dLdp.doit()
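# A sketch that does evaluate (under the assumption that the functional
# derivative can be taken pointwise, since the Lagrangian has no p'(x) term):
# treat p and one representative constraint value as plain scalars; lam1*f1
# stands in for the whole sum over i.
p_, lam0, lam1, f1 = symbols('p lambda0 lambda1 f1', positive=True)
integrand = -p_*log(p_) + lam0*p_ + lam1*f1*p_
stationarity = diff(integrand, p_)     # -log(p) - 1 + lambda0 + lambda1*f1
print(solve(Eq(stationarity, 0), p_))  # [exp(lambda0 + lambda1*f1 - 1)]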
The next problem states: "Now consider the reverse situation. Let's say that we know that [...] was drawn from a Gaussian distribution with variance $\sigma^2$ ..." Since the Gaussian is exactly the maximum-entropy distribution under a known-variance constraint, the answer to this problem is the Gaussian distribution. 😉