import math

import matplotlib.pyplot as plt
import numpy as np
def factorial(n):
    """Return n! as a float; 1.0 for n <= 0 (empty-product convention).

    Uses math.factorial for exact integer arithmetic: the original
    np.product was removed in NumPy 2.0 and silently overflowed int64
    for n > 20.
    """
    return float(math.factorial(n)) if n > 0 else 1.0
def taylor_N(N):
    """Return np.exp evaluated at the integers 0..N-1.

    Bug fix: the original body referenced the undefined name ``n``
    (NameError at call time); the parameter is ``N``.
    NOTE(review): despite the name, this returns e**k values, not
    Taylor-series coefficients 1/k! — confirm intent with the author.
    """
    return np.exp(np.arange(N))
def solve_b(N):
    """Solve for the denominator coefficients b_1..b_N of the [N/N]
    Padé approximant of exp(x) via the standard linear system."""
    # Row r, column c of the system matrix: 1 / (N + r - c)!
    system = np.array([[1 / factorial(N + row - col) for col in range(N)]
                       for row in range(N)])
    # Right-hand side: -1 / (N + 1 + r)!
    rhs = np.array([-1 / factorial(N + 1 + row) for row in range(N)])
    # Pseudo-inverse solve, flattened to a length-N vector.
    return np.linalg.pinv(system).dot(rhs.reshape(N, 1)).reshape(N)
def solve_a(b):
    """Given denominator coefficients b, return the numerator
    coefficients a_0..a_N of the [N/N] Padé approximant of exp(x).

    a is the Cauchy product of exp's Taylor coefficients 1/k! with the
    denominator polynomial (1, b_1, ..., b_N).
    """
    N = len(b)
    taylor = np.array([1 / factorial(k) for k in range(N + 1)])
    denom = np.concatenate(([1.0], b))
    # a_i = sum_{j <= i} taylor[j] * denom[i - j]
    return np.array([taylor[:i + 1].dot(denom[i::-1]) for i in range(N + 1)])
def Pade_eval(a, b, x):
    """Evaluate the Padé approximant P(x)/Q(x) at x, where P has
    coefficients a (constant term first) and Q = 1 + b_1*x + ... + b_N*x**N."""
    num_powers = x ** np.arange(len(a))
    den_powers = x ** np.arange(1, len(b) + 1)
    top = np.dot(a, num_powers)
    bottom = 1 + np.dot(b, den_powers)
    return top / bottom
# Evaluate the [N/N] Padé approximants of exp(x) at x = 1 for N = 1..5
# and report the error against the true value e.
# Fix: the original used Python-2-only print statements; the rest of the
# numeric code is version-agnostic, so use print() calls.
for i in range(1, 6):
    b = solve_b(i)
    a = solve_a(b)
    val = Pade_eval(a, b, 1)
    print("\n[%d/%d] Padé Approximant" % (i, i))
    print("a = ", a)
    print("b = ", b)
    print("y =", val)
    print("error =", val - np.exp(1))
# Step-function regression data: label 1 where x > 0, else 0.
x_train = np.arange(-10, 11)
y_train = (x_train > 0).astype(int)
# Test points sit halfway between the training points.
x_test = x_train - 0.5
y_test = (x_test > 0).astype(int)
def vandermonde(x, order):
    """Return the (len(x), order+1) Vandermonde design matrix whose
    (i, j) entry is x[i]**j.

    Parameters
    ----------
    x : 1-D array-like of sample points.
    order : highest power to include.

    np.vander(..., increasing=True) produces exactly the hand-rolled
    repeat/broadcast construction of the original; np.asarray also lets
    plain lists be passed (the original required an ndarray).
    """
    return np.vander(np.asarray(x), order + 1, increasing=True)
def vandermondeFit(x, y, order):
    """Least-squares polynomial fit of y on x up to the given order.

    Returns (coefficients, design matrix)."""
    design = vandermonde(x, order)
    coeffs = np.linalg.pinv(design).dot(y)
    return (coeffs, design)
def test_error(x, y, b):
return np.sum(np.abs(X.dot(b)-y)**2)
# Fit polynomials of increasing order to the step data and report the
# squared error on the held-out half-integer test points, plotting each fit.
# Fix: Python-2-only print statements converted to print() calls; the
# plotting grid is hoisted to a named variable instead of being rebuilt.
for i in [5, 10, 15]:
    print("order:", i)
    (b, X) = vandermondeFit(x_train, y_train, i)
    print("test_error:", test_error(vandermonde(x_test, i), y_test, b))
    grid = np.linspace(-10, 10, 50)
    plt.figure()
    plt.plot(grid, vandermonde(grid, i).dot(b))
    plt.show()
def RBF(x, n, order=3):
    """Design matrix of n polyharmonic radial basis functions
    |x - c|**order with centres c evenly spaced on [-10, 10]."""
    centres = np.linspace(-10, 10, n).reshape(1, n)
    # Broadcasting (len(x), 1) against (1, n) replaces the explicit repeat.
    distances = np.abs(x.reshape(len(x), 1) - centres)
    return distances ** order
def RBFFit(x, y, n, order=3):
    """Least-squares fit of y onto n RBF features of x.

    Returns (weights, design matrix)."""
    design = RBF(x, n, order)
    weights = np.linalg.pinv(design).dot(y)
    return (weights, design)
# Fit RBF expansions with an increasing number of centres to the step
# data, reporting held-out squared error and plotting each fit.
# Fix: Python-2-only print statements converted to print() calls; the
# plotting grid is hoisted to a named variable.
for i in [5, 10, 15]:
    print("order:", i)
    (b, X) = RBFFit(x_train, y_train, i)
    print("test_error:", test_error(RBF(x_test, i), y_test, b))
    grid = np.linspace(-10, 10, 50)
    plt.figure()
    plt.plot(grid, RBF(grid, i).dot(b))
    plt.show()
class LFSR(object):
    """Fibonacci linear-feedback shift register over GF(2).

    M    : register length in bits; the state starts as random 0/1 values.
    lags : 1-based tap positions whose XOR forms the feedback bit.
    """

    def __init__(self, M, lags):
        self.state = np.random.randint(0, 2, size=M)
        # Convert 1-based tap positions to 0-based indices.
        self.lags = np.array(lags) - 1

    def update(self):
        """Advance the register one step and return the feedback bit.

        Fix: the original reset a dead attribute ``self.data_y = []`` on
        every call; it was never read anywhere, so it is removed.
        """
        # Feedback bit = XOR (sum mod 2) of the tapped positions.
        x = sum(self.state[self.lags]) % 2
        # Shift everything right by one; NumPy handles the overlapping
        # slice assignment correctly.
        self.state[1:] = self.state[:-1]
        self.state[0] = x
        return x

    def get_state(self):
        """Return the current register contents (a mutable view)."""
        return self.state
# Generate a long bit stream from a 4-bit LFSR with taps at positions 1
# and 4, then slice it into (4 consecutive bits -> next bit) examples.
lfsr = LFSR(4, [1, 4])
y = [lfsr.update() for _ in range(100000)]
window = 4
X_train = np.array([y[i:i + window] for i in range(2000)])
y_train = np.array([y[i + window] for i in range(2000)])
X_test = np.array([y[i:i + window] for i in range(2000, 2500)])
y_test = np.array([y[i + window] for i in range(2000, 2500)])
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers.core import Dense, Activation, Dropout
from keras.utils import np_utils
from keras import backend as K
# Train a small MLP (4 -> 6 relu -> 1) to predict the next LFSR bit from
# the previous four bits, then evaluate and plot predictions.
# Fix: the Keras-1 keyword arguments (output_dim, nb_epoch, lr) were
# removed in Keras 2+ and fail on any current install; `units` is passed
# positionally, which works across versions.
model = Sequential()
model.add(Dense(6, input_dim=4))
model.add(Activation("relu"))
model.add(Dense(1))
model.compile(loss='mean_squared_error',
              optimizer=SGD(learning_rate=0.05, momentum=0.5),
              metrics=["accuracy"])
model.fit(X_train, y_train, epochs=5, batch_size=32, validation_split=0.1)
objective_score = model.evaluate(X_test, y_test, batch_size=32)
# Fix: the original message ran the loss value straight into the label
# "test set accuracy" with no separator.
print("test set loss: " + str(objective_score[0])
      + ", test set accuracy: " + str(objective_score[1]))
# Round the network's real-valued outputs to hard 0/1 predictions.
y_model = model.predict(X_test[0:100])
y_predict = np.array(np.round(y_model), dtype=int)
plt.figure()
plt.title("test values")
plt.scatter(np.arange(20), y_test[:20])
plt.figure()
plt.title("predicted values")
plt.plot(np.arange(20), y_model[:20])
plt.scatter(np.arange(20), y_predict[:20])
plt.xlim(0, 20)
plt.show()