Padé Approximants of $e^{x}$

In [5]:
import numpy as np, matplotlib.pyplot as plt
In [6]:
def factorial(n):
    return float(max(1,np.product(np.arange(1,n+1))))
    
def taylor_N(N):
    return np.exp(np.arange(n))

def solve_b(N):
    A = np.zeros((N, N))
    y = np.zeros((N,1))
    for i in range(N):
        y[i] = -1/factorial(N+1+i)
        for j in range(N):
            A[i,j] = 1/factorial(N+i-j)
    return np.linalg.pinv(A).dot(y).reshape(N)

def solve_a(b):
    N = len(b)
    coeffs = [1/factorial(n) for n in range(N+1)]
    a = np.zeros(N+1)
    b_prime = np.ones(N+1)
    b_prime[1:] = b
    for i in range(N+1):
        for j in range(i+1):
            a[i] += coeffs[j]*b_prime[i-j]
    return a

def Pade_eval(a,b,x):
    numerator = a.dot(x**np.arange(0,len(a)))
    denominator = 1+b.dot(x**np.arange(1,len(b)+1))
    return numerator/denominator
In [7]:
for i in range(1,6):
    b = solve_b(i)
    a = solve_a(b)
    val = Pade_eval(a,b,1)
    
    print "\n[%d/%d] Padé Approximant" %(i,i)
    print "a = ", a
    print "b = ", b
    print "y =", val
    print "error =", val-np.exp(1)
    
[1/1] Padé Approximant
a =  [ 1.   0.5]
b =  [-0.5]
y = 3.0
error = 0.281718171541

[2/2] Padé Approximant
a =  [ 1.          0.5         0.08333333]
b =  [-0.5         0.08333333]
y = 2.71428571429
error = -0.00399611417333

[3/3] Padé Approximant
a =  [ 1.          0.5         0.1         0.00833333]
b =  [-0.5         0.1        -0.00833333]
y = 2.71830985915
error = 2.80306958849e-05

[4/4] Padé Approximant
a =  [  1.00000000e+00   5.00000000e-01   1.07142857e-01   1.19047619e-02
   5.95238095e-04]
b =  [-0.5         0.10714286 -0.01190476  0.00059524]
y = 2.71828171828
error = -1.10177325929e-07

[5/5] Padé Approximant
a =  [  1.00000000e+00   5.00000000e-01   1.11111111e-01   1.38888889e-02
   9.92063492e-04   3.30687831e-05]
b =  [ -5.00000000e-01   1.11111111e-01  -1.38888889e-02   9.92063492e-04
  -3.30687831e-05]
y = 2.71828182874
error = 2.76650258257e-10

Integer Function Approximation

In [28]:
x_train = np.arange(-10,11)
y_train = np.array(x_train > 0, dtype=int)

x_test = x_train-0.5
y_test = np.array(x_test > 0, dtype=int)
In [29]:
def vandermonde(x, order):
    return np.repeat(x.reshape(len(x),1),order+1,axis=1)**np.arange(order+1).reshape(1, order+1)

def vandermondeFit(x, y, order):
    X = vandermonde(x, order)
    return (np.linalg.pinv(X).dot(y), X)

def test_error(x, y, b):
    return np.sum(np.abs(X.dot(b)-y)**2)

Van Der Monde Polynomial Fitting

In [30]:
for i in [5, 10, 15]:
    print "order:",i
    (b, X) = vandermondeFit(x_train, y_train, i)
    print "test_error:", test_error(vandermonde(x_test,i), y_test, b)
    plt.figure()
    plt.plot(np.linspace(-10,10,50), vandermonde(np.linspace(-10,10,50),i).dot(b))
    plt.show()
    
order: 5
test_error: 0.494971443955
order: 10
test_error: 0.266237399627
order: 15
test_error: 0.570250337211

Radial Basis Function fits

In [31]:
def RBF(x, n, order=3):
    c = np.linspace(-10,10,n).reshape(1,n)
    return np.abs(np.repeat(x.reshape(len(x),1),n,axis=1) - c)**order


def RBFFit(x, y, n, order=3):
    X = RBF(x, n, order)
    return (np.linalg.pinv(X).dot(y), X)
In [32]:
for i in [5, 10, 15]:
    print "order:",i
    (b, X) = RBFFit(x_train, y_train, i)
    print "test_error:", test_error(RBF(x_test,i), y_test, b)    
    plt.figure()
    plt.plot(np.linspace(-10,10,50), RBF(np.linspace(-10,10,50),i).dot(b))
    plt.show()
order: 5
test_error: 0.62487896921
order: 10
test_error: 0.181219178416
order: 15
test_error: 0.0567454869455

Training Neural Nets on LFSR Outputs

In [24]:
class LFSR(object): 
    def __init__(self, M, lags):
        self.state = np.random.randint(0, 2, size=M)
        self.lags = np.array(lags) - 1
    
    def update(self):
        self.data_y = []
        x = sum(self.state[self.lags]) % 2
        self.state[1:] = self.state[:-1]
        self.state[0] = x
        return x
        
    def get_state(self):
        return self.state
    
In [25]:
lfsr = LFSR(4, [1,4])

y = [lfsr.update() for i in range(100000)]
In [26]:
X_train = np.array([y[i:i+4] for i in range(2000)])
y_train = np.array([y[i+4] for i in range(2000)])

X_test = np.array([y[i:i+4] for i in range(2000,2500)])
y_test = np.array([y[i+4] for i in range(2000,2500)])
In [16]:
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers.core import Dense, Activation, Dropout
from keras.utils import np_utils
from keras import backend as K
Using TensorFlow backend.
In [79]:
model = Sequential()

model.add(Dense(output_dim=6, input_dim=4))

model.add(Activation("relu"))

model.add(Dense(output_dim=1))

model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.05, momentum=0.5), metrics=["accuracy"])

model.fit(X_train, y_train, nb_epoch=5, batch_size=32,validation_split=0.1)

objective_score = model.evaluate(X_test, y_test, batch_size=32)

print ("test set loss:"  + str(objective_score[0]) + "test set accuracy:" + str(objective_score[1]))
/home/bgheneti/.local/lib/python2.7/site-packages/ipykernel/__main__.py:3: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(units=6, input_dim=4)`
  app.launch_new_instance()
/home/bgheneti/.local/lib/python2.7/site-packages/ipykernel/__main__.py:7: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(units=1)`
Train on 1800 samples, validate on 200 samples
Epoch 1/5
1800/1800 [==============================] - 0s - loss: 0.1277 - acc: 0.8806 - val_loss: 0.0311 - val_acc: 1.0000
Epoch 2/5
1800/1800 [==============================] - 0s - loss: 0.0123 - acc: 1.0000 - val_loss: 0.0039 - val_acc: 1.0000
Epoch 3/5
1800/1800 [==============================] - 0s - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000
Epoch 4/5
1800/1800 [==============================] - 0s - loss: 6.8457e-04 - acc: 1.0000 - val_loss: 4.4585e-04 - val_acc: 1.0000
Epoch 5/5
1800/1800 [==============================] - 0s - loss: 3.1971e-04 - acc: 1.0000 - val_loss: 2.4995e-04 - val_acc: 1.0000
 32/500 [>.............................] - ETA: 0stest set loss:0.000242763807182test set accuracy:1.0
In [80]:
y_model = model.predict(X_test[0:100])
y_predict = np.array(np.round(y_model), dtype=int)
In [82]:
plt.figure()
plt.title("test values")
plt.scatter(np.arange(20), y_test[:20])
plt.figure()
plt.title("predicted values")
plt.plot(np.arange(20), y_model[:20])
plt.scatter(np.arange(20), y_predict[:20])
plt.xlim(0,20)
plt.show()
In [ ]: