In [6]:
import numpy as np
class NN:
    """
    Adapted from paper: http://www.inf.fu-berlin.de/inst/ag-ki/ger/B-06-04.pdf
    Parameters: 
    Theta - K x N
    W . S X N 
    M = N X S
    N = num of original sources 
    S = num of mixed audio signals
    Output:
    W, M, theta, and error
    Here, to find E,just add up all the errors for the matrix 
    E is a matrix since the error fo separability will be different for each point in time.
    Here, N =S since mixing matrix A must be invertible.
    Notes:
    Currently, this does not converge.Modify so it implements Levenberg Marquett as not to get stuck in 
    local minimization
    """
    def __init__(self, mixed_audio, S, N ,K=10):
        # initial first
        self.E = 1
        self.S = S
        self.N = N
        self.K = K
        self.T = len(mixed_audio[0]) # the number of time steps
        self.W = np.random.rand(S, N) # weights
        self.M = np.random.rand(N,S)
        self.theta = np.random.rand(K, N) # here, k = 10, or current can be approximated by 
        # the linear combination of the signal at the past 10 time frames.  
        self.x = mixed_audio
    # I'm literally writing it thi sway soit'smore compliable. 

    def find_params(self):
        for i in range(5):
            error_1, y, xtilda = self.group_param_1()
            error_2, ytilda = self.group_param_2(y)
            """ 
            print(error_1)
            print(error_2)
            print("final")
            """
            E = self.E_final(error_1, error_2) # Gets a matrix of the errors for each tau
            self.update(y,ytilda, self.x, xtilda)
         
            
        return self.W, self.M, self.theta, E
    def dEdM(self, y_input, xtilda, x):
        d = np.zeros((self.N,self.S))
        for i in range(self.N):
            for j in range(self.S):
                d[i,j] = self.dEdMHelper(y_input, xtilda, x,  i, j) # where does time come into plaly here?
        return d
    def dEdMHelper(self, y_input, xtilda,x, i, j):
        return np.linalg.norm(y_input[i, :]*(xtilda[j, :]- x[j, :]))

    def dEdtheta(self, y_input, ytilda):
        d = np.zeros((self.K,self.N))
        for i in range(self.K):
            for j in range(self.N):
                d[i, j] = self.dEdthetahelper(y_input, ytilda, i, j) 
                # not sure about this - how does the time get involved with dEdtheta?
        return d
    
    def dEdthetahelper(self, y_input, ytilda, i, j):
        res = 0
        for t in range(self.T):
            if ((t - i) >= 0):
                res += y_input[j,  t- i]*(ytilda[j, t] - y_input[j, t])
            else:
                res += 0 # Just throw away if out of bounds. Makes more sense than wrapping around.
        return res
    def dEdW(self, y_input, x, ytilda, xtilda):
        d = np.zeros((self.S,self.N))
        for i in range(self.S):
            for j in range(self.N):
                self.dEdWhelper(y_input, x, ytilda, xtilda, i, j)
        return d
    
    def dEdWhelper(self, y_input, x, ytilda, xtilda,i,j):
        d = np.zeros((self.S, self.N))
        def num_sum(i, j): # given j, calculate the sum
            res = 0
            for l in range(self.S):
                res += self.M[j, l] * ((np.linalg.norm(xtilda[l, :]) - np.linalg.norm(x[l, :]))
                *(np.linalg.norm(ytilda[j, :]) - np.linalg.norm(y_input[j,:])))
            return res
        def num_sum_2(i, j):
            res = 0 
            for l in range(self.K):
                for t in range(self.T):
                    if ((t-l) >= 0):
                        res += x[i, t - l]*self.theta[l, j]*(np.linalg.norm(ytilda[j,t] - y_input[j,t]))
            return res
        for i in range(self.S):
            for j in range(self.N):
                d[i,j] = (np.linalg.norm(x[i,:]) * num_sum(i,j)) + num_sum_2(i,j)
        return d
        
    def update(self, y_input, ytilda, x, xtilda):
        self.theta += self.dEdtheta(y_input, ytilda)
        self.W += self.dEdW(y_input, x, ytilda, xtilda)
        self.M += self.dEdM(y_input,xtilda, x )
        
    def E_final(self, error_1, error_2):
        # for each Tau, sum it up. 
        return np.linalg.norm(error_1, axis=0) + np.linalg.norm(error_2, axis=0)

    def group_param_1(self):
        # The minimization of this error means the approximation of the mixing matrix A with the matrix M .
        # input is time. 
        y = np.dot(np.transpose(self.W), self.x)
        xtilda = np.dot(np.transpose(self.M), y)
        error = np.subtract(xtilda, self.x)**2
        error /= 2 
        return error, y, xtilda

    def group_param_2(self, y_input):
        y_tilde = []
        for i in range(self.N):
            y_bar = []
            for t in range(len(y_input[0])):# time
                y_barlocal = []
                for j in range(self.K):
                    if ((t-j) >= 0):
                        y_barlocal.append(y_input[i, t-j])
                    else:
                        y_barlocal.append(0)
                y_bar.append(y_barlocal)
                
            y_bar = np.transpose(y_bar)
            th = np.transpose(self.theta)[i]
            y_til = np.dot(th,y_bar)
            y_tilde.append(y_til)
        y_tilde = np.array(y_tilde)
        error = np.subtract(y_tilde, y_input)**2
        error /= 2    
        return error, y_tilde
    #implement theneural network 
    
In [2]:
X = np.array([[0,1, 2, 3, 4, 5,6 , 7], [0,1,2,3,4,5,6,7]])
X.shape
Out[2]:
(2, 8)
In [9]:
nn = NN(X, 2, 2)
W, M, theta, error = nn.find_params()
print("W")
print(W)
print("with error")
print(error)
# did not converge
W
[[ 0.19922089  0.44006783]
 [ 0.02969131  0.44110918]]
with error
[  0.00000000e+00   2.44544069e+17   1.93364634e+18   6.74306717e+18
   1.62081730e+19   3.13485322e+19   5.25486063e+19   7.96823940e+19]
In [ ]: