  • A simple neural network

    import numpy as np
    
    def tanh(x):  # hyperbolic tangent activation
        return np.tanh(x)
    
    def tanh_deriv(x):  # derivative of tanh, needed when updating the weights
        return 1.0 - np.tanh(x)*np.tanh(x)
    
    def logistic(x):  # logistic (sigmoid) activation
        return 1/(1 + np.exp(-x))
    
    def logistic_derivative(x):  # derivative of the logistic function
        return logistic(x)*(1 - logistic(x))
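    
    # Optional sanity check (an addition, not from the original post): each
    # analytic derivative should agree with a central finite difference.
    _eps = 1e-6
    assert abs(tanh_deriv(0.5) - (tanh(0.5 + _eps) - tanh(0.5 - _eps))/(2*_eps)) < 1e-6
    assert abs(logistic_derivative(0.5) - (logistic(0.5 + _eps) - logistic(0.5 - _eps))/(2*_eps)) < 1e-6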
    
    class NeuralNetwork:
        def __init__(self, layers, activation='tanh'):
            '''
            :param layers: a list containing the number of units in each
            layer; should contain at least two values
            :param activation: the activation function to be used, either
            "logistic" or "tanh"
            '''
            if activation == 'logistic':
                self.activation = logistic
                self.activation_deriv = logistic_derivative
            elif activation == 'tanh':
                self.activation = tanh
                self.activation_deriv = tanh_deriv
    
            self.weights = []
            # Initialise the weights randomly in (-0.25, 0.25). Every layer
            # except the output carries one extra unit for the bias term.
            for i in range(len(layers) - 2):
                self.weights.append((2*np.random.random((layers[i] + 1, layers[i + 1] + 1)) - 1)*0.25)
            self.weights.append((2*np.random.random((layers[-2] + 1, layers[-1])) - 1)*0.25)
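            # For example, layers=[2, 2, 1] gives weights[0] of shape (3, 3)
            # and weights[1] of shape (3, 1); the +1s hold the bias units.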
        def fit(self, X, y, learning_rate=0.2, epochs=10000):
            '''
            Train the network, repeatedly updating self.weights from the data.
            :param X: the training samples, one per row
            :param y: the target outputs (class labels)
            :param learning_rate: step size for the weight updates
            :param epochs: number of iterations; each one trains on a single
            randomly drawn sample
            '''
            X = np.atleast_2d(X)  # make X a NumPy array of at least two dimensions
            temp = np.ones([X.shape[0], X.shape[1] + 1])
            temp[:, 0:-1] = X  # append a bias column of ones to every sample
            X = temp
            y = np.array(y)
    
            for k in range(epochs):
                i = np.random.randint(X.shape[0])  # pick one training sample at random
                a = [X[i]]
    
                # Forward pass: store the activations of every layer.
                for j in range(len(self.weights)):
                    a.append(self.activation(np.dot(a[j], self.weights[j])))  # np.dot: matrix product
                error = y[i] - a[-1]
                deltas = [error * self.activation_deriv(a[-1])]  # output-layer error
                # Backward pass: propagate the deltas towards the input layer.
                for j in range(len(a) - 2, 0, -1):
                    deltas.append(deltas[-1].dot(self.weights[j].T)*self.activation_deriv(a[j]))
                deltas.reverse()
                # Gradient-descent update of every weight matrix.
                for j in range(len(self.weights)):
                    layer = np.atleast_2d(a[j])
                    delta = np.atleast_2d(deltas[j])
                    self.weights[j] += learning_rate*layer.T.dot(delta)
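    
            # In symbols: delta_L = (y - a_L) * f'(a_L) at the output layer,
            # delta_l = (delta_{l+1} . W_l^T) * f'(a_l) for hidden layers,
            # and each weight matrix moves by learning_rate * a_l^T . delta_l.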
    
        def predict(self, x):
            x = np.array(x)
            temp = np.ones(x.shape[0] + 1)
            temp[0:-1] = x  # append the bias term to the input
            a = temp
            for j in range(len(self.weights)):
                a = self.activation(np.dot(a, self.weights[j]))
            return a  # the final activation is the prediction
    
    # Train a 2-2-1 network on the XOR problem.
    nn = NeuralNetwork([2, 2, 1], 'tanh')
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 1, 1, 0])
    nn.fit(X, y)
    for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(i, nn.predict(i))
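    
    Because tanh saturates in (-1, 1), the raw predictions will hover near 0
    and 1 rather than hitting them exactly. A minimal sketch of turning them
    into hard XOR labels (an illustration, not part of the original post; the
    0.5 threshold is an arbitrary choice):
    
        preds = [1 if nn.predict(x)[0] > 0.5 else 0 for x in X]
        print(preds)  # a successful run should print [0, 1, 1, 0]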
  • Original post: https://www.cnblogs.com/ilovecpp/p/12715422.html