  • CS231n assignment1 Q4 Two-Layer Neural Network

    Network setup:
    A two-layer neural network: the first layer uses a ReLU activation, and the second layer applies a softmax to output class probabilities. The network is trained with stochastic gradient descent (SGD).
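
    In symbols, the forward pass implemented below is

        h1 = max(0, X·W1 + b1)      # hidden activations, shape (N, H)
        scores = h1·W2 + b2         # class scores, shape (N, C)
        p = softmax(scores)         # class probabilities, shape (N, C)

    and the loss is the average cross-entropy -log p[i, y[i]] over the batch, plus L2 penalties on W1 and W2.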

    neural_net.py

    from __future__ import print_function
    
    import numpy as np
    import matplotlib.pyplot as plt
    
    class TwoLayerNet(object):
      """
      A two-layer fully-connected neural network. The net has an input dimension of
      N, a hidden layer dimension of H, and performs classification over C classes.
      We train the network with a softmax loss function and L2 regularization on the
      weight matrices. The network uses a ReLU nonlinearity after the first fully
      connected layer.
    
      In other words, the network has the following architecture:
    
      input - fully connected layer - ReLU - fully connected layer - softmax
    
      The outputs of the second fully-connected layer are the scores for each class.
      """
    
      def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        """
        Initialize the model. Weights are initialized to small random values and
        biases are initialized to zero. Weights and biases are stored in the
        variable self.params, which is a dictionary with the following keys:
    
        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)
    
        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        """
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    
      def loss(self, X, y=None, reg=0.0):
        """
        Compute the loss and gradients for a two layer fully connected neural
        network.
    
        Inputs:
        - X: Input data of shape (N, D). Each X[i] is a training sample.
        - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
          an integer in the range 0 <= y[i] < C. This parameter is optional; if it
          is not passed then we only return scores, and if it is passed then we
          instead return the loss and gradients.
        - reg: Regularization strength.
    
        Returns:
        If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
        the score for class c on input X[i].
    
        If y is not None, instead return a tuple of:
        - loss: Loss (data loss and regularization loss) for this batch of training
          samples.
        - grads: Dictionary mapping parameter names to gradients of those parameters
          with respect to the loss function; has the same keys as self.params.
        """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape
    
        # Compute the forward pass
        scores = None
        #############################################################################
        # TODO: Perform the forward pass, computing the class scores for the input. #
        # Store the result in the scores variable, which should be an array of      #
        # shape (N, C).                                                             #
        #############################################################################
        z1 = X.dot(W1) + b1           # linear part: compute XW1 + b1
        h1 = np.maximum(0, z1)        # nonlinear part: ReLU
        scores = np.dot(h1, W2) + b2  # second layer: class scores
        # Shapes: X (N,D), W1 (D,H), b1 (H,), h1 (N,H), W2 (H,C), b2 (C,)
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################
        
        # If the targets are not given then jump out, we're done
        if y is None:
          return scores
    
        # Compute the loss
        loss = None
        #############################################################################
        # TODO: Finish the forward pass, and compute the loss. This should include  #
        # both the data loss and L2 regularization for W1 and W2. Store the result  #
        # in the variable loss, which should be a scalar. Use the Softmax           #
        # classifier loss.                                                          #
        #############################################################################
        scores_max = np.max(scores, axis=1, keepdims=True)  # (N,1)
        # Compute the class probabilities
        exp_scores = np.exp(scores - scores_max)  # (N,C)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # (N,C)
        # cross-entropy loss and L2-regularization
        correct_logprobs = -np.log(probs[range(N), y])  # (N,)
        data_loss = np.sum(correct_logprobs) / N
        reg_loss = 0.5 * reg * np.sum(W1 * W1) + 0.5 * reg * np.sum(W2 * W2)
        loss = data_loss + reg_loss  # total loss: softmax cross-entropy + L2 regularization
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################
    
        # Backward pass: compute gradients
        grads = {}
        #############################################################################
        # TODO: Compute the backward pass, computing the derivatives of the weights #
        # and biases. Store the results in the grads dictionary. For example,       #
        # grads['W1'] should store the gradient on W1, and be a matrix of same size #
        #############################################################################
        dscores = probs  # (N,C)
        dscores[range(N), y] -= 1  # gradient of the loss w.r.t. the scores
        dscores /= N
        # Gradients for W2 and b2
        grads['W2'] = np.dot(h1.T, dscores)
        grads['b2'] = np.sum(dscores, axis=0)
        # Backpropagate into the hidden layer
        dhidden = np.dot(dscores, W2.T)  # (N,H)
        # Backprop the ReLU nonlinearity
        dhidden[h1 <= 0] = 0
        # Gradients for W1 and b1
        grads['W1'] = np.dot(X.T, dhidden)
        grads['b1'] = np.sum(dhidden, axis=0)
        # Add the regularization gradient contribution
        grads['W2'] += reg * W2
        grads['W1'] += reg * W1
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################
    
        return loss, grads
    
      def train(self, X, y, X_val, y_val,
                learning_rate=1e-3, learning_rate_decay=0.95,
                reg=5e-6, num_iters=100,
                batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.
    
        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means that
          X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train // batch_size, 1)  # integer division, so the epoch check below works under Python 3
    
        # Use SGD to optimize the parameters in self.model
        loss_history = []
        train_acc_history = []
        val_acc_history = []
    
        for it in range(num_iters):
          X_batch = None
          y_batch = None
    
          #########################################################################
          # TODO: Create a random minibatch of training data and labels, storing  #
          # them in X_batch and y_batch respectively.                             #
          #########################################################################
          # Sample a minibatch (with replacement, so this also works when batch_size > num_train)
          sample_indices = np.random.choice(num_train, batch_size, replace=True)
          X_batch = X[sample_indices, :]
          y_batch = y[sample_indices]
          #########################################################################
          #                             END OF YOUR CODE                          #
          #########################################################################
    
          # Compute loss and gradients using the current minibatch
          loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
          loss_history.append(loss)
    
          #########################################################################
          # TODO: Use the gradients in the grads dictionary to update the         #
          # parameters of the network (stored in the dictionary self.params)      #
          # using stochastic gradient descent. You'll need to use the gradients   #
          # stored in the grads dictionary defined above.                         #
          #########################################################################
          self.params['W1'] += -learning_rate * grads['W1']
          self.params['b1'] += -learning_rate * grads['b1']
          self.params['W2'] += -learning_rate * grads['W2']
          self.params['b2'] += -learning_rate * grads['b2']
          #########################################################################
          #                             END OF YOUR CODE                          #
          #########################################################################
    
          if verbose and it % 100 == 0:
            print('iteration %d / %d: loss %f' % (it, num_iters, loss))
    
          # Every epoch, check train and val accuracy and decay learning rate.
          if it % iterations_per_epoch == 0:
            # Check accuracy
            train_acc = (self.predict(X_batch) == y_batch).mean()
            val_acc = (self.predict(X_val) == y_val).mean()
            train_acc_history.append(train_acc)
            val_acc_history.append(val_acc)
    
            # Decay learning rate
            learning_rate *= learning_rate_decay
    
        return {
          'loss_history': loss_history,
          'train_acc_history': train_acc_history,
          'val_acc_history': val_acc_history,
        }
    
      def predict(self, X):
        """
        Use the trained weights of this two-layer network to predict labels for
        data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.
    
        Inputs:
        - X: A numpy array of shape (N, D) giving N D-dimensional data points to
          classify.
    
        Returns:
        - y_pred: A numpy array of shape (N,) giving predicted labels for each of
          the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
          to have class c, where 0 <= c < C.
        """
        y_pred = None
    
        ###########################################################################
        # TODO: Implement this function; it should be VERY simple!                #
        ###########################################################################
        # Use the trained parameters to predict: forward pass, then argmax over scores
        h1 = np.maximum(0,(np.dot(X, self.params['W1']) + self.params['b1']))
        scores = np.dot(h1, self.params['W2']) + self.params['b2']
        y_pred = np.argmax(scores, axis=1)
        ###########################################################################
        #                              END OF YOUR CODE                           #
        ###########################################################################
    
        return y_pred
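
    Sanity check: before tuning hyperparameters, it helps to verify the analytic gradients against a numeric approximation on a small toy problem. The sketch below is a minimal example (the toy sizes, the seed, and the inline finite-difference loop are illustrative assumptions, not part of the assignment code):

    import numpy as np
    from neural_net import TwoLayerNet

    # Toy problem: 5 samples, 4 input dimensions, 10 hidden units, 3 classes
    np.random.seed(0)
    net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3, std=1e-1)
    X = 10 * np.random.randn(5, 4)
    y = np.array([0, 1, 2, 2, 1])

    loss, grads = net.loss(X, y, reg=0.05)

    # Compare each analytic gradient with a centered finite difference
    h = 1e-5
    for name in ['W1', 'b1', 'W2', 'b2']:
        param = net.params[name]
        num_grad = np.zeros_like(param)
        it = np.nditer(param, flags=['multi_index'])
        while not it.finished:
            ix = it.multi_index
            old = param[ix]
            param[ix] = old + h
            loss_plus, _ = net.loss(X, y, reg=0.05)
            param[ix] = old - h
            loss_minus, _ = net.loss(X, y, reg=0.05)
            param[ix] = old  # restore the original value
            num_grad[ix] = (loss_plus - loss_minus) / (2 * h)
            it.iternext()
        rel_err = np.max(np.abs(num_grad - grads[name]) /
                         np.maximum(1e-8, np.abs(num_grad) + np.abs(grads[name])))
        print('%s max relative error: %e' % (name, rel_err))

    Relative errors around 1e-8 or smaller indicate the backward pass is correct.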
    
    

    Hyperparameter optimization:

    best_net = None  # store the best model in this variable
    
    #################################################################################
    # TODO: Tune hyperparameters using the validation set. Store your best trained  #
    # model in best_net.                                                            #
    #                                                                               #
    # To help debug your network, it may help to use visualizations similar to the  #
    # ones we used above; these visualizations will have significant qualitative    #
    # differences from the ones we saw above for the poorly tuned network.          #
    #                                                                               #
    # Tweaking hyperparameters by hand can be fun, but you might find it useful to  #
    # write code to sweep through possible combinations of hyperparameters          #
    # automatically like we did on the previous exercises.                          #
    #################################################################################
    best_val = -1
    best_stats = None
    learning_rates = [1e-2, 1e-3]
    regularization_strengths = [0.4, 0.5, 0.6]
    results = {}
    iters = 2000
    for lr in learning_rates:
        for rs in regularization_strengths:
            net = TwoLayerNet(input_size, hidden_size, num_classes)
            stats = net.train(X_train, y_train, X_val, y_val,
                              num_iters=iters, batch_size=200,
                              learning_rate=lr, learning_rate_decay=0.95,
                              reg=rs)
            y_train_pred = net.predict(X_train)
            acc_train = np.mean(y_train == y_train_pred)
            y_val_pred = net.predict(X_val)
            acc_val = np.mean(y_val == y_val_pred)
            results[(lr, rs)] = (acc_train, acc_val)
            if best_val < acc_val:
                best_stats = stats
                best_val = acc_val
                best_net = net
    for (lr, reg) in sorted(results):
        (train_accuracy, val_accuracy) = results[(lr, reg)]
        print('lr %f, reg %f: train accuracy %f, val accuracy %f' % (lr, reg, train_accuracy, val_accuracy))
    print('best validation accuracy achieved during the sweep: %f' % best_val)
    #################################################################################
    #                               END OF YOUR CODE                                #
    #################################################################################
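
    After the sweep, the selected model would typically be evaluated once on the held-out test split. A minimal sketch (X_test and y_test are assumed to be the CIFAR-10 test split prepared earlier in the notebook):

    test_acc = (best_net.predict(X_test) == y_test).mean()
    print('Test accuracy: %f' % test_acc)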
    
  • Original post: https://www.cnblogs.com/bernieloveslife/p/10179501.html