  • Improving Deep Neural Networks

    From Andrew Ng's Deep Learning Specialization, Improving Deep Neural Networks - Week 1 Assignment 1.3, 3. Gradient Checking.
    If you find it hard to follow the code on its own, see: https://blog.csdn.net/u013733326/article/details/79847918
    The assignment first implements gradient checking for the toy example J(theta) = theta * x, and then a real gradient check for a full network. At the end I point out the error that neither the original assignment nor the reference blog identifies.

    See https://github.com/Hongze-Wang/Deep-Learning-Andrew-Ng/tree/master/homework for the complete version.

    1-dimensional gradient checking
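
    The docstrings below refer to "formula (1)" and "difference (2)" from the notebook figures, which are not reproduced in this post. For reference, they match exactly what the code computes:

    \text{gradapprox} = \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon} \tag{1}

    \text{difference} = \frac{\lVert grad - gradapprox \rVert_2}{\lVert grad \rVert_2 + \lVert gradapprox \rVert_2} \tag{2}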

    # Packages
    import numpy as np
    from testCases import *
    from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
    
    # GRADED FUNCTION: forward_propagation
    
    def forward_propagation(x, theta):
        """
        Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)
        
        Arguments:
        x -- a real-valued input
        theta -- our parameter, a real number as well
        
        Returns:
        J -- the value of function J, computed using the formula J(theta) = theta * x
        """
        
        ### START CODE HERE ### (approx. 1 line)
        J = np.dot(theta, x)
        ### END CODE HERE ###
        
        return J
    
    x, theta = 2, 4
    J = forward_propagation(x, theta)
    print ("J = " + str(J))
    

    J = 8

    # GRADED FUNCTION: backward_propagation
    
    def backward_propagation(x, theta):
        """
        Computes the derivative of J with respect to theta (see Figure 1).
        
        Arguments:
        x -- a real-valued input
        theta -- our parameter, a real number as well
        
        Returns:
        dtheta -- the gradient of the cost with respect to theta
        """
        
        ### START CODE HERE ### (approx. 1 line)
        dtheta = x
        ### END CODE HERE ###
        
        return dtheta
    
    x, theta = 2, 4
    dtheta = backward_propagation(x, theta)
    print ("dtheta = " + str(dtheta))
    

    dtheta = 2
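
    Sanity check on the two outputs above: since J(theta) = theta * x, the analytic derivative is dJ/dtheta = x = 2, which is exactly the value the numerical gradient check below should reproduce.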

    # GRADED FUNCTION: gradient_check
    
    def gradient_check(x, theta, epsilon = 1e-7):
        """
        Implement gradient checking as presented in Figure 1.
        
        Arguments:
        x -- a real-valued input
        theta -- our parameter, a real number as well
        epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
        
        Returns:
        difference -- difference (2) between the approximated gradient and the backward propagation gradient
        """
        
        # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
        ### START CODE HERE ### (approx. 5 lines)
        thetaplus = theta + epsilon                               # Step 1
        thetaminus = theta - epsilon                              # Step 2
        J_plus = forward_propagation(x, thetaplus)                # Step 3
        J_minus = forward_propagation(x, thetaminus)              # Step 4
        gradapprox = (J_plus - J_minus) / (2*epsilon)             # Step 5
        ### END CODE HERE ###
        
        # Check if gradapprox is close enough to the output of backward_propagation()
        ### START CODE HERE ### (approx. 1 line)
        grad = backward_propagation(x, theta)
        ### END CODE HERE ###
        
        ### START CODE HERE ### (approx. 1 line)
        numerator = np.linalg.norm(grad - gradapprox)                      # Step 1'
        denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)    # Step 2'
        difference = numerator / denominator                               # Step 3'
        ### END CODE HERE ###
        
        if difference < 1e-7:
            print ("The gradient is correct!")
        else:
            print ("The gradient is wrong!")
        
        return difference
       
    x, theta = 2, 4
    difference = gradient_check(x, theta)
    print("difference = " + str(difference))
    
    The gradient is correct!
    difference = 2.919335883291695e-10
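
    As a quick extra experiment (not part of the original assignment), you can feed a deliberately wrong gradient into the same comparison and watch the difference blow up. broken_backward_propagation below is a made-up helper, purely for illustration:

    # Illustration only: a deliberately wrong gradient to show the check failing.
    def broken_backward_propagation(x, theta):
        return x + 0.5                                 # the correct answer is x

    x, theta, eps = 2, 4, 1e-7
    grad = broken_backward_propagation(x, theta)       # 2.5 instead of 2
    gradapprox = (forward_propagation(x, theta + eps) - forward_propagation(x, theta - eps)) / (2 * eps)
    difference = abs(grad - gradapprox) / (abs(grad) + abs(gradapprox))
    print(difference)                                  # about 0.11, far above the 1e-7 threshold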
    

    N-dimensional gradient checking

    def forward_propagation_n(X, Y, parameters):
        """
        Implements the forward propagation (and computes the cost) presented in Figure 3.
        
        Arguments:
        X -- training set for m examples
        Y -- labels for m examples 
        parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                        W1 -- weight matrix of shape (5, 4)
                        b1 -- bias vector of shape (5, 1)
                        W2 -- weight matrix of shape (3, 5)
                        b2 -- bias vector of shape (3, 1)
                        W3 -- weight matrix of shape (1, 3)
                        b3 -- bias vector of shape (1, 1)
        
        Returns:
        cost -- the cross-entropy cost averaged over the m examples
        cache -- tuple of intermediate values used by backward_propagation_n
        """
        
        # retrieve parameters
        m = X.shape[1]
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]
        W3 = parameters["W3"]
        b3 = parameters["b3"]
    
        # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
        Z1 = np.dot(W1, X) + b1
        A1 = relu(Z1)
        Z2 = np.dot(W2, A1) + b2
        A2 = relu(Z2)
        Z3 = np.dot(W3, A2) + b3
        A3 = sigmoid(Z3)
    
        # Cost
        logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)
        cost = 1./m * np.sum(logprobs)
        
        cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
        
        return cost, cache
    
    def backward_propagation_n(X, Y, cache):
        """
        Implement the backward propagation presented in figure 2.
        
        Arguments:
        X -- input datapoint, of shape (input size, 1)
        Y -- true "label"
        cache -- cache output from forward_propagation_n()
        
        Returns:
        gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
        """
        
        m = X.shape[1]
        (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
        
        dZ3 = A3 - Y
        dW3 = 1./m * np.dot(dZ3, A2.T)
        db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
        
        dA2 = np.dot(W3.T, dZ3)
        dZ2 = np.multiply(dA2, np.int64(A2 > 0))
        dW2 = 1./m * np.dot(dZ2, A1.T) * 2
        db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
        
        dA1 = np.dot(W2.T, dZ2)
        dZ1 = np.multiply(dA1, np.int64(A1 > 0))
        dW1 = 1./m * np.dot(dZ1, X.T)
        db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)
        
        gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                     "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                     "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
        
        return gradients
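
    gradient_check_n below relies on dictionary_to_vector, vector_to_dictionary and gradients_to_vector from gc_utils, which this post never shows. Here is a minimal sketch of the flattening they are assumed to perform (the ordering is a guess; the shapes come from the docstring of forward_propagation_n; the real dictionary_to_vector also returns a second value with the keys, which the sketch omits):

    # Sketch only, assuming the helpers simply flatten/unflatten the parameter dictionary.
    KEYS = ["W1", "b1", "W2", "b2", "W3", "b3"]                  # assumed ordering
    SHAPES = {"W1": (5, 4), "b1": (5, 1), "W2": (3, 5),
              "b2": (3, 1), "W3": (1, 3), "b3": (1, 1)}          # from the docstring above

    def dictionary_to_vector_sketch(parameters):
        # Reshape every parameter into a column and stack them into one (47, 1) vector.
        return np.concatenate([parameters[k].reshape(-1, 1) for k in KEYS], axis=0)

    def vector_to_dictionary_sketch(theta):
        # Cut the (47, 1) vector back into the original parameter shapes.
        parameters, start = {}, 0
        for k in KEYS:
            rows, cols = SHAPES[k]
            parameters[k] = theta[start:start + rows * cols].reshape((rows, cols))
            start += rows * cols
        return parameters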
    
    # GRADED FUNCTION: gradient_check_n
    
    def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
        """
        Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n
        
        Arguments:
        parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
        gradients -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
        X -- input datapoint, of shape (input size, 1)
        Y -- true "label"
        epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
        
        Returns:
        difference -- difference (2) between the approximated gradient and the backward propagation gradient
        """
        
        # Set-up variables
        parameters_values, _ = dictionary_to_vector(parameters)
        grad = gradients_to_vector(gradients)
        num_parameters = parameters_values.shape[0]
        J_plus = np.zeros((num_parameters, 1))
        J_minus = np.zeros((num_parameters, 1))
        gradapprox = np.zeros((num_parameters, 1))
        
        # Compute gradapprox
        for i in range(num_parameters):
            
            # Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
            # "_" is used because the function you have to call returns two values but we only care about the first one
            ### START CODE HERE ### (approx. 3 lines)
            thetaplus = np.copy(parameters_values)                                            # Step 1
            thetaplus[i][0] = thetaplus[i][0] + epsilon                                          # Step 2
            J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))       # Step 3
            ### END CODE HERE ###
            
            # Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
            ### START CODE HERE ### (approx. 3 lines)
            thetaminus = np.copy(parameters_values)                                             # Step 1
            thetaminus[i][0] = thetaminus[i][0] - epsilon                                         # Step 2        
            J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))     # Step 3
            ### END CODE HERE ###
            
            # Compute gradapprox[i]
            ### START CODE HERE ### (approx. 1 line)
            gradapprox[i] = (J_plus[i] - J_minus[i]) / (2*epsilon)
            ### END CODE HERE ###
        
        # Compare gradapprox to backward propagation gradients by computing difference.
        ### START CODE HERE ### (approx. 1 line)
        numerator = np.linalg.norm(grad - gradapprox)                                        # Step 1'
        denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)                      # Step 2'
        difference = numerator / denominator                                                 # Step 3'
        ### END CODE HERE ###
    
        if difference > 1e-7:
            print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
        else:
            print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
        
        return difference
    
    X, Y, parameters = gradient_check_n_test_case()
    
    cost, cache = forward_propagation_n(X, Y, parameters)
    gradients = backward_propagation_n(X, Y, cache)
    difference = gradient_check_n(parameters, gradients, X, Y)
    

    There is a mistake in the backward propagation! difference = 0.2850931566540251

    Where is the error?

    Look closely at the following two lines in backward_propagation_n:

    dW2 = 1./m * np.dot(dZ2, A1.T) * 2  # dW1 and dW3 are not multiplied by 2; this extra factor is the bug
    db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True) # db2 and db3 use 1./m, not 4./m
    

    However, even after correcting these two errors, the gradient check still reports a mistake:
    There is a mistake in the backward propagation! difference = 1.1885552035482147e-07
    I also found that the computation of dZ1 and dZ2 is not right; the correct relation is dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}), i.e. the ReLU mask should come from Z rather than from A:

    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
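
    For completeness, a minimal sketch of the corrected lines in backward_propagation_n (only the changed lines are shown). It combines the dW2/db1 fixes above with dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}), taking the ReLU mask from Z rather than from A:

    dZ2 = np.multiply(dA2, np.int64(Z2 > 0))             # g'(Z2) for ReLU: 1 where Z2 > 0
    dW2 = 1./m * np.dot(dZ2, A1.T)                       # extra "* 2" removed
    dZ1 = np.multiply(dA1, np.int64(Z1 > 0))             # g'(Z1) for ReLU: 1 where Z1 > 0
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)      # "4./m" corrected to "1./m"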
    