  • Simple Neural Network in TensorFlow

    #Notes from learning TensorFlow
    import tensorflow as tf
     
    #Define variables
    #Variable defines a tensor with the given shape
    w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
    w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
    with tf.Session() as sess:
        print(sess.run(w1.initializer))
        print(sess.run(w2.initializer))
    #None
    #None
     
    #Print the tensors to inspect their shape and other metadata
    print(w1)
    print(w2)
    #<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32_ref>
    #<tf.Variable 'Variable_1:0' shape=(3, 1) dtype=float32_ref>
     
    #tf.constant is an operation whose result is a tensor, stored in the Python variable x
    x = tf.constant([[0.7, 0.9]])
    print(x)
    #Tensor("Const:0", shape=(1, 2), dtype=float32)
    with tf.Session() as sess:
        print(sess.run(x))
    #[[ 0.69999999  0.89999998]]
     
    #Define the forward pass of the network
    #matmul performs matrix multiplication
    a = tf.matmul(x, w1)   # x shape=(1, 2)   w1 shape=(2, 3)
    print(a)
    #Tensor("MatMul:0", shape=(1, 3), dtype=float32)
     
    y = tf.matmul(a, w2)   # a shape=(1, 3)   w2 shape=(3, 1)
    print(y)
    #Tensor("MatMul_1:0", shape=(1, 1), dtype=float32)
     
    #Open a session to evaluate the results
    with tf.Session() as sess:
        sess.run(w1.initializer)
        sess.run(w2.initializer)
        print(sess.run(a))
        #[[-2.76635647  1.12854266  0.57783246]]
        print(sess.run(y))
        #[[ 3.95757794]]
     
    #placeholder: feed input values at run time instead of baking them into the graph
    x = tf.placeholder(tf.float32, shape=(1, 2), name="input")
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
     
    print(sess.run(y, feed_dict={x: [[0.8, 0.9]]}))
    #[[ 4.2442317]]
    x = tf.placeholder(tf.float32, shape=(3, 2), name="input")
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
     
    sess = tf.Session()
    #Use tf.global_variables_initializer() to initialize all variables at once
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
     
    print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))
     
    '''
    [[ 3.95757794]
     [ 1.15376544]
     [ 3.16749239]]
    '''
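    Note: all of the code in this post uses the TensorFlow 1.x graph-and-session API (tf.Session, tf.placeholder, tf.global_variables_initializer). Under TensorFlow 2.x these names have moved; a minimal sketch of the same flow, assuming the tf.compat.v1 compatibility layer that ships with TF 2.x, looks like this:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()  # restore 1.x-style graph/session semantics

    w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
    w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
    x = tf.placeholder(tf.float32, shape=(None, 2), name="input")
    y = tf.matmul(tf.matmul(x, w1), w2)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[0.7, 0.9]]}))

    With that import swap and disable_eager_execution(), the remaining examples should carry over unchanged.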




  • Full Neural Network Implementation

    import tensorflow as tf
    from numpy.random import RandomState
    
    #Define the network parameters and the input/output nodes
    batch_size = 8
    #Weights drawn from a normal distribution with mean 0 and stddev 1; shapes 2*3 and 3*1
    w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
    w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
    #The first dimension is None so the batch size is inferred from the data that is fed in
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))
     
    #Define the forward pass, the loss function, and the backpropagation step
     
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
    #Loss function: cross entropy
    #Optimizer: AdamOptimizer
    cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
     
    rdm = RandomState(1)
    #Generate 128 random samples, shape 128*2
    X = rdm.rand(128, 2)
     
    #Y is simulated: a sample is labeled 1 when x1 + x2 < 1, and 0 otherwise
    Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]
     
    #Create a session and run the computation graph
    #Initialize all variables
    STEPS = 5000
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Print the parameter values before training.
        print("w1:", sess.run(w1))
        print("w2:", sess.run(w2))
        print(" ")
        for i in range(STEPS):
            # Cycle through the 128 samples in batches of batch_size
            start = (i * batch_size) % 128
            end = (i * batch_size) % 128 + batch_size
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
            if i % 1000 == 0:
                total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
                print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))
        # Print the parameter values after training.
        print(" ")
        print("w1:", sess.run(w1))
        print("w2:", sess.run(w2))
     
    '''
    w1: [[-0.81131822  1.48459876  0.06532937]
     [-2.4427042   0.0992484   0.59122431]]
    w2: [[-0.81131822]
     [ 1.48459876]
     [ 0.06532937]]
     
     
    After 0 training step(s), cross entropy on all data is 0.0674925
    After 1000 training step(s), cross entropy on all data is 0.0163385
    After 2000 training step(s), cross entropy on all data is 0.00907547
    After 3000 training step(s), cross entropy on all data is 0.00714436
    After 4000 training step(s), cross entropy on all data is 0.00578471
     
     
    w1: [[-1.96182752  2.58235407  1.68203771]
     [-3.46817183  1.06982315  2.11788988]]
    w2: [[-1.82471502]
     [ 2.68546653]
     [ 1.41819501]]
     
    Process finished with exit code 0
    '''
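    A caveat on the loss: cross_entropy above applies tf.log directly to the raw output y (hence the clip_by_value guard) and includes only the y_ = 1 term of the binary cross entropy. A common, numerically stabler alternative, shown here as a sketch rather than as the original author's code, is to treat y as a logit and let TensorFlow handle the stability internally:

    # Assumed variant, not in the original post: interpret the network
    # output as a logit and compute a numerically stable binary cross
    # entropy with the built-in op.
    cross_entropy = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)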
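    The script only reports cross entropy. One illustrative addition (hypothetical, not part of the original post) is to threshold the raw outputs at 0.5 and compare against the labels, appended at the end of the with tf.Session() block above:

        # Hypothetical accuracy check, run inside the training session:
        import numpy as np
        pred = sess.run(y, feed_dict={x: X})   # raw network outputs, shape (128, 1)
        accuracy = np.mean((pred > 0.5).astype(int) == np.array(Y))
        print("training accuracy: %g" % accuracy)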