  • A simple neural network implemented in TensorFlow

    Notes from learning TensorFlow

    import tensorflow as tf
    
    #Define the variables
    #tf.Variable creates a variable tensor with the given shape
    w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
    w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
    with tf.Session() as sess:
        print(sess.run(w1.initializer))
        print(sess.run(w2.initializer))
    #None
    #None
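    #(running an initializer op assigns the variable its initial value; the op
    # itself has no output tensor, so sess.run returns None)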
    
    
    
    #Print the tensors to inspect their shape, dtype, and other metadata
    print(w1)
    print(w2)
    #<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32_ref>
    #<tf.Variable 'Variable_1:0' shape=(3, 1) dtype=float32_ref>
    
    #tf.constant is an operation whose result is a tensor, held here in the Python variable x
    x = tf.constant([[0.7, 0.9]])
    print(x)
    #Tensor("Const:0", shape=(1, 2), dtype=float32)
    with tf.Session() as sess:
        print(sess.run(x))
    #[[ 0.69999999  0.89999998]]
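    #(0.7 and 0.9 have no exact float32 representation, hence the rounded values printed)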
    
    
    #Define the forward propagation of the network
    #tf.matmul performs matrix multiplication
    a = tf.matmul(x, w1)   # x shape=(1, 2)   w1 shape=(2, 3)
    
    print(a)
    #Tensor("MatMul:0", shape=(1, 3), dtype=float32)
    
    y = tf.matmul(a, w2)  #a shape=(1, 3)   w2 shape=(3, 1)
    print(y)
    #Tensor("MatMul_1:0", shape=(1, 1), dtype=float32)
    
    
    #Evaluate the results in a session
    with tf.Session() as sess:
        sess.run(w1.initializer)
        sess.run(w2.initializer)
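        # each sess.run call executes only the part of the graph needed for the
        # requested tensor, so evaluating y below recomputes a's matmul as well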
        print(sess.run(a))
        #[[-2.76635647  1.12854266  0.57783246]]
        print(sess.run(y))
        #[[ 3.95757794]]
    
    #placeholder: declare an input that is fed at run time through feed_dict
    x=tf.placeholder(tf.float32,shape=(1,2),name="input")
    a=tf.matmul(x,w1)
    y=tf.matmul(a,w2)
    sess=tf.Session()
    init_op=tf.global_variables_initializer()
    sess.run(init_op)
    
    print(sess.run(y,feed_dict={x:[[0.8,0.9]]}))
    #[[ 4.2442317]]
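    #Redefining the placeholder with a fixed first dimension of 3 feeds three examples per run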
    x = tf.placeholder(tf.float32, shape=(3, 2), name="input")
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
    
    sess = tf.Session()
    #tf.global_variables_initializer() initializes all variables in one op
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    print(sess.run(y, feed_dict={x: [[0.7,0.9],[0.1,0.4],[0.5,0.8]]}))
    
    '''
    [[ 3.95757794]
     [ 1.15376544]
     [ 3.16749239]]
    '''
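    A note on the shapes above: instead of redefining the placeholder for every
    batch size, shape=(None, 2) leaves the batch dimension open, so a single
    graph handles both cases. A minimal sketch of that variant (same TF 1.x API,
    reusing the w1 and w2 defined earlier):

    x = tf.placeholder(tf.float32, shape=(None, 2), name="input")
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        #the same tensors now accept one example or three
        print(sess.run(y, feed_dict={x: [[0.8, 0.9]]}))
        print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))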
    

      A complete neural network implementation

    import tensorflow as tf
    from numpy.random import RandomState
    #Define the network parameters, inputs, and labels
    batch_size=8
    #Weights drawn from a normal distribution with mean 0 and stddev 1; shapes 2x3 and 3x1
    w1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))
    w2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))
    #None leaves the batch dimension open to match whatever batch is fed in
    x=tf.placeholder(tf.float32,shape=(None,2))
    y_=tf.placeholder(tf.float32,shape=(None,1))
    
    #Define the forward pass, the loss function, and the training step
    
    a=tf.matmul(x,w1)
    y=tf.matmul(a,w2)
    #Loss: cross entropy between the labels y_ and the predictions y
    #clip_by_value keeps y inside [1e-10, 1.0] so tf.log never receives 0
    #Optimizer: Adam with learning rate 0.001
    cross_entropy=-tf.reduce_mean(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
    train_step=tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
    
    rdm=RandomState(1)
    #Generate 128 random samples, shape 128x2
    X=rdm.rand(128,2)
    
    #Simulated labels: Y is 1 when x1+x2 < 1, and 0 otherwise
    Y=[[int(x1+x2<1)] for (x1,x2) in X]
    
    #Create a session and run the computation graph
    #Initialize all variables globally
    STEPS = 5000
    with tf.Session() as sess:
        init_op=tf.global_variables_initializer()
        sess.run(init_op)
        # Print the parameter values before any training.
        print("w1:", sess.run(w1))
        print("w2:", sess.run(w2))
        print("
    ")
        for i in range(STEPS):
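            # select the next batch of batch_size samples, cycling through all 128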
            start = (i * batch_size) % 128
            end = (i * batch_size) % 128 + batch_size
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
            if i % 1000 == 0:
                total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
                print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))
        # Print the parameter values after training.
        print("\n")
        print("w1:", sess.run(w1))
        print("w2:", sess.run(w2))
    
    '''
    
    w1: [[-0.81131822  1.48459876  0.06532937]
     [-2.4427042   0.0992484   0.59122431]]
    w2: [[-0.81131822]
     [ 1.48459876]
     [ 0.06532937]]
    
    
    After 0 training step(s), cross entropy on all data is 0.0674925
    After 1000 training step(s), cross entropy on all data is 0.0163385
    After 2000 training step(s), cross entropy on all data is 0.00907547
    After 3000 training step(s), cross entropy on all data is 0.00714436
    After 4000 training step(s), cross entropy on all data is 0.00578471
    
    
    w1: [[-1.96182752  2.58235407  1.68203771]
     [-3.46817183  1.06982315  2.11788988]]
    w2: [[-1.82471502]
     [ 2.68546653]
     [ 1.41819501]]
    
    Process finished with exit code 0
    '''
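    As a possible follow-up (not in the original post), the trained network can
    be checked against the generated labels. The sketch below assumes it runs
    inside the training session, after the loop; since the network has no output
    activation, thresholding the raw outputs at 0.5 is a rough but serviceable cut:

    import numpy as np

    #hypothetical check, placed inside the with-block after training:
    preds = sess.run(y, feed_dict={x: X})                    #raw outputs, shape (128, 1)
    acc = np.mean((preds > 0.5).astype(int) == np.array(Y))  #fraction of correct labels
    print("training-set accuracy: %g" % acc)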
    

      

  • Original post: https://www.cnblogs.com/vincentqliu/p/7806978.html