  • 莫烦 TensorFlow_08: Advanced TensorBoard visualization
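
    This example extends the earlier regression network with TensorBoard summaries: tf.summary.histogram records the weights, biases and outputs of each layer, and tf.summary.scalar records the loss, so both the graph and the training curves can be inspected in TensorBoard.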

    import tensorflow as tf  
    import numpy as np  
    import matplotlib.pyplot as plt  
      
     #
     # add layer
     #
    def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
      layer_name = 'layer%s' % n_layer
      with tf.name_scope(layer_name):
        with tf.name_scope('Weights'):
          Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')  # shape: [in_size, out_size]
          tf.summary.histogram(layer_name + '/weights', Weights)  # record the weights as a histogram; the bins cover the range of values
        with tf.name_scope('biases'):
          biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
          tf.summary.histogram(layer_name + '/biases', biases)  # note the tag path used for the histogram
        with tf.name_scope('Wx_plus_b'):
          Wx_plus_b = tf.matmul(inputs, Weights) + biases

        if activation_function is None:
          outputs = Wx_plus_b
        else:
          outputs = activation_function(Wx_plus_b)

        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
    #
    #make up some data
    #
    x_data = np.linspace(-1,1,300)[:, np.newaxis]
    noise  = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise
     
    #
    #define placeholder
    #
    with tf.name_scope('inputs'):
      xs = tf.placeholder(tf.float32, [None, 1], name='x_input')  # the name is what appears in the TensorBoard graph
      ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
    
    #add hidden layer
    l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
    #add output layer
    prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)
    
    #the error between prediction and real data  
    with tf.name_scope('loss'):
      loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                          reduction_indices=[1]))
      tf.summary.scalar('loss', loss)  # record the loss op as a scalar summary
      
    with tf.name_scope('train'):
      train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  
      
    sess = tf.Session()  
    merged = tf.summary.merge_all()  # merge all summaries so they can be evaluated in a single run
    writer = tf.summary.FileWriter("logs/", sess.graph)  # create the writer and save the graph
    
    # important step: initialize all variables
    sess.run(tf.global_variables_initializer() )
    
    #
    # Session
    #
    
    for i in range(1000):
      sess.run(train_step, feed_dict={xs:x_data, ys:y_data}) 
      if i % 50 == 0:
        result = sess.run(merged,  # one run evaluates every merged summary op, instead of running each one separately
                          feed_dict={xs: x_data, ys: y_data})
        
        writer.add_summary(result, i)  # write the summary for step i
        print(sess.run(loss, feed_dict={xs:x_data, ys:y_data}))
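
    Once the script has written its event files, the summaries can be viewed by running tensorboard --logdir logs from the directory containing the logs/ folder and opening http://localhost:6006 in a browser: the loss curve appears under the SCALARS tab, and the per-layer weights, biases and outputs under HISTOGRAMS / DISTRIBUTIONS.

    The script imports matplotlib but never uses it; as a small optional sketch (reusing the session, xs, prediction, x_data and y_data defined above), the fitted curve can also be drawn directly once training finishes:

    # optional sketch: compare the raw data with the network's final prediction
    prediction_value = sess.run(prediction, feed_dict={xs: x_data})
    plt.scatter(x_data, y_data, s=5, label='data')
    plt.plot(x_data, prediction_value, 'r-', lw=2, label='prediction')
    plt.legend()
    plt.show()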
    

      

  • Original post: https://www.cnblogs.com/alexYuin/p/8684244.html