zoukankan      html  css  js  c++  java
  • TensorFlow: saving and restoring model data

    #!/usr/bin/env python
    # coding: utf-8

    # Train a single-layer softmax classifier on MNIST and save the model
    # to net/my_net.ckpt with tf.train.Saver (TensorFlow 1.x graph API).

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the MNIST dataset (downloaded on first run); labels are one-hot.
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    # 100 images per batch
    batch_size = 100
    # Number of batches per epoch
    n_batch = mnist.train.num_examples // batch_size

    # Placeholders: flattened 28x28 images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    # A simple network: 784 input neurons -> 10 output neurons, no hidden layer.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # Keep the raw pre-softmax scores separate: the cross-entropy op below
    # applies softmax internally, so it must be fed logits, not probabilities.
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)

    # Quadratic cost (kept for reference):
    # loss = tf.reduce_mean(tf.square(y - prediction))
    # Cross-entropy loss. BUG FIX: the original passed the softmax output as
    # `logits`, applying softmax twice and flattening the gradients.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Gradient descent with learning rate 0.2
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Variable-initializer op
    init = tf.global_variables_initializer()

    # Boolean vector of per-example correctness; argmax returns the index of
    # the largest value along axis 1 (the predicted / true class).
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # Accuracy = mean of the 0/1 correctness values.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(11):
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

            acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
            print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
        # Save the trained model
        saver.save(sess, 'net/my_net.ckpt')

    # In[ ]:

    #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # coding: utf-8

    # Rebuild the same graph as the training script, then compare test accuracy
    # with freshly-initialized variables vs. variables restored from the
    # checkpoint saved at net/my_net.ckpt.

    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    # Load the MNIST dataset; labels are one-hot.
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    # 100 images per batch
    batch_size = 100
    # Number of batches per epoch (unused here, kept to mirror the training script)
    n_batch = mnist.train.num_examples // batch_size

    # Placeholders: flattened 28x28 images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    # The graph must match the training script exactly (same variable shapes
    # and names) or Saver.restore will fail.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # Raw pre-softmax scores; the cross-entropy op must receive logits,
    # not the softmax output (same fix as in the training script).
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)

    # Quadratic cost (kept for reference):
    # loss = tf.reduce_mean(tf.square(y - prediction))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Gradient descent with learning rate 0.2 (not run in this script)
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    # Variable-initializer op
    init = tf.global_variables_initializer()

    # Per-example correctness and accuracy, as in the training script.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init)
        # Accuracy with untrained (zero-initialized) weights: ~0.098.
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
        # Restore the trained weights and re-evaluate: accuracy jumps to ~0.92.
        saver.restore(sess, 'net/my_net.ckpt')
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))


    # In[ ]:





  • 相关阅读:
    日志管理
    LAMP源码编译安装
    实现LAMP架构
    mariadb-server安装问题(Error: MariaDB-common conflicts with 1:mariadb-libs-5.5.60-1.el7_5.x86_64)
    MySQL之八---Mysql实现数据库主从复制、主主复制、级联复制、半同步复制及复制监控
    httpd-2.4源码编译
    HTTPD之三----HTTPS加密技术及重定向
    HTTPD之二---HTTPD服务详解——httpd的配置文件常见设置
    HTTPD之一---HTTPD服务基础理论
    MySQL的MHA实现高可用性
  • 原文地址:https://www.cnblogs.com/rongye/p/10013413.html
Copyright © 2011-2022 走看看