  • Logistic regression implemented in TensorFlow

    logistic regression

    #!/usr/bin/python2.7
    #coding:utf-8
    
    from __future__ import print_function
    import tensorflow as tf
    
    # Import MNIST data
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("../Mnist_data/", one_hot=True)
    print(mnist)
    
    # Parameter settings
    learning_rate = 0.01
    training_epochs = 25 # number of training epochs
    batch_size = 100   # samples fed in per batch
    display_step = 1
    
    # set the tf Graph input: images flattened to 784 pixels, labels one-hot over 10 classes
    x = tf.placeholder(dtype=tf.float32, shape=[None,784], name="input_x")
    y = tf.placeholder(dtype=tf.float32, shape=[None,10],  name="input_y")
    
    # set the model's weights and bias
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
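    # Note: zero initialization suffices here because a single linear layer has
    # no symmetry to break; multi-layer nets need random weights (see the second example)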
    
    # Construct the model: softmax normalizes the logits into per-class probabilities
    pred = tf.nn.softmax(tf.matmul(x, W) + b)
    
    # Minimize the error using cross-entropy; reduction_indices=1 sums over the 10
    # classes of each example, and reduce_mean averages the loss over the batch
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
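    # Each minimize() step applies plain gradient descent:
    #   W <- W - learning_rate * d(cost)/dW   (and likewise for b)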
    
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    
    # Start training
    with tf.Session() as sess:
    
        # Run the initializer
        sess.run(init)
    
        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(mnist.train.num_examples/batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
                                                              y: batch_ys})
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if (epoch+1) % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
    
        print("Optimization Finished!")
    
        # Test model
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        # Calculate accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    
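    One caveat worth noting: taking tf.log of a softmax value that has underflowed to 0 yields -inf, so the hand-written cross-entropy above can turn into NaN during training. A minimal sketch of the usual remedy, assuming the same x, y, W and b as above and swapping in TensorFlow's fused loss op (the logits variable name is an illustration, not part of the original post):

    # Keep the raw logits and let the fused op apply softmax + log in one stable step
    logits = tf.matmul(x, W) + b
    pred = tf.nn.softmax(logits)  # still available for the accuracy check
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)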

    nonlinear regression with one hidden layer

    from __future__ import print_function
    
    import tensorflow as tf
    import numpy as np
    
    def add_layer(inputs, in_size, out_size, activation_function=None):
        # add one more layer and return the output of this layer
        Weights = tf.Variable(tf.random_normal([in_size, out_size]))
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs
    
    
    # 1. Training data: make up some noisy samples of y = x^2 - 0.5
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
    noise = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise
    
    # 2. Define placeholders to receive the inputs to the network
    xs = tf.placeholder(tf.float32, [None, 1])
    ys = tf.placeholder(tf.float32, [None, 1])
    
    # 3. Define the network layers: a hidden layer and a prediction layer
    # add hidden layer: the input is xs, and the hidden layer has 10 neurons
    l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
    
    # add output layer: the input is the hidden layer l1, and it outputs one value
    prediction = add_layer(l1, 10, 1, activation_function=None)
    
    # 4. Define the loss: the mean squared error between prediction and real data
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    
    # 5. Choose an optimizer to minimize the loss; gradient descent with learning rate 0.1
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    
    # important step: initialize all variables
    init = tf.global_variables_initializer()  # tf.initialize_all_variables() is deprecated
    
    with tf.Session() as sess:
        # nothing defined above is computed until sess.run is called
        sess.run(init)
        # run 1000 training iterations
        for epoch in range(1000):
            # train_step and loss are built on placeholders, so the data
            # is fed in through feed_dict
            _, cost = sess.run([train_step, loss], feed_dict={xs: x_data, ys: y_data})
            if (epoch + 1) % 50 == 0:
                # print the loss to watch the fit improve
                print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(cost))
    
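    Since everything runs inside the tf.Session, the fitted curve can be checked before the session closes by evaluating prediction on a few probe inputs. A minimal sketch, assuming it is placed inside the with block after the training loop (the probe points are an illustration, not part of the original post):

        # Query the trained network at a few points; the target curve is y = x^2 - 0.5
        probe = np.array([[-1.0], [0.0], [1.0]])
        fitted = sess.run(prediction, feed_dict={xs: probe})
        print("f(-1), f(0), f(1) ~", fitted.ravel())  # roughly 0.5, -0.5, 0.5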
  • Original post: https://www.cnblogs.com/narjaja/p/9507685.html