  • Using TensorFlow's high-level API (tf.contrib.learn) and the TensorBoard visualization tool

    I. TensorFlow's high-level machine learning API (tf.contrib.learn)

    1. tf.contrib.learn.datasets.base.load_csv_with_header: load data in CSV format

    2. tf.contrib.learn.DNNClassifier: build a DNN model (a classifier)

    3. classifier.fit: train the model

    4. classifier.evaluate: evaluate the model

    5. classifier.predict: predict on new samples

    Complete code:

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    import tensorflow as tf
    import numpy as np

    # Data sets
    IRIS_TRAINING = "iris_training.csv"
    IRIS_TEST = "iris_test.csv"

    # Load datasets.
    training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TRAINING,
        target_dtype=np.int,
        features_dtype=np.float32)
    test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TEST,
        target_dtype=np.int,
        features_dtype=np.float32)

    # Specify that all features have real-value data
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]

    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                hidden_units=[10, 20, 10],
                                                n_classes=3,
                                                model_dir="/tmp/iris_model")

    # Fit model.
    classifier.fit(x=training_set.data,
                   y=training_set.target,
                   steps=2000)

    # Evaluate accuracy.
    accuracy_score = classifier.evaluate(x=test_set.data,
                                         y=test_set.target)["accuracy"]
    print('Accuracy: {0:f}'.format(accuracy_score))

    # Classify two new flower samples.
    new_samples = np.array(
        [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
    y = list(classifier.predict(new_samples, as_iterable=True))
    print('Predictions: {}'.format(str(y)))

     Result:

    Accuracy: 0.966667
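
    The predictions come back as integer class indices. As a small follow-up sketch, they can be mapped to species names (the index-to-species order below is an assumption matching the label encoding of the tutorial's Iris CSV files):

    # Assumed label order of the tutorial's Iris CSVs: 0, 1, 2.
    SPECIES = ["setosa", "versicolor", "virginica"]
    for sample, label in zip(new_samples, y):
      print('{} -> {}'.format(sample, SPECIES[label]))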

    II. Creating an input function (an input-preprocessing function) in tf.contrib.learn

    Format:

    def my_input_fn():
      # Preprocess your data here...

      # ...then return 1) a mapping of feature columns to Tensors with
      # the corresponding feature data, and 2) a Tensor containing labels
      return feature_cols, labels
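
    A minimal, self-contained sketch of this contract (the feature names and values are made up for illustration):

    import tensorflow as tf

    def my_input_fn():
      # Map each feature column name to a Tensor of its values.
      feature_cols = {"sqft": tf.constant([1350.0, 2100.0]),
                      "rooms": tf.constant([3.0, 4.0])}
      # A Tensor of the corresponding labels.
      labels = tf.constant([250000.0, 410000.0])
      return feature_cols, labels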

    Complete code:

    #  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
    #
    #  Licensed under the Apache License, Version 2.0 (the "License");
    #  you may not use this file except in compliance with the License.
    #  You may obtain a copy of the License at
    #
    #   http://www.apache.org/licenses/LICENSE-2.0
    #
    #  Unless required by applicable law or agreed to in writing, software
    #  distributed under the License is distributed on an "AS IS" BASIS,
    #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    #  See the License for the specific language governing permissions and
    #  limitations under the License.
    """DNNRegressor with custom input_fn for Housing dataset."""

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    import itertools

    import pandas as pd
    import tensorflow as tf

    tf.logging.set_verbosity(tf.logging.INFO)

    COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
               "dis", "tax", "ptratio", "medv"]
    FEATURES = ["crim", "zn", "indus", "nox", "rm",
                "age", "dis", "tax", "ptratio"]
    LABEL = "medv"


    def input_fn(data_set):
      feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
      labels = tf.constant(data_set[LABEL].values)
      return feature_cols, labels


    def main(unused_argv):
      # Load datasets
      training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
                                 skiprows=1, names=COLUMNS)
      test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
                             skiprows=1, names=COLUMNS)

      # Set of 6 examples for which to predict median house values
      prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
                                   skiprows=1, names=COLUMNS)

      # Feature cols
      feature_cols = [tf.contrib.layers.real_valued_column(k)
                      for k in FEATURES]

      # Build 2 layer fully connected DNN with 10, 10 units respectively.
      regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                                hidden_units=[10, 10],
                                                model_dir="/tmp/boston_model")

      # Fit
      regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)

      # Score accuracy
      ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
      loss_score = ev["loss"]
      print("Loss: {0:f}".format(loss_score))

      # Print out predictions
      y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
      # .predict() returns an iterator; convert to a list and print predictions
      predictions = list(itertools.islice(y, 6))
      print("Predictions: {}".format(str(predictions)))

    if __name__ == "__main__":
      tf.app.run()
    inputfunc_contrib_learn.py
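
    Note that fit, evaluate, and predict expect input_fn to be a callable taking no arguments, which is why input_fn is wrapped in a lambda above to bind each DataFrame. An equivalent sketch using functools.partial:

    import functools

    # Bind the training DataFrame so the resulting callable takes no arguments.
    regressor.fit(input_fn=functools.partial(input_fn, data_set=training_set),
                  steps=5000)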

    III. TensorFlow visualization (TensorBoard)

    Code:

    # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the 'License');
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an 'AS IS' BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    # ==============================================================================
    """A simple MNIST classifier which displays summaries in TensorBoard.
    This is an unimpressive MNIST model, but it is a good example of using
    tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
    naming summary tags so that they are grouped meaningfully in TensorBoard.
    It demonstrates the functionality of every TensorBoard dashboard.
    """
    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    import argparse
    import sys

    import tensorflow as tf

    from tensorflow.examples.tutorials.mnist import input_data

    FLAGS = None


    def train():
      # Import data
      mnist = input_data.read_data_sets(FLAGS.data_dir,
                                        one_hot=True,
                                        fake_data=FLAGS.fake_data)

      sess = tf.InteractiveSession()
      # Create a multilayer model.

      # Input placeholders
      with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

      with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, 10)

      # We can't initialize these variables to 0 - the network will get stuck.
      def weight_variable(shape):
        """Create a weight variable with appropriate initialization."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

      def bias_variable(shape):
        """Create a bias variable with appropriate initialization."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

      def variable_summaries(var):
        """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
        with tf.name_scope('summaries'):
          mean = tf.reduce_mean(var)
          tf.summary.scalar('mean', mean)
          with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
          tf.summary.scalar('stddev', stddev)
          tf.summary.scalar('max', tf.reduce_max(var))
          tf.summary.scalar('min', tf.reduce_min(var))
          tf.summary.histogram('histogram', var)

      def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
        """Reusable code for making a simple neural net layer.
        It does a matrix multiply, bias add, and then uses relu to nonlinearize.
        It also sets up name scoping so that the resultant graph is easy to read,
        and adds a number of summary ops.
        """
        # Adding a name scope ensures logical grouping of the layers in the graph.
        with tf.name_scope(layer_name):
          # This Variable will hold the state of the weights for the layer
          with tf.name_scope('weights'):
            weights = weight_variable([input_dim, output_dim])
            variable_summaries(weights)
          with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases)
          with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.summary.histogram('pre_activations', preactivate)
          activations = act(preactivate, name='activation')
          tf.summary.histogram('activations', activations)
          return activations

      hidden1 = nn_layer(x, 784, 500, 'layer1')

      with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        tf.summary.scalar('dropout_keep_probability', keep_prob)
        dropped = tf.nn.dropout(hidden1, keep_prob)

      # Do not apply softmax activation yet, see below.
      y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)

      with tf.name_scope('cross_entropy'):
        # The raw formulation of cross-entropy,
        #
        # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
        #                               reduction_indices=[1]))
        #
        # can be numerically unstable.
        #
        # So here we use tf.nn.softmax_cross_entropy_with_logits on the
        # raw outputs of the nn_layer above, and then average across
        # the batch.
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
          cross_entropy = tf.reduce_mean(diff)
      tf.summary.scalar('cross_entropy', cross_entropy)

      with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
            cross_entropy)

      with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
          correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
          accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
      tf.summary.scalar('accuracy', accuracy)

      # Merge all the summaries and write them out to
      # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
      merged = tf.summary.merge_all()
      train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
      test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
      tf.global_variables_initializer().run()

      # Train the model, and also write summaries.
      # Every 10th step, measure test-set accuracy, and write test summaries
      # All other steps, run train_step on training data, & add training summaries

      def feed_dict(train):
        """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
        if train or FLAGS.fake_data:
          xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
          k = FLAGS.dropout
        else:
          xs, ys = mnist.test.images, mnist.test.labels
          k = 1.0
        return {x: xs, y_: ys, keep_prob: k}

      for i in range(FLAGS.max_steps):
        if i % 10 == 0:  # Record summaries and test-set accuracy
          summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
          test_writer.add_summary(summary, i)
          print('Accuracy at step %s: %s' % (i, acc))
        else:  # Record train set summaries, and train
          if i % 100 == 99:  # Record execution stats
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            summary, _ = sess.run([merged, train_step],
                                  feed_dict=feed_dict(True),
                                  options=run_options,
                                  run_metadata=run_metadata)
            train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
            train_writer.add_summary(summary, i)
            print('Adding run metadata for', i)
          else:  # Record a summary
            summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
            train_writer.add_summary(summary, i)
      train_writer.close()
      test_writer.close()


    def main(_):
      if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
      tf.gfile.MakeDirs(FLAGS.log_dir)
      train()


    if __name__ == '__main__':
      parser = argparse.ArgumentParser()
      parser.add_argument('--fake_data', nargs='?', const=True, type=bool,
                          default=False,
                          help='If true, uses fake data for unit testing.')
      parser.add_argument('--max_steps', type=int, default=1000,
                          help='Number of steps to run trainer.')
      parser.add_argument('--learning_rate', type=float, default=0.001,
                          help='Initial learning rate')
      parser.add_argument('--dropout', type=float, default=0.9,
                          help='Keep probability for training dropout.')
      parser.add_argument('--data_dir', type=str,
                          default='/tmp/tensorflow/mnist/input_data',
                          help='Directory for storing input data')
      parser.add_argument('--log_dir', type=str,
                          default='/tmp/tensorflow/mnist/logs/mnist_with_summaries',
                          help='Summaries log directory')
      FLAGS, unparsed = parser.parse_known_args()
      tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
    mnist_with_summary.py

    Start TensorBoard: tensorboard --logdir=path/to/log-directory
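
    For the code above, the log directory defaults to /tmp/tensorflow/mnist/logs/mnist_with_summaries, so for example:

    tensorboard --logdir=/tmp/tensorflow/mnist/logs/mnist_with_summaries

    Then open http://localhost:6006 (TensorBoard's default port) in a browser to view the scalar, image, graph, and histogram dashboards produced by the summary ops.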

    Summary:

    1. The main focus is the use of the high-level API tf.contrib.learn;

    2. This is a first look at how to use TensorBoard;

    3. Website: Google's TensorFlow Playground

    References: Google's official TensorFlow documentation
