Basic concepts in practice:
import tensorflow as tf
# Create a constant op
m1 = tf.constant([[3,3]])
# Create another constant op
m2 = tf.constant([[2],[3]])
# Create a matrix multiplication op, passing in m1 and m2
product = tf.matmul(m1,m2)
print(product)
Tensor("MatMul:0", shape=(1, 1), dtype=int32)
# Printing product only shows the Tensor's metadata; to get its value the op must be run in a session
# Define a session and launch the default graph
sess = tf.Session()
# Call sess.run to execute the matmul op;
# run(product) triggers the 3 ops in the graph (the two constants and the matmul)
result = sess.run(product)
print(result)
sess.close()
[[15]]
# The same computation with the session as a context manager, which closes it automatically
with tf.Session() as sess:
    # run(product) again triggers the 3 ops in the graph
    result = sess.run(product)
    print(result)
[[15]]
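As a side note, the same value can also be obtained without an explicit sess.run by calling Tensor.eval(); a minimal sketch, assuming TensorFlow 1.x, where tf.InteractiveSession installs itself as the default session:

import tensorflow as tf

m1 = tf.constant([[3,3]])
m2 = tf.constant([[2],[3]])
product = tf.matmul(m1,m2)
# InteractiveSession registers itself as the default session, so eval() knows where to run
sess = tf.InteractiveSession()
print(product.eval())  # [[15]]
sess.close()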
import tensorflow as tf
x = tf.Variable([1,2])
a = tf.constant([3,3])
# Add a subtraction op
sub = tf.subtract(x,a)
# Add an addition op
add = tf.add(x,a)
# Op that initializes all global variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Variables must be initialized before any op that uses them is run
    sess.run(init)
    print(sess.run(sub))
    print(sess.run(add))
[-2 -1]
[4 5]
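If sess.run(init) is forgotten, running sub or add fails because x is uninitialized (TensorFlow 1.x typically raises a FailedPreconditionError). A single variable can also be initialized on its own through its initializer op; a minimal sketch assuming TF 1.x:

import tensorflow as tf

x = tf.Variable([1,2])
a = tf.constant([3,3])
sub = tf.subtract(x,a)
with tf.Session() as sess:
    # Initialize only this one variable instead of all global variables
    sess.run(x.initializer)
    print(sess.run(sub))  # [-2 -1]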
# Create a variable initialized to 0
state = tf.Variable(0,name="counter")
# Create an op that adds 1 to state
new_value = tf.add(state,1)
# Assignment op: assigns new_value back to state
update = tf.assign(state,new_value)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for i in range(5):
        sess.run(update)
        print(sess.run(state))
0
1
2
3
4
5
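The add-then-assign pair can also be written as a single op with tf.assign_add, which increments the variable in place; a minimal sketch assuming TF 1.x:

import tensorflow as tf

state = tf.Variable(0,name="counter")
# assign_add increments state and returns the updated value
update = tf.assign_add(state,1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        print(sess.run(update))  # prints 1, 2, 3, 4, 5, one per line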
import tensorflow as tf
#Fetch
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2,input3)
mul = tf.multiply(input1,add)
with tf.Session() as sess:
    # Fetch: run can take a list of ops/tensors and returns all of their values at once
    result = sess.run([mul,add])
    print(result)
[21.0, 7.0]
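In TF 1.x, sess.run can also fetch a dict (or other nested structure) instead of a list, which keeps the returned values labeled; a minimal sketch:

import tensorflow as tf

input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2,input3)
mul = tf.multiply(input1,add)
with tf.Session() as sess:
    # The result mirrors the structure of the fetches argument
    print(sess.run({"mul": mul, "add": add}))  # {'mul': 21.0, 'add': 7.0}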
#Feed
# Create placeholders
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1,input2)
with tf.Session() as sess:
    # Feed: the data is passed in as a dict mapping placeholders to values
    print(sess.run(output,feed_dict={input1:[8.],input2:[2.]}))
[ 16.]
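Placeholders can also declare a shape so that mismatched feeds fail early; a minimal sketch assuming TF 1.x, using None for a batch dimension chosen at feed time:

import tensorflow as tf
import numpy as np

# None means the first dimension (batch size) is decided by whatever is fed in
input1 = tf.placeholder(tf.float32, shape=[None, 2])
input2 = tf.placeholder(tf.float32, shape=[None, 2])
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    a = np.array([[8., 1.], [2., 3.]])
    b = np.array([[2., 5.], [4., 6.]])
    print(sess.run(output, feed_dict={input1: a, input2: b}))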
import tensorflow as tf
import numpy as np
# Use numpy to generate 100 random points
x_data = np.random.rand(100)
y_data = x_data*0.1+0.2
# Build a linear model
b = tf.Variable(32.11)  # arbitrary float32 initial value
k = tf.Variable(88.23)  # arbitrary float32 initial value
y = k*x_data+b
# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y_data-y))
# Define an optimizer that trains with gradient descent (learning rate 0.2)
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Minimize the cost function
train = optimizer.minimize(loss)
# Op that initializes the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(2001):
        sess.run(train)
        if step%20==0:
            print(step,sess.run([loss,k,b]))
From the printed output we can see that after roughly 700 iterations the loss barely changes any more, and k and b have essentially settled near the true values 0.1 and 0.2.
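After training, the fitted parameters can be read out of the session and used for prediction with plain numpy. A minimal sketch, meant to be placed inside the same with block right after the training loop (k_fit, b_fit and x_new are names introduced here just for illustration):

    # Read the trained parameter values out of the graph
    k_fit, b_fit = sess.run([k, b])
    # Predict on new inputs and compare with the true line y = 0.1*x + 0.2
    x_new = np.array([0.0, 0.5, 1.0])
    y_pred = k_fit*x_new + b_fit
    print("predicted:", y_pred)
    print("true:     ", 0.1*x_new + 0.2)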