  • Commonly Used TF Knowledge

    Namespaces and Variable Sharing

    # coding=utf-8
    import tensorflow as tf
    
    with tf.variable_scope('V1') as scope:
        a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
        scope.reuse_variables()
        a3 = tf.get_variable('a1')
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(a1.name)
        print(sess.run(a1))
        print(a3.name)
        print(sess.run(a3))
    

    This is equivalent to:

    
    with tf.variable_scope('V1'):
        a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    
    with tf.variable_scope('V1', reuse=True):
        a3 = tf.get_variable('a1')
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(a1.name)
        print(sess.run(a1))
        print(a3.name)
        print(sess.run(a3))
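
    For contrast, a minimal sketch of what happens when reuse is not requested: tf.get_variable does not silently return the existing variable but raises a ValueError. This continues the snippet above; the scope name 'V2' is just for illustration.

    with tf.variable_scope('V2'):
        a1 = tf.get_variable('a1', shape=[1], initializer=tf.constant_initializer(1))
    
    try:
        with tf.variable_scope('V2'):   # reuse not set
            a3 = tf.get_variable('a1')  # raises ValueError: variable V2/a1 already exists
    except ValueError as e:
        print(e)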
    

    Mathematical Operations

    Basic math functions

    Any arithmetic operation between Tensors of the same size is applied element-wise.

    # Arithmetic operators: + - * / %
    tf.add(x, y, name=None)        # addition (supports broadcasting)
    tf.subtract(x, y, name=None)   # subtraction
    tf.multiply(x, y, name=None)   # multiplication
    tf.divide(x, y, name=None)     # floating-point division, returns a float (Python 3 division)
    tf.mod(x, y, name=None)        # remainder
    
    # Power / exponential / logarithm operators: ^ ^2 ^0.5 e^ ln
    tf.pow(x, y, name=None)        # power
    tf.square(x, name=None)        # square
    tf.sqrt(x, name=None)          # square root; input must be float or complex
    tf.exp(x, name=None)           # e raised to the power x
    tf.log(x, name=None)           # natural logarithm; input must be float or complex
    
    # Sign, negation, reciprocal, absolute value, rounding, element-wise max/min
    tf.negative(x, name=None)      # negation (y = -x)
    tf.sign(x, name=None)          # sign of x
    tf.reciprocal(x, name=None)    # reciprocal
    tf.abs(x, name=None)           # absolute value
    tf.round(x, name=None)         # round to the nearest integer
    tf.ceil(x, name=None)          # round up
    tf.floor(x, name=None)         # round down
    tf.rint(x, name=None)          # round to the nearest integer
    tf.maximum(x, y, name=None)    # element-wise maximum (x > y ? x : y)
    tf.minimum(x, y, name=None)    # element-wise minimum (x < y ? x : y)
    
    # Trigonometric and inverse trigonometric functions
    tf.cos(x, name=None)    
    tf.sin(x, name=None)    
    tf.tan(x, name=None)    
    tf.acos(x, name=None)
    tf.asin(x, name=None)
    tf.atan(x, name=None)   
    
    # Others
    tf.div(x, y, name=None)       # Python 2.7 division: x/y --> int, or x/float(y) --> float
    tf.truediv(x, y, name=None)   # Python 3 division: x/y --> float
    tf.floordiv(x, y, name=None)  # Python 3 floor division: x//y --> int
    tf.realdiv(x, y, name=None)
    tf.truncatediv(x, y, name=None)
    tf.floor_div(x, y, name=None)
    tf.truncatemod(x, y, name=None)
    tf.floormod(x, y, name=None)
    tf.cross(x, y, name=None)
    tf.add_n(inputs, name=None)   # inputs: a list of Tensor objects, each with the same shape and type
    tf.squared_difference(x, y, name=None)  # (x - y)^2, element-wise
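
    As a quick runnable check of the element-wise behaviour described above (a minimal sketch; the values are purely illustrative):

    import tensorflow as tf
    
    x = tf.constant([[1., 2.], [3., 4.]])
    y = tf.constant([[5., 6.], [7., 8.]])
    
    with tf.Session() as sess:
        print(sess.run(tf.add(x, y)))       # [[ 6.  8.] [10. 12.]]
        print(sess.run(tf.multiply(x, y)))  # [[ 5. 12.] [21. 32.]]
        print(sess.run(tf.sqrt(x)))         # element-wise square root
        print(sess.run(tf.maximum(x, 3.)))  # scalar is broadcast: [[3. 3.] [3. 4.]]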
    

    Matrix functions

    # Matrix multiplication (tensors of rank >= 2)
    tf.matmul(a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None)
    
    # Transpose; the axis order can be changed via perm, e.g. perm=[1, 0]
    tf.transpose(a, perm=None, name='transpose')
    
    # Transpose the last two dimensions of tensor a
    tf.matrix_transpose(a, name='matrix_transpose')
    # Matrix with two batch dimensions, x.shape is [1, 2, 3, 4]
    # tf.matrix_transpose(x) is shape [1, 2, 4, 3]
    
    # Trace of a matrix
    tf.trace(x, name=None)
    
    # Determinant of a square matrix
    tf.matrix_determinant(input, name=None)
    
    # Inverse of an invertible square matrix; input must be float or complex
    tf.matrix_inverse(input, adjoint=None, name=None)
    
    # Singular value decomposition
    tf.svd(tensor, full_matrices=False, compute_uv=True, name=None)
    
    # QR decomposition
    tf.qr(input, full_matrices=None, name=None)
    
    # Norm of a tensor (defaults to the Euclidean 2-norm)
    tf.norm(tensor, ord='euclidean', axis=None, keep_dims=False, name=None)
    
    # Build one identity matrix, or a batch of them; batch_shape is passed as a list
    tf.eye(num_rows, num_columns=None, batch_shape=None, dtype=tf.float32, name=None)
    # Construct one identity matrix.
    tf.eye(2)
    ==> [[1., 0.],
         [0., 1.]]
    
    # Construct a batch of 3 identity matrices, each 2 x 2.
    # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
    batch_identity = tf.eye(2, batch_shape=[3])
    
    # Construct one 2 x 3 "identity" matrix
    tf.eye(2, num_columns=3)
    ==> [[ 1.,  0.,  0.],
         [ 0.,  1.,  0.]]
    
    # Build a diagonal matrix; rank = 2*rank(diagonal)
    tf.diag(diagonal, name=None)
    # 'diagonal' is [1, 2, 3, 4]
    tf.diag(diagonal) ==> [[1, 0, 0, 0]
                           [0, 2, 0, 0]
                           [0, 0, 3, 0]
                           [0, 0, 0, 4]]
    # Others
    tf.diag_part
    tf.matrix_diag
    tf.matrix_diag_part
    tf.matrix_band_part
    tf.matrix_set_diag
    tf.cholesky
    tf.cholesky_solve
    tf.matrix_solve
    tf.matrix_triangular_solve
    tf.matrix_solve_ls
    tf.self_adjoint_eig
    tf.self_adjoint_eigvals
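
    A minimal runnable sketch of the most common of these (the matrices are illustrative):

    import tensorflow as tf
    
    a = tf.constant([[1., 2., 3.], [4., 5., 6.]])       # shape [2, 3]
    b = tf.constant([[7., 8.], [9., 10.], [11., 12.]])  # shape [3, 2]
    
    with tf.Session() as sess:
        print(sess.run(tf.matmul(a, b)))      # shape [2, 2]
        print(sess.run(tf.transpose(a)))      # shape [3, 2]
        print(sess.run(tf.matrix_determinant(tf.eye(2))))  # 1.0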
    

    Reduction operations

    # Sum of all elements of the input tensor, or of the elements along the specified axes
    tf.reduce_sum(input_tensor, axis=None, keep_dims=False, name=None)
    # 'x' is [[1, 1, 1]
    #         [1, 1, 1]]
    tf.reduce_sum(x) ==> 6
    tf.reduce_sum(x, 0) ==> [2, 2, 2]
    tf.reduce_sum(x, 1) ==> [3, 3]
    tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]  # dimensions are not collapsed
    tf.reduce_sum(x, [0, 1]) ==> 6
    
    # Mean / max / min / product / logical AND / logical OR over all elements of the input tensor,
    # or along the specified axes (just like reduce_sum)
    tf.reduce_mean(input_tensor, axis=None, keep_dims=False, name=None)
    tf.reduce_max(input_tensor, axis=None, keep_dims=False, name=None)
    tf.reduce_min(input_tensor, axis=None, keep_dims=False, name=None)
    tf.reduce_prod(input_tensor, axis=None, keep_dims=False, name=None)
    tf.reduce_all(input_tensor, axis=None, keep_dims=False, name=None)  # all elements are True (logical AND)
    tf.reduce_any(input_tensor, axis=None, keep_dims=False, name=None)  # at least one element is True (logical OR)
    
    -------------------------------------------
    # Everything above this divider matches the corresponding NumPy usage exactly
    -------------------------------------------
    
    # inputs is a list; computes the element-wise sum over all tensors in it.
    # tf.add(x, y, name=None) only adds two tensors; this function extends that to a list.
    tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)
    
    # Computes log(sum(exp(elements across dimensions of a tensor)))
    tf.reduce_logsumexp(input_tensor, axis=None, keep_dims=False, name=None)
    
    # Computes number of nonzero elements across dimensions of a tensor
    tf.count_nonzero(input_tensor, axis=None, keep_dims=False, name=None)
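
    A small runnable sketch of the two less-common reductions above (illustrative values):

    import tensorflow as tf
    
    a = tf.constant([[1, 2], [3, 4]])
    b = tf.constant([[5, 0], [0, 6]])
    
    with tf.Session() as sess:
        # element-wise sum over a list of same-shape tensors
        print(sess.run(tf.accumulate_n([a, b, a])))  # [[ 7  4] [ 6 14]]
        # number of nonzero entries
        print(sess.run(tf.count_nonzero(b)))         # 2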
    

    Scan

    # Compute the cumulative sum of the tensor x along axis
    tf.cumsum(x, axis=0, exclusive=False, reverse=False, name=None)
    # Eg:
    tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
    tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
    tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
    tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
    
    # Compute the cumulative product of the tensor x along axis
    tf.cumprod(x, axis=0, exclusive=False, reverse=False, name=None)
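
    A concrete runnable version of the symbolic examples above (illustrative values):

    import tensorflow as tf
    
    x = tf.constant([1, 2, 3, 4])
    
    with tf.Session() as sess:
        print(sess.run(tf.cumsum(x)))                 # [ 1  3  6 10]
        print(sess.run(tf.cumprod(x, reverse=True)))  # [24 24 12  4]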
    

    Segmentation

    # Computes the sum/mean/max/min/prod along segments of a tensor
    tf.segment_sum(data, segment_ids, name=None)
    # Eg:
    m = tf.constant([5, 1, 7, 2, 3, 4, 1, 3])
    s_id = [0, 0, 0, 1, 2, 2, 3, 3]
    sess.run(tf.segment_sum(m, segment_ids=s_id))  # assumes an active Session `sess`
    # => array([13,  2,  7,  4], dtype=int32)
    
    tf.segment_mean(data, segment_ids, name=None)
    tf.segment_max(data, segment_ids, name=None)
    tf.segment_min(data, segment_ids, name=None)
    tf.segment_prod(data, segment_ids, name=None)
    
    # Others
    tf.unsorted_segment_sum
    tf.sparse_segment_sum
    tf.sparse_segment_mean
    tf.sparse_segment_sqrt_n
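
    Of the variants above, tf.unsorted_segment_sum additionally takes num_segments and does not require the ids to be sorted. A minimal sketch (illustrative values):

    import tensorflow as tf
    
    data = tf.constant([5, 1, 7, 2])
    ids = tf.constant([1, 0, 1, 0])  # ids need not be sorted
    
    with tf.Session() as sess:
        print(sess.run(tf.unsorted_segment_sum(data, ids, num_segments=2)))
        # [ 3 12]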
    

    Splitting

    tf.split(value, num_or_size_splits, axis=0, num=None, name='split')
    # 'value' is a tensor with shape [5, 30]
    # Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
    split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
    tf.shape(split0)  # [5, 4]
    tf.shape(split1)  # [5, 15]
    tf.shape(split2)  # [5, 11]
    # Split 'value' into 3 tensors along dimension 1
    split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
    tf.shape(split0)  # [5, 10]
    
    tf.slice(input_, begin, size, name=None)
    t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                     [[3, 3, 3], [4, 4, 4]],
                     [[5, 5, 5], [6, 6, 6]]])
    tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
    tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
                                       #   [4, 4, 4]]]
    tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
                                       #  [[5, 5, 5]]]
    

    Sequence Comparison and Index Extraction

    # Compare two lists (or strings): returns the values that appear in x but not in y, plus their indices in x
    tf.setdiff1d(x, y, index_dtype=tf.int32, name=None)
    
    # Returns a tensor of the unique values in x, plus, for each element of x, its index into that tensor
    tf.unique(x, out_idx=None, name=None)
    
    # x if condition else y; condition is boolean, e.g. built with tf.equal()
    # x and y must have the same shape and dtype
    tf.where(condition, x=None, y=None, name=None)
    
    # Index of the maximum / minimum value along the given axis
    tf.argmax(input, axis=None, name=None, output_type=tf.int64)
    tf.argmin(input, axis=None, name=None, output_type=tf.int64)
    
    # The values of x are used as indices into y, and range(len(x)) as the values:
    # y[x[i]] = i for i in [0, 1, ..., len(x) - 1]
    tf.invert_permutation(x, name=None)
    
    # Others
    tf.edit_distance
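
    A brief runnable sketch of a few of these (illustrative values):

    import tensorflow as tf
    
    x = tf.constant([1, 2, 2, 3, 1])
    vals, idx = tf.unique(x)
    
    cond = tf.constant([True, False, True])
    a = tf.constant([1, 2, 3])
    b = tf.constant([10, 20, 30])
    
    with tf.Session() as sess:
        print(sess.run(vals))                  # [1 2 3]
        print(sess.run(idx))                   # [0 1 1 2 0]
        print(sess.run(tf.where(cond, a, b)))  # [ 1 20  3]
        print(sess.run(tf.argmax(a)))          # 2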
    

    Common Functions

    tf.concat

    Concatenates a list of tensors along a given dimension, much like numpy.concatenate. Example from the official docs:

    t1 = [[1, 2, 3], [4, 5, 6]]
    t2 = [[7, 8, 9], [10, 11, 12]]
    tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
    tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
     
    # tensor t3 with shape [2, 3]
    # tensor t4 with shape [2, 3]
    tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
    

    Plain Python lists are accepted too, as long as they are tensor-like (as with t1 and t2 above); tf.concat still returns a Tensor.

    tf.gather

    Similar to array indexing: extracts the values at the given indices. tf.gather indexes along a single dimension only (the first one), so for a 2-D tensor it selects whole rows.

    import tensorflow as tf 
     
    a = tf.Variable([[1,2,3,4,5], [6,7,8,9,10], [11,12,13,14,15]])
    index_a = tf.Variable([0,2])
     
    b = tf.Variable([1,2,3,4,5,6,7,8,9,10])
    index_b = tf.Variable([2,4,6,8])
     
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(tf.gather(a, index_a)))
        print(sess.run(tf.gather(b, index_b)))
    #  [[ 1  2  3  4  5]
    #   [11 12 13 14 15]]
    
    #  [3 5 7 9]
    

    tf.gather_nd

    Like tf.gather, but allows indexing across multiple dimensions.
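
    A minimal sketch: each row of indices is a full coordinate into params (illustrative values):

    import tensorflow as tf
    
    params = tf.constant([[1, 2], [3, 4], [5, 6]])
    indices = tf.constant([[0, 1], [2, 0]])  # one full coordinate per row
    
    with tf.Session() as sess:
        print(sess.run(tf.gather_nd(params, indices)))  # [2 5]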

    tf.greater

    A comparison function. Tensors x and y must have the same shape, and tf.greater(x, y) returns a tensor of that same shape: an entry is True where the element of x is greater than the corresponding element of y, and False otherwise. Similar functions include tf.less and tf.greater_equal.
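
    A quick runnable sketch (illustrative values):

    import tensorflow as tf
    
    x = tf.constant([1, 5, 3])
    y = tf.constant([2, 2, 3])
    
    with tf.Session() as sess:
        print(sess.run(tf.greater(x, y)))        # [False  True False]
        print(sess.run(tf.less(x, y)))           # [ True False False]
        print(sess.run(tf.greater_equal(x, y)))  # [False  True  True]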

    tf.cast

    a = tf.constant([0, 2, 0, 4, 2, 2], dtype='int32')
    print(a)
    # <tf.Tensor 'Const_1:0' shape=(6,) dtype=int32>
     
    b = tf.cast(a, 'float32')
    print(b)
    # <tf.Tensor 'Cast:0' shape=(6,) dtype=float32>
    

    tf.expand_dims & tf.squeeze

    Adds / squeezes out dimensions of size 1 in a tensor.

    a = tf.constant([0, 2, 0, 4, 2, 2], dtype='int32')
    print(a)
    # <tf.Tensor 'Const_1:0' shape=(6,) dtype=int32>
     
    b = tf.expand_dims(a, 0)
    print(b)
    # <tf.Tensor 'ExpandDims:0' shape=(1, 6) dtype=int32>
     
    print(tf.squeeze(b, 0))
    # <tf.Tensor 'Squeeze:0' shape=(6,) dtype=int32>
    

    Random Access and Traversal of Tensors

    As long as a tensor's size is known in advance, its entries can be traversed and processed with ordinary Python loops.

    import tensorflow as tf
    
    data = tf.constant([[1, 2, 3], [4, 5, 6]])
    rows, cols = data.get_shape().as_list()  # static shape: [2, 3]
    total = tf.convert_to_tensor(0)
    for i in range(rows):
        for j in range(cols):
            total = total + data[i][j]
    with tf.Session() as sess:
        print(sess.run(total))  # 21
    

    But when a tensor's size is only determined at run time (for example, input images of different sizes, or a varying number of bounding boxes), the element count cannot be fixed while the graph is being defined; the only option then is tf.while_loop, as in the sketch below.
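
    A minimal tf.while_loop sketch, assuming a placeholder-fed 1-D tensor whose length is unknown at graph-construction time (the summation task is just for illustration):

    import tensorflow as tf
    
    x = tf.placeholder(tf.int32, shape=[None])  # length unknown until run time
    n = tf.shape(x)[0]
    
    def cond(i, total):
        return i < n
    
    def body(i, total):
        return i + 1, total + x[i]
    
    _, total = tf.while_loop(cond, body, [tf.constant(0), tf.constant(0)])
    
    with tf.Session() as sess:
        print(sess.run(total, feed_dict={x: [1, 2, 3, 4]}))  # 10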

