zoukankan      html  css  js  c++  java
  • 几种模型评价指标实现代码

    import tensorflow as tf
    
    #精确率评价指标
    # Precision metric (for use in model.compile(metrics=[...]))
    def metric_precision(y_true, y_pred):
        """Batch-wise precision = TP / (TP + FP) for binary labels.

        y_true: ground-truth tensor of 0/1 values.
        y_pred: predicted probabilities in [0, 1]; rounded at 0.5 to
                obtain hard predictions.
        Returns a scalar tensor.
        """
        y_pred_bin = tf.round(y_pred)
        TP = tf.reduce_sum(y_true * y_pred_bin)
        FP = tf.reduce_sum((1 - y_true) * y_pred_bin)
        # epsilon guards against 0/0 (NaN) when nothing is predicted positive
        precision = TP / (TP + FP + tf.keras.backend.epsilon())
        return precision
    
    #召回率评价指标
    # Recall metric (for use in model.compile(metrics=[...]))
    def metric_recall(y_true, y_pred):
        """Batch-wise recall = TP / (TP + FN) for binary labels.

        y_true: ground-truth tensor of 0/1 values.
        y_pred: predicted probabilities in [0, 1]; rounded at 0.5 to
                obtain hard predictions.
        Returns a scalar tensor.
        """
        y_pred_bin = tf.round(y_pred)
        TP = tf.reduce_sum(y_true * y_pred_bin)
        FN = tf.reduce_sum(y_true * (1 - y_pred_bin))
        # epsilon guards against 0/0 (NaN) when the batch has no positives
        recall = TP / (TP + FN + tf.keras.backend.epsilon())
        return recall
    
    #F1-score评价指标
    # F1-score metric (for use in model.compile(metrics=[...]))
    def metric_F1score(y_true, y_pred):
        """Batch-wise F1 = 2 * P * R / (P + R) for binary labels.

        y_true: ground-truth tensor of 0/1 values.
        y_pred: predicted probabilities in [0, 1]; rounded at 0.5 to
                obtain hard predictions.
        Returns a scalar tensor.
        """
        eps = tf.keras.backend.epsilon()
        y_pred_bin = tf.round(y_pred)
        TP = tf.reduce_sum(y_true * y_pred_bin)
        FP = tf.reduce_sum((1 - y_true) * y_pred_bin)
        FN = tf.reduce_sum(y_true * (1 - y_pred_bin))
        # epsilon in every denominator: otherwise an all-negative batch
        # (TP = FP = FN = 0) yields NaN and poisons the running average
        precision = TP / (TP + FP + eps)
        recall = TP / (TP + FN + eps)
        F1score = 2 * precision * recall / (precision + recall + eps)
        return F1score

    #编译阶段引用自定义评价指标示例 model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', metric_precision, metric_recall, metric_F1score])

      

    # AUC for a binary classifier
    # Approximate AUC for a binary classifier via a Riemann sum over the ROC
    # curve sampled at 1000 thresholds.
    # NOTE(review): relies on `np` (numpy) and `K` (keras.backend) being
    # imported elsewhere in the file, and on the sibling helpers
    # binary_PTA / binary_PFA defined below — confirm those imports exist.
    def auc(y_true, y_pred):
        # True-positive rate and false-positive rate at each threshold k
        ptas = tf.stack([binary_PTA(y_true,y_pred,k) for k in np.linspace(0, 1, 1000)],axis=0)
        pfas = tf.stack([binary_PFA(y_true,y_pred,k) for k in np.linspace(0, 1, 1000)],axis=0)
        # Prepend FPR=1 (threshold 0 end of the ROC curve) so that the
        # difference below yields 1000 bin widths matching the 1000 ptas
        pfas = tf.concat([tf.ones((1,)) ,pfas],axis=0)
        # Bin widths along the FPR axis; negated because pfas decreases
        # as the threshold increases
        binSizes = -(pfas[1:]-pfas[:-1])
        # Rectangle areas: height (TPR) times width (delta FPR)
        s = ptas*binSizes
        return K.sum(s, axis=0)
    #-----------------------------------------------------------------------------------------------------------------------------------------------------
    # PFA, prob false alert for binary classifier
    # PFA: probability of false alarm (false positive rate) at a threshold.
    # NOTE(review): relies on `K` (keras.backend) being imported elsewhere.
    def binary_PFA(y_true, y_pred, threshold=0.5):
        """FPR = FP / N, thresholding y_pred at `threshold`.

        The original default `K.variable(value=0.5)` allocated a backend
        variable at import time (mutable-default pitfall); a plain float
        behaves identically inside the `>=` comparison.
        """
        y_pred = K.cast(y_pred >= threshold, 'float32')
        # N = total number of negative labels
        N = K.sum(1 - y_true)
        # FP = alerts raised on negative-class labels
        FP = K.sum(y_pred - y_pred * y_true)
        # epsilon guards against 0/0 when the batch has no negatives
        return FP / (N + K.epsilon())
    #-----------------------------------------------------------------------------------------------------------------------------------------------------
    # P_TA prob true alerts for binary classifier
    # PTA: probability of true alert (true positive rate) at a threshold.
    # NOTE(review): relies on `K` (keras.backend) being imported elsewhere.
    def binary_PTA(y_true, y_pred, threshold=0.5):
        """TPR = TP / P, thresholding y_pred at `threshold`.

        The original default `K.variable(value=0.5)` allocated a backend
        variable at import time (mutable-default pitfall); a plain float
        behaves identically inside the `>=` comparison.
        """
        y_pred = K.cast(y_pred >= threshold, 'float32')
        # P = total number of positive labels
        P = K.sum(y_true)
        # TP = correct alerts, raised on positive-class labels
        TP = K.sum(y_pred * y_true)
        # epsilon guards against 0/0 when the batch has no positives
        return TP / (P + K.epsilon())
     
    #接着在模型的compile中设置metrics
    

      

    # False Discovery Rate (FDR) = FP / (FP + TP), computed from a
    # sklearn confusion matrix. (The original lists contained a stray
    # double comma `0,0,,1` — a SyntaxError — fixed here.)
    from sklearn.metrics import confusion_matrix

    y_true = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    # Guard the degenerate case of zero predicted positives (fp + tp == 0)
    fdr = fp / (fp + tp) if (fp + tp) else 0.0

    print(fdr)
    

      

  • 相关阅读:
    [日常摸鱼]UVA393 The Doors 简单计算几何+最短路
    [日常摸鱼]bzoj3122 [Sdoi]2013 随机数生成器
    [日常摸鱼]积性函数求和——杜教筛
    [OI笔记]NOIP2017前(退役前)模拟赛的总结
    [日常摸鱼]poj2417 DiscreteLoggingBSGS算法
    [日常摸鱼]UVA11424&11426 GCD Extreme
    [日常摸鱼]JSOI2008最大数
    [日常摸鱼]HDU1724 Ellipse自适应Simpson法
    原码、补码、反码的作用和区别
    Fibonacci序列or兔子序列
  • 原文地址:https://www.cnblogs.com/ylHe/p/12149873.html
Copyright © 2011-2022 走看看