  • Machine Learning Notes 14: SVM in Practice and Classifier Performance Evaluation Metrics (with Python plotting techniques)

    1. Main Content

    2. Applications of SVM

    (1) Using SVM for classification problems

    Classifier performance evaluation metrics:

    Accuracy is the fraction of all samples classified correctly, (TP + TN) / (TP + FP + FN + TN). Precision is the fraction of samples predicted positive that really are positive, TP / (TP + FP). Recall is the fraction of actual positives that the classifier finds, TP / (TP + FN).

    Worked example (6 samples with TP = 3, FP = 2, FN = 1, TN = 0):

    accuracy = (3 + 0) / 6 = 0.5

    precision = 3 / 5 = 0.6

    recall = 3 / 4 = 0.75
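
    To check these numbers in code, scikit-learn's metric functions can be used. The label vectors below are hypothetical and chosen only so that they reproduce the confusion matrix of the worked example (TP = 3, FP = 2, FN = 1, TN = 0):

    from sklearn.metrics import accuracy_score, precision_score, recall_score

    # hypothetical labels that reproduce the example above
    y_true = [1, 1, 1, 1, 0, 0]   # 4 actual positives, 2 actual negatives
    y_pred = [1, 1, 1, 0, 1, 1]   # 5 predicted positives, 1 predicted negative

    print(accuracy_score(y_true, y_pred))    # 3/6 = 0.5
    print(precision_score(y_true, y_pred))   # 3/5 = 0.6
    print(recall_score(y_true, y_pred))      # 3/4 = 0.75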

    3. Code Examples

    (1) Iris SVM example

    #!/usr/bin/python
    # -*- coding:utf-8 -*-
    
    import numpy as np
    from sklearn import svm
    from sklearn.model_selection import train_test_split
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    
    def iris_type(s):
        it = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
        return it[s]
    
    
    iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'


    def show_accuracy(a, b, tip):
        # fraction of positions where the prediction matches the true label
        acc = a.ravel() == b.ravel()
        print(tip + ' accuracy:', np.mean(acc))
    
    
    if __name__ == "__main__":
        path = '8.iris.data'  # path to the data file
        data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
        x, y = np.split(data, (4,), axis=1)  # first 4 columns: features; last column: label
        x = x[:, :2]  # keep only the first two features so the decision regions can be plotted in 2-D
        x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, train_size=0.6)
    
        # classifier
        # clf = svm.SVC(C=0.1, kernel='linear', decision_function_shape='ovr')
        clf = svm.SVC(C=0.8, kernel='rbf', gamma=20, decision_function_shape='ovr')
        clf.fit(x_train, y_train.ravel())
    
        # accuracy
        print(clf.score(x_train, y_train))  # mean accuracy on the training set
        y_hat = clf.predict(x_train)
        show_accuracy(y_hat, y_train, 'training set')
        print(clf.score(x_test, y_test))  # mean accuracy on the test set
        y_hat = clf.predict(x_test)
        show_accuracy(y_hat, y_test, 'test set')
    
        # plotting
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of feature 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of feature 1
        x1, x2 = np.mgrid[x1_min:x1_max:500j, x2_min:x2_max:500j]  # grid of sample points
        grid_test = np.stack((x1.flat, x2.flat), axis=1)  # grid points to classify

        Z = clf.decision_function(grid_test)    # signed distances of the grid points to the decision surfaces
        print(Z)
        grid_hat = clf.predict(grid_test)       # predicted class for each grid point
        print(grid_hat)
        grid_hat = grid_hat.reshape(x1.shape)  # reshape to match the grid
        mpl.rcParams['font.sans-serif'] = [u'SimHei']  # font able to render CJK plot text
        mpl.rcParams['axes.unicode_minus'] = False     # render minus signs correctly with that font

        cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
        cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
        plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light)  # decision regions as the background

        plt.scatter(x[:, 0], x[:, 1], c=np.squeeze(y), edgecolors='k', s=50, cmap=cm_dark)  # all samples
        plt.scatter(x_test[:, 0], x_test[:, 1], s=120, facecolors='none', edgecolors='k', zorder=10)  # circle the test samples
        plt.xlabel(iris_feature[0], fontsize=13)
        plt.ylabel(iris_feature[1], fontsize=13)
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.title('Iris SVM classification on two features', fontsize=15)
        plt.grid()
        plt.show()

    Output figure:
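
    If the 8.iris.data file used above is not available, a rough alternative (not part of the original script) is to build the same x and y arrays from scikit-learn's bundled copy of the iris dataset:

    # alternative data loading; assumes only that scikit-learn is installed
    from sklearn.datasets import load_iris

    iris = load_iris()
    x = iris.data[:, :2]                          # first two features: sepal length, sepal width
    y = iris.target.astype(float).reshape(-1, 1)  # labels 0, 1, 2, in the same shape np.split produces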

    (2) SVM with different parameters on a two-class dataset

    #!/usr/bin/python
    # -*- coding:utf-8 -*-
    
    import numpy as np
    from sklearn import svm
    import matplotlib as mpl
    import matplotlib.colors
    import matplotlib.pyplot as plt
    
    
    def show_accuracy(a, b):
        acc = a.ravel() == b.ravel()
        print('accuracy: %.2f%%' % (100 * float(acc.sum()) / a.size))
    
    
    if __name__ == "__main__":
        data = np.loadtxt('14.bipartition.txt', dtype=float, delimiter='\t')
        x, y = np.split(data, (2, ), axis=1)  # first 2 columns: features; last column: label
        y[y == 0] = -1  # relabel class 0 as -1 so the labels are {-1, +1}
        y = y.ravel()
    
        # classifiers with different kernels and penalty parameters
        clfs = [svm.SVC(C=0.3, kernel='linear'),
                svm.SVC(C=10, kernel='linear'),
                svm.SVC(C=5, kernel='rbf', gamma=1),
                svm.SVC(C=5, kernel='rbf', gamma=4)]
        titles = 'Linear, C=0.3', 'Linear, C=10', 'RBF, gamma=1', 'RBF, gamma=4'
    
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of feature 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of feature 1
        x1, x2 = np.mgrid[x1_min:x1_max:500j, x2_min:x2_max:500j]  # grid of sample points
        grid_test = np.stack((x1.flat, x2.flat), axis=1)  # grid points to classify
    
        cm_light = matplotlib.colors.ListedColormap(['#77E0A0', '#FF8080'])
        cm_dark = matplotlib.colors.ListedColormap(['g', 'r'])
        matplotlib.rcParams['font.sans-serif'] = [u'SimHei']
        matplotlib.rcParams['axes.unicode_minus'] = False
        plt.figure(figsize=(10,8), facecolor='w')
        for i, clf in enumerate(clfs):
            clf.fit(x, y)
    
            y_hat = clf.predict(x)
            show_accuracy(y_hat, y)  # training accuracy (evaluated on the data the model was fitted on)

            # plotting
            print('number of support vectors:', clf.n_support_)
            print('dual coefficients of the support vectors:', clf.dual_coef_)
            print('indices of the support vectors:', clf.support_)
            print()
            plt.subplot(2, 2, i+1)
            grid_hat = clf.predict(grid_test)       # predicted class for each grid point
            grid_hat = grid_hat.reshape(x1.shape)  # reshape to match the grid
            plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light, alpha=0.8)
            plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', s=40, cmap=cm_dark)  # samples
            plt.scatter(x[clf.support_, 0], x[clf.support_, 1], edgecolors='k', facecolors='none', s=100, marker='o')   # circle the support vectors
            z = clf.decision_function(grid_test)  # signed distance to the decision boundary
            z = z.reshape(x1.shape)
            plt.contour(x1, x2, z, colors=list('krk'), linestyles=['--', '-', '--'], linewidths=[1, 2, 1], levels=[-1, 0, 1])  # margins (±1) and decision boundary (0)
            plt.xlim(x1_min, x1_max)
            plt.ylim(x2_min, x2_max)
            plt.title(titles[i])
            plt.grid()
        plt.suptitle('SVM classification with different parameters', fontsize=18)
        plt.tight_layout(pad=2)
        plt.subplots_adjust(top=0.92)
        plt.show()

    Output figure:
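
    Both scripts above report only accuracy, and the second one evaluates on the same data it was fitted on. The sketch below shows how the precision and recall metrics from section 2 could be applied to one of these SVMs on a held-out split; make_blobs is used as a stand-in because the 14.bipartition.txt file is not included with the post:

    import numpy as np
    from sklearn import svm
    from sklearn.datasets import make_blobs
    from sklearn.metrics import precision_score, recall_score
    from sklearn.model_selection import train_test_split

    # synthetic two-class data standing in for 14.bipartition.txt
    x, y = make_blobs(n_samples=200, centers=2, cluster_std=1.5, random_state=0)
    y = np.where(y == 0, -1, 1)   # same {-1, +1} labelling as in the script above

    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, train_size=0.6)
    clf = svm.SVC(C=5, kernel='rbf', gamma=1).fit(x_train, y_train)
    y_hat = clf.predict(x_test)
    print('precision:', precision_score(y_test, y_hat))  # TP / (TP + FP)
    print('recall:', recall_score(y_test, y_hat))         # TP / (TP + FN)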

  • Original post: https://www.cnblogs.com/luckyplj/p/12696183.html