  • Python text classification

    A previous post scraped news items in all 10 categories from 谣言百科 ("Rumor Encyclopedia") and saved each article as a plain-text file.

    Now let's classify that data. Here is the code:
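
    TextProcessing below treats every sub-folder of folder_path as one class and every file inside it as one document, so a directory layout along these lines is assumed (the folder and file names here are only placeholders, not taken from the original post):

    F:\test\demo\
        <class-1>\
            0.txt
            1.txt
            ...
        <class-2>\
            ...
    F:\test\stopword.txt    (one stop word per line)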

    # -*- coding: utf-8 -*-
    """
    Created on Fri Mar  9 14:18:49 2018
    
    @author: Administrator
    """
    
    import os
    import time
    import random
    import jieba
    import nltk
    import sklearn
    from sklearn.naive_bayes import MultinomialNB
    import numpy as np
    import pylab as pl
    import matplotlib.pyplot as plt
    
    
    def MakeWordsSet(words_file):
        words_set = set()
        with open(words_file, 'r', encoding='UTF-8') as fp:
            for line in fp.readlines():
                word = line.strip()
                if len(word) > 0 and word not in words_set:  # skip duplicates
                    words_set.add(word)
        return words_set
    
    def TextProcessing(folder_path, test_size=0.2):
        folder_list = os.listdir(folder_path)  # every sub-folder under folder_path is one class
        data_list = []   # tokenized documents
        class_list = []  # class label of each document
    
        # loop over all classes
        for folder in folder_list:
            new_folder_path = os.path.join(folder_path, folder)  # path of this class folder
            files = os.listdir(new_folder_path)  # all text files in this class folder
            # loop over the files of one class
            j = 0
            for file in files:
                if j > 410:  # cap the number of samples taken from each class
                    break
                with open(os.path.join(new_folder_path, file), 'r', encoding='UTF-8') as fp:
                    raw = fp.read()
                # print(raw)
                ## --------------------------------------------------------------------------------
                ## word segmentation with jieba

                word_cut = jieba.cut(raw, cut_all=False)  # accurate mode; returns an iterable generator
                word_list = list(word_cut)  # convert the generator to a list of tokens
    
                ## --------------------------------------------------------------------------------
                data_list.append(word_list)
                class_list.append(folder)
                j += 1
    
        ## split into training and test sets
        # train_data_list, test_data_list, train_class_list, test_class_list = sklearn.model_selection.train_test_split(data_list, class_list, test_size=test_size)
        data_class_list = list(zip(data_list, class_list))  # pair each document with its label
        random.shuffle(data_class_list)  # shuffle the (document, label) pairs in place
        index = int(len(data_class_list)*test_size)+1  # cut point: the first test_size of the shuffled data becomes the test set
        train_list = data_class_list[index:]  # remaining ~80% for training
        test_list = data_class_list[:index]   # first ~20% for testing
        train_data_list, train_class_list = zip(*train_list)  # unzip back into documents and labels
        test_data_list, test_class_list = zip(*test_list)
    
        # count word frequencies over the training set into all_words_dict
        all_words_dict = {}
        for word_list in train_data_list:
            for word in word_list:
                if word in all_words_dict:  
                    all_words_dict[word] += 1
                else:
                    all_words_dict[word] = 1
        # sort the (word, count) pairs by frequency, descending
        all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
        all_words_list = list(zip(*all_words_tuple_list))[0]  # keep only the words, ordered by frequency
    
        return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list
    
    
    def words_dict(all_words_list, deleteN, stopwords_set=set()):  # pick feature words: skip the deleteN most frequent words, then keep words that are not pure digits, not stop words, and 2-4 characters long
        
        feature_words = []
        n = 1
        for t in range(deleteN, len(all_words_list), 1):
            if n > 1000:  # cap feature_words at 1000 dimensions
                break
            # print(all_words_list[t])
            if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1 < len(all_words_list[t]) < 5:  # isdigit() checks whether the string consists only of digits
                feature_words.append(all_words_list[t])
                n += 1
        return feature_words
    
    
    def TextFeatures(train_data_list, test_data_list, feature_words, flag='nltk'):
        def text_features(text, feature_words):
            text_words = set(text)
            ## -----------------------------------------------------------------------------------
            if flag == 'nltk':
                ## nltk features: dict of {word: 0/1}
                features = {word:1 if word in text_words else 0 for word in feature_words}
            elif flag == 'sklearn':
                ## sklearn features: 0/1 list
                features = [1 if word in text_words else 0 for word in feature_words]
            else:
                features = []
            ## -----------------------------------------------------------------------------------
            return features
        train_feature_list = [text_features(text, feature_words) for text in train_data_list]
        test_feature_list = [text_features(text, feature_words) for text in test_data_list]
        return train_feature_list, test_feature_list
    
    
    def TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list, flag='nltk'):
        ## -----------------------------------------------------------------------------------
        if flag == 'nltk':
            ## nltk classifier
            train_flist = list(zip(train_feature_list, train_class_list))  # materialize the pairs: nltk iterates them more than once
            test_flist = list(zip(test_feature_list, test_class_list))
            classifier = nltk.classify.NaiveBayesClassifier.train(train_flist)

            test_accuracy = nltk.classify.accuracy(classifier, test_flist)
        elif flag == 'sklearn':
            ## sklearn classifier
            classifier = MultinomialNB().fit(train_feature_list, train_class_list)

            test_accuracy = classifier.score(test_feature_list, test_class_list)
        else:
            test_accuracy = []
        return test_accuracy
    
    
    if __name__ == '__main__':
    
        print("start")
    
        ## text preprocessing
        folder_path = r'F:\test\demo'  # raw string, otherwise \t would be read as a tab character
        all_words_list, train_data_list, test_data_list, train_class_list, test_class_list = TextProcessing(folder_path, test_size=0.2)

        # build the stop-word set
        stopwords_file = r'F:\test\stopword.txt'
        stopwords_set = MakeWordsSet(stopwords_file)
    
        ## text feature extraction and classification
        # flag = 'nltk'
        flag = 'sklearn'
        deleteNs = range(0, 1000, 20)
        test_accuracy_list = []
        for deleteN in deleteNs:
            # feature_words = words_dict(all_words_list, deleteN)
            feature_words = words_dict(all_words_list, deleteN, stopwords_set)  # feature words after skipping the deleteN most frequent ones
            train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words, flag)  # 0/1 feature vectors for the training and test sets
            test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list, flag)
            test_accuracy_list.append(test_accuracy)
        print(test_accuracy_list)
    
        # plot how test accuracy changes with deleteN
        plt.figure()
        plt.plot(deleteNs, test_accuracy_list)
        plt.title('Relationship of deleteNs and test_accuracy')
        plt.xlabel('deleteNs')
        plt.ylabel('test_accuracy')
        plt.savefig('result.png')
    
        print("finished")

    Run the script and the classification is complete.
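
    The script only reports test accuracies. To actually label a new document with the sklearn model, a minimal sketch along the following lines works, assuming the variables train_feature_list, train_class_list and feature_words produced above are still in scope and that flag = 'sklearn'; new_text is a placeholder and this part is not in the original post:

    # minimal sketch (not part of the original script): classify one new document
    classifier = MultinomialNB().fit(train_feature_list, train_class_list)  # retrain on the final feature set

    new_text = "..."  # placeholder: any raw document string
    new_words = set(jieba.cut(new_text, cut_all=False))  # segment the new text the same way as the training data
    new_feature = [1 if word in new_words else 0 for word in feature_words]  # same 0/1 encoding as TextFeatures

    print(classifier.predict([new_feature])[0])  # prints the predicted class, i.e. the folder name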

  • Original post: https://www.cnblogs.com/baorantHome/p/8534530.html