zoukankan      html  css  js  c++  java
  • 解析搜狗实验室精简版数据

    1、要预处理xml文件,解决docs,url,content标签问题。主要代码是deal_label.py 存入数据文件夹是sougou_label_after

    SogouCS.reduced:存放原始的txt;
    sougou_label_after:解决docs,url,content标签问题
    import os
    # 预处理xml文件,解决docs,url,content标签问题
    # Pre-process each raw Sogou corpus file: wrap the whole content in a
    # single <docs> root element and escape bare '&' characters so the
    # result is well-formed XML that minidom can parse later.
    #
    # file_dir: folder holding the original gb18030-encoded txt files.
    # half_dir: output folder (created if missing); one utf-8 file per input.
    def file_fill(file_dir, half_dir):
        # Walk the source folder; 'files' lists the raw corpus files.
        for root, dirs, files in os.walk(file_dir):
            # Create the output folder on demand.
            if not os.path.exists(half_dir):
                os.makedirs(half_dir)
            for f in files:
                # os.path.join is portable; the original hard-coded '\'
                # separator was mangled by the page scrape into a syntax error.
                tmp_dir = os.path.join(half_dir, f)
                text_init_dir = os.path.join(file_dir, f)
                # The raw Sogou corpus is encoded as gb18030.
                with open(text_init_dir, 'r', encoding='gb18030') as source_file:
                    start, end = '<docs>\n', '</docs>'
                    line_content = source_file.readlines()
                    # Write the fixed-up copy as utf-8 into the target folder.
                    with open(tmp_dir, 'w+', encoding='utf-8') as handle_file:
                        # Opening <docs> root tag.
                        handle_file.write(start)
                        for line in line_content:
                            # Escape bare '&' (common in urls) for XML validity.
                            text = line.replace('&', '&amp;')
                            handle_file.write(text)
                        # Closing </docs> root tag.
                        handle_file.write(end)
    if __name__ == '__main__':
        # Windows paths restored: the page scrape dropped the '\' separators
        # from the raw strings (r'E:sssssSogouCS.reduced' etc.).
        file_dir = r'E:\sssss\SogouCS.reduced'      # folder with the raw corpus files
        half_dir = r'E:\sssss\sougou_label_after'   # output folder; created automatically by file_fill
        # Wrap every file in <docs>...</docs> and escape the '&' character.
        file_fill(file_dir, half_dir)
    

    2、部分样本获取,同时随机选取训练数据和测试数据。主要代码是data_train_test.py 存入数据文件夹分别为train_choice , test_choice

    import os, random
    
    # 部分样本获取函数
    # Randomly pick n training files and m (disjoint) test files from
    # half_dir and copy their contents into choice_dir_train / choice_dir_test.
    #
    # Returns (file_list_train, file_list_test): the chosen file names.
    # Raises ValueError (from random.sample) when half_dir holds fewer
    # than n (or, after removal, m) files.
    def choice_files(half_dir, choice_dir_train, choice_dir_test, n, m):
        if not os.path.exists(choice_dir_train):
            os.makedirs(choice_dir_train)
        if not os.path.exists(choice_dir_test):
            os.makedirs(choice_dir_test)

        for _, _, files in os.walk(half_dir):
            # Pick n random files for training ...
            file_list_train = random.sample(files, n)
            # ... remove them so the test sample cannot overlap ...
            for file_choice in file_list_train:
                files.remove(file_choice)
            # ... then pick m of the remaining files for testing.
            file_list_test = random.sample(files, m)
            for file in file_list_train:
                # os.path.join replaces the scrape-mangled '\' literals.
                with open(os.path.join(half_dir, file), 'r', encoding='utf-8') as f1_train:
                    doc_train = f1_train.read()
                    path = os.path.join(choice_dir_train, file)
                    with open(path, 'w+', encoding='utf-8') as f2_train:
                        f2_train.write(doc_train)
            for file in file_list_test:
                with open(os.path.join(half_dir, file), 'r', encoding='utf-8') as f1_test:
                    doc_test = f1_test.read()
                    path = os.path.join(choice_dir_test, file)
                    with open(path, 'w+', encoding='utf-8') as f2_test:
                        f2_test.write(doc_test)
        return file_list_train, file_list_test
    
    if __name__ == '__main__':
        # Windows paths restored: the page scrape dropped '\' separators and
        # turned '\t' into a literal tab inside the raw strings.
        half_dir = r'E:\sssss\sougou_label_after'
        choice_dir_train = r'E:\sssss\train_choice'
        choice_dir_test = r'E:\sssss\test_choice'
        # Randomly pick 10 training files and 5 test files for further processing.
        file_list_train, file_list_test = choice_files(half_dir, choice_dir_train, choice_dir_test, 10, 5)
    

    3、提取文档文本内容,并且根据url将文本分好类.主要代码是data_content.py 存入数据文件夹是data

    import os
    from xml.dom import minidom
    from urllib.parse import urlparse
    
    # 检查url对应的文章是否在分类字典中
    # True when the url label (first host component, e.g. 'auto') is one of
    # the categories we keep, False otherwise.
    def check_class(url_lb, labels):
        return url_lb in labels
    
    # Parse every pre-processed xml file in half_dir and, for each article
    # whose <url> hostname prefix maps to a known category in `labels`,
    # append the <content> text to path/<category>/<category>.txt.
    #
    # half_dir: folder of <docs>-wrapped utf-8 xml files.
    # labels:   dict mapping url prefix (e.g. 'auto') -> category name.
    # path:     output root; a sub-folder per category is created on demand.
    def file_read(half_dir, labels, path):
        for _, _, files in os.walk(half_dir):
            for f in files:
                filename = os.path.join(half_dir, f)  # scrape-mangled '\' restored
                doc = minidom.parse(filename)          # load the xml document
                root = doc.documentElement             # <docs> root element
                claimtext = root.getElementsByTagName('content')  # all <content> nodes
                claimurl = root.getElementsByTagName('url')       # all <url> nodes
                # <url> and <content> are assumed paired 1:1 in document
                # order -- TODO confirm against the corpus format.
                for ind in range(len(claimurl)):
                    # Skip articles whose <content> element is empty.
                    if claimtext[ind].firstChild is None:
                        continue
                    url = urlparse(claimurl[ind].firstChild.data)
                    # Category key is the first host component: auto.sohu.com -> 'auto'.
                    url_lb = url.hostname.strip().split('.')[0]
                    if url_lb in labels:
                        # os.path.join replaces the odd "path + './'" string
                        # concatenation left behind by the page scrape.
                        class_dir = os.path.join(path, labels[url_lb])
                        if not os.path.exists(class_dir):
                            os.makedirs(class_dir)  # also creates `path` itself
                        file_name = os.path.join(class_dir, "{}.txt".format(labels[url_lb]))
                        # Append mode: one file accumulates all articles of a category.
                        with open(file_name, "a+", encoding='utf-8') as file_in:
                            file_in.write(claimtext[ind].firstChild.data + '\n')
    
    if __name__ == '__main__':
        # Map url host prefix -> Chinese category name.
        labels = {'auto': '汽车', 'it': '互联网', 'health': '健康',
                  'sports': '体育', 'travel': '旅游', 'learning': '教育', 'career': '职业',
                  'cul': '文化', 'mil': '军事', 'house': '房产', 'yule': '娱乐',
                  'women': '女人', 'media': '媒体', '2008': '奥运',
                  'business': '商业'
                  }
        # Windows paths restored: the page scrape dropped '\' separators and
        # turned '\t' into a literal tab inside the raw strings.
        half_dir = r'E:\sssss\train_choice'
        path = r'E:\sssss\data\data_train'   # classified training texts go here
        file_read(half_dir, labels, path)    # extract plain text and store it by category

        half_dir = r'E:\sssss\test_choice'
        path = r'E:\sssss\data\data_test'    # classified test texts go here
        file_read(half_dir, labels, path)    # extract plain text and store it by category
    
  • 相关阅读:
    转:SVN常见问题与解决方法
    Mac OS 下的解压缩软件——The Unarchiver
    Django报错 The serializer field might be named incorrectly and not match any Got AttributeError when attempting to get a value for field `author_for` on serializer `KnownledgeBaseListSerializer`
    Django 生成数据库表时的报错TypeError: __init__() missing 1 required positional argument: 'on_delete'
    webstorm不能中文输入问题
    npm报错This is probably not a problem with npm. There is likely additional logging
    Django 报错no sucn column: OpretionalError
    Python 报错 AttributeError: module 'django.db.models' has no attribute 'SubfieldBase'
    详解Django中Request对象的相关用法
    Python中import, from...import,import...as的区别
  • 原文地址:https://www.cnblogs.com/ming-jing/p/10775393.html
Copyright © 2011-2022 走看看