zoukankan      html  css  js  c++  java
  • Hamlet的jieba分词和词云

    def getText(filename="hamlet.txt"):
        """Read *filename*, lowercase it, and blank out ASCII punctuation.

        Parameters
        ----------
        filename : str
            Path of the text file to read (defaults to "hamlet.txt" for
            backward compatibility with the original script).

        Returns
        -------
        str
            The lowercased text with each punctuation character replaced
            by a single space, ready for whitespace splitting.
        """
        # Context manager guarantees the handle is closed; explicit
        # encoding avoids locale-dependent decoding.
        with open(filename, "r", encoding="utf-8") as f:
            txt = f.read()
        txt = txt.lower()
        # One C-level translate pass replaces the chained .replace() loop.
        # NOTE(review): the original set contained '‘' — presumably a
        # mis-encoded backtick from scraping; both are mapped to be safe.
        punctuation = '!"#$%&()*+,-./:;<=>?@[\\]^_`‘{|}~'
        return txt.translate(str.maketrans(punctuation, " " * len(punctuation)))
     
    # --- Word-frequency report: print the 10 most common words in Hamlet ---
    from collections import Counter

    hamletTxt = getText()
    words = hamletTxt.split()
    # Counter replaces the manual dict-accumulation loop and the explicit
    # sort; most_common() already sorts by count descending (stable on ties).
    counts = Counter(words)
    # most_common(10) returns at most 10 entries, so a short text no longer
    # raises IndexError the way "for i in range(10): items[i]" did.
    for word, count in counts.most_common(10):
        print("{0:<10}{1:>5}".format(word, count))

    from wordcloud import WordCloud
    import matplotlib.pyplot as plt
    import jieba
    import numpy as np
    from PIL import Image
    def create_word_cloud(filename):
        """Render a word cloud from ``<filename>.txt`` shaped by taoxin.jpg.

        The cloud is saved to 'py_book.bmp' and then displayed with
        matplotlib. Reads the source text as raw bytes and lets jieba
        handle decoding.
        """
        # Context manager closes the handle (the original leaked it).
        with open("{}.txt".format(filename), 'rb') as f:
            text = f.read()
        # cut_all=True is jieba's full mode (emits every possible word).
        # NOTE(review): cut_all=False is usually better for frequency-based
        # clouds — kept as-is to preserve the original output.
        wordlist = jieba.cut(text, cut_all=True)
        wl = " ".join(wordlist)
        # WordCloud draws only where the mask image is non-white.
        cloud_mask = np.array(Image.open("taoxin.jpg"))
        wc = WordCloud(
            background_color="black",
            mask=cloud_mask,
            max_words=2000,
            height=1200,
            width=1600,
            max_font_size=100,
            random_state=100,  # fixed seed -> reproducible layout
        )
        myword = wc.generate(wl)
        # Save BEFORE plt.show(): show() blocks until the window closes,
        # so the original only wrote the file after user interaction.
        wc.to_file('py_book.bmp')
        plt.imshow(myword)
        plt.axis("off")
        plt.show()

    if __name__ == '__main__':
        create_word_cloud('Hamlet')

     

  • 相关阅读:
    魔法阵
    求和
    罗马数字
    「NOIP2005P」循环
    【Windows批处理III】实现删除含自定字符串的文件和文件夹(搜索子目录)
    扩展欧几里得算法
    埃氏筛法(素数筛)
    python学习之argparse模块
    51Nod1364 最大字典序排列
    51Nod1537 分解
  • 原文地址:https://www.cnblogs.com/jiana/p/12733325.html
Copyright © 2011-2022 走看看