  • Naive Bayes Classifier

    1. Bayes' Formula

    • Conditional probability
      p(B|A) = p(AB) / p(A)

      p(AB) = p(A) p(B|A)
    • Law of total probability
      p(A) = p(B1) p(A|B1) + p(B2) p(A|B2) + ... + p(Bn) p(A|Bn)
    • Bayes' formula
      p(Bi|A) = p(ABi) / p(A) = p(A|Bi) p(Bi) / Σ_{j=1..n} p(A|Bj) p(Bj)

      This formula gives the probability that event Bi occurred, given that event A has been observed.

      It is usually called the posterior probability formula: the distribution of the parameter B obtained after observing the quantity A. Here p(Bi) is called the prior probability, the distribution of Bi assigned from experience before anything is observed.


      The Bayesian approach differs from maximum likelihood precisely in this prior: introducing a prior probability helps avoid the overfitting that pure maximum likelihood estimation can suffer from.
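
      To make the roles of the terms concrete, here is a minimal numeric sketch of the posterior computation above; the priors and likelihoods are made-up numbers for illustration only:

        # two candidate classes B1, B2 and one observation A (made-up numbers)
        p_B = [0.6, 0.4]          # priors p(B1), p(B2)
        p_A_given_B = [0.2, 0.7]  # likelihoods p(A|B1), p(A|B2)

        # law of total probability: p(A) = p(B1)p(A|B1) + p(B2)p(A|B2) = 0.4
        p_A = sum(p * l for p, l in zip(p_B, p_A_given_B))

        # Bayes' formula: p(Bi|A) = p(A|Bi) p(Bi) / p(A)
        posterior = [p * l / p_A for p, l in zip(p_B, p_A_given_B)]
        # posterior == [0.3, 0.7]: observing A shifts belief from B1 toward B2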

    2. The Naive Bayes Method

    • For B = {B1, B2, ..., Bn}, the conditional probability factors by the chain rule as
      p(B|A) = p(B1|A) p(B2|A,B1) p(B3|A,B1,B2) ... p(Bn|A,B1,...,Bn-1)
      In practice, however, the terms on the right-hand side are very hard to compute.

      We therefore make a strong assumption: the Bi are mutually independent given A. The conditional probability can then be written as

      p(B|A) = p(B1|A) p(B2|A) ... p(Bn|A)
      This is the naive Bayes method.

      Of course, this independence assumption rarely holds exactly in practice, yet it can still describe the data reasonably well.

    • Under this assumption, we can compute each posterior p(Bi|A) ∝ p(A|Bi) p(Bi). If p(Bi|A) > p(Bj|A) for every j ≠ i, then A is assigned to class Bi; a minimal sketch of this decision rule follows.
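
      As a sketch of that rule, here is a small Bernoulli-style naive Bayes predictor for binary (0/1) features; the names are illustrative, and the actual implementation in section 3 works with log-probabilities instead, to avoid floating-point underflow:

        def naive_bayes_predict(priors, likelihoods, features):
            # pick argmax_i of p(Bi) * prod_k p(feature_k | Bi), for 0/1 features
            best_class, best_score = None, -1.0
            for i in range(len(priors)):
                score = priors[i]                    # start from the prior p(Bi)
                for k, x in enumerate(features):
                    p = likelihoods[i][k]            # p(feature_k = 1 | Bi)
                    score *= p if x else (1.0 - p)   # per-feature Bernoulli likelihood
                if score > best_score:
                    best_class, best_score = i, score
            return best_class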

    3. Example

    During training, two probabilities need to be computed (Num(·) denotes a count over the training set):
    * prior probability: p(Bi) = Num(Bi) / Num(B)
    * conditional probability: p(A|Bi) = Num(A, Bi) / Num(Bi)
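
    Plugging in the toy dataset from loadDataSet() below: 3 of the 6 posts are labeled abusive, and those 3 posts contain 19 words in total, 3 of which are 'stupid'. A quick sketch of both estimates:

        prior = 3 / 6.0                       # p(c=1): half the posts are abusive
        cond_raw = 3 / 19.0                   # p('stupid'|c=1) by straight counting
        cond_smoothed = (1 + 3) / (2 + 19.0)  # as trainNB0 computes it, with add-one initialization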

    from numpy import *
    
    def loadDataSet():
        # toy dataset: six posts and their labels (1 = abusive, 0 = not abusive)
        postingList=[['my', 'dog', 'has', 'flea','problems', 'help', 'please'],
            ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
            ['my', 'dalmation', 'is', 'so', 'cute','I', 'love', 'him'],
            ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
            ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
            ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
        classVec=[0, 1, 0, 1, 0, 1]
        return postingList, classVec
    
    def createVocabList(dataSet):
        # build the vocabulary as the union of all words across documents
        vocabSet = set([])
        for document in dataSet:
            vocabSet = vocabSet | set(document)
        return list(vocabSet)
    
    def setOfWord2Vec(vocabList, inputSet):
        # set-of-words model: 0/1 vector marking which vocabulary words appear
        returnVec = [0] * len(vocabList)
        for word in inputSet:
            if word in vocabList:
                returnVec[vocabList.index(word)]=1
            else:
                print "the word: %s is not in my vocabulary!" % word
        return returnVec
    
    def trainNB0(trainMatrix, trainCategory):
        # estimate log p(word|class) for both classes and the prior p(class=1)
        numTrainDocs = len(trainMatrix)
        numWords = len(trainMatrix[0])
        pAbusive = sum(trainCategory)/float(numTrainDocs)
        p0Num = ones(numWords)        # initialize counts to 1 rather than 0 ...
        p1Num = ones(numWords)        # ... (add-one smoothing: no zero probabilities)
        p0Denom = 2.0; p1Denom = 2.0  # denominators start at 2 for the same reason
        for i in range(numTrainDocs):
            if trainCategory[i]==1:
                p1Num += trainMatrix[i]
                p1Denom += sum(trainMatrix[i])
            else:
                p0Num += trainMatrix[i]
                p0Denom += sum(trainMatrix[i])
        p1Vect = log(p1Num/p1Denom)   # log-probabilities avoid underflow when many terms multiply
        p0Vect = log(p0Num/p0Denom)
        return p0Vect, p1Vect, pAbusive
    
    def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
        # sums of log-probabilities correspond to the products p(A|c) p(c) in Bayes' formula
        p1 = sum(vec2Classify*p1Vec) + log(pClass1)
        p0 = sum(vec2Classify*p0Vec) + log(1.0-pClass1)
        if p1 > p0:
            return 1
        else:
            return 0
    
    
    def testingNB():
        listOPosts, listClasses = loadDataSet()
        myVocabList = createVocabList(listOPosts)
        trainMat = []
        for postinDoc in listOPosts:
            trainMat.append(setOfWord2Vec(myVocabList, postinDoc))
        p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
        testEntry = ['love', 'my', 'dalmation']
        thisDoc = array(setOfWord2Vec(myVocabList, testEntry))
        print testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb)
        testEntry=['stupid', 'garbage']
        thisDoc = array(setOfWord2Vec(myVocabList, testEntry))
        print testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb)
    
    def bagOfWords2VecMN(vocabList, inputSet):
        # bag-of-words model: count occurrences instead of 0/1 presence
        returnVec = [0]*len(vocabList)
        for word in inputSet:
            if word in vocabList:
                returnVec[vocabList.index(word)]+=1
        return returnVec
    
    
    def textParse(bigString):
        import re
        # split on runs of non-word characters, keep lowercased tokens longer than two chars
        listOfTokens = re.split(r'\W+', bigString)
        return [tok.lower() for tok in listOfTokens if len(tok)>2]
    
    def spamTest():
        docList = []; classList=[]; fullText=[]
        for i in range(1, 26):
            wordList = textParse(open('email/spam/%d.txt' % i).read())
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(1)
            wordList = textParse(open('email/ham/%d.txt' % i).read())
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(0)
        vocabList = createVocabList(docList)
        trainingSet = range(50)   # Python 2: a plain list of doc indices, so del works below
        testSet = []
        for i in range(10):
            randIndex = int(random.uniform(0, len(trainingSet)))
            testSet.append(trainingSet[randIndex])
            del(trainingSet[randIndex])
        trainMat=[]; trainClasses=[]
        for docIndex in trainingSet:
            trainMat.append(setOfWord2Vec(vocabList, docList[docIndex]))
            trainClasses.append(classList[docIndex])
        p0V, p1V, pSpam=trainNB0(array(trainMat), array(trainClasses))
        errorCount = 0
        for docIndex in testSet:
            wordVector = setOfWord2Vec(vocabList, docList[docIndex])
            if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
                errorCount += 1
        print 'the error rate is: ',float(errorCount)/len(testSet)
    
    
    def calcMostFreq(vocabList, fullText):
        # return the 30 most frequent words in the corpus as (word, count) pairs
        import operator
        freqDict={}
        for token in vocabList:
            freqDict[token] = fullText.count(token)
        sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
        return sortedFreq[:30]
    
    def localWords(feed1, feed0):
        # same pipeline as spamTest, but classifying RSS entries by source feed
        import feedparser
        docList=[]; classList=[]; fullText=[]
        minLen = min(len(feed1['entries']), len(feed0['entries']))
        for i in range(minLen):
            wordList = textParse(feed1['entries'][i]['summary'])
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(1)
            wordList = textParse(feed0['entries'][i]['summary'])
            docList.append(wordList)
            fullText.extend(wordList)
            classList.append(0)
        vocabList = createVocabList(docList)
        top30Words = calcMostFreq(vocabList, fullText)
        for pairW in top30Words:   # remove the most frequent words (they act as stop words)
            if pairW[0] in vocabList:
                vocabList.remove(pairW[0])
        trainingSet = range(2*minLen)
        testSet=[]
        for i in range(20):
            randIndex = int(random.uniform(0, len(trainingSet)))
            testSet.append(trainingSet[randIndex])
            del(trainingSet[randIndex])
        trainMat=[]; trainClasses=[]
        for docIndex in trainingSet:
            trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
            trainClasses.append(classList[docIndex])
        p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
        errorCount = 0
        for docIndex in testSet:
            wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
            if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
                errorCount += 1
        print 'the error rate is: ', float(errorCount)/len(testSet)
        return vocabList, p0V, p1V
    
    
    
    
    
    if __name__=="__main__":
        listOPosts, listClasses = loadDataSet()
        print listOPosts, listClasses
        myVocabList = createVocabList(listOPosts)
        print myVocabList
        print setOfWord2Vec(myVocabList, listOPosts[0])
        trainMat = []
        for postinDoc in listOPosts:
            trainMat.append(setOfWord2Vec(myVocabList, postinDoc))
        p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
        print p0V
        testingNB()
        spamTest()
        import feedparser
        ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
        sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
        vocabList, pSF, pNY = localWords(ny, sf)
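
    A note on running this listing: spamTest() expects 25 spam and 25 ham messages stored as email/spam/1.txt through email/spam/25.txt and email/ham/1.txt through email/ham/25.txt next to the script, and localWords() needs network access to fetch the two Craigslist RSS feeds. Both routines hold out a random test set, so the printed error rate varies from run to run.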
    

