Machine Learning in Action: Naive Bayes Algorithm Study Notes
1. Bayes' rule: P(A|B) = P(B|A)P(A) / P(B)
For example: a villa was burgled twice over the past 20 years. The owner has a dog that barks, on average, 3 nights a week, and the probability that the dog barks during a break-in is estimated at 0.9. The question: when the dog barks, what is the probability that a break-in is happening?
Let A be the event that the dog barks and B the event of a break-in. Then P(A) = 3/7 and P(B) = 2/(20*365) = 2/7300. We are told the dog barks during a break-in with probability 0.9, so P(A|B) = 0.9. Rearranging Bayes' rule P(A|B) = P(B|A)P(A)/P(B) gives P(B|A) = P(A|B)P(B)/P(A), so P(B|A) = 0.9*(2/7300)/(3/7) ≈ 0.00058.
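The arithmetic is easy to verify in a few lines of Python (a quick sketch; the variable names are mine):

# Sanity check of the burglary example
p_bark = 3.0 / 7                 # P(A): the dog barks 3 nights out of 7
p_burglary = 2.0 / (20 * 365)    # P(B): 2 burglaries in 20 years of nights
p_bark_given_burglary = 0.9      # P(A|B), as estimated above

# Bayes' rule: P(B|A) = P(A|B) * P(B) / P(A)
print p_bark_given_burglary * p_burglary / p_bark   # ~0.00058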
2. Text classification with Python: converting word lists to vectors

def loadDataSet():
    postingList = [['my','dog','has','flea','problems','help','please'],
                   ['maybe','not','take','him','to','dog','park','stupid'],
                   ['my','dalmation','is','so','cute','i','love','him'],
                   ['stop','posting','stupid','worthless','garbage'],
                   ['mr','licks','ate','my','steak','how','to','stop','him'],
                   ['quit','buying','worthless','dog','food','stupid']]
    classVec = [0,1,0,1,0,1]   # 1 = abusive, 0 = normal speech
    return postingList,classVec
# Build a list of the unique words across all documents
def createVocabList(dataSet):
    vocabSet = set([])
    for document in dataSet:
        # Union of the running set and this document's words
        vocabSet = vocabSet | set(document)
    return list(vocabSet)
# Set-of-words model
# vocabList is the vocabulary; inputSet is an input document
def setOfWords2Vec(vocabList,inputSet):
    # Build a vector of zeros with the same length as vocabList
    returnVec = [0]*len(vocabList)
    # Walk every word in the document; if it appears in the vocabulary,
    # set the corresponding entry of the output vector to 1
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else: print "the word: %s is not in my Vocabulary!" % word
    # Return the document vector
    return returnVec

Running the code:
$ python
>>> import bayes
>>> listOposts,listClasses = bayes.loadDataSet()
>>> myVocabList = bayes.createVocabList(listOposts)
>>> myVocabList
['cute', 'love', 'help', 'garbage', 'quit', 'food', 'problems', 'is', 'park', 'stop', 'flea', 'dalmation', 'licks', 'not', 'him', 'buying', 'posting', 'has', 'worthless', 'ate', 'to', 'maybe', 'take', 'my', 'dog', 'so', 'stupid', 'i', 'mr', 'steak', 'how', 'please']
>>> bayes.setOfWords2Vec(myVocabList,listOposts[0])
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1]
>>> bayes.setOfWords2Vec(myVocabList,listOposts[3])
[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
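The comment above calls setOfWords2Vec a set-of-words model because it only records whether a word occurs, not how often. For comparison, a bag-of-words variant that counts occurrences is a one-line change (a sketch; bagOfWords2VecMN is the name this helper goes by in Machine Learning in Action):

# Bag-of-words model: record how many times each word appears
def bagOfWords2VecMN(vocabList,inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec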
Naive Bayes classifier training function
# numpy provides ones, log, array and random used below
from numpy import *

# Naive Bayes trainer; this version only handles binary classification
# trainMatrix is the document matrix, trainCategory the class label vector
def trainNBO(trainMatrix,trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    # Fraction of documents labeled abusive, i.e. P(class 1)
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    # Initialize counts to 1 and denominators to 2 (Laplace smoothing)
    # so an unseen word never zeroes out a whole probability product
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Take logs so products of many small probabilities become sums
    # and do not underflow
    p1Vect = log(p1Num/p1Denom)
    p0Vect = log(p0Num/p0Denom)
    return p0Vect,p1Vect,pAbusive
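With three of the six training posts labeled abusive, the class prior pAb should come out to 0.5. A quick interactive check (continuing the session style above):

>>> from numpy import array
>>> import bayes
>>> listOPosts,listClasses = bayes.loadDataSet()
>>> myVocabList = bayes.createVocabList(listOPosts)
>>> trainMat = []
>>> for postinDoc in listOPosts:
...     trainMat.append(bayes.setOfWords2Vec(myVocabList,postinDoc))
...
>>> p0V,p1V,pAb = bayes.trainNBO(array(trainMat),array(listClasses))
>>> pAb
0.5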
# Naive Bayes classification function
def classifyNB(vec2Classify,p0Vec,p1Vec,pClass1):
    # Element-wise multiply and sum: the log of the product of the
    # per-word conditional probabilities, plus the log class prior
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else: return 0
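Why does summing vec2Classify * p1Vec give the right answer? Under the naive independence assumption, P(w|c) is a product of per-word probabilities, and taking logs turns that product into a sum, which is both cheaper and safe from underflow. A tiny numeric check (the probability values are made up for illustration):

from numpy import array, log, exp

probs = array([0.25, 0.05, 0.10])   # hypothetical P(word_i | class 1)
vec = array([1, 0, 1])              # the document contains words 0 and 2
prior = 0.5

# Direct product of the probabilities of the words present, times the prior
direct = 0.25 * 0.10 * prior
# classifyNB's computation: sum of logs, exponentiated here only to compare
via_logs = exp(sum(vec * log(probs)) + log(prior))
print direct, via_logs              # both print 0.0125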
Testing the classifier:

def testingNB():
    listOPosts,listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList,postinDoc))
    p0V,p1V,pAb = trainNBO(array(trainMat),array(listClasses))
    testEntry = ['love','my','dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList,testEntry))
    print testEntry,'classified as:',classifyNB(thisDoc,p0V,p1V,pAb)
    testEntry = ['stupid','garbage']
    thisDoc = array(setOfWords2Vec(myVocabList,testEntry))
    print testEntry,'classified as:',classifyNB(thisDoc,p0V,p1V,pAb)
Running the code:
$ python
>>> import bayes
>>> bayes.testingNB()
['love', 'my', 'dalmation'] classified as: 0
['stupid', 'garbage'] classified as: 1
3. Filtering spam email with Naive Bayes

# Take a big string and parse it into a list of token strings
def textParse(bigString):
    import re
    # Split on any run of non-alphanumeric characters
    listOfTokens = re.split(r'\W*',bigString)
    # Drop tokens of two characters or fewer and lowercase the rest
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
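A quick interactive check of textParse (the sample sentence is mine):

>>> import bayes
>>> bayes.textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']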
Print the classification error rate over a randomly chosen test set of emails:
def spamTest():
    docList = []; classList = []; fullText = []
    # Load the text files and parse each one into a word list
    for i in range(1,26):
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # Randomly move 10 of the 50 documents into the test set,
    # deleting them from the training set (hold-out cross-validation)
    trainingSet = range(50); testSet = []
    for i in range(10):
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    # Train on the remaining documents, then score the held-out ones
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList,docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNBO(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList,docList[docIndex])
        if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is:',float(errorCount)/len(testSet)

Running the code:
$ python
>>> import bayes
>>> bayes.spamTest()
the error rate is: 0.0
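Since the train/test split is random, the error rate differs from run to run, so a single 0.0 is not conclusive. One way to average it (a sketch; repeatSpamTest is a hypothetical helper, and it assumes spamTest is changed to return its error rate instead of only printing it):

# Hypothetical: average the error rate over several random splits.
# Assumes spamTest() returns float(errorCount)/len(testSet).
def repeatSpamTest(numRuns=10):
    total = 0.0
    for i in range(numRuns):
        total += spamTest()
    print 'average error rate over %d runs: %f' % (numRuns,total/numRuns)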
4. Using Naive Bayes to uncover regional attitudes from personals ads

# Walk the vocabulary, count each word's occurrences in the full text,
# sort from most to least frequent, and return the top 30 words
def calcMostFreq(vocabList,fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.iteritems(),key=operator.itemgetter(1),reverse=True)
    return sortedFreq[:30]
# Basically the same as spamTest; the difference is that the input
# comes from two RSS feeds rather than from text files
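The body of that function is not shown above. Below is a minimal sketch of such a localWords-style function (the name follows Machine Learning in Action), built on the same skeleton as spamTest. It assumes each feedparser entry exposes a 'summary' field, strips the 30 most frequent words via calcMostFreq since they are mostly stopwords, and reuses the bag-of-words helper sketched earlier. getTopWords, called in the session below, is sketched as a display wrapper around it:

def localWords(feed1,feed0):
    docList=[]; classList=[]; fullText=[]
    minLen = min(len(feed1['entries']),len(feed0['entries']))
    # Parse the summary of each RSS entry, alternating the two feeds
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList); fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList); fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # Remove the 30 most frequent words: they carry little signal
    for pairW in calcMostFreq(vocabList,fullText):
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    # Hold out 20 entries for testing, train on the rest
    trainingSet = range(2*minLen); testSet=[]
    for i in range(20):
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat=[]; trainClasses=[]
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList,docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNBO(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList,docList[docIndex])
        if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is:',float(errorCount)/len(testSet)
    return vocabList,p0V,p1V

# Display wrapper: print the words whose conditional log-probability
# clears a threshold (-6.0 is an arbitrary cutoff), most probable first
def getTopWords(ny,sf):
    vocabList,p0V,p1V = localWords(ny,sf)
    topNY=[]; topSF=[]
    for i in range(len(p0V)):
        if p0V[i] > -6.0: topSF.append((vocabList[i],p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i],p1V[i]))
    print 'SF:'
    for item in sorted(topSF,key=lambda pair: pair[1],reverse=True):
        print item[0]
    print 'NY:'
    for item in sorted(topNY,key=lambda pair: pair[1],reverse=True):
        print item[0]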
Running the code:
$ python
>>> import bayes
>>> import feedparser
>>> ny=feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
>>> sf=feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
>>> bayes.getTopWords(ny,sf)