
Python example: Bayesian analysis of the Challenger O-ring data with pymc



# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 10:40:14 2017
@author: toby
"""

# Import standard packages
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import seaborn as sns
import os
import sys

# additional packages
import pymc as pm
from scipy.stats.mstats import mquantiles

sys.path.append(os.path.join('..', '..', 'Utilities'))
try:
    # Import formatting commands if directory "Utilities" is available
    from ISP_mystyle import setFonts, showData
except ImportError:
    # Ensure correct performance otherwise
    def setFonts(*options):
        return
    def showData(*options):
        plt.show()
        return

sns.set_context('poster')


def logistic(x, beta, alpha=0):
    '''Logistic function'''
    return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))


def getData():
    '''Get and show the O-ring data'''
    inFile = 'challenger_data.csv'
    challenger_data = np.genfromtxt(inFile, skip_header=1, usecols=[0, 1],
                                    missing_values='NA', delimiter=',')
    # drop the NA values
    challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]

    temperature = challenger_data[:, 0]
    failureData = challenger_data[:, 1]  # defect or not?
    return (temperature, failureData)


def showAndSave(temperature, failures):
    '''Shows the input data, and saves the resulting figure'''
    # Plot it, as a function of temperature
    plt.figure()
    setFonts()
    sns.set_style('darkgrid')
    np.set_printoptions(precision=3, suppress=True)

    plt.scatter(temperature, failures, s=200, color="k", alpha=0.5)
    plt.yticks([0, 1])
    plt.ylabel("Damage Incident?")
    plt.xlabel("Outside Temperature [F]")
    plt.title("Defects of the Space Shuttle O-Rings vs temperature")
    plt.tight_layout()

    outFile = 'Challenger_ORings.png'
    showData(outFile)


def mcmcSimulations(temperature, failures):
    '''Perform the MCMC-simulations'''
    # Define the prior distributions for alpha and beta
    # 'value' sets the start parameter for the simulation
    # The second parameter for the normal distributions is the "precision",
    # i.e. the inverse of the standard deviation
    np.random.seed(1234)
    beta = pm.Normal("beta", 0, 0.001, value=0)
    alpha = pm.Normal("alpha", 0, 0.001, value=0)

    # Define the model-function for the temperature
    @pm.deterministic
    def p(t=temperature, alpha=alpha, beta=beta):
        return 1.0 / (1. + np.exp(beta * t + alpha))

    # Connect the probabilities in `p` with our observations through a
    # Bernoulli random variable.
    observed = pm.Bernoulli("bernoulli_obs", p, value=failures, observed=True)

    # Combine the values to a model
    model = pm.Model([observed, beta, alpha])

    # Perform the simulations
    map_ = pm.MAP(model)
    map_.fit()
    mcmc = pm.MCMC(model)
    mcmc.sample(120000, 100000, 2)

    # --- Show the resulting posterior distributions ---
    alpha_samples = mcmc.trace('alpha')[:, None]  # best to make them 1d
    beta_samples = mcmc.trace('beta')[:, None]

    return (alpha_samples, beta_samples)


def showSimResults(alpha_samples, beta_samples):
    '''Show the results of the simulations, and save them to an outFile'''
    plt.figure(figsize=(12.5, 6))
    sns.set_style('darkgrid')
    setFonts(18)

    # Histogram of the samples:
    plt.subplot(211)
    plt.title(r"Posterior distributions of the variables $\alpha, \beta$")
    plt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,
             label=r"posterior of $\beta$", color="#7A68A6", normed=True)
    plt.legend()

    plt.subplot(212)
    plt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,
             label=r"posterior of $\alpha$", color="#A60628", normed=True)
    plt.legend()

    outFile = 'Challenger_Parameters.png'
    showData(outFile)


def calculateProbability(alpha_samples, beta_samples, temperature, failures):
    '''Calculate the mean probability, and the CIs'''
    # Calculate the probability as a function of temperature
    t = np.linspace(temperature.min() - 5, temperature.max() + 5, 50)[:, None]
    p_t = logistic(t.T, beta_samples, alpha_samples)
    mean_prob_t = p_t.mean(axis=0)

    # --- Calculate CIs ---
    # vectorized bottom and top 2.5% quantiles for the "confidence interval"
    quantiles = mquantiles(p_t, [0.025, 0.975], axis=0)

    return (t, mean_prob_t, p_t, quantiles)


def showProbabilities(linearTemperature, temperature, failures, mean_prob_t, p_t, quantiles):
    '''Show the posterior probabilities, and save the resulting figures'''
    # --- Show the probability curve ----
    plt.figure(figsize=(12.5, 4))
    setFonts(18)

    plt.plot(linearTemperature, mean_prob_t, lw=3,
             label="Average posterior\nprobability of defect")
    plt.plot(linearTemperature, p_t[0, :], ls="--", label="Realization from posterior")
    plt.plot(linearTemperature, p_t[-2, :], ls="--", label="Realization from posterior")
    plt.scatter(temperature, failures, color="k", s=50, alpha=0.5)
    plt.title("Posterior expected value of probability of defect, plus realizations")
    plt.legend(loc="lower left")
    plt.ylim(-0.1, 1.1)
    plt.xlim(linearTemperature.min(), linearTemperature.max())
    plt.ylabel("Probability")
    plt.xlabel("Temperature [F]")

    outFile = 'Challenger_Probability.png'
    showData(outFile)

    # --- Draw CIs ---
    setFonts()
    sns.set_style('darkgrid')

    plt.fill_between(linearTemperature[:, 0], *quantiles, alpha=0.7,
                     color="#7A68A6")
    plt.plot(linearTemperature[:, 0], quantiles[0], label="95% CI",
             color="#7A68A6", alpha=0.7)
    plt.plot(linearTemperature, mean_prob_t, lw=1, ls="--", color="k",
             label="average posterior\nprobability of defect")
    plt.xlim(linearTemperature.min(), linearTemperature.max())
    plt.ylim(-0.02, 1.02)
    plt.legend(loc="lower left")
    plt.scatter(temperature, failures, color="k", s=50, alpha=0.5)
    plt.xlabel("Temperature [F]")
    plt.ylabel("Posterior Probability Estimate")

    outFile = 'Challenger_CIs.png'
    showData(outFile)


if __name__ == '__main__':
    (temperature, failures) = getData()
    showAndSave(temperature, failures)

    (alpha, beta) = mcmcSimulations(temperature, failures)
    showSimResults(alpha, beta)

    (linearTemperature, mean_p, p, quantiles) = calculateProbability(alpha, beta, temperature, failures)
    showProbabilities(linearTemperature, temperature, failures, mean_p, p, quantiles)




https://www.youtube.com/watch?v=vlAdYt_8d9A
1. What is Bayesian inference?
Bayesian inference is a statistical method used to estimate properties of a statistical quantity.
It is an application of Bayes' theorem, which the English mathematician Thomas Bayes first presented in a paper published in 1763.
Bayesian inference differs sharply from other methods of statistical inference. It is built on subjective judgment: you may start with an estimate even without objective evidence, and then revise it repeatedly as actual results come in. Because of this strong subjectivity, it was long criticized by many statisticians.
Bayesian inference also requires a great deal of computation, so for a long time it could not be widely applied. Only after the advent of computers did it receive serious attention. People found that many statistical quantities cannot be judged objectively in advance, while the large data sets of the internet era, combined with fast computing power, make it convenient to validate such quantities. This created the conditions for applying Bayesian inference, and its power is becoming increasingly apparent.

6. Example: the false-positive problem
The second example is a common problem in medicine, closely tied to everyday life.

Recall that we call P(A) the "prior probability": our judgment of the probability of event A before event B occurs. P(A|B) is the "posterior probability": our re-assessment of the probability of A after B has occurred. P(B|A)/P(B) is the "likelihood" factor, an adjustment that brings the estimated probability closer to the true probability.
So the conditional probability can be read as:
posterior probability = prior probability × adjustment factor
This is the meaning of Bayesian inference: we first estimate a "prior probability", then add in the experimental evidence and see whether the experiment strengthens or weakens that prior, which yields a "posterior probability" closer to the truth.
Here, if the likelihood factor P(B|A)/P(B) > 1, the prior is strengthened and event A becomes more likely; if the factor equals 1, event B is of no help in judging A; if the factor is < 1, the prior is weakened and A becomes less likely.
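A minimal Python sketch of this "posterior = prior × adjustment factor" reading; the helper name bayes_update and the example numbers are mine, chosen only to illustrate the three regimes:

def bayes_update(prior, p_b_given_a, p_b):
    """Posterior P(A|B) = prior P(A) times the adjustment factor P(B|A)/P(B)."""
    return prior * (p_b_given_a / p_b)

# factor > 1 strengthens the prior, = 1 leaves it unchanged, < 1 weakens it
print(bayes_update(0.3, 0.8, 0.4))  # factor 2.0 -> posterior 0.6
print(bayes_update(0.3, 0.4, 0.4))  # factor 1.0 -> posterior 0.3
print(bayes_update(0.3, 0.2, 0.4))  # factor 0.5 -> posterior 0.15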
Suppose a disease has a prevalence of 0.001, i.e. 1 person in 1000 has it. There is a test for the disease whose accuracy is 0.99: if the patient really has the disease, the test comes back positive with probability 99%. Its false-positive rate is 5%: if the patient does not have the disease, the test still comes back positive with probability 5%. A patient's test result is positive. How likely is it that he actually has the disease?
Let event A be "has the disease", so P(A) = 0.001. This is the "prior probability": our estimate of the prevalence before running the test. Let event B be "the test is positive"; what we want is P(A|B), the "posterior probability": our estimate of the probability of disease after the test.
The result is startling: P(A|B) is approximately 0.019. That is, even with a positive test, the probability that the patient has the disease only rises from 0.1% to about 2%. This is the so-called "false positive" problem: a positive result alone is far from proof that the patient is ill.
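A quick Python check of this figure, using the numbers stated above (the variable names are my own):

p_disease = 0.001            # P(A): prevalence, the prior probability
p_pos_given_disease = 0.99   # P(B|A): probability of a positive test if ill
p_pos_given_healthy = 0.05   # P(B|not A): false-positive rate

# Total probability of a positive result, P(B)
p_pos = p_pos_given_disease * p_disease + p_pos_given_healthy * (1 - p_disease)

# Posterior P(A|B) by Bayes' theorem
p_disease_given_pos = p_pos_given_disease * p_disease / p_pos
print(round(p_disease_given_pos, 3))   # -> 0.019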
Why is that? Why is the test 99% accurate, yet the result less than 2% credible? The answer lies in its high false-positive rate. (Exercise: if the false-positive rate drops from 5% to 1%, what does the probability of disease become?)
Interested readers can also work out the "false negative" problem: if the test result is negative, how likely is it that the patient nevertheless has the disease? Then ask yourself which is the bigger risk in medical testing, false positives or false negatives.
Naive Bayes classifier: code implementation
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 10:02:45 2017
@author: Administrator
"""
from numpy import *

# Load the data: a list of posts and their labels; 0 = not abusive, 1 = abusive
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
                   ['this', 'is', 'a', 'good', 'movie'],
                   ['this', 'movie', 'is', 'suck'],
                   ['so', 'fantanstic'],
                   ['fuck', 'the', 'actress']]
    classVec = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 is not
    return postingList, classVec

# Build the vocabulary: a list of the unique words appearing in all documents
def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

# inputSet is the sentence to encode, vocabList the deduplicated vocabulary.
# Walk through each word of the input; e.g. if the first word is 'my' and 'my'
# is in vocabList, the corresponding entry of returnVec is set to 1.
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        # print("word:", word)
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        # else: print("the word: %s is not in my Vocabulary!" % word)
    return returnVec

# Turn the list of documents into a matrix of feature vectors
# (one row per document, one column per vocabulary word)
def Matrix_featureVectors():
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    return trainMat

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # Laplace smoothing: if any single word probability were 0 the whole product
    # would be 0, so initialize the counts with ones and the denominators with 2
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Multiplying many small numbers underflows, so take logarithms
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive

# Classifier
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)  # element-wise mult
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

# Classify a test sentence and print the result
def testingNB(testEntry):
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    value = classifyNB(thisDoc, p0V, p1V, pAb)
    print(testEntry, 'classified as: ', value)
    return value

# Load the data: the list of posts listOPosts and the label list listClasses
listOPosts, listClasses = loadDataSet()
# Build the deduplicated vocabulary list
myVocabList = createVocabList(listOPosts)
# Build the matrix of feature vectors from the documents
trainMat = Matrix_featureVectors()
# Get the probability of an abusive document and the probability vectors of both classes
p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))

# Test a non-abusive sentence
testEntry0 = ['love', 'my', 'dalmation']
# Test an abusive sentence
testEntry1 = ['stupid', 'garbage']
# Test another sentence
testEntry3 = ['fuck', 'the', 'movie']
testingNB(testEntry0)
testingNB(testEntry1)
testingNB(testEntry3)
