  • Theano Logistic Regression

    Theory

    For the derivation of logistic regression, see this article: http://blog.csdn.net/zouxy09/article/details/20319673. It covers the derivation, gradient descent, and Python source code. It is fairly long, so you can skip straight to the core parts.
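
    For quick reference (this is my own summary of the standard setup, not taken verbatim from the linked article): the model below outputs softmax class probabilities and is trained by plain gradient descent on the mean negative log-likelihood,

    \[
    P(y = k \mid x) = \operatorname{softmax}(xW + b)_k, \qquad
    L(W, b) = -\frac{1}{N}\sum_{i=1}^{N} \log P\bigl(y^{(i)} \mid x^{(i)}\bigr)
    \]
    \[
    W \leftarrow W - \eta\,\frac{\partial L}{\partial W}, \qquad
    b \leftarrow b - \eta\,\frac{\partial L}{\partial b}
    \]

    where \(\eta\) is the learning rate (0.12 in the code below).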

    One thing worth adding to that article is the regularization material it leaves out.
    A fairly complete treatment can be found in this Zhihu answer:
    https://www.zhihu.com/question/35508851/answer/63093225
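
    As a minimal sketch of how that would look in the Theano code below (this is not part of the original program; `l2_reg` is a hypothetical hyperparameter), an L2 penalty on the weights can simply be added to the cost, and T.grad then carries it into the parameter updates automatically:

    # Sketch only: L2-regularized cost, assuming `classifier` and `y`
    # are the objects defined in the code below.
    l2_reg = 0.01  # hypothetical regularization strength
    cost = (classifier.negative_log_likelihood(y)
            + l2_reg * T.sum(classifier.W ** 2))
    # T.grad(cost, classifier.W) now includes the extra 2 * l2_reg * W term.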

    Theano Code

    #!/usr/bin/env python
    # -*- encoding:utf-8 -*-
    '''
    Written by Vincent.Y,
    mainly adapted from the Theano deep learning tutorial.
    '''
    import numpy as np
    import theano
    import theano.tensor as T
    from theano import function
    from sklearn.datasets import make_moons
    import matplotlib.pyplot as plt
    class LogisticRegression():
        def __init__(self,X,n_in,n_out):
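            # model parameters: weight matrix W of shape (n_in, n_out) and bias
            # vector b, both initialized to zero and stored as shared variables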
            self.W = theano.shared(
                value=np.zeros(
                    (n_in,n_out),
                    dtype=theano.config.floatX
                ),
                name='W',
                borrow=True
            )
    
            self.b=theano.shared(
                value=np.zeros(
                    (n_out,),
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )
    
            self.p_y_given_x=T.nnet.softmax(T.dot(X,self.W)+self.b)
            self.y_pred=T.argmax(self.p_y_given_x,axis=1)
            self.params=[self.W,self.b]
            self.X=X
    
        def negative_log_likelihood(self,y):
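            # mean negative log-likelihood of the targets under the model;
            # [T.arange(N), y] picks log P(y_i | x_i) for every example i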
            return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]),y])
    
        def errors(self,y):
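            # fraction of misclassified examples (zero-one loss)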
            if y.ndim != self.y_pred.ndim:
                raise TypeError(
                    'y should have the same shape as self.y_pred',
                    ('y',y.type,'y_pred',self.y_pred.type)
                )
            if y.dtype.startswith('int'):
                return T.mean(T.neq(self.y_pred,y))
            else:
                raise NotImplementedError()
    
    def load_data():
        #we generate data from sklearn
        np.random.seed(0)
        X, y = make_moons(800, noise=0.20)
        print "generated data shape:",X.shape
        #return train and test sets
        return [(X[0:600],y[0:600]),(X[600:800],y[600:800])]
    
    def sgd_optimization(learning_rate=0.12,n_epochs=300):
        datasets=load_data()
        train_set_x,train_set_y=datasets[0]
        test_set_x,test_set_y=datasets[1]
    
        x = T.matrix('x')
        y = T.lvector('y')
    
        classifier=LogisticRegression(X=x,n_in=2,n_out=2)
    
        cost=classifier.negative_log_likelihood(y)
    
        test_model=function(
            inputs=[x,y],
            outputs=classifier.errors(y)
        )
    
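        # symbolic gradients of the cost with respect to the parameters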
        g_W=T.grad(cost=cost,wrt=classifier.W)
        g_b=T.grad(cost=cost,wrt=classifier.b)
    
        updates=[(classifier.W,classifier.W-learning_rate*g_W),
            (classifier.b,classifier.b-learning_rate*g_b)]
    
        train_model=function(
            inputs=[x,y],
            outputs=classifier.errors(y),
            updates=updates
        )
    
        epoch=0
        while epoch < n_epochs:
            epoch=epoch+1
            train_error=train_model(train_set_x,train_set_y)
            test_error=test_model(test_set_x,test_set_y)
            print "epoch is %d, train error %f, test error %f"%(epoch,train_error,test_error)
        predict_model=function(
            inputs=[x],
            outputs=classifier.y_pred
        )
        plot_decision_boundary(lambda x:predict_model(x),train_set_x,train_set_y)
    
    def plot_decision_boundary(pred_func,train_set_x,train_set_y):
        x_min, x_max = train_set_x[:, 0].min() - .5, train_set_x[:, 0].max() + .5
        y_min, y_max = train_set_x[:, 1].min() - .5, train_set_x[:, 1].max() + .5
        h = 0.01
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
        Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
        plt.scatter(train_set_x[:, 0], train_set_x[:, 1], c=train_set_y, cmap=plt.cm.Spectral)
        plt.show()
    if __name__=="__main__":
        sgd_optimization()
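
    Running the script as-is trains for 300 epochs on 600 of the 800 generated two-moons samples, prints the train and test error every epoch, and then shows the learned decision boundary with matplotlib.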
    

    Results

    (Figure: the decision boundary learned by the Theano logistic regression)

  • Original post: https://www.cnblogs.com/mthoutai/p/7354627.html