  • Multilayer Neural Networks and a C++ Implementation

    For the theory behind backpropagation (BP), see: http://blog.csdn.net/itplus/article/details/11022243

    The code follows http://www.cnblogs.com/ronny/p/ann_02.html#!comments; read together with the BP theory above, it makes ANNs genuinely understandable.

    I have added some comments to the code.

    #include<vector>
    using namespace std;
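
    // Not in the original post: the listing uses SIGMOID and DSIGMOID but never
    // defines them. A minimal sketch assuming the standard logistic activation;
    // DSIGMOID takes the neuron's output y = f(x), since f'(x) = y*(1 - y).
    #include <cstdlib> // rand(), RAND_MAX
    #include <cmath>   // exp()
    #define SIGMOID(x) (1.0 / (1.0 + exp(-(x))))
    #define DSIGMOID(y) ((y) * (1.0 - (y)))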
    
    
    
    // a single connection (edge) into a neuron
    class NNconnection
    {
    public:
        // Two indices: neuralIdx locates the source neuron in the previous
        // layer; weightIdx locates the corresponding weight in this layer's
        // flat weight vector
        unsigned weightIdx;
        unsigned neuralIdx;
    };
    
    // a single neuron: one output plus its incoming connections
    class NNneural
    {
    public:
        double output; // this neuron's activation
        vector<NNconnection> m_connection;
    };
    
    // one layer of the network
    class NNlayer
    {
    public:
        NNlayer *preLayer; // the layer preceding this one
        NNlayer(){ preLayer = NULL; }
        vector<NNneural> m_neurals; // the neurons of this layer
        vector<double> m_weights; // all incoming weights of this layer, flattened
        // add num neurons, given the neuron count of the previous layer
        void addNeurals(unsigned num, unsigned preNumNeural);
        // back-propagation: (this layer's error, previous layer's error, learning rate)
        void backPropagate(vector<double>& dErrWrtDxn, vector<double>& dErrWrtDxnm, double eta);
    
    };
    
    class NeuralNetwork
    {
    private:
        unsigned nLayer; // number of layers
        vector<unsigned> nodes; // node count of each layer
        vector<double> actualOutput; // output of the most recent forward pass
        double etaLearningRate; // learning rate for the weight updates
        unsigned iterNum; // number of training iterations
    public:
        vector<NNlayer*> m_layers; // the single layers making up the network
        // build the network; e.g. ar_nodes = {48, 25, 30} means three layers
        // with 48, 25 and 30 nodes respectively
        void create(unsigned num_layers, unsigned *ar_nodes);
        void initializeNetwork(); // allocate layers/neurons and initialize weights
    
        void forwardCalculate(vector<double> &invect, vector<double> &outvect); // forward pass
    
        void backPropagate(vector<double>& tVect, vector<double>& oVect); // backward pass
    
        void train(vector<vector<double>>& inputVec, vector<vector<double>>& outputVec); // training
    
        void classifier(vector<vector<double>>& inputVec, vector<vector<double>>& outputVec); // classification
    
    };
    
    void NeuralNetwork::initializeNetwork()
    {
        // build every layer and its neurons, then initialize the weights
        // (the index uses the container's size_type to avoid a signed/unsigned mismatch)
        for (vector<NNlayer*>::size_type i = 0; i != nLayer; i++)
        {
            NNlayer *ptrLayer = new NNlayer;
            if (i == 0)
            {
                ptrLayer->addNeurals(nodes[i], 0); // the input layer has no predecessors
            }
            else
            {
                ptrLayer->preLayer = m_layers[i - 1];
                // each neuron is wired up with its connection indices into the
                // previous layer and into this layer's weight vector
                ptrLayer->addNeurals(nodes[i], nodes[i - 1]);
                // number of weights: one per connection plus one bias per neuron
                unsigned num_weights = nodes[i] * (nodes[i - 1] + 1);
                // initialize the weights to small random values
                for (vector<double>::size_type k = 0; k != num_weights; k++)
                {
                    ptrLayer->m_weights.push_back(0.05*rand() / RAND_MAX); // uniform in [0, 0.05]
                }
            }
            m_layers.push_back(ptrLayer);
        }
    }
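
    // Worked example (illustrative, not from the post): with nodes = {2, 3, 1},
    // initializeNetwork() builds
    //   layer 0: 2 neurons, 0 weights (inputs only)
    //   layer 1: 3 neurons, 3*(2+1) = 9 weights (two inputs + one bias each)
    //   layer 2: 1 neuron,  1*(3+1) = 4 weights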
    
    void NNlayer::addNeurals(unsigned num, unsigned preNumNeural)
    {
        for (vector<NNneural>::size_type i = 0; i != num; i++)
        {
            NNneural sneural;
            sneural.output = 0;
            // preNumNeural + 1 connections: one per previous-layer neuron,
            // plus a final one standing in for the bias
            for (vector<NNconnection>::size_type k = 0; k != preNumNeural + 1; k++)
            {
                NNconnection sconnection;
                // weight index within this layer's flat weight vector
                sconnection.weightIdx = i*(preNumNeural + 1) + k;
                // source-neuron index; for the bias connection (k == preNumNeural)
                // it is never used to index the previous layer
                sconnection.neuralIdx = k;
                sneural.m_connection.push_back(sconnection);
            }
            m_neurals.push_back(sneural);
        }
    }
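
    // Index layout (illustrative): if the previous layer has 2 neurons, neuron i
    // of this layer owns three consecutive entries of m_weights:
    //   i*3 + 0, i*3 + 1  -> weights of the connections to previous neurons 0 and 1
    //   i*3 + 2           -> the bias weight
    // With num_connection == 3, neuron neuralIdx's bias therefore sits at
    // num_connection*(neuralIdx + 1) - 1, which forwardCalculate relies on below.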
    void NeuralNetwork::forwardCalculate(vector<double> &invect, vector<double> &outvect)
    {
        actualOutput.clear();
        vector<NNlayer*>::iterator layerIt = m_layers.begin();
        while (layerIt != m_layers.end())
        {
            if (layerIt == m_layers.begin())
            {
                for (vector<NNneural>::size_type k = 0; k != (*layerIt)->m_neurals.size(); k++)
                {
                    // for the input layer, the output is simply the input
                    (*layerIt)->m_neurals[k].output = invect[k];
                }
            }
            else
            {
                vector<NNneural>::iterator neuralIt = (*layerIt)->m_neurals.begin();
                int neuralIdx = 0;
                while (neuralIt != (*layerIt)->m_neurals.end())
                {
                    // number of connections of this neuron (the last one is the bias)
                    vector<NNconnection>::size_type num_connection = (*neuralIt).m_connection.size();
                    // start the sum from the bias weight, the last weight this neuron owns
                    double dsum = (*layerIt)->m_weights[num_connection*(neuralIdx + 1) - 1];
                    // dsum += w * x over the real (non-bias) connections
                    for (vector<NNconnection>::size_type i = 0; i != num_connection - 1; i++)
                    {
                        unsigned wgtIdx = (*neuralIt).m_connection[i].weightIdx;
                        unsigned preIdx = (*neuralIt).m_connection[i].neuralIdx;

                        dsum += (*layerIt)->preLayer->m_neurals[preIdx].output*
                            (*layerIt)->m_weights[wgtIdx];
                    }
                    }
                    neuralIt->output = SIGMOID(dsum);
    
                    neuralIt++; // next neuron
                    neuralIdx++; // each neuron has its own bias slot
                }
            }
            layerIt++;//下一层网络
        }
        // copy the last layer's outputs into outvect
        outvect.clear();
        NNlayer * lastLayer = m_layers[m_layers.size() - 1];
        vector<NNneural>::iterator neuralIt = lastLayer->m_neurals.begin();
        while (neuralIt != lastLayer->m_neurals.end())
        {
            outvect.push_back(neuralIt->output);
            neuralIt++;
        }
    }
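
    // Concretely, one neuron's forward step above (illustrative): with
    // previous-layer outputs {x0, x1} and weights {w0, w1, b} for this neuron,
    //   output = SIGMOID(b + w0*x0 + w1*x1)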
    
    void NeuralNetwork::backPropagate(vector<double>& tVect, vector<double>& oVect)
    {
        // iterator to the last layer
        vector<NNlayer *>::iterator lit = m_layers.end() - 1;
        // error dE/dx at every output-layer node
        vector<double> dErrWrtDxLast((*lit)->m_neurals.size());
        for (vector<NNneural>::size_type i = 0; i != (*lit)->m_neurals.size(); i++)
        {
            dErrWrtDxLast[i] = oVect[i] - tVect[i]; // derivative of 0.5*(o-t)^2 w.r.t. o
        }
        // one error vector per layer
        vector<vector<double>> diffVect(nLayer);
        diffVect[nLayer - 1] = dErrWrtDxLast;
    
        // zero-initialize the errors of the remaining layers
        for (unsigned int i = 0; i < nLayer - 1; i++)
        {
            // one error entry per neuron in the layer
            diffVect[i].resize(m_layers[i]->m_neurals.size(), 0.0);
        }
    
        vector<NNlayer*>::size_type i = m_layers.size() - 1;
        // walk the layers backwards, calling BP on each; the first argument is
        // layer i's error, the second is filled with layer i-1's error and
        // becomes the first argument of the next call
        for (; lit != m_layers.begin(); lit--)
        {
            (*lit)->backPropagate(diffVect[i], diffVect[i - 1], etaLearningRate);
            i--;
        }
        diffVect.clear();
    }
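
    // Notation used below (as in the referenced derivation): x is a neuron's
    // output and y its weighted-sum input, so x = SIGMOID(y). The routine computes
    //   output layer:    dE/dx_i = o_i - t_i                 (squared-error loss)
    //   any layer:       dE/dy_i = x_i*(1 - x_i) * dE/dx_i   (the delta)
    //   weights:         dE/dw_ki = input_k * dE/dy_i        (input_k = 1 for the bias)
    //   previous layer:  dE/dx_k = sum_i w_ki * dE/dy_i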
    
    void NNlayer::backPropagate(vector<double>& dErrWrtDxn, vector<double>& dErrWrtDxnm, double eta)
    {
        // parameters: this layer's error dE/dx, the previous layer's error
        // (filled in here), and the learning rate
        // first compute each neuron's delta dE/dy
        double output;
        vector<double> dErrWrtDyn(dErrWrtDxn.size()); // one delta per neuron
        for (vector<NNneural>::size_type i = 0; i != m_neurals.size(); i++)
        {
            output = m_neurals[i].output;
            // delta of layer i: for the output layer dErrWrtDxn is (o - t);
            // for hidden layers it is the sum of w * (layer i+1's deltas)
            dErrWrtDyn[i] = DSIGMOID(output)*dErrWrtDxn[i];
        }
        // accumulate the partial derivative dE/dw for every weight
        unsigned ii(0);
        vector<NNneural>::iterator nit = m_neurals.begin();
        vector<double> dErrWrtDwn(m_weights.size(), 0);
    
        while (nit != m_neurals.end())
        {
            // for each connection of this neuron (the last one is the bias)
            for (vector<NNconnection>::size_type k = 0; k != (*nit).m_connection.size(); k++)
            {
                if (k == (*nit).m_connection.size() - 1)
                    output = 1; // the bias input is the constant 1
                else // output of the previous-layer neuron feeding this weight
                    output = preLayer->m_neurals[(*nit).m_connection[k].neuralIdx].output;
                // accumulate this weight's partial derivative: dE/dw = input * delta
                dErrWrtDwn[((*nit).m_connection[k].weightIdx)] += output*dErrWrtDyn[ii];
            }
            nit++;
            ii++;
        }
    
    
        // propagate the error backwards: dErrWrtDxnm becomes the dErrWrtDxn of
        // the previous layer (the next one this routine is called on)
        unsigned j(0);
        nit = m_neurals.begin();
        while (nit != m_neurals.end())
        {
            // skip the last (bias) connection: it has no source neuron
            for (vector<NNconnection>::size_type k = 0; k != (*nit).m_connection.size()-1; k++)
            {
                dErrWrtDxnm[(*nit).m_connection[k].neuralIdx] += dErrWrtDyn[j] *
                    m_weights[(*nit).m_connection[k].weightIdx];
            }
            j++;
            nit++;
        }
    
        // finally, take one gradient-descent step on every weight of this layer
        for (vector<double>::size_type i = 0; i != m_weights.size(); i++)
        {
            m_weights[i] -= eta*dErrWrtDwn[i];
        }
    }
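
    The post declares create(), train() and classifier() but gives no bodies for them. Purely as a sketch (not from the original post): create() can simply record the topology and delegate to initializeNetwork(), after which one training step is a forward pass followed by backPropagate(). The learning-rate value below is an assumption; the post never sets etaLearningRate.

    void NeuralNetwork::create(unsigned num_layers, unsigned *ar_nodes)
    {
        nLayer = num_layers;
        nodes.assign(ar_nodes, ar_nodes + num_layers); // copy the per-layer node counts
        etaLearningRate = 0.3; // assumed value
        initializeNetwork();
    }

    int main()
    {
        unsigned topology[] = { 2, 3, 1 }; // 2 inputs, 3 hidden, 1 output
        NeuralNetwork net;
        net.create(3, topology);

        // one training step on a single (input, target) pair
        vector<double> in(2, 1.0), target(1, 0.0), out;
        net.forwardCalculate(in, out);  // out now holds the prediction
        net.backPropagate(target, out); // one gradient step on all weights
        return 0;
    }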
  • Original post: https://www.cnblogs.com/573177885qq/p/5770621.html