  • Importing a custom module in the Electron main process

    I am a complete beginner at Electron and Node.js development, and this is my first attempt at building an app with them.

    I can only learn as I go, and I have run into all kinds of problems along the way.

    1. I did not want to put every main-process function into a single file; that makes the file messy and hard to maintain.

    Idea: pull another JS file into main.js and call its functions from there.

    Problem: plain JS files have no built-in way to call into one another.

    Solution:

    Define a custom Node.js module, import it in main.js, and do the work through it. A minimal sketch of the pattern comes first, below; the full main.js example after the sketch was taken from the web.
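
    The mechanism is Node.js's module system: the extra file attaches whatever it wants to expose to module.exports, and main.js loads it with require() by relative path. A minimal sketch of just that pattern (the file name libs/myService.js is made up here for illustration and is not the author's actual code):

    // libs/myService.js - expose the functions this file wants to share
    'use strict';

    const MyService = {
        start: function () {
            console.log('my service started');
        }
    };

    module.exports = MyService;

    // main.js - load the module by its relative path and call into it
    const MyService = require('./libs/myService');
    MyService.start();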

     1 'use strict';
     2 const electron = require('electron');
     3 const app = electron.app;
     4 const CrawlService = require('./libs/crawlService');
     5 
     6 //start crawl service
     7 CrawlService.start();
     8 
     9 // adds debug features like hotkeys for triggering dev tools and reload
    10 require('electron-debug')();
    11 
    12 // prevent window being garbage collected
    13 let mainWindow;
    14 
    15 function onClosed() {
    16     // dereference the window
    17     // for multiple windows store them in an array
    18     mainWindow = null;
    19 }
    20 
    21 function createMainWindow() {
    22     const win = new electron.BrowserWindow({
    23         width: 1200,
    24         height: 800
    25     });
    26 
    27     win.loadURL(`file://${__dirname}/static/index.html`);
    28     win.openDevTools();
    29     win.on('closed', onClosed);
    30 
    31     return win;
    32 }
    33 
    34 app.on('window-all-closed', () => {
    35     if (process.platform !== 'darwin') {
    36         app.quit();
    37     }
    38 });
    39 
    40 app.on('activate', () => {
    41     if (!mainWindow) {
    42         mainWindow = createMainWindow();
    43     }
    44 });
    45 
    46 app.on('ready', () => {
    47     mainWindow = createMainWindow();
    48 })

    Lines 4-7 of the code above are where the custom module is imported and started; the custom module itself (libs/crawlService.js) is:

    const request = require('request'),
        async = require('async'),
        ipcMain = require('electron').ipcMain,
        db = require('./dbService'),
        cheerio = require('cheerio');
    
    const CrawlService = {
        start: function () {
            ipcMain.on('search-keyword', function (event, keyword) {
                console.log('channel "search-keyword" on msg:' + keyword);
    
                let match = {$regex: new RegExp(keyword)}; // build the regex directly rather than eval-ing a regex literal
                var query = keyword ? {$or: [{title: match}, {content: match}]} : {};
                db.find(query).sort({publishDate: -1}).limit(100).exec(function (err, mails) {
                    event.sender.send('search-reply', {mails: mails});
                });
            });
    
            ipcMain.on('start-crawl', (event, arg) => {
                console.log('channel "start-crawl" on msg:' + arg);
                var updater = {
                    sender: event.sender,
                    channel: arg,
                    updateProgress: function (progress) {
                        this.sender.send(this.channel, {progress: progress});
                    }
                };
                crawler(updater);
            });
        }
    };
    
    function UrlCrawler(targetUrl) {
        return {
            targetUrl: targetUrl,
            startCrawl: function (processDom) {
                request(this.targetUrl, (err, response, body) => {
                    if (err) throw err;
                    var $ = cheerio.load(body);
                    processDom($)
                });
            }
        };
    }
    
    function pageCrawl(page, totalPage, updater, crawlNextPage, crawProgress) {
        new UrlCrawler('http://12345.chengdu.gov.cn/moreMail?page=' + page).startCrawl(($) => {
            var $pageMails = $('div.left5 ul li.f12px'),
                sameMailsInPage = 0;
    
            async.eachOfLimit($pageMails, 10, function iteratee(item, key, nextMail) {
                if(crawProgress.skip){
                    return nextMail();
                }
                let $item = $(item),
                    mailDetailUrl = $item.find('a').prop('href'),
                    divs = $item.find('div');
                var mail = {
                _id: mailDetailUrl.match(/\d+/g)[0], // numeric id taken from the detail URL
                    title: $(divs[0]).text().trim(),
                    sender: $(divs[1]).text().trim(),
                    receiveUnit: $(divs[2]).text().trim(),
                    status: $(divs[3]).text().trim(),
                    category: $(divs[4]).text().trim(),
                    views: $(divs[5]).text().trim()
                };
    
                new UrlCrawler('http://12345.chengdu.gov.cn/' + mailDetailUrl).startCrawl(($) => {// crawl mail detail
                    mail.content = $($('.rightside1 td.td2')[1]).text().trim();
                    mail.result = $('.rightside1 tbody tr:last-child').text().trim();
                    mail.publishDate = $($('.rightside1 td.td32')[0]).text().trim() || $($('.rightside1 td.td32')[1]).text().trim();
    
                    console.log(mail._id);
    
                    db.update({_id: mail._id}, mail, {upsert: true, returnUpdatedDocs: true}, function (err, numReplaced, affectedDocuments, upsert) {
                        if (err) {
                            throw err;
                        }
                    if (!upsert && affectedDocuments.result == mail.result) { // this mail has not changed since the last crawl
                        if (++sameMailsInPage == 15) { // if none of the 15 mails on this page changed, stop crawling further pages
                                crawProgress.skip = true;
                            }
                        }
                    });
    
                    nextMail();
                });
            }, function done() {
                crawlNextPage();
                updater.updateProgress(Math.floor(page * 100 / totalPage));
            });
        });
    }
    
    /**
     * 1. get the total page count
     * 2. iterate from page 1 to the last page
     *    2.1 fetch the mail summary list on the page
     *    2.2 iterate over the mail summaries on the page
     *        2.2.1 fetch the mail detail from its URL
     *        2.2.2 save the mail to the db
     *    2.3 if none of the mails on the current page were updated, stop crawling; otherwise continue with step 2
     *
     * @param updater progress reporter created in CrawlService.start
     */
    function crawler(updater) {
        new UrlCrawler('http://12345.chengdu.gov.cn/moreMail').startCrawl(($) => {
        var totalSize = $('div.pages script').html().match(/iRecCount = \d+/g)[0].match(/\d+/g)[0], // total record count parsed from the inline paging script
                totalPageSize = Math.ceil(totalSize / 15),
                pagesCollection = [],
                crawProgress = {skip: false};
            for (let i = 1; i <= totalPageSize; i++) {
                pagesCollection.push(i);
            }
            async.eachSeries(pagesCollection, function (page, crawlNextPage) {
                pageCrawl(page, totalPageSize, updater, crawlNextPage, crawProgress);
            })
        });
    }
    
    module.exports = CrawlService;
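
    For reference, the renderer process drives this module over the same IPC channel names: the service listens on 'search-keyword' and 'start-crawl', replies to searches on 'search-reply', and reports crawl progress on whatever channel name the renderer passes as the argument of 'start-crawl'. The sketch below is not part of the original post; it only assumes those channel names, Electron's standard ipcRenderer API, and Node integration in the renderer (the default in Electron of that era). The file name static/renderer.js is hypothetical.

    // static/renderer.js - hypothetical script loaded by static/index.html
    const ipcRenderer = require('electron').ipcRenderer;

    // ask the main process to run a search, then listen for the reply
    ipcRenderer.send('search-keyword', 'test');
    ipcRenderer.on('search-reply', function (event, data) {
        console.log('got ' + data.mails.length + ' mails');
    });

    // start a crawl; the string argument is the channel name the main
    // process will use to push progress updates back to this window
    ipcRenderer.send('start-crawl', 'crawl-progress');
    ipcRenderer.on('crawl-progress', function (event, data) {
        console.log('crawl progress: ' + data.progress + '%');
    });

    Note that ./dbService is never shown in the post; judging by the find().sort().limit().exec() and update(..., {upsert: true, returnUpdatedDocs: true}) calls, it is presumably a NeDB datastore exported in the same module style.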

    Author: lazyboy
    Source: http://www.cnblogs.com/xbotao/
    If you have any questions, leave a comment or email me: upc_xbt 163.com

  • Original article: https://www.cnblogs.com/xbotao/p/6118616.html