zoukankan      html  css  js  c++  java
  • 统计nginx日志的python实现

    老大布置的任务,要分析一个5G大小的nginx log file,因为我的python也是刚学,所以摸索了很久,才实现了这个需求,废话不多说,简单粗暴,直接上代码!

    功能介绍:

    1、统计Top 100 访问次数最多的ip,并显示地理位置信息!这个是用的淘宝的地址库返回的ip地理位置及运营商信息  淘宝ip地址库REST API

    注:这地方说明一下,log里记录的文件有的是分段发送给客户端,所以同一个ip可能只是访问一次,但在log里显示了多条记录,在这里我就简单粗暴的把每一次都算作一个访问记录!有待改进,其他同学也可以修改下,告诉我应该怎么识别多少条记录是一次完整的访问!

    2、统计Top 100 流量最高ip,并显示地理位置信息!

    3、统计Top 100 访问流量最高url列表! 

    4、log文件记录的总流量!

    下面上代码,有需要的同学直接拿去!这个脚本分析一个4G的log用时13分左右,系统配置(16G内存)!

    (1)ip_location.py文件:利用淘宝ip地址库,返回ip所在国家,区域(省份),城市,运营商

    ip_location.py
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # the script is used to query the location of every ip
    
    import urllib
    import json
    
    #淘宝ip库接口
    url = "http://ip.taobao.com/service/getIpInfo.php?ip="
    
    def ip_location(ip):
        """Query Taobao's IP REST service and return location info for *ip*.

        Returns a string "countryregioncity\t\tisp" on success, or None
        when the service reports a non-zero code (the original version had
        the return statement dedented out of the success branch, which is
        a syntax error).
        """
        data = urllib.urlopen(url + ip).read()
        datadict = json.loads(data)
        # API payload shape: {"code": 0, "data": {...}}; code 0 means success.
        if datadict.get("code") == 0:
            info = datadict["data"]
            return (info["country"] + info["region"] + info["city"]
                    + "\t\t" + info["isp"])

    (2)logparser.py文件:完成统计功能,具体见代码内注释!实现方法都很初级,毕竟是新手,见谅!

    #!/usr/local/python
    # -*- coding: utf-8 -*-
    
    import os
    import time
    import re
    import sys
    import ip_location
    
    """定义一个时间类,可以选取要分析的时间段,如果没有指定时间段,则分析全部log"""
    class TimeParser(object):
        """Extract the timestamp from a log line and test whether it falls
        inside a configured time period.

        re_time  -- regex matching the timestamp inside a log line
        str_time -- strptime format for that timestamp
        period   -- (start, end) strings in the same format
        """

        def __init__(self, re_time, str_time, period):
            self.__re_time = re.compile(re_time)
            self.__str_time = str_time
            self.__period = period
            # Parse the two period boundaries exactly once.  The original
            # re-parsed both with strptime+mktime for every single log line,
            # which is pure overhead on a multi-GB file.
            self.__start = time.mktime(time.strptime(period[0], str_time))
            self.__end = time.mktime(time.strptime(period[1], str_time))

        def __get(self, line):
            # Raises AttributeError when the line has no timestamp; callers
            # rely on that to skip malformed lines.
            t = re.search(self.__re_time, line).group(0)
            return time.mktime(time.strptime(t, self.__str_time))

        def inPeriod(self, line):
            """Return True if the line's timestamp lies strictly inside the period."""
            t = self.__get(line)
            return self.__start < t < self.__end
    
    class ParseLog(object):
        """Parse an nginx access log and emit top-N reports.

        Produces three report files (TopIpNo.txt, iptraffic.txt,
        urltraffic.txt) and prints the total traffic to stdout.
        Call TopIp() first (it runs Count()), then TopUrl().
        """

        def __init__(self, file, re_time, str_time, period):
            # Store everything Count() needs.  The original ignored these
            # arguments and silently read module globals instead; it also had
            # the try/except below dedented out of the method entirely.
            self.file = file
            self.re_time = re_time
            self.str_time = str_time
            self.period = period
            self.ip_dict = {}   # ip  -> {'number': hits, 'traffic': bytes}
            self.url_dict = {}  # url -> total bytes
            # Log names are expected to look like "domain_date_suffix";
            # anything else falls back to the part before the first dot.
            try:
                self.domain, self.parsetime, self.suffix = file.split("_")
            except ValueError:
                self.domain = file.split(".")[0]
                self.parsetime = "unknown time"

        def Count(self):
            """Scan the log once, filling ip_dict/url_dict, and print total traffic."""
            count_time = TimeParser(self.re_time, self.str_time, self.period)
            self.total_traffic = []

            with open(self.file) as f:
                for i, line in enumerate(f):
                    # Lines with no timestamp raise AttributeError inside
                    # TimeParser; skip those and lines outside the period.
                    try:
                        if not count_time.inPeriod(line):
                            continue
                    except AttributeError:
                        continue

                    ip = line.split()[0]
                    # Response size: the number following the 3-digit status
                    # code; a missing (or "0") size counts as zero traffic.
                    try:
                        traffic = re.findall(r'\d{3}\ [^0]\d+', line)[0].split()[1]
                    except IndexError:
                        traffic = 0
                    # Requested url taken from the "GET ..." request field.
                    try:
                        url = re.findall(r'GET\ .*\.*\ ', line)[0].split()[1]
                    except IndexError:
                        url = "unknown"

                    entry = self.ip_dict.setdefault(ip, {'number': 0, 'traffic': 0})
                    entry['number'] += 1
                    entry['traffic'] += int(traffic)
                    self.url_dict[url] = self.url_dict.get(url, 0) + int(traffic)
                    self.total_traffic.append(int(traffic))

                    # Progress marker roughly every million lines.
                    if not i % 1000000:
                        print("have processed " + str(i) + " lines !")

            total = sum(self.total_traffic)
            print("******************************************************************")
            print(self.domain + " all the traffic in " + self.parsetime + " is below:")
            print("total_traffic: %s" % str(total/1024/1024) + "MB")

        def TopIp(self, number):
            """Write the top *number* IPs by hit count and by traffic to files."""
            self.Count()
            top_number_ip = {}
            top_traffic_ip = {}
            for ip, entry in self.ip_dict.items():
                top_number_ip[ip] = entry['number']
                top_traffic_ip[ip] = entry['traffic']

            # (ip, value) pairs sorted by value, largest first.
            sort_ip_no = sorted(top_number_ip.items(), key=lambda e: e[1], reverse=True)
            sort_ip_traffic = sorted(top_traffic_ip.items(), key=lambda e: e[1], reverse=True)

            # Top-100-by-connections report.
            with open('TopIpNo.txt', 'w+') as ipno:
                ipno.write(u"ip地址\t\t\t访问次数\t\t国家/区域/城市\t\t\t运营商\n")
                ipno.write("-------------------------------------------------------------------------------------------------\n")
                for i in range(number):
                    # Fewer than *number* ips, or a failed geo lookup (the
                    # lookup needs the network), just skips the row.
                    try:
                        ipno.write(sort_ip_no[i][0] + "\t\t" + str(sort_ip_no[i][1])
                                   + "\t\t\t" + ip_location.ip_location(sort_ip_no[i][0]) + "\n")
                    except Exception:
                        continue
                ipno.write("-------------------------------------------------------------------------------------------------\n")

            # Top-100-by-traffic report.  (The original had a statement wedged
            # between the lines of this try block at the wrong indent level,
            # which was a syntax error.)
            with open('iptraffic.txt', 'w+') as iptr:
                iptr.write(u"ip地址\t\t\t总流量(MB)\t\t国家/区域/城市\t\t\t运营商\n")
                iptr.write("-------------------------------------------------------------------------------------------------\n")
                for i in range(number):
                    try:
                        iptr.write(sort_ip_traffic[i][0] + "\t\t" + str(sort_ip_traffic[i][1]/1024/1024))
                        # Append the geo/isp columns.
                        iptr.write("\t\t\t" + ip_location.ip_location(sort_ip_traffic[i][0]) + "\n")
                    except Exception:
                        continue
                iptr.write("-------------------------------------------------------------------------------------------------\n")

        def TopUrl(self, number):
            """Write the top *number* urls by traffic to urltraffic.txt.

            Requires Count() to have run (TopIp() calls it).
            """
            sort_url_traffic = sorted(self.url_dict.items(), key=lambda e: e[1], reverse=True)
            with open('urltraffic.txt', 'w+') as urtr:
                urtr.write("Filename".ljust(75) + u"TotalTraffic(MB)" + "\n")
                urtr.write("-----------------------------------------------------------------------------------------\n")
                for i in range(number):
                    # Skip when there are fewer than *number* urls.
                    try:
                        urtr.write(sort_url_traffic[i][0].ljust(80) + str(sort_url_traffic[i][1]/1024/1024) + "\n")
                    except Exception:
                        continue
                urtr.write("-----------------------------------------------------------------------------------------\n")
    
    
    
    # Timestamp regex and matching strptime format; rarely needs changing.
    re_time = '\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2}'
    str_time = '%d/%b/%Y:%H:%M:%S'
    
    # Time window to analyse (start, end).
    period = ("16/Nov/2000:16:00:00", "16/Nov/2015:17:00:00")
    
    # How many entries each top-N report should contain.
    number = 100
    
    if __name__ == '__main__':
        # Guard clause: bail out early when no log file was given.
        if len(sys.argv) < 2:
            print('no logfile specified!')
            print("Usage: python logParser.py filename")
            time.sleep(2)
            sys.exit()
        file = sys.argv[1]
        lp = ParseLog(file, re_time, str_time, period)
        print("")
        print("Start to parse the " + file + " struggling! please wait patiently!")
        print("")
        print("******************************************************************")
        time.sleep(2)
        lp.TopIp(number)
        lp.TopUrl(number)

    用法:python  logparser.py  要分析的log文件名

    ------ 若讷于言,则敏于行!------
  • 相关阅读:
    最全负载均衡:算法、实现、亿级负载解决方案详解
    淘宝分布式架构演变案例详解
    分布式一致性协议实现原理
    ReentrantReadWriteLock的使用
    线程之单例
    线程的优先级
    java线程的6种状态
    mybatis <foreach> 标签
    java多线程 上下文切换
    docker的复制和挂载
  • 原文地址:https://www.cnblogs.com/searchappiness/p/2788299.html
Copyright © 2011-2022 走看看