  • apscheduler: setting up a scheduled task for a Python script

    Background concepts: https://zhuanlan.zhihu.com/p/95563033

    BlockingScheduler vs. BackgroundScheduler: https://www.jianshu.com/p/b829a920bd33
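
    In short, BlockingScheduler.start() takes over the calling thread (right for a dedicated scheduler process, as in the script below), while BackgroundScheduler schedules jobs on a background thread and returns immediately, so the main thread must be kept alive. A minimal sketch of the BackgroundScheduler variant (the tick job and its 5-second interval are illustrative, not part of the original script):

    #!/usr/bin/env python
    import time
    import atexit
    from apscheduler.schedulers.background import BackgroundScheduler

    def tick():
        print('tick', time.strftime('%H:%M:%S'))

    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, trigger='interval', seconds=5)
    scheduler.start()                    # returns immediately; jobs run on a daemon thread
    atexit.register(scheduler.shutdown)  # shut the scheduler down cleanly at interpreter exit

    while True:  # keep the main thread alive, or the process (and scheduler) exits
        time.sleep(1)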

    apscheduler recurring task: push monitoring data every 5 seconds

    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    import os
    import sys
    import time      # for building UTC timestamps
    import logging
    import datetime

    # Third-party packages; install them first:
    # pip install requests
    # pip install requests-aws4auth
    import requests
    from requests_aws4auth import AWS4Auth

    # Make the script's own directory importable so monitor.monitor resolves
    BASE_DIR = os.path.dirname(__file__)
    sys.path.append(BASE_DIR)
    from monitor.monitor import Monitor
    
    
    logger = logging.getLogger("mylogger")
    logger.setLevel("DEBUG")
    ch = logging.StreamHandler()
    ch.setLevel("DEBUG")
    logger.addHandler(ch)
    
    logger.debug("推送监控数据-----")
    region = 'cn-beijing-6'
    service = 'monitor'
    host = 'http://%s.%s.api.ksyun.com' % (service, region)
    headers = {
        'Accept': 'Application/json'
    }
    # Your own AK/SK (replace the placeholders with real credentials)
    ak = "XXXXXXXXXXXXXXX"
    sk = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
    # Log the configuration for debugging (avoid logging the SK like this in production)
    logger.debug("region:" + region + ",service:" + service + ",host:" + host + ",ak:" + ak + ",sk:" + sk)
    credentials = {
        'ak': ak,
        'sk': sk
    }
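    # Kingsoft Cloud's OpenAPI accepts AWS Signature Version 4 requests,
    # which is why requests-aws4auth can sign them directly.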
    def auth():
        return AWS4Auth(credentials['ak'], credentials['sk'], region, service)
    
    query = {
        'Action': 'PutMetricData',
        'Version': '2017-07-01'
    }
    
    
    def getUtcTimeStampStr():
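        # Build a UTC ISO-8601 timestamp ('YYYY-MM-DDTHH:MM:SSZ'), the format used in the metric payload below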
        utctime = time.gmtime()
        utc_str = time.strftime("%Y-%m-%dT%H:%M:%SZ", utctime)
        # utc_str = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        return utc_str
    
    def get_data():
        m = Monitor()
        cpu_info, mem_info, swap_info = m.cpu(), m.mem(), m.swap()
        utc_time = getUtcTimeStampStr()
        json_data = [
            {
                "namespace": "ZMD_Host_Monitor",
                "metricName": "cpu_percent",
                "timestamp": utc_time,
                "value": cpu_info.get('percent_avg'),
                "dimensions": [
                    "product=Zmd_Host_Monitor",
                    "apiname=zmd_cpu_test"
                ],
                "unit": "Percent"
            },
            {
                "namespace": "ZMD_Host_Monitor",
                "metricName": "mem_percent",
                "timestamp": utc_time,
                "value": mem_info.get('percent'),
                "dimensions": [
                    "product=Zmd_Mem_Monitor",
                    "apiname=zmd_mem_test"
                ],
                "unit": "Percent"
            },
            {
                "namespace": "ZMD_Host_Monitor",
                "metricName": 'mem_total',
                "timestamp": utc_time,
                "value": mem_info.get('total'),
                "dimensions": [
                    "product=Zmd_Mem_Monitor",
                    "apiname=zmd_mem_test"
                ],
                "unit": "Gigabytes"
            },
            {
                "namespace": "ZMD_Host_Monitor",
                "metricName": 'mem_used',
                "timestamp": utc_time,
                "value": mem_info.get('used'),
                "dimensions": [
                    "product=Zmd_Mem_Monitor",
                    "apiname=zmd_mem_test"
                ],
                "unit": "Gigabytes"
            },
            {
                "namespace": "ZMD_Host_Monitor",
                "metricName": "mem_free",
                "timestamp": utc_time,
                "value": mem_info.get('free'),
                "dimensions": [
                    "product=Zmd_Mem_Monitor",
                    "apiname=zmd_mem_test"
                ],
                "unit": "Gigabytes"
            }
        ]
        logger.debug(json_data)
        return json_data
    
    
    
    
    
    # Entry point
    if __name__ == "__main__":
        from apscheduler.schedulers.blocking import BlockingScheduler
        # from apscheduler.jobstores.mongodb import MongoDBJobStore
        # from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
        from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
        from apscheduler.jobstores.memory import MemoryJobStore
        from apscheduler.jobstores.redis import RedisJobStore
        import redis
        import pickle
    
    
        def my_job(id='my_job'):
            # Sign and POST one batch of metric data points to the monitor endpoint
            response = requests.post(host, params=query, headers=headers, auth=auth(), json=get_data())
            logger.debug(response.text)
            print(id, '-->', datetime.datetime.now())
    
        # Connection parameters are passed through to the Redis client by RedisJobStore
        connect_args = {
            'host': '192.168.1.8',
            'port': 6379,
            'password': ''
        }
        jobstores = {
            # Jobs are pickled into a Redis hash; their next run times go into a sorted set
            'default': RedisJobStore(db=13,
                                     jobs_key='apscheduler.jobs',
                                     run_times_key='apscheduler.run_times',
                                     pickle_protocol=pickle.HIGHEST_PROTOCOL,
                                     **connect_args)
        }
        # A timezone can also be passed when constructing the scheduler, e.g.:
        # scheduler = BlockingScheduler(jobstores=jobstores, executors=executors,
        #                               job_defaults=job_defaults, timezone="Asia/Shanghai")
        # Alternative: in-memory job store (jobs are lost when the process exits)
        # jobstores = {
        #     'default': MemoryJobStore()
        # }
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(10)
        }
        job_defaults = {
            'coalesce': True,   # if runs have piled up (e.g. after a restart), execute the job only once
            'max_instances': 3  # at most 3 concurrently running instances of the same job
        }
        scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
        scheduler.add_job(my_job, args=['job_interval', ], id='job_interval', trigger='interval', seconds=5,
                          replace_existing=True)
        # scheduler.add_job(my_job, args=['job_cron', ], id='job_cron', trigger='cron', month='4-8,11-12', hour='20-23', second='*/10', 
        #                   end_date='2020-6-16')
        # scheduler.add_job(my_job, args=['job_once_now', ], id='job_once_now')
        # scheduler.add_job(my_job, args=['job_date_once', ], id='job_date_once', trigger='date',
        #                   run_date='2020-6-15 08:34:00')
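        # Trigger reference: 'interval' repeats at a fixed period, 'cron' fires when the
        # calendar fields match, 'date' runs once at run_date, and a job added with no
        # trigger (job_once_now above) runs once, immediately.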
        try:
            scheduler.start()  # blocks the current thread until shutdown
        except (KeyboardInterrupt, SystemExit):
            print('exit')
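
    Because the job store is Redis-backed, the job definition and its next run times survive process restarts (and replace_existing=True above keeps restarts from stacking duplicate jobs). A minimal sketch for inspecting what RedisJobStore persisted, assuming the same Redis instance and keys configured above (the 192.168.1.8 address comes from that config):

    import pickle
    import redis

    r = redis.Redis(host='192.168.1.8', port=6379, db=13)
    # RedisJobStore keeps pickled job state in a hash and next run times in a sorted set
    for job_id, raw in r.hgetall('apscheduler.jobs').items():
        state = pickle.loads(raw)  # unpickling needs apscheduler importable (trigger objects are pickled)
        print(job_id, state.get('trigger'))
    print(r.zrange('apscheduler.run_times', 0, -1, withscores=True))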
  • Original post: https://www.cnblogs.com/zhangmingda/p/14001062.html