  • day17: the json, time, zipfile, and tarfile modules

    一、The json module

    """
    所有的编程语言都能够识别的数据格式叫做json,是字符串
    能够通过json序列化成字符串与如下类型: (int float bool str list tuple dict None)
    """
    import json

    1、Basic usage of json

    # (1) dumps and loads are a pair: dumps serializes an object into a string, loads reverses it
    dic = {"name":"高云峰","age":81,"classroom":"python31","family":["老爸","老妈","老伴"]}
    # ensure_ascii=False keeps Chinese characters readable; sort_keys=True sorts the dictionary keys
    res = json.dumps(dic,ensure_ascii=False,sort_keys=True)
    print(res , type(res))
    
    # loads deserializes the string back into the original data type
    dic = json.loads(res)
    print(dic,type(dic))
    
    # (2) dump and load are a pair that work with files: serialize the data and write it to a file
    dic = {"name":"高云峰","age":81,"classroom":"python31","family":["老爸","老妈","老伴"]}
    with open("ceshi0728.json",mode="w",encoding="utf-8") as fp:
        json.dump(dic,fp,ensure_ascii=False)
    
    with open("ceshi0728.json",mode="r",encoding="utf-8") as fp:
        dic = json.load(fp)
        print(dic, type(dic))

    2、Differences between json and pickle

    """json 可以连续dump , 但是不能连续load"""
    dic1 = {"a":1,"b":2}
    dic2 = {"c":3,"d":4}
    with open("0728_2.json",mode="w",encoding="utf-8") as fp:
        json.dump(dic1,fp)
        fp.write("
    ")
        json.dump(dic2,fp)
        fp.write("
    ")
    """load 在获取数据时,是一次性拿取所有内容"""
    # error
    """
    with open("0728_2.json",mode="r",encoding="utf-8") as fp:
        res = json.load(fp)
        print(res)
    """

    Solution: read the file line by line and call loads on each line

    with open("0728_2.json",mode="r",encoding="utf-8") as fp:
        for i in fp:
            dic = json.loads(i)
            print(dic,type(dic))
    """pickle 可以连续dump 也可以连续load 因为pickle在存储数据的时候会在末尾加上结束符"""
    import pickle
    dic1 = {"a":1,"b":2}
    dic2 = {"c":3,"d":4}
    with open("0728_3.pkl",mode="wb") as fp:
        pickle.dump(dic1,fp)
        pickle.dump(dic2,fp)
    """
    with open("0728_3.pkl",mode="rb") as fp:
        dic1 = pickle.load(fp)
        print(dic1 , type(dic1))
        dic2 = pickle.load(fp)
        print(dic2 , type(dic2))
    """
    # try ... except ... exception handling (used to suppress errors)
    """
    try :
        code that might raise an error
    except:
        if an error is raised, run this except block instead
    """
    # read every object stored in the file
    try:
        with open("0728_3.pkl",mode="rb") as fp:
            while True:
                res = pickle.load(fp)
                print(res)
    except:
        # pickle.load raises EOFError once every object has been read; the bare except swallows it and ends the loop
        pass
    """
    # json 和 pickle 两个模块的区别:
    (1)json序列化之后的数据类型是str,所有编程语言都识别,
       但数据类型仅限于(int float bool)(str list tuple dict None)
       json不能连续load,只能一次性拿出所有数据
    (2)pickle序列化之后的数据类型是bytes,
       所有数据类型都可转化,但仅限于python之间的存储传输.
       pickle可以连续load,多套数据放到同一个文件中
    
    json使用的广泛性比pickle更强.
    json   用在不同编程语言的数据交流中
    pickle 用于数据的存储
    """

    二、The time module

    Timestamp: the offset in seconds counted from 1970-01-01 00:00:00 (the Unix epoch)
    
    Format string: a formatted, human-readable time string
    
    Time tuple (structured time, struct_time): struct_time has 9 elements (year, month, day, hour, minute, second, day of the week, day of the year, DST flag)

    %a %b %d %H:%M:%S %Y : the time string format produced by ctime/asctime

    import time
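
    A quick orientation before the individual functions (a hedged sketch of my own, not part of the original notes): localtime() turns a timestamp into a struct_time, mktime() turns it back, and strftime()/strptime() convert between struct_time and format strings.

    import time

    ts = time.time()                              # timestamp (float seconds)
    st = time.localtime(ts)                       # struct_time
    s = time.strftime("%Y-%m-%d %H:%M:%S", st)    # format string
    st2 = time.strptime(s, "%Y-%m-%d %H:%M:%S")   # back to a struct_time
    ts2 = time.mktime(st2)                        # back to a timestamp (sub-second part is lost)
    print(ts, s, ts2)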

    1、time() gets the current timestamp (*)

    res = time.time()
    print(res)
    
    # 1596291325.1793654

    2、localtime() gets the local time tuple (*)

    The argument is a timestamp; it defaults to the current time

    res = time.localtime()
    print(res)
    """
    time.struct_time(
        tm_year=2020, 
        tm_mon=7, 
        tm_mday=28, 
        tm_hour=10, 
        tm_min=45, 
        tm_sec=9, 
        tm_wday=1, 
        tm_yday=210, 
        tm_isdst=0
    )
    """

    # pass a specific timestamp to get its time tuple

    ttp = 1595904161
    res = time.localtime(ttp)
    print(res)
    
    """
    time.struct_time(
        tm_year=2020, 
        tm_mon=7, 
        tm_mday=28,
        tm_hour=10, 
        tm_min=42, 
        tm_sec=41, 
        tm_wday=1,
        tm_yday=210,
        tm_isdst=0
    )
    """

    3、mktime() converts a time tuple into a timestamp (*)

    The argument is a time tuple

    ttp = (2020,7,28,10,48,30,0,0,0)
    res = time.mktime(ttp)
    print(res) 
    
    # 1595904510.0

    4、ctime() gets the local time string (*)

    The argument is a timestamp; it defaults to the current time

    res = time.ctime() # with no argument, the current timestamp is used
    print(res)
    
    # Sat Aug  1 22:24:00 2020

    With a specific timestamp:

    res = time.ctime(1595904161)
    print(res)
    
    # Tue Jul 28 10:42:41 2020

    5、asctime() converts a time tuple into a time string

    The argument is a time tuple (asctime does not work out the day of the week for you)

    ttp = (2020,7,28,10,54,30,6,0,0) # tm_wday=6 is taken at face value, so this prints Sun even though 2020-07-28 was a Tuesday
    res = time.asctime(ttp)
    print(res)
    
    # Sun Jul 28 10:54:30 2020

    Workaround: go through mktime and ctime so the weekday is computed correctly

    ttp = (2020,7,28,10,54,30,0,0,0)
    res = time.mktime(ttp)
    str_time = time.ctime(res)
    print(str_time)
    
    # Tue Jul 28 10:54:30 2020

    6、sleep() pauses the program

    time.sleep(2)
    print("我睡醒了")

    7、strftime() formats a time tuple into a time string

    With no tuple argument, the current time is formatted

    res = time.strftime("%Y-%m-%d %H:%M:%S")
    print(res)
    
    # 2020-08-01 22:31:40

    Pass a time tuple to format a specific time

    """strftime如果在windows当中出现中文,直接报错,不能解析,linux 可以支持"""
    ttp = (2000,10,1,12,12,12,0,0,0)
    res = time.strftime("%Y-%m-%d %H:%M:%S" , ttp)
    print(res)
    
    # 2000-10-01 12:12:12
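
    If the Windows limitation matters, one workaround (my own sketch, not from the original notes) is to keep the strftime format ASCII-only and add the Chinese labels with ordinary string formatting:

    import time

    ttp = (2000,10,1,12,12,12,0,0,0)
    # format the numeric fields with plain ASCII directives, then attach the Chinese labels
    res = "{}年{}月{}日 ".format(ttp[0], ttp[1], ttp[2]) + time.strftime("%H:%M:%S", ttp)
    print(res)   # 2000年10月1日 12:12:12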

    8、strptime() parses a time string into a time tuple using the given format

    """要求字符串不能乱加符号,必须严丝合缝.""" 
    strvar = "2020年7月28号11时12分13秒是著名歌星庾澄庆的生日"
    ttp = time.strptime(strvar,"%Y年%m月%d号%H时%M分%S秒是著名歌星庾澄庆的生日")
    print(ttp)
    
    """
    time.struct_time(
        tm_year=2020, 
        tm_mon=7, 
        tm_mday=28, 
        tm_hour=11, 
        tm_min=12, 
        tm_sec=13, 
        tm_wday=1, 
        tm_yday=210, 
        tm_isdst=-1)
    """

    9、perf_counter() measures how long a piece of code takes to run

    # record the start time
    startime = time.perf_counter()
    # startime = time.time()
    
    for i in range(100000000):
        pass
    
    # record the end time
    endtime = time.perf_counter()
    # endtime = time.time()
    print(endtime - startime)
    
    # 5.847825

    三、A progress bar effect

    # (1) define the look of the progress bar (left-aligned, padded to 50 characters)
    print("[%-50s]" % ("#"))
    print("[%-50s]" % ("###############"))
    print("[%-50s]" % ("#########################"))
    
    # (2) animate the progress bar: \r moves the cursor back to the start of the line
    """
    strvar = ""
    for i in range(50):
        strvar += "#"
        time.sleep(0.1)
        print("\r[%-50s]" % (strvar) , end="" )
    """
    
    # (3) advance the progress bar according to how much of the file has been received
    print("<========>")
    # assume the total file size is 1024000 bytes
    def progress(percent):
        # if the percentage exceeds 1, the transfer is complete; cap it at 1
        if percent > 1:
            percent = 1
        
        # print the corresponding number of # characters
        strvar = "#" * int(percent * 50) 
        # %% => %
        print("
    [%-50s] %d%%" % (strvar,int(percent * 100)) , end="" )
    
    # bytes received so far
    recv_size = 0
    # total number of bytes to receive
    total_size = 1024000
    while recv_size < total_size:
        recv_size += 1024
        
        # simulate network delay
        time.sleep(0.01)
        # compute the completed percentage
        percent = recv_size/total_size # e.g. 0.001 after the first chunk
        # redraw the progress bar
        progress(percent)
    
    
    # [##################################################] 100%

    四、The zipfile compression module

    import zipfile

    # 1. compress files
    # (1) create the zip archive
    zf = zipfile.ZipFile("1424.zip","w",zipfile.ZIP_DEFLATED)
    # (2) write files into the archive
    # write(path, arcname)
    zf.write("/bin/cp","cp")
    zf.write("/bin/chmod","chmod")
    # an arcname with a directory part creates that folder (tmp) inside the archive
    zf.write("/bin/df","/tmp/df")
    # (3) close the archive
    zf.close()
    
    
    # 2. extract files
    # (1) open the archive
    zf = zipfile.ZipFile("1424.zip","r")
    # (2) extract files
    # extract a single member
    zf.extract("cp","ceshi1424_2")
    # extract all members
    # zf.extractall("ceshi1424")
    # (3) close the archive
    zf.close()
    
    # 3. append files (the with statement is supported)
    with zipfile.ZipFile("1424.zip","a",zipfile.ZIP_DEFLATED) as zf:
        zf.write("/bin/dir","dir")
    
    # 4. list the contents of the archive
    with zipfile.ZipFile("1424.zip","r",zipfile.ZIP_DEFLATED) as zf:
        lst = zf.namelist()
        print(lst)
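
    One more small sketch (not in the original notes): ZipFile.read() returns a member's contents as bytes without extracting it to disk.

    import zipfile

    with zipfile.ZipFile("1424.zip","r") as zf:
        data = zf.read("cp")   # raw bytes of the archived member named "cp"
        print(len(data))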

    五、The tarfile compression module  .tar  |  .tar.gz  |  .tar.bz2

    import tarfile
    """最小的压缩包,后缀格式为bz2"""

    1、Create a tar archive

    # a plain, uncompressed tar archive
    tf = tarfile.open("ceshi0729_1.tar","w",encoding="utf-8")
    tf.add("/bin/echo","echo")
    tf.add("/bin/ed","ed")
    tf.add("/bin/fuser","/tmp/fuser")
    tf.close()
    # .tar.gz
    tf = tarfile.open("ceshi0729_2.tar.gz","w:gz",encoding="utf-8")
    tf.add("/bin/echo","echo")
    tf.add("/bin/ed","ed")
    tf.add("/bin/fuser","/tmp/fuser")
    tf.close()
    # .tar.bz2
    tf = tarfile.open("ceshi0729_3.tar.bz2","w:bz2",encoding="utf-8")
    tf.add("/bin/echo","echo")
    tf.add("/bin/ed","ed")
    tf.add("/bin/fuser","/tmp/fuser")
    tf.close()

    2、Extract an archive

    tf = tarfile.open("ceshi0729_3.tar.bz2","r",encoding="utf-8")
    # extract a single member
    tf.extract("echo","ceshi0729_4")
    # extract all members
    # tf.extractall("ceshi0729_3")
    tf.close()

    3、Append files (the with statement is supported)

    # [appending only works on an uncompressed tar archive]
    with tarfile.open("ceshi0729_1.tar","a",encoding="utf-8") as tf:
        tf.add("/bin/cp","cp")

    4、List the files in an archive

    with tarfile.open("ceshi0729_2.tar.gz","r",encoding="utf-8") as tf:
        lst = tf.getnames()
        print(lst)

    5、Working around tarfile's inability to append to an already compressed archive

    # ceshi0729_3.tar.bz2
    import os
    path = os.getcwd()
    # path of the archive to be unpacked
    pathvar1 = os.path.join(path,"ceshi0729_3.tar.bz2")
    print(pathvar1)
    # directory to unpack into
    pathvar2 =  os.path.join(path,"ceshi0729_3")
    print(pathvar2)
    
    # (1) first unpack the compressed archive
    with tarfile.open(pathvar1,"r",encoding="utf-8") as tf:
        tf.extractall(pathvar2)
    
    # (2) add the new file to the unpacked folder
    mybin = "cp -a /bin/fgrep " + pathvar2
    # print(mybin) # cp -a /bin/fgrep /mnt/hgfs/python31_gx/day19/ceshi0729_3
    os.system(mybin)
    
    
    # (3) filter the files and repack/recompress (leaving out echo)
    lst = os.listdir(pathvar2)
    print(lst)
    with tarfile.open(pathvar1,"w:bz2",encoding="utf-8") as tf:
        for i in lst:
            if i != "echo":
                # build the full path
                pathnew = os.path.join(pathvar2,i)
                # add(path, arcname)
                tf.add(pathnew,i)
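
    A side note on step (2) (my own sketch, not from the original notes): the os.system call shells out to cp, which only works where a cp command exists; the same copy can be done in pure Python with shutil.copy, assuming the same pathvar2 as above.

    import os
    import shutil

    pathvar2 = os.path.join(os.getcwd(), "ceshi0729_3")
    # copy /bin/fgrep into the unpacked folder without shelling out
    shutil.copy("/bin/fgrep", pathvar2)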