1.定义
序列化 : 把不能够直接存储在文件中的数据变得可存储,这个过程就是序列化
反序列化: 把文件的数据内容拿出来,恢复成原来的数据类型,这个过程就是反序列化.
2.实际案例:dumps和loads的使用
import pickle

# ### (1) dumps and loads
# dumps: serialize an arbitrary object into a bytes sequence.
lst = [1, 2, 3]
res = pickle.dumps(lst)
print(res)
# loads: deserialize those bytes back into the original object.
lst = pickle.loads(res)
print(lst, type(lst))
3.实际案例dump和load
# ### (2) dump and load
# dump: serialize an object and write it straight into a file-like
# object (preferred over dumps/loads when files are involved).
with open("ceshi2.txt", mode="wb") as fp:
    setvar = {"a", 1, "b"}
    # dump == dumps-to-bytes + fp.write, merged into one call:
    # dump(object_to_serialize, file_object)
    pickle.dump(setvar, fp)
# load: read the file-like object's content and deserialize it
# back into the original object.
with open("ceshi2.txt", mode="rb") as fp:
    # load == fp.read + loads, merged into one call.
    res = pickle.load(fp)
    print(res, type(res))
4.dumps和dump区别对比
# dumps and loads combined with manual file operations:
# serialize to bytes first, then write those bytes yourself.
with open("ceshi3.txt", mode="wb") as fp:
    dic = {"xb": "比较喜欢晒女友", "zh": "喜欢睡觉"}
    fp.write(pickle.dumps(dic))
# Read the raw bytes back, then deserialize them.
with open("ceshi3.txt", mode="rb") as fp:
    dic = pickle.loads(fp.read())
    print(dic, type(dic))
5.连续dump和load的操作
# (2) pickle: you can dump repeatedly into one file,
# and load repeatedly from it, in the same order.
print("<===>")
import pickle

dic1 = {"a": 1, "b": 2}
dic2 = {"c": 3, "d": 4}
with open("ceshi3.pkl", mode="wb") as fp:
    pickle.dump(dic1, fp)
    pickle.dump(dic2, fp)
# Method 1: one load per dump, when the count is known.
with open("ceshi3.pkl", mode="rb") as fp:
    dic1 = pickle.load(fp)
    dic2 = pickle.load(fp)
    print(dic1, type(dic1))
    print(dic2, type(dic2))
# Method 2: keep loading until the file is exhausted.
# pickle.load raises EOFError at end of file, so catch exactly
# that; a bare `except:` would also hide real problems such as a
# corrupted stream (pickle.UnpicklingError) or a KeyboardInterrupt.
try:
    with open("ceshi3.pkl", mode="rb") as fp:
        while True:
            dic = pickle.load(fp)
            print(dic)
except EOFError:
    pass
6.json数据连续dump和load的操作
# (1) json
import json

"""
json can dump repeatedly into one file, but cannot load repeatedly:
json.load reads the whole file expecting a single complete JSON
document, so a file holding several dicts cannot be converted.
Workaround: write one document per line, then iterate the file
line by line and convert each line back with json.loads.
"""
dic1 = {"a": 1, "b": 2}
dic2 = {"c": 3, "d": 4}
with open("ceshi2.json", mode="w", encoding="utf-8") as fp:
    json.dump(dic1, fp)
    # Newline separator so each JSON document sits on its own line.
    fp.write("\n")
    json.dump(dic2, fp)
    fp.write("\n")
    # A file object is an iterator, so it can be traversed lazily.
    # NOTE: Iterator/Iterable live in collections.abc; importing them
    # from `collections` directly was removed in Python 3.10.
    from collections.abc import Iterator, Iterable
    res = isinstance(fp, Iterator)
    print(res)
# Use loads line by line to work around load's one-document limit.
with open("ceshi2.json", mode="r", encoding="utf-8") as fp:
    for line in fp:
        # print(line, type(line))
        dic = json.loads(line)
        print(dic, type(dic))