随笔:Python批量合并csv文件的数据
os模块还不是很会用,哈哈
import glob
import os
import pandas
import csv
# Glob pattern matching the CSV files to merge.
# NOTE(review): the original path was garbled by blog formatting (the "\t" and
# "\r" inside the raw strings rendered as whitespace/line breaks); reconstructed
# as a plain Windows path — confirm against the real directory layout.
# The os.path.dirname() round-trip in the original added nothing, so the
# pattern is written directly.
inputfile = r'D:\test\cloudAI\test_data1\test_data\classifydata\classifydata*.csv'
# Destination file for the merged data.
outputfile = r'D:\test\cloudAI\test_data1\test_data\classifydata\result\result.csv'

csv_list = glob.glob(inputfile)
# Fail with a clear message instead of an opaque IndexError when nothing matches.
if not csv_list:
    raise FileNotFoundError('no CSV files match pattern: ' + inputfile)

# Write the first file with its header, then append the rest without headers,
# so the merged file ends up with exactly one header row.
df = pandas.read_csv(csv_list[0])
df.to_csv(outputfile, index=False, encoding='utf-8')
for filepath in csv_list[1:]:
    d = pandas.read_csv(filepath)
    d.to_csv(outputfile, index=False, header=False, mode='a', encoding='utf-8')
print('***文件生成完成***')

# Print the number of lines (header included) in the merged file.
# A context manager closes the handle deterministically (the original leaked it),
# and sum() over the reader avoids materializing every row in memory at once.
with open(outputfile, encoding='utf-8') as f:
    l = sum(1 for _ in csv.reader(f))
print(l)
番外:
如果需要控制总行数,就要在循环里加入判断。但原来的写法会比较慢:慢的原因是每次循环都要把已生成的文件完整读一遍来计算行数,当行数很大时就会非常慢,我当时是放弃了。其实可以改为在内存中累加行数,见下面改进后的版本。
import glob
import os
import pandas
import csv
# Glob pattern matching the CSV files to merge.
# NOTE(review): path reconstructed from blog-garbled "\t"/"\r" escapes — confirm.
inputfile = r'D:\test\cloudAI\test_data1\test_data\classifydata\classifydata*.csv'
# Destination file for the merged data.
outputfile = r'D:\test\cloudAI\test_data1\test_data\classifydata\result\result.csv'

# Stop appending once the merged file reaches this many rows (header included).
MAX_ROWS = 26000000

csv_list = glob.glob(inputfile)
# Fail with a clear message instead of an opaque IndexError when nothing matches.
if not csv_list:
    raise FileNotFoundError('no CSV files match pattern: ' + inputfile)

# First file keeps its header; subsequent files are appended header-less.
df = pandas.read_csv(csv_list[0])
df.to_csv(outputfile, index=False, encoding='utf-8')
# Running row count: data rows written so far plus the one header line.
# This replaces the original approach of re-reading and re-parsing the entire
# output file on every iteration (O(n^2) overall — the slowness the author
# complained about) with an O(1)-per-file counter.
total_rows = len(df) + 1

for filepath in csv_list[1:]:
    if total_rows >= MAX_ROWS:
        break
    d = pandas.read_csv(filepath)
    d.to_csv(outputfile, index=False, header=False, mode='a', encoding='utf-8')
    total_rows += len(d)
print('***文件生成完成***')

# Print the final line count by reading the file once, with the handle
# closed via a context manager (the original leaked it).
with open(outputfile, encoding='utf-8') as f:
    l = sum(1 for _ in csv.reader(f))
print(l)