  • Tide-level discretization (extracting hourly water-level records)

    # Note: only pandas, datetime and copy are actually used in this snippet;
    # the remaining imports are carried over from the larger plotting / kriging
    # workflow this script belongs to.
    import eofs
    from eofs.standard import Eof
    import pandas as pd
    import numpy as np
    import glob
    import datetime
    from matplotlib import pyplot as plt
    import copy
    import re
    import time
    from pylab import *
    import matplotlib.dates as mdate
    import matplotlib.patches as patches
    import matplotlib.ticker as ticker
    import xarray as ax
    import geopandas as gpd
    from pykrige.ok import OrdinaryKriging
    from pykrige.kriging_tools import write_asc_grid
    import pykrige.kriging_tools as kt
    from matplotlib.colors import LinearSegmentedColormap
    from matplotlib.patches import Path, PathPatch
    from shapely.geometry import LineString
    from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter

    # Parse the 'date' column, formatted as '%Y/%m/%d %H:%M'.
    time_parse1 = lambda date: datetime.datetime.strptime(date, '%Y/%m/%d %H:%M')

    # Note: date_parser is deprecated in recent pandas (use date_format instead).
    df = pd.read_csv('./water_level_baozhen_1.csv', parse_dates=['date'],
                     date_parser=time_parse1, encoding='utf-8')


    # time_p = str(df.iat[-1,0]).split()[0] + ' ' + '0' + ':' + '00' + ':' + '00'
    # print(time_p)

    # Build 24 time windows, one per whole hour: [hh:00:00, hh:00:09].
    # Records whose time-of-day falls inside a window are the "on the hour"
    # readings we want to keep.
    time_list_1 = []   # window start times
    time_list_2 = []   # window end times
    list_1 = []        # per-hour DataFrame slices
    for i in range(24):
        time_list_1.append('start_time' + str(i))   # placeholders, filled in below
        time_list_2.append('start_time' + str(i))
        list_1.append('df' + str(i))
    time_list_1[0] = datetime.datetime.strptime("0:00:00", "%H:%M:%S").time()
    time_list_2[0] = datetime.datetime.strptime("0:00:09", "%H:%M:%S").time()
    for i in range(1, 24):
        time_list_1[i] = (datetime.datetime.strptime("00:00:00", "%H:%M:%S")
                          + datetime.timedelta(hours=i)).time()
        time_list_2[i] = (datetime.datetime.strptime("00:00:09", "%H:%M:%S")
                          + datetime.timedelta(hours=i)).time()
    # print(time_list_1)
    # print(time_list_2)

    # Slice out the rows whose time-of-day falls inside each hourly window.
    list_1[0] = df[
        (df['date'].dt.time >= time_list_1[0]) & (df['date'].dt.time <= time_list_2[0])]
    t = list_1[0]

    # print(list_1[0])
    for x1 in range(1, 24):
        list_1[x1] = df[
            (df['date'].dt.time >= time_list_1[x1]) & (df['date'].dt.time <= time_list_2[x1])]

    # DataFrame.append() was removed in pandas 2.0; concatenate the slices instead.
    t = pd.concat([list_1[i] for i in range(24)])


    # Sort the extracted hourly records chronologically and write them out.
    t1 = copy.deepcopy(t)
    t1 = t1.reset_index(drop=True)
    t1 = t1.sort_values(by='date')
    t2 = t1.reset_index(drop=True)
    t2.to_csv('./baozhen_hour.csv')
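
    For reference, the same hourly extraction can be written more compactly with a
    single boolean mask on the parsed timestamps. This is only a sketch, not part of
    the original post: it assumes the same water_level_baozhen_1.csv layout (a 'date'
    column in '%Y/%m/%d %H:%M' format) and writes to the same output file.

    import pandas as pd

    # Sketch of an equivalent one-pass filter (assumes the same CSV layout as above).
    df = pd.read_csv('./water_level_baozhen_1.csv', encoding='utf-8')
    df['date'] = pd.to_datetime(df['date'], format='%Y/%m/%d %H:%M')

    # Keep rows stamped within the first ten seconds of each hour, mirroring
    # the [hh:00:00, hh:00:09] windows built above.
    mask = (df['date'].dt.minute == 0) & (df['date'].dt.second <= 9)
    hourly = df[mask].sort_values('date').reset_index(drop=True)
    hourly.to_csv('./baozhen_hour.csv')

    Because the timestamps carry no seconds, the second-level check is redundant here,
    but it keeps the behaviour identical to the window-based version above.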




  • Original post: https://www.cnblogs.com/chenyun-delft3d/p/14688183.html