oafuncs 0.0.97.15__py3-none-any.whl → 0.0.97.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oafuncs/_script/cprogressbar.py +42 -20
- oafuncs/_script/netcdf_modify.py +10 -2
- oafuncs/oa_cmap.py +211 -95
- oafuncs/oa_data.py +157 -218
- oafuncs/oa_date.py +71 -37
- oafuncs/oa_down/hycom_3hourly.py +209 -320
- oafuncs/oa_down/hycom_3hourly_20250407.py +1295 -0
- oafuncs/oa_down/idm.py +4 -4
- oafuncs/oa_draw.py +224 -124
- oafuncs/oa_file.py +279 -333
- oafuncs/oa_help.py +10 -0
- oafuncs/oa_nc.py +197 -164
- oafuncs/oa_python.py +51 -25
- oafuncs/oa_tool.py +84 -48
- {oafuncs-0.0.97.15.dist-info → oafuncs-0.0.97.17.dist-info}/METADATA +1 -1
- {oafuncs-0.0.97.15.dist-info → oafuncs-0.0.97.17.dist-info}/RECORD +20 -19
- /oafuncs/_script/{replace_file_concent.py → replace_file_content.py} +0 -0
- {oafuncs-0.0.97.15.dist-info → oafuncs-0.0.97.17.dist-info}/WHEEL +0 -0
- {oafuncs-0.0.97.15.dist-info → oafuncs-0.0.97.17.dist-info}/licenses/LICENSE.txt +0 -0
- {oafuncs-0.0.97.15.dist-info → oafuncs-0.0.97.17.dist-info}/top_level.txt +0 -0
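The headline change is the new module oafuncs/oa_down/hycom_3hourly_20250407.py (1295 added lines), a dated snapshot of the HYCOM 3-hourly downloader whose full text is reproduced below. Its public API is `draw_time_range` and `download`. Going only by the `download` docstring in that file, a minimal call might look like the following sketch (the import path, region, and times are illustrative placeholders, not taken from the diff):

    from oafuncs.oa_down.hycom_3hourly import download  # the dated file mirrors this module

    # Surface u/v over an example box, one 3-hourly day, with file-size checking enabled
    download(var=["u", "v"], time_s="2024110100", time_e="2024110121",
             lon_min=105, lon_max=130, lat_min=0, lat_max=45,
             store_path="./hycom_data", num_workers=4, check=True)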
@@ -0,0 +1,1295 @@
#!/usr/bin/env python
# coding=utf-8
"""
Author: Liu Kun && 16031215@qq.com
Date: 2025-04-07 12:28:13
LastEditors: Liu Kun && 16031215@qq.com
LastEditTime: 2025-04-07 12:28:13
FilePath: \\Python\\My_Funcs\\OAFuncs\\oafuncs\\oa_down\\hycom_3hourly copy.py
Description:
EditPlatform: vscode
ComputerInfo: XPS 15 9510
SystemInfo: Windows 11
Python Version: 3.12
"""


import datetime
import os
import random
import re
import time
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from threading import Lock

import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import pandas as pd
import requests
import xarray as xr
from rich import print
from rich.progress import Progress

from oafuncs.oa_down.idm import downloader as idm_downloader
from oafuncs.oa_down.user_agent import get_ua
from oafuncs.oa_file import file_size, mean_size
from oafuncs.oa_nc import check as check_nc
from oafuncs.oa_nc import modify as modify_nc

warnings.filterwarnings("ignore", category=RuntimeWarning, message="Engine '.*' loading failed:.*")

__all__ = ["draw_time_range", "download"]


def _get_initial_data():
    global variable_info, data_info, var_group, single_var_group
    # ----------------------------------------------
    # variable
    variable_info = {
        "u": {"var_name": "water_u", "standard_name": "eastward_sea_water_velocity"},
        "v": {"var_name": "water_v", "standard_name": "northward_sea_water_velocity"},
        "temp": {"var_name": "water_temp", "standard_name": "sea_water_potential_temperature"},
        "salt": {"var_name": "salinity", "standard_name": "sea_water_salinity"},
        "ssh": {"var_name": "surf_el", "standard_name": "sea_surface_elevation"},
        "u_b": {"var_name": "water_u_bottom", "standard_name": "eastward_sea_water_velocity_at_sea_floor"},
        "v_b": {"var_name": "water_v_bottom", "standard_name": "northward_sea_water_velocity_at_sea_floor"},
        "temp_b": {"var_name": "water_temp_bottom", "standard_name": "sea_water_potential_temperature_at_sea_floor"},
        "salt_b": {"var_name": "salinity_bottom", "standard_name": "sea_water_salinity_at_sea_floor"},
    }
    # ----------------------------------------------
    # time resolution
    data_info = {"yearly": {}, "monthly": {}, "daily": {}, "hourly": {}}

    # hourly data
    # dataset: GLBv0.08, GLBu0.08, GLBy0.08
    data_info["hourly"]["dataset"] = {"GLBv0.08": {}, "GLBu0.08": {}, "GLBy0.08": {}, "ESPC_D": {}}

    # version
    # version of GLBv0.08: 53.X, 56.3, 57.2, 92.8, 57.7, 92.9, 93.0
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"] = {"53.X": {}, "56.3": {}, "57.2": {}, "92.8": {}, "57.7": {}, "92.9": {}, "93.0": {}}
    # version of GLBu0.08: 93.0
    data_info["hourly"]["dataset"]["GLBu0.08"]["version"] = {"93.0": {}}
    # version of GLBy0.08: 93.0
    data_info["hourly"]["dataset"]["GLBy0.08"]["version"] = {"93.0": {}}
    # version of ESPC_D: V02
    data_info["hourly"]["dataset"]["ESPC_D"]["version"] = {"V02": {}}

    # info details
    # time range
    # GLBv0.08
    # Submitting an out-of-range time on the website returns the dataset's actual time range, which was used to correct the ranges below
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["time_range"] = {"time_start": "1994010112", "time_end": "2015123109"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["56.3"]["time_range"] = {"time_start": "2014070112", "time_end": "2016093009"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.2"]["time_range"] = {"time_start": "2016050112", "time_end": "2017020109"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.8"]["time_range"] = {"time_start": "2017020112", "time_end": "2017060109"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.7"]["time_range"] = {"time_start": "2017060112", "time_end": "2017100109"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.9"]["time_range"] = {"time_start": "2017100112", "time_end": "2018032009"}
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018010112", "time_end": "2020021909"}
    # GLBu0.08
    data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018091912", "time_end": "2018120909"}
    # GLBy0.08
    data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018120412", "time_end": "2024090509"}
    # ESPC-D
    data_info["hourly"]["dataset"]["ESPC_D"]["version"]["V02"]["time_range"] = {"time_start": "2024081012", "time_end": "2030010100"}

    # classification method
    # year_different: the data of different years is stored in different files
    # same_path: the data of different years is stored in the same file
    # var_different: the data of different variables is stored in different files
    # var_year_different: the data of different variables and years is stored in different files
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["classification"] = "year_different"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["56.3"]["classification"] = "same_path"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.2"]["classification"] = "same_path"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.8"]["classification"] = "var_different"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.7"]["classification"] = "same_path"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.9"]["classification"] = "var_different"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["93.0"]["classification"] = "var_different"
    data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["classification"] = "var_different"
    data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["classification"] = "var_year_different"
    data_info["hourly"]["dataset"]["ESPC_D"]["version"]["V02"]["classification"] = "single_var_year_different"

    # download info
    # base url
    # GLBv0.08 53.X
    url_53x = {}
    for y_53x in range(1994, 2016):
        # r'https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_53.X/data/2013?'
        url_53x[str(y_53x)] = rf"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_53.X/data/{y_53x}?"
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["url"] = url_53x
    # GLBv0.08 56.3
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["56.3"]["url"] = r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_56.3?"
    # GLBv0.08 57.2
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.2"]["url"] = r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_57.2?"
    # GLBv0.08 92.8
    url_928 = {
        "uv3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.8/uv3z?",
        "ts3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.8/ts3z?",
        "ssh": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.8/ssh?",
    }
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.8"]["url"] = url_928
    # GLBv0.08 57.7
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.7"]["url"] = r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_57.7?"
    # GLBv0.08 92.9
    url_929 = {
        "uv3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.9/uv3z?",
        "ts3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.9/ts3z?",
        "ssh": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_92.9/ssh?",
    }
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.9"]["url"] = url_929
    # GLBv0.08 93.0
    url_930_v = {
        "uv3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_93.0/uv3z?",
        "ts3z": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_93.0/ts3z?",
        "ssh": r"https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_93.0/ssh?",
    }
    data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["93.0"]["url"] = url_930_v
    # GLBu0.08 93.0
    url_930_u = {
        "uv3z": r"https://ncss.hycom.org/thredds/ncss/GLBu0.08/expt_93.0/uv3z?",
        "ts3z": r"https://ncss.hycom.org/thredds/ncss/GLBu0.08/expt_93.0/ts3z?",
        "ssh": r"https://ncss.hycom.org/thredds/ncss/GLBu0.08/expt_93.0/ssh?",
    }
    data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["url"] = url_930_u
    # GLBy0.08 93.0
    uv3z_930_y = {}
    ts3z_930_y = {}
    ssh_930_y = {}
    for y_930_y in range(2018, 2025):
        uv3z_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/uv3z/{y_930_y}?"
        ts3z_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/ts3z/{y_930_y}?"
        ssh_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/ssh/{y_930_y}?"
    # GLBy0.08 93.0 data time range in each year: year-01-01 12:00 to year+1-01-01 09:00
    url_930_y = {
        "uv3z": uv3z_930_y,
        "ts3z": ts3z_930_y,
        "ssh": ssh_930_y,
    }
    data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["url"] = url_930_y
    # ESPC-D-V02
    u3z_espc_d_v02_y = {}
    v3z_espc_d_v02_y = {}
    t3z_espc_d_v02_y = {}
    s3z_espc_d_v02_y = {}
    ssh_espc_d_v02_y = {}
    for y_espc_d_v02 in range(2024, 2030):
        u3z_espc_d_v02_y[str(y_espc_d_v02)] = rf"https://ncss.hycom.org/thredds/ncss/ESPC-D-V02/u3z/{y_espc_d_v02}?"
        v3z_espc_d_v02_y[str(y_espc_d_v02)] = rf"https://ncss.hycom.org/thredds/ncss/ESPC-D-V02/v3z/{y_espc_d_v02}?"
        t3z_espc_d_v02_y[str(y_espc_d_v02)] = rf"https://ncss.hycom.org/thredds/ncss/ESPC-D-V02/t3z/{y_espc_d_v02}?"
        s3z_espc_d_v02_y[str(y_espc_d_v02)] = rf"https://ncss.hycom.org/thredds/ncss/ESPC-D-V02/s3z/{y_espc_d_v02}?"
        ssh_espc_d_v02_y[str(y_espc_d_v02)] = rf"https://ncss.hycom.org/thredds/ncss/ESPC-D-V02/ssh/{y_espc_d_v02}?"
    url_espc_d_v02_y = {
        "u3z": u3z_espc_d_v02_y,
        "v3z": v3z_espc_d_v02_y,
        "t3z": t3z_espc_d_v02_y,
        "s3z": s3z_espc_d_v02_y,
        "ssh": ssh_espc_d_v02_y,
    }
    data_info["hourly"]["dataset"]["ESPC_D"]["version"]["V02"]["url"] = url_espc_d_v02_y
    # ----------------------------------------------
    var_group = {
        "uv3z": ["u", "v", "u_b", "v_b"],
        "ts3z": ["temp", "salt", "temp_b", "salt_b"],
        "ssh": ["ssh"],
    }
    # ----------------------------------------------
    single_var_group = {
        "u3z": ["u"],
        "v3z": ["v"],
        "t3z": ["temp"],
        "s3z": ["salt"],
        "ssh": ["ssh"],
    }

    return variable_info, data_info, var_group, single_var_group

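For readers skimming the listing, the nested tables `_get_initial_data` builds are easiest to understand from a lookup. A small sketch, assuming the module is imported as `h3` (import path assumed; the keys and values come straight from the assignments above):

    from oafuncs.oa_down import hycom_3hourly as h3  # assumed import path

    h3._get_initial_data()  # populates the module-level variable_info / data_info tables
    rng = h3.data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["time_range"]
    print(rng["time_start"], rng["time_end"])  # 1994010112 2015123109

    # NCSS base URL for GLBy0.08 / 93.0 velocity (uv3z) files in 2020:
    print(h3.data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["url"]["uv3z"]["2020"])
    # https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/uv3z/2020?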
def draw_time_range(pic_save_folder=None):
    if pic_save_folder is not None:
        os.makedirs(pic_save_folder, exist_ok=True)
    # Converting the data into a format suitable for plotting
    data = []
    for dataset, versions in data_info["hourly"]["dataset"].items():
        for version, time_range in versions["version"].items():
            t_s = time_range["time_range"]["time_start"]
            t_e = time_range["time_range"]["time_end"]
            if len(t_s) == 8:
                t_s = t_s + "00"
            if len(t_e) == 8:
                t_e = t_e + "21"
            t_s, t_e = t_s + "0000", t_e + "0000"
            data.append(
                {
                    "dataset": dataset,
                    "version": version,
                    "start_date": pd.to_datetime(t_s),
                    "end_date": pd.to_datetime(t_e),
                }
            )

    # Creating a DataFrame
    df = pd.DataFrame(data)

    # Plotting with combined labels for datasets and versions on the y-axis
    plt.figure(figsize=(12, 6))

    # Combined labels for datasets and versions
    combined_labels = [f"{dataset}_{version}" for dataset, version in zip(df["dataset"], df["version"])]

    colors = plt.cm.viridis(np.linspace(0, 1, len(combined_labels)))

    # Assigning a color to each combined label
    label_colors = {label: colors[i] for i, label in enumerate(combined_labels)}

    # Plotting each time range
    k = 1
    for _, row in df.iterrows():
        plt.plot([row["start_date"], row["end_date"]], [k, k], color=label_colors[f"{row['dataset']}_{row['version']}"], linewidth=6)
        # plt.text(row['end_date'], k,
        #          f"{row['version']}", ha='right', color='black')
        ymdh_s = row["start_date"].strftime("%Y-%m-%d %H")
        ymdh_e = row["end_date"].strftime("%Y-%m-%d %H")
        # if k == 1 or k == len(combined_labels):
        if k == 1:
            plt.text(row["start_date"], k + 0.125, f"{ymdh_s}", ha="left", color="black")
            plt.text(row["end_date"], k + 0.125, f"{ymdh_e}", ha="right", color="black")
        else:
            plt.text(row["start_date"], k + 0.125, f"{ymdh_s}", ha="right", color="black")
            plt.text(row["end_date"], k + 0.125, f"{ymdh_e}", ha="left", color="black")
        k += 1

    # Setting the y-axis labels
    plt.yticks(range(1, len(combined_labels) + 1), combined_labels)
    plt.xlabel("Time")
    plt.ylabel("Dataset - Version")
    plt.title("Time Range of Different Versions of Datasets")
    plt.xticks(rotation=45)
    plt.grid(True)
    plt.tight_layout()
    if pic_save_folder:
        plt.savefig(Path(pic_save_folder) / "HYCOM_time_range.png")
        print(f"[bold green]HYCOM_time_range.png has been saved in {pic_save_folder}")
    else:
        plt.savefig("HYCOM_time_range.png")
        print("[bold green]HYCOM_time_range.png has been saved in the current folder")
        print(f"Current folder: {os.getcwd()}")
    # plt.show()
    plt.close()

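`draw_time_range` reads the global `data_info`, so `_get_initial_data()` must run first (the public `download` below does that automatically). A usage sketch, reusing the `h3` alias from the earlier snippet:

    h3._get_initial_data()
    h3.draw_time_range(pic_save_folder="./figs")  # writes ./figs/HYCOM_time_range.png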
def _get_time_list(time_s, time_e, delta, interval_type="hour"):
    """
    Description: get a list of time strings from time_s to time_e with a specified interval
    Args:
        time_s: start time string, e.g. '2023080203' for hours or '20230802' for days
        time_e: end time string, e.g. '2023080303' for hours or '20230803' for days
        delta: interval of hours or days
        interval_type: 'hour' for hour interval, 'day' for day interval
    Returns:
        dt_list: a list of time strings
    """
    time_s, time_e = str(time_s), str(time_e)
    if interval_type == "hour":
        time_format = "%Y%m%d%H"
        delta_type = "hours"
    elif interval_type == "day":
        time_format = "%Y%m%d"
        delta_type = "days"
        # Ensure time strings are in the correct format for days
        time_s = time_s[:8]
        time_e = time_e[:8]
    else:
        raise ValueError("interval_type must be 'hour' or 'day'")

    dt = datetime.datetime.strptime(time_s, time_format)
    dt_list = []
    while dt.strftime(time_format) <= time_e:
        dt_list.append(dt.strftime(time_format))
        dt += datetime.timedelta(**{delta_type: delta})
    return dt_list

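A quick illustration of `_get_time_list`, consistent with the formats named in its docstring (outputs shown in comments):

    print(h3._get_time_list("2023080203", "2023080212", 3, "hour"))
    # ['2023080203', '2023080206', '2023080209', '2023080212']
    print(h3._get_time_list("20230802", "20230804", 1, "day"))
    # ['20230802', '20230803', '20230804']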
def _transform_time(time_str):
    # old_time = '2023080203'
    # time_new = '2023-08-02T03%3A00%3A00Z'
    time_new = f"{time_str[:4]}-{time_str[4:6]}-{time_str[6:8]}T{time_str[8:10]}%3A00%3A00Z"
    return time_new


def _get_query_dict(var, lon_min, lon_max, lat_min, lat_max, time_str_ymdh, time_str_end=None, mode="single_depth", depth=None, level_num=None):
    query_dict = {
        "var": variable_info[var]["var_name"],
        "north": lat_max,
        "west": lon_min,
        "east": lon_max,
        "south": lat_min,
        "horizStride": 1,
        "time": None,
        "time_start": None,
        "time_end": None,
        "timeStride": None,
        "vertCoord": None,
        "vertStride": None,
        "addLatLon": "true",
        "accept": "netcdf4",
    }

    if time_str_end is not None:
        query_dict["time_start"] = _transform_time(time_str_ymdh)
        query_dict["time_end"] = _transform_time(time_str_end)
        query_dict["timeStride"] = 1
    else:
        query_dict["time"] = _transform_time(time_str_ymdh)

    def get_nearest_level_index(depth):
        level_depth = [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 125.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 1250.0, 1500.0, 2000.0, 2500.0, 3000.0, 4000.0, 5000]
        return min(range(len(level_depth)), key=lambda i: abs(level_depth[i] - depth))

    if var not in ["ssh", "u_b", "v_b", "temp_b", "salt_b"] and var in ["u", "v", "temp", "salt"]:
        if mode == "depth":
            if depth < 0 or depth > 5000:
                print("Please ensure the depth is in the range of 0-5000 m")
            query_dict["vertCoord"] = get_nearest_level_index(depth) + 1
        elif mode == "level":
            if level_num < 1 or level_num > 40:
                print("Please ensure the level_num is in the range of 1-40")
            query_dict["vertCoord"] = max(1, min(level_num, 40))
        elif mode == "full":
            query_dict["vertStride"] = 1
        else:
            raise ValueError("Invalid mode. Choose from 'depth', 'level', or 'full'")

    query_dict = {k: v for k, v in query_dict.items() if v is not None}

    return query_dict

def _check_time_in_dataset_and_version(time_input, time_end=None):
    # Decide whether this is a single time point or a time range
    is_single_time = time_end is None

    # For a single time point, initialize the range to that point
    if is_single_time:
        time_start = int(time_input)
        time_end = time_start
        time_input_str = str(time_input)
    else:
        time_start = int(time_input)
        time_end = int(time_end)
        time_input_str = f"{time_input}-{time_end}"

    # Pad the time format according to the string length
    if len(str(time_start)) == 8:
        time_start = str(time_start) + "00"
    if len(str(time_end)) == 8:
        time_end = str(time_end) + "21"
    time_start, time_end = int(time_start), int(time_end)

    d_list = []
    v_list = []
    trange_list = []
    have_data = False

    # Iterate over datasets and versions
    for dataset_name in data_info["hourly"]["dataset"].keys():
        for version_name in data_info["hourly"]["dataset"][dataset_name]["version"].keys():
            time_s, time_e = list(data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["time_range"].values())
            time_s, time_e = str(time_s), str(time_e)
            if len(time_s) == 8:
                time_s = time_s + "00"
            if len(time_e) == 8:
                time_e = time_e + "21"
            # Check whether the time falls within this dataset's range
            if is_single_time:
                if time_start >= int(time_s) and time_start <= int(time_e):
                    d_list.append(dataset_name)
                    v_list.append(version_name)
                    trange_list.append(f"{time_s}-{time_e}")
                    have_data = True
            else:
                if time_start >= int(time_s) and time_end <= int(time_e):
                    d_list.append(dataset_name)
                    v_list.append(version_name)
                    trange_list.append(f"{time_s}-{time_e}")
                    have_data = True

    # Report the result
    print(f"[bold red]{time_input_str} is in the following dataset and version:")
    if have_data:
        for d, v, trange in zip(d_list, v_list, trange_list):
            print(f"[bold blue]{d} {v} {trange}")
        if is_single_time:
            return True
        else:
            base_url_s = _get_base_url(d_list[0], v_list[0], "u", str(time_start))
            base_url_e = _get_base_url(d_list[0], v_list[0], "u", str(time_end))
            if base_url_s == base_url_e:
                return True
            else:
                print(f"[bold red]{time_start} to {time_end} is in different datasets or versions, so you can't download them together")
                return False
    else:
        print(f"[bold red]{time_input_str} is not in any dataset and version")
        return False

def _ensure_time_in_specific_dataset_and_version(dataset_name, version_name, time_input, time_end=None):
    # Pad the time format according to the string length
    if len(str(time_input)) == 8:
        time_input = str(time_input) + "00"
    time_start = int(time_input)
    if time_end is not None:
        if len(str(time_end)) == 8:
            time_end = str(time_end) + "21"
        time_end = int(time_end)
    else:
        time_end = time_start

    # Check that the specified dataset and version exist
    if dataset_name not in data_info["hourly"]["dataset"]:
        print(f"[bold red]Dataset {dataset_name} not found.")
        return False
    if version_name not in data_info["hourly"]["dataset"][dataset_name]["version"]:
        print(f"[bold red]Version {version_name} not found in dataset {dataset_name}.")
        return False

    # Get the time range of the specified dataset and version
    time_range = data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["time_range"]
    time_s, time_e = list(time_range.values())
    time_s, time_e = str(time_s), str(time_e)
    if len(time_s) == 8:
        time_s = time_s + "00"
    if len(time_e) == 8:
        time_e = time_e + "21"
    time_s, time_e = int(time_s), int(time_e)

    # Check whether the time falls within that range
    if time_start >= time_s and time_end <= time_e:
        print(f"[bold blue]Time {time_input} to {time_end} is within dataset {dataset_name} and version {version_name}.")
        return True
    else:
        print(f"[bold red]Time {time_input} to {time_end} is not within dataset {dataset_name} and version {version_name}.")
        return False

def _direct_choose_dataset_and_version(time_input, time_end=None):
    # data_info is a dict holding the dataset and version information
    # Example structure: data_info['hourly']['dataset'][dataset_name]['version'][version_name]['time_range']

    if len(str(time_input)) == 8:
        time_input = str(time_input) + "00"

    # If time_end is None, fall back to time_input
    if time_end is None:
        time_end = time_input

    # Normalize start and end times to the full ymdh format
    time_start, time_end = int(str(time_input)[:10]), int(str(time_end)[:10])

    dataset_name_out, version_name_out = None, None

    for dataset_name in data_info["hourly"]["dataset"].keys():
        for version_name in data_info["hourly"]["dataset"][dataset_name]["version"].keys():
            [time_s, time_e] = list(data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["time_range"].values())
            time_s, time_e = str(time_s), str(time_e)
            if len(time_s) == 8:
                time_s = time_s + "00"
            if len(time_e) == 8:
                time_e = time_e + "21"
            time_s, time_e = int(time_s), int(time_e)

            # Check whether the time falls within this version's range
            if time_start >= time_s and time_end <= time_e:
                # print(f'[bold purple]dataset: {dataset_name}, version: {version_name} is chosen')
                # return dataset_name, version_name
                dataset_name_out, version_name_out = dataset_name, version_name

    if dataset_name_out is not None and version_name_out is not None:
        print(f"[bold purple]dataset: {dataset_name_out}, version: {version_name_out} is chosen")

    # If no matching dataset and version is found, None is returned
    return dataset_name_out, version_name_out

def _get_base_url(dataset_name, version_name, var, ymdh_str):
    year_str = int(ymdh_str[:4])
    url_dict = data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["url"]
    classification_method = data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["classification"]
    if classification_method == "year_different":
        base_url = url_dict[str(year_str)]
    elif classification_method == "same_path":
        base_url = url_dict
    elif classification_method == "var_different":
        base_url = None
        for key, value in var_group.items():
            if var in value:
                base_url = url_dict[key]
                break
        if base_url is None:
            print("Please ensure the var is in [u,v,temp,salt,ssh,u_b,v_b,temp_b,salt_b]")
    elif classification_method == "var_year_different":
        if dataset_name == "GLBy0.08" and version_name == "93.0":
            mdh_str = ymdh_str[4:]
            # GLBy0.08 93.0
            # data time range in each year: year-01-01 12:00 to year+1-01-01 09:00
            if "010100" <= mdh_str <= "010109":
                year_str = int(ymdh_str[:4]) - 1
            else:
                year_str = int(ymdh_str[:4])
        base_url = None
        for key, value in var_group.items():
            if var in value:
                base_url = url_dict[key][str(year_str)]
                break
        if base_url is None:
            print("Please ensure the var is in [u,v,temp,salt,ssh,u_b,v_b,temp_b,salt_b]")
    elif classification_method == "single_var_year_different":
        base_url = None
        if dataset_name == "ESPC_D" and version_name == "V02":
            mdh_str = ymdh_str[4:]
            # ESPC-D-V02
            if "010100" <= mdh_str <= "010109":
                year_str = int(ymdh_str[:4]) - 1
            else:
                year_str = int(ymdh_str[:4])
        for key, value in single_var_group.items():
            if var in value:
                base_url = url_dict[key][str(year_str)]
                break
        if base_url is None:
            print("Please ensure the var is in [u,v,temp,salt,ssh]")
    return base_url

def _get_submit_url(dataset_name, version_name, var, ymdh_str, query_dict):
    base_url = _get_base_url(dataset_name, version_name, var, ymdh_str)
    if isinstance(query_dict["var"], str):
        query_dict["var"] = [query_dict["var"]]
    target_url = base_url + "&".join(f"var={var}" for var in query_dict["var"]) + "&" + "&".join(f"{key}={value}" for key, value in query_dict.items() if key != "var")
    return target_url

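Putting `_get_query_dict` and `_get_submit_url` together: for a single-time, full-depth `u` request against GLBv0.08 / 56.3, the resulting NCSS URL has roughly the shape below (an illustrative reconstruction from the code above, not a captured request; key order follows the query dict, and the box values are placeholders):

    # https://ncss.hycom.org/thredds/ncss/GLBv0.08/expt_56.3?var=water_u
    #     &north=45&west=105&east=130&south=0&horizStride=1
    #     &time=2016-05-01T12%3A00%3A00Z&vertStride=1&addLatLon=true&accept=netcdf4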
def _clear_existing_file(file_full_path):
    if os.path.exists(file_full_path):
        os.remove(file_full_path)
        print(f"{file_full_path} has been removed")


def _check_existing_file(file_full_path, avg_size):
    if os.path.exists(file_full_path):
        print(f"[bold #FFA54F]{file_full_path} exists")
        fsize = file_size(file_full_path)
        delta_size_ratio = (fsize - avg_size) / avg_size
        if abs(delta_size_ratio) > 0.025:
            if check_nc(file_full_path):
                # print(f"File size is abnormal but can be opened normally, file size: {fsize:.2f} KB")
                return True
            else:
                print(f"File size is abnormal and cannot be opened, {file_full_path}: {fsize:.2f} KB")
                return False
        else:
            return True
    else:
        return False

def _get_mean_size30(store_path, same_file):
    if same_file not in fsize_dict.keys():
        # print(f'Same file name: {same_file}')
        fsize_dict[same_file] = {"size": 0, "count": 0}

    if fsize_dict[same_file]["count"] < 30 or fsize_dict[same_file]["size"] == 0:
        # Update the minimum size over the first 30 files; afterwards it is taken as representative of all files and no longer updated, to save time
        fsize_mean = mean_size(store_path, same_file, max_num=30)
        set_min_size = fsize_mean * 0.95
        fsize_dict[same_file]["size"] = set_min_size
        fsize_dict[same_file]["count"] += 1
    else:
        set_min_size = fsize_dict[same_file]["size"]
    return set_min_size


def _get_mean_size_move(same_file, current_file):
    # Acquire the lock
    with fsize_dict_lock:  # global lock, so only one thread can access this at a time
        # Initialize the entry if the file is not yet in the dict
        if same_file not in fsize_dict.keys():
            fsize_dict[same_file] = {"size_list": [], "mean_size": 1.0}

        tolerance_ratio = 0.025  # tolerated deviation ratio
        current_file_size = file_size(current_file)

        # If the list is non-empty, compute the mean; otherwise keep it at 1
        if fsize_dict[same_file]["size_list"]:
            fsize_dict[same_file]["mean_size"] = sum(fsize_dict[same_file]["size_list"]) / len(fsize_dict[same_file]["size_list"])
            fsize_dict[same_file]["mean_size"] = max(fsize_dict[same_file]["mean_size"], 1.0)
        else:
            fsize_dict[same_file]["mean_size"] = 1.0

        size_difference_ratio = (current_file_size - fsize_dict[same_file]["mean_size"]) / fsize_dict[same_file]["mean_size"]

        if abs(size_difference_ratio) > tolerance_ratio:
            if check_nc(current_file):
                # print(f"File size is abnormal but can be opened normally, file size: {current_file_size:.2f} KB")
                # The file opens fine even though its size is unusual, so keep the current size
                fsize_dict[same_file]["size_list"] = [current_file_size]
                fsize_dict[same_file]["mean_size"] = current_file_size
            else:
                _clear_existing_file(current_file)
                print(f"File size is abnormal, may need to be downloaded again, file size: {current_file_size:.2f} KB")
        else:
            # Append the current file size to the list
            fsize_dict[same_file]["size_list"].append(current_file_size)

    # Return the mean size; note that this is the mean from before the new value was appended
    return fsize_dict[same_file]["mean_size"]

def _check_ftime(nc_file, tname="time", if_print=False):
    if not os.path.exists(nc_file):
        return False
    nc_file = str(nc_file)
    try:
        ds = xr.open_dataset(nc_file)
        real_time = ds[tname].values[0]
        ds.close()
        real_time = str(real_time)[:13]
        real_time = real_time.replace("-", "").replace("T", "")
        # -----------------------------------------------------
        f_time = re.findall(r"\d{10}", nc_file)[0]
        if real_time == f_time:
            return True
        else:
            if if_print:
                print(f"[bold #daff5c]File time error, file/real time: [bold blue]{f_time}/{real_time}")
            return False
    except Exception as e:
        if if_print:
            print(f"[bold #daff5c]File time check failed, {nc_file}: {e}")
        return False


def _correct_time(nc_file):
    # Open the NetCDF file
    dataset = nc.Dataset(nc_file)

    # Read the time units
    time_units = dataset.variables["time"].units

    # Close the file
    dataset.close()

    # Parse the units string to get the time origin
    origin_str = time_units.split("since")[1].strip()
    origin_datetime = datetime.datetime.strptime(origin_str, "%Y-%m-%d %H:%M:%S")

    # Extract the date string from the file name
    given_date_str = re.findall(r"\d{10}", str(nc_file))[0]

    # Convert the extracted date string to a datetime object
    given_datetime = datetime.datetime.strptime(given_date_str, "%Y%m%d%H")

    # Compute the offset between the given date and the time origin (in hours or days)
    time_difference = (given_datetime - origin_datetime).total_seconds()
    if "hours" in time_units:
        time_difference /= 3600
    elif "days" in time_units:
        time_difference /= 3600 * 24

    # Overwrite the time variable in the NetCDF file
    modify_nc(nc_file, "time", None, time_difference)

def _download_file(target_url, store_path, file_name, check=False):
    # Check if the file exists
    fname = Path(store_path) / file_name
    file_name_split = file_name.split("_")
    file_name_split = file_name_split[:-1]
    # same_file = f"{file_name_split[0]}_{file_name_split[1]}*nc"
    same_file = "_".join(file_name_split) + "*nc"

    if match_time is not None:
        if check_nc(fname):
            if not _check_ftime(fname, if_print=True):
                if match_time:
                    _correct_time(fname)
                    count_dict["skip"] += 1
                else:
                    _clear_existing_file(fname)
                    # print(f"[bold #ffe5c0]File time error, {fname}")
                    count_dict["no_data"] += 1
            else:
                count_dict["skip"] += 1
                print(f"[bold green]{file_name} is correct")
        return

    if check:
        if same_file not in fsize_dict.keys():  # check the first file on its own, since there is no size to compare against yet
            check_nc(fname, delete_switch=True)

        # set_min_size = _get_mean_size30(store_path, same_file)  # old scheme: average over the first 30 files only; cannot adapt if sizes change
        get_mean_size = _get_mean_size_move(same_file, fname)

        if _check_existing_file(fname, get_mean_size):
            count_dict["skip"] += 1
            return
    _clear_existing_file(fname)

    if not use_idm:
        # -----------------------------------------------
        print(f"[bold #f0f6d0]Requesting {file_name} ...")
        # Create a session
        s = requests.Session()
        download_success = False
        request_times = 0

        def calculate_wait_time(time_str, target_url):
            # Regex matching times in YYYYMMDDHH format
            time_pattern = r"\d{10}"

            # Example file names:
            # str1 = 'HYCOM_water_u_2018010100-2018010112.nc'
            # str2 = 'HYCOM_water_u_2018010100.nc'

            # Find the times with the regex
            times_in_str = re.findall(time_pattern, time_str)

            # Count how many times appear in the name
            num_times_str = len(times_in_str)

            if num_times_str > 1:
                delta_t = datetime.datetime.strptime(times_in_str[1], "%Y%m%d%H") - datetime.datetime.strptime(times_in_str[0], "%Y%m%d%H")
                delta_t = delta_t.total_seconds() / 3600
                delta_t = delta_t / 3 + 1
            else:
                delta_t = 1
            # Wait at most 5 minutes per variable: too short and requests may fail, too long and time is wasted
            num_var = int(target_url.count("var="))
            if num_var <= 0:
                num_var = 1
            return int(delta_t * 5 * 60 * num_var)

        max_timeout = calculate_wait_time(file_name, target_url)
        print(f"[bold #912dbc]Max timeout: {max_timeout} seconds")

        # print(f'Download_start_time: {datetime.datetime.now()}')
        download_time_s = datetime.datetime.now()
        order_list = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"]
        while not download_success:
            if request_times >= 10:
                # print(f'Download failed after {request_times} retries; skip it for now and try again later')
                print(f"[bold #ffe5c0]Download failed after {request_times} times\nYou can skip it and try again later")
                count_dict["fail"] += 1
                break
            if request_times > 0:
                # print(f'\rRetrying, attempt {request_times}', end="")
                print(f"[bold #ffe5c0]Retrying the {order_list[request_times - 1]} time...")
            # Try to download the file
            try:
                headers = {"User-Agent": get_ua()}
                """ response = s.get(target_url, headers=headers, timeout=random.randint(5, max_timeout))
                response.raise_for_status()  # raises an HTTPError if the status is not 200

                # Save the file
                with open(filename, 'wb') as f:
                    f.write(response.content) """

                response = s.get(target_url, headers=headers, stream=True, timeout=random.randint(5, max_timeout))  # enable streaming
                response.raise_for_status()  # raises an HTTPError if the status is not 200
                # Save the file
                with open(fname, "wb") as f:
                    print(f"[bold #96cbd7]Downloading {file_name} ...")
                    for chunk in response.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)

                f.close()

                # print(f'\rFile {fname} downloaded successfully', end="")
                if os.path.exists(fname):
                    download_success = True
                    download_time_e = datetime.datetime.now()
                    download_delta = download_time_e - download_time_s
                    print(f"[#3dfc40]File [bold #dfff73]{fname} [#3dfc40]has been downloaded successfully, Time: [#39cbdd]{download_delta}")
                    count_dict["success"] += 1
                    # print(f'Download_end_time: {datetime.datetime.now()}')

            except requests.exceptions.HTTPError as errh:
                print(f"Http Error: {errh}")
            except requests.exceptions.ConnectionError as errc:
                print(f"Error Connecting: {errc}")
            except requests.exceptions.Timeout as errt:
                print(f"Timeout Error: {errt}")
            except requests.exceptions.RequestException as err:
                print(f"Oops: Something else: {err}")

            time.sleep(3)
            request_times += 1
    else:
        idm_downloader(target_url, store_path, file_name, given_idm_engine)
        idm_download_list.append(fname)
        print(f"[bold #3dfc40]File [bold #dfff73]{fname} [#3dfc40]has been submitted to IDM for downloading")

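To make the `calculate_wait_time` heuristic in `_download_file` concrete (worked numbers; the file name is illustrative):

    # "HYCOM_water_u_2024110100-2024110121.nc" contains two YYYYMMDDHH stamps 21 h apart,
    # so delta_t = 21 / 3 + 1 = 8; with one "var=" in the URL,
    # max_timeout = int(8 * 5 * 60 * 1) = 2400 s, and each attempt then draws its
    # timeout from random.randint(5, max_timeout).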
def _check_hour_is_valid(ymdh_str):
    # hour should be 00, 03, 06, 09, 12, 15, 18, 21
    hh = int(str(ymdh_str[-2:]))
    if hh in [0, 3, 6, 9, 12, 15, 18, 21]:
        return True
    else:
        return False


def _check_dataset_version(dataset_name, version_name, download_time, download_time_end=None):
    if dataset_name is not None and version_name is not None:
        just_ensure = _ensure_time_in_specific_dataset_and_version(dataset_name, version_name, download_time, download_time_end)
        if just_ensure:
            return dataset_name, version_name
        else:
            return None, None

    # Make sure the download time is a string
    download_time_str = str(download_time)

    if len(download_time_str) == 8:
        download_time_str = download_time_str + "00"

    # Validate the hour (when needed)
    if download_time_end is None and not _check_hour_is_valid(download_time_str):
        print("Please ensure the hour is 00, 03, 06, 09, 12, 15, 18, 21")
        raise ValueError("The hour is invalid")

    # Set the time range depending on whether a whole range is being checked
    if download_time_end is not None:
        if len(str(download_time_end)) == 8:
            download_time_end = str(download_time_end) + "21"
        have_data = _check_time_in_dataset_and_version(download_time_str, download_time_end)
        if have_data:
            return _direct_choose_dataset_and_version(download_time_str, download_time_end)
    else:
        have_data = _check_time_in_dataset_and_version(download_time_str)
        if have_data:
            return _direct_choose_dataset_and_version(download_time_str)

    return None, None

def _get_submit_url_var(var, depth, level_num, lon_min, lon_max, lat_min, lat_max, dataset_name, version_name, download_time, download_time_end=None):
    # year_str = str(download_time)[:4]
    ymdh_str = str(download_time)
    if depth is not None and level_num is not None:
        print("Please ensure only one of depth or level_num is set")
        print("The depth will be used")
        which_mode = "depth"
    elif depth is not None and level_num is None:
        print(f"Data of single depth (~{depth} m) will be downloaded...")
        which_mode = "depth"
    elif level_num is not None and depth is None:
        print(f"Data of single level ({level_num}) will be downloaded...")
        which_mode = "level"
    else:
        # print("Full depth or full level data will be downloaded...")
        which_mode = "full"
    query_dict = _get_query_dict(var, lon_min, lon_max, lat_min, lat_max, download_time, download_time_end, which_mode, depth, level_num)
    submit_url = _get_submit_url(dataset_name, version_name, var, ymdh_str, query_dict)
    return submit_url


def _prepare_url_to_download(var, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, download_time="2024083100", download_time_end=None, depth=None, level_num=None, store_path=None, dataset_name=None, version_name=None, check=False):
    print("[bold #ecdbfe]-" * mark_len)
    download_time = str(download_time)
    if download_time_end is not None:
        download_time_end = str(download_time_end)
        dataset_name, version_name = _check_dataset_version(dataset_name, version_name, download_time, download_time_end)
    else:
        dataset_name, version_name = _check_dataset_version(dataset_name, version_name, download_time)
    if dataset_name is None and version_name is None:
        count_dict["no_data"] += 1
        if download_time_end is not None:
            count_dict["no_data_list"].append(f"{download_time}-{download_time_end}")
        else:
            count_dict["no_data_list"].append(download_time)
        return

    if isinstance(var, str):
        var = [var]

    if isinstance(var, list):
        if len(var) == 1:
            var = var[0]
            submit_url = _get_submit_url_var(var, depth, level_num, lon_min, lon_max, lat_min, lat_max, dataset_name, version_name, download_time, download_time_end)
            file_name = f"HYCOM_{variable_info[var]['var_name']}_{download_time}.nc"
            if download_time_end is not None:
                file_name = f"HYCOM_{variable_info[var]['var_name']}_{download_time}-{download_time_end}.nc"  # no underscore in the time part, or later matching of same-variable files will break
            _download_file(submit_url, store_path, file_name, check)
        else:
            if download_time < "2024081012":
                varlist = [_ for _ in var]
                for key, value in var_group.items():
                    current_group = []
                    for v in varlist:
                        if v in value:
                            current_group.append(v)
                    if len(current_group) == 0:
                        continue

                    var = current_group[0]
                    submit_url = _get_submit_url_var(var, depth, level_num, lon_min, lon_max, lat_min, lat_max, dataset_name, version_name, download_time, download_time_end)
                    file_name = f"HYCOM_{variable_info[var]['var_name']}_{download_time}.nc"
                    old_str = f"var={variable_info[var]['var_name']}"
                    new_str = f"var={variable_info[var]['var_name']}"
                    if len(current_group) > 1:
                        for v in current_group[1:]:
                            new_str = f"{new_str}&var={variable_info[v]['var_name']}"
                        submit_url = submit_url.replace(old_str, new_str)
                        # file_name = f'HYCOM_{'-'.join([variable_info[v]["var_name"] for v in current_group])}_{download_time}.nc'
                        file_name = f"HYCOM_{key}_{download_time}.nc"
                    if download_time_end is not None:
                        file_name = f"HYCOM_{key}_{download_time}-{download_time_end}.nc"  # no underscore in the time part, or later matching of same-variable files will break
                    _download_file(submit_url, store_path, file_name, check)
            else:
                for v in var:
                    submit_url = _get_submit_url_var(v, depth, level_num, lon_min, lon_max, lat_min, lat_max, dataset_name, version_name, download_time, download_time_end)
                    file_name = f"HYCOM_{variable_info[v]['var_name']}_{download_time}.nc"
                    if download_time_end is not None:
                        file_name = f"HYCOM_{variable_info[v]['var_name']}_{download_time}-{download_time_end}.nc"
                    _download_file(submit_url, store_path, file_name, check)

def _convert_full_name_to_short_name(full_name):
    for var, info in variable_info.items():
        if full_name == info["var_name"] or full_name == info["standard_name"] or full_name == var:
            return var
    print("[bold #FFE4E1]Please ensure the var is in:\n[bold blue]u,v,temp,salt,ssh,u_b,v_b,temp_b,salt_b")
    print("or")
    print("[bold blue]water_u, water_v, water_temp, salinity, surf_el, water_u_bottom, water_v_bottom, water_temp_bottom, salinity_bottom")
    return False


def _download_task(var, time_str, time_str_end, lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, check):
    """
    # Parallel download task
    # This wrapper is required for parallel downloading; calling direct_download in parallel directly causes problems

    Task encapsulation: the data and operations each task needs are wrapped in one function, so every task is
    independent and does not interfere with the others. Here, download_task bundles everything a single download
    needs, including all of its parameters; each task owns its own data and never shares or mutates another
    task's state, so data cannot get mixed up even when many tasks run at the same time.
    """

    _prepare_url_to_download(var, lon_min, lon_max, lat_min, lat_max, time_str, time_str_end, depth, level, store_path, dataset_name, version_name, check)

def _done_callback(future, progress, task, total, counter_lock):
    """
    # Callback for parallel download tasks
    # Required for parallel downloading; calling direct_download in parallel directly causes problems

    Callback: invoked as each task finishes, so the progress bar can be updated right away.
    Here, done_callback advances the progress bar whenever a task completes, so completion is
    visible task by task instead of only after every task has finished.
    """

    global parallel_counter
    with counter_lock:
        parallel_counter += 1
        progress.update(task, advance=1, description=f"[cyan]{bar_desc} {parallel_counter}/{total}")

def _download_hourly_func(var, time_s, time_e, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1, interval_hour=3):
    """
    Description:
        Download the data of single time or a series of time

    Parameters:
        var: str, the variable name, such as 'u', 'v', 'temp', 'salt', 'ssh', 'u_b', 'v_b', 'temp_b', 'salt_b' or 'water_u', 'water_v', 'water_temp', 'salinity', 'surf_el', 'water_u_bottom', 'water_v_bottom', 'water_temp_bottom', 'salinity_bottom'
        time_s: str, the start time, such as '2024110100' or '20241101', if add hour, the hour should be 00, 03, 06, 09, 12, 15, 18, 21
        time_e: str, the end time, such as '2024110221' or '20241102', if add hour, the hour should be 00, 03, 06, 09, 12, 15, 18, 21
        lon_min: float, the minimum longitude, default is 0
        lon_max: float, the maximum longitude, default is 359.92
        lat_min: float, the minimum latitude, default is -80
        lat_max: float, the maximum latitude, default is 90
        depth: float, the depth, default is None
        level: int, the level number, default is None
        store_path: str, the path to store the data, default is None
        dataset_name: str, the dataset name, default is None, example: 'GLBv0.08', 'GLBu0.08', 'GLBy0.08'
        version_name: str, the version name, default is None, example: '53.X', '56.3'
        num_workers: int, the number of workers, default is None

    Returns:
        None
    """
    ymdh_time_s, ymdh_time_e = str(time_s), str(time_e)
    if num_workers is not None and num_workers > 1:  # used by the progress bar when downloading with multiple threads
        global parallel_counter
        parallel_counter = 0
        counter_lock = Lock()  # lock for a thread-safe counter
    if ymdh_time_s == ymdh_time_e:
        _prepare_url_to_download(var, lon_min, lon_max, lat_min, lat_max, ymdh_time_s, None, depth, level, store_path, dataset_name, version_name, check)
    elif int(ymdh_time_s) < int(ymdh_time_e):
        print("Downloading a series of files...")
        time_list = _get_time_list(ymdh_time_s, ymdh_time_e, interval_hour, "hour")
        with Progress() as progress:
            task = progress.add_task(f"[cyan]{bar_desc}", total=len(time_list))
            if ftimes == 1:
                if num_workers is None or num_workers <= 1:
                    # Serial mode
                    for i, time_str in enumerate(time_list):
                        _prepare_url_to_download(var, lon_min, lon_max, lat_min, lat_max, time_str, None, depth, level, store_path, dataset_name, version_name, check)
                        progress.update(task, advance=1, description=f"[cyan]{bar_desc} {i + 1}/{len(time_list)}")
                else:
                    # Parallel mode
                    with ThreadPoolExecutor(max_workers=num_workers) as executor:
                        futures = [executor.submit(_download_task, var, time_str, None, lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, check) for time_str in time_list]
                        """ for i, future in enumerate(futures):
                            future.add_done_callback(lambda _: progress.update(task, advance=1, description=f"[cyan]{bar_desc} {i+1}/{len(time_list)}")) """
                        for future in as_completed(futures):
                            _done_callback(future, progress, task, len(time_list), counter_lock)
            else:
                # new_time_list = get_time_list(ymdh_time_s, ymdh_time_e, 3 * ftimes, "hour")
                new_time_list = _get_time_list(ymdh_time_s, ymdh_time_e, interval_hour * ftimes, "hour")
                total_num = len(new_time_list)
                if num_workers is None or num_workers <= 1:
                    # Serial mode
                    for i, time_str in enumerate(new_time_list):
                        time_str_end_index = int(min(len(time_list) - 1, int(i * ftimes + ftimes - 1)))
                        time_str_end = time_list[time_str_end_index]
                        _prepare_url_to_download(var, lon_min, lon_max, lat_min, lat_max, time_str, time_str_end, depth, level, store_path, dataset_name, version_name, check)
                        progress.update(task, advance=1, description=f"[cyan]{bar_desc} {i + 1}/{total_num}")
                else:
                    # Parallel mode
                    with ThreadPoolExecutor(max_workers=num_workers) as executor:
                        futures = [executor.submit(_download_task, var, new_time_list[i], time_list[int(min(len(time_list) - 1, int(i * ftimes + ftimes - 1)))], lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, check) for i in range(total_num)]
                        """ for i, future in enumerate(futures):
                            future.add_done_callback(lambda _: progress.update(task, advance=1, description=f"[cyan]{bar_desc} {i+1}/{total_num}")) """
                        for future in as_completed(futures):
                            _done_callback(future, progress, task, len(time_list), counter_lock)
    else:
        print("[bold red]Please ensure the time_s is no more than time_e")

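When an IDM engine is supplied, the public `download` defined next forces `num_workers = 1` and routes every URL through the `use_idm` branch of `_download_file` above. A sketch of that mode (the executable path is the example given in the docstring below; Windows-only, and the store path is a placeholder):

    download(var="u", time_s="2024110100", time_e="2024110221",
             store_path=r"D:\hycom_data", check=True,
             idm_engine=r"D:\Programs\Internet Download Manager\IDMan.exe")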
1074
|
+
def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1, idm_engine=None, fill_time=None, interval_hour=3):
|
1075
|
+
"""
|
1076
|
+
Description:
|
1077
|
+
Download the data of single time or a series of time
|
1078
|
+
|
1079
|
+
Parameters:
|
1080
|
+
var: str or list, the variable name, such as 'u', 'v', 'temp', 'salt', 'ssh', 'u_b', 'v_b', 'temp_b', 'salt_b' or 'water_u', 'water_v', 'water_temp', 'salinity', 'surf_el', 'water_u_bottom', 'water_v_bottom', 'water_temp_bottom', 'salinity_bottom'
|
1081
|
+
time_s: str, the start time, such as '2024110100' or '20241101', if add hour, the hour should be 00, 03, 06, 09, 12, 15, 18, 21
|
1082
|
+
time_e: str, the end time, such as '2024110221' or '20241102', if add hour, the hour should be 00, 03, 06, 09, 12, 15, 18, 21; default is None, if not set, the data of single time will be downloaded; or same as time_s, the data of single time will be downloaded
|
1083
|
+
lon_min: float, the minimum longitude, default is 0
|
1084
|
+
lon_max: float, the maximum longitude, default is 359.92
|
1085
|
+
lat_min: float, the minimum latitude, default is -80
|
1086
|
+
lat_max: float, the maximum latitude, default is 90
|
1087
|
+
depth: float, the depth, default is None, if you wanna get the data of single depth, you can set the depth, suggest to set the depth in [0, 5000]
|
1088
|
+
level: int, the level number, default is None, if you wanna get the data of single level, you can set the level, suggest to set the level in [1, 40]
|
1089
|
+
store_path: str, the path to store the data, default is None, if not set, the data will be stored in the current working directory
|
1090
|
+
dataset_name: str, the dataset name, default is None, example: 'GLBv0.08', 'GLBu0.08', 'GLBy0.08', if not set, the dataset will be chosen according to the download_time
|
1091
|
+
version_name: str, the version name, default is None, example: '53.X', '56.3', if not set, the version will be chosen according to the download_time
|
1092
|
+
num_workers: int, the number of workers, default is None, if not set, the number of workers will be 1; suggest not to set the number of workers too large
|
1093
|
+
check: bool, whether to check the existing file, default is False, if set to True, the existing file will be checked and not downloaded again; else, the existing file will be covered
|
1094
|
+
ftimes: int, the number of time in one file, default is 1, if set to 1, the data of single time will be downloaded; the maximum is 8, if set to 8, the data of 8 times will be downloaded in one file
|
1095
|
+
idm_engine: str, the IDM engine, default is None, if set, the IDM will be used to download the data; example: "D:\\Programs\\Internet Download Manager\\IDMan.exe"
|
1096
|
+
fill_time: bool or None, the mode to fill the time, default is None. None: only download the data; True: modify the real time of data to the time in the file name; False: check the time in the file name and the real time of data, if not match, delete the file
|
1097
|
+
interval_hour: int, the interval time to download the data, default is 3, if set, the interval time will be used to download the data; example: 3, 6, ...
|
1098
|
+
|
1099
|
+
Returns:
|
1100
|
+
None
    """
    from oafuncs.oa_cmap import get as get_cmap
    from oafuncs.oa_tool import pbar

    _get_initial_data()

    # Print information and handle the dataset and version names
    if dataset_name is None and version_name is None:
        print("The dataset_name and version_name are None, so the dataset and version will be chosen according to the download_time.\nIf there is more than one dataset and version in the time range, the first one will be chosen.")
        print("If you want to choose the dataset and version yourself, please set dataset_name and version_name together.")
    elif dataset_name is None and version_name is not None:
        print("Please ensure the dataset_name is not None.")
        print("If you do not set the dataset_name, both the dataset and version will be chosen according to the download_time.")
    elif dataset_name is not None and version_name is None:
        print("Please ensure the version_name is not None.")
        print("If you do not set the version_name, both the dataset and version will be chosen according to the download_time.")
    else:
        print("Both dataset_name and version_name were set by you.")
        print("Please ensure the dataset_name and version_name are correct.")

    if isinstance(var, list):
        if len(var) == 1:
            var = _convert_full_name_to_short_name(var[0])
        else:
            var = [_convert_full_name_to_short_name(v) for v in var]
    elif isinstance(var, str):
        var = _convert_full_name_to_short_name(var)
    else:
        raise ValueError("The var is invalid")
    if var is False:
        raise ValueError("The var is invalid")
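    # A hedged sketch of what the normalization above is expected to do, based on
    # the short/full name pairs listed in the docstring:
    #   _convert_full_name_to_short_name("water_u") -> "u"
    #   _convert_full_name_to_short_name("u")       -> "u"
    #   an unknown name                             -> False (caught just above)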
    if lon_min < 0 or lon_min > 359.92 or lon_max < 0 or lon_max > 359.92 or lat_min < -80 or lat_min > 90 or lat_max < -80 or lat_max > 90:
        print("Please ensure lon_min, lon_max, lat_min and lat_max are within range")
        print("The range of lon_min, lon_max is 0~359.92")
        print("The range of lat_min, lat_max is -80~90")
        raise ValueError("The lon or lat is invalid")
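
    # Note: longitudes use the 0-360 convention here, so western longitudes must
    # be converted before calling, e.g. 75W is 360 - 75 = 285 on this grid.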

    if ftimes != 1:
        print("Please ensure ftimes is in [1, 8]")
        ftimes = max(min(ftimes, 8), 1)
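    # The clamp above means, for example, ftimes=12 becomes 8 and ftimes=0 becomes 1.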

    if store_path is None:
        store_path = str(Path.cwd())
    else:
        os.makedirs(str(store_path), exist_ok=True)

    if num_workers is not None:
        num_workers = max(min(num_workers, 10), 1)  # not strictly capping the maximum for now; more threads can be opened when only re-checking files
        # num_workers = int(max(num_workers, 1))
    time_s = str(time_s)
    if len(time_s) == 8:
        time_s += "00"
    if time_e is None:
        time_e = time_s[:]
    else:
        time_e = str(time_e)
        if len(time_e) == 8:
            time_e += "21"
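    # Worked example of the padding above: time_s="20241101" -> "2024110100"
    # (start at hour 00) and time_e="20241102" -> "2024110221" (end at hour 21,
    # the last 3-hourly slot of the day).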

    global count_dict
    count_dict = {"success": 0, "fail": 0, "skip": 0, "no_data": 0, "total": 0, "no_data_list": []}
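    # These counters feed the summary printed at the end of the run.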

    """ global current_platform
    current_platform = platform.system() """

    global fsize_dict
    fsize_dict = {}

    global fsize_dict_lock
    fsize_dict_lock = Lock()
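    # Hedged note: fsize_dict appears to cache per-file size information shared
    # across the download threads, hence the Lock guarding it.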

    if fill_time is not None:
        num_workers = 1

    global use_idm, given_idm_engine, idm_download_list, bar_desc
    if idm_engine is not None:
        use_idm = True
        num_workers = 1
        given_idm_engine = idm_engine
        idm_download_list = []
        bar_desc = "Submitting to IDM ..."
    else:
        use_idm = False
        bar_desc = "Downloading ..."

    global match_time
    match_time = fill_time

    global mark_len
    mark_len = 100

    _download_hourly_func(var, time_s, time_e, lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, num_workers, check, ftimes, int(interval_hour))

    if idm_engine is not None:
        print("[bold #ecdbfe]*" * mark_len)
        str_info = "All files have been submitted to IDM for downloading"
        str_info = str_info.center(mark_len, "*")
        print(f"[bold #3dfc40]{str_info}")
        print("[bold #ecdbfe]*" * mark_len)
        if idm_download_list:
            """ file_download_time = 60  # assume one minute per file to start with
            for f in pbar(idm_download_list, cmap='bwr', description='HYCOM: '):
                file_download_start_time = time.time()
                wait_success = 0
                success = False
                while not success:
                    if check_nc(f, print_switch=False):
                        count_dict["success"] += 1
                        success = True
                        # print(f"[bold #3dfc40]File [bold #dfff73]{f} [#3dfc40]has been downloaded successfully")
                        file_download_end_time = time.time()
                        file_download_time = file_download_end_time - file_download_start_time
                        file_download_time = int(file_download_time)
                        # print(f"[bold #3dfc40]Time: {file_download_time} seconds")
                        file_download_time = max(60, file_download_time)  # keep at least one minute between checks
                    else:
                        wait_success += 1
                        # print(f"[bold #ffe5c0]Waiting {file_download_time} seconds to check the file {f}...")
                        time.sleep(file_download_time)
                        if wait_success >= 10:
                            success = True
                            # print(f'{f} download failed')
                            print(f"[bold #ffe5c0]Waited more than 10 times, skipping the file {f}...")
                            count_dict["fail"] += 1
                # print("[bold #ecdbfe]-" * mark_len) """
            remain_list = idm_download_list.copy()
            for f_count in pbar(range(len(idm_download_list)), cmap=get_cmap("diverging_1"), description="HYCOM: "):
                success = False
                while not success:
                    for f in remain_list:
                        if check_nc(f, print_switch=False):
                            count_dict["success"] += 1
                            success = True
                            remain_list.remove(f)
                            break
                    else:  # no file passed the check in this pass; wait instead of busy-polling
                        time.sleep(5)

    count_dict["total"] = count_dict["success"] + count_dict["fail"] + count_dict["skip"] + count_dict["no_data"]
    print("[bold #ecdbfe]=" * mark_len)
    print(f"[bold #ff80ab]Total: {count_dict['total']}\nSuccess: {count_dict['success']}\nFail: {count_dict['fail']}\nSkip: {count_dict['skip']}\nNo data: {count_dict['no_data']}")
    print("[bold #ecdbfe]=" * mark_len)
    if count_dict["fail"] > 0:
        print("[bold #be5528]Please try again later to download the failed data")
    if count_dict["no_data"] > 0:
        if count_dict["no_data"] == 1:
            print(f"[bold #f90000]There is {count_dict['no_data']} data file that does not exist in any dataset and version")
        else:
            print(f"[bold #f90000]There are {count_dict['no_data']} data files that do not exist in any dataset and version")
        for no_data in count_dict["no_data_list"]:
            print(f"[bold #d81b60]{no_data}")
        print("[bold #ecdbfe]=" * mark_len)


if __name__ == "__main__":
    download_dict = {
        "water_u": {"simple_name": "u", "download": 1},
        "water_v": {"simple_name": "v", "download": 1},
        "surf_el": {"simple_name": "ssh", "download": 1},
        "water_temp": {"simple_name": "temp", "download": 1},
        "salinity": {"simple_name": "salt", "download": 1},
        "water_u_bottom": {"simple_name": "u_b", "download": 0},
        "water_v_bottom": {"simple_name": "v_b", "download": 0},
        "water_temp_bottom": {"simple_name": "temp_b", "download": 0},
        "salinity_bottom": {"simple_name": "salt_b", "download": 0},
    }

    var_list = [var_name for var_name in download_dict.keys() if download_dict[var_name]["download"]]
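    # With the flags above, var_list == ["water_u", "water_v", "surf_el", "water_temp", "salinity"].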

    single_var = False

    # draw_time_range(pic_save_folder=r'I:\Delete')

    options = {
        "var": var_list,
        "time_s": "2025010300",
        "time_e": "2025010321",
        "store_path": r"I:\Data\HYCOM\3hourly",
        "lon_min": 105,
        "lon_max": 130,
        "lat_min": 15,
        "lat_max": 45,
        "num_workers": 3,
        "check": True,
        "depth": None,  # or 0-5000 meters
        "level": None,  # or 1-40 levels
        "ftimes": 1,
        # "idm_engine": r"D:\Programs\Internet Download Manager\IDMan.exe",  # not recommended when re-checking for missing files
        "fill_time": None,
    }

    if single_var:
        for var_name in var_list:
            options["var"] = var_name
            download(**options)
    else:
        download(**options)