oafuncs 0.0.79__tar.gz → 0.0.81__tar.gz
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {oafuncs-0.0.79/oafuncs.egg-info → oafuncs-0.0.81}/PKG-INFO +1 -2
- oafuncs-0.0.81/oafuncs/__init__.py +44 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_cmap.py +31 -52
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_down/hycom_3hourly.py +68 -25
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_nc.py +120 -10
- {oafuncs-0.0.79 → oafuncs-0.0.81/oafuncs.egg-info}/PKG-INFO +1 -2
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs.egg-info/SOURCES.txt +1 -1
- {oafuncs-0.0.79 → oafuncs-0.0.81}/setup.py +2 -3
- oafuncs-0.0.79/oafuncs/__init__.py +0 -26
- {oafuncs-0.0.79 → oafuncs-0.0.81}/LICENSE.txt +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/MANIFEST.in +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/README.md +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_data.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_down/User_Agent-list.txt +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_down/__init__.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_down/literature.py +0 -0
- /oafuncs-0.0.79/oafuncs/oa_down/test.py → /oafuncs-0.0.81/oafuncs/oa_down/test_ua.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_draw.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_file.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_help.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_python.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_sign/__init__.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_sign/meteorological.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_sign/ocean.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_sign/scientific.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_tool/__init__.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_tool/email.py +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs.egg-info/dependency_links.txt +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs.egg-info/requires.txt +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs.egg-info/top_level.txt +0 -0
- {oafuncs-0.0.79 → oafuncs-0.0.81}/setup.cfg +0 -0
{oafuncs-0.0.79/oafuncs.egg-info → oafuncs-0.0.81}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: oafuncs
-Version: 0.0.79
+Version: 0.0.81
 Summary: My short description for my project.
 Home-page: https://github.com/Industry-Pays/OAFuncs
 Author: Kun Liu
@@ -13,7 +13,6 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.9.0
oafuncs-0.0.81/oafuncs/__init__.py (new file)

@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# coding=utf-8
+"""
+Author: Liu Kun && 16031215@qq.com
+Date: 2024-09-17 16:09:20
+LastEditors: Liu Kun && 16031215@qq.com
+LastEditTime: 2024-12-13 12:31:06
+FilePath: \\Python\\My_Funcs\\OAFuncs\\oafuncs\\oa_s\\__init__.py
+Description:
+EditPlatform: vscode
+ComputerInfo: XPS 15 9510
+SystemInfo: Windows 11
+Python Version: 3.12
+"""
+
+
+# Importing everything into OAFuncs directly would break the modular design
+# from oafuncs.oa_s.oa_cmap import *
+# from oafuncs.oa_s.oa_data import *
+# from oafuncs.oa_s.oa_draw import *
+# from oafuncs.oa_s.oa_file import *
+# from oafuncs.oa_s.oa_help import *
+# from oafuncs.oa_s.oa_nc import *
+# from oafuncs.oa_s.oa_python import *
+
+# ------------------- 2024-12-13 12:31:06 -------------------
+# path: My_Funcs/OAFuncs/oafuncs/
+from .oa_cmap import *
+from .oa_data import *
+from .oa_draw import *
+from .oa_file import *
+from .oa_help import *
+from .oa_nc import *
+from .oa_python import *
+# ------------------- 2024-12-13 12:31:06 -------------------
+# path: My_Funcs/OAFuncs/oafuncs/oa_down/
+from .oa_down import *
+# ------------------- 2024-12-13 12:31:06 -------------------
+# path: My_Funcs/OAFuncs/oafuncs/oa_sign/
+from .oa_sign import *
+# ------------------- 2024-12-13 12:31:06 -------------------
+# path: My_Funcs/OAFuncs/oafuncs/oa_tool/
+from .oa_tool import *
+# ------------------- 2024-12-13 12:31:06 -------------------
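Note: the new top-level `__init__.py` re-exports every subpackage wildcard-style, so submodule functions stay reachable from the package root. A hedged usage sketch of what that layout exposes, assuming the wildcard imports pick up the `__all__` names shown in the oa_cmap hunks below:

import oafuncs

cmap = oafuncs.choose_cmap("cold_1")            # re-exported from oafuncs.oa_cmap
colors = oafuncs.cmap2colors("viridis", n=16)   # likewise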
{oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_cmap.py

@@ -17,11 +17,9 @@ import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
 
-__all__ = ["show", "
+__all__ = ["show", "cmap2colors", "create_cmap", "create_cmap_rgbtxt", "choose_cmap"]
 
 # ** Visualize a cmap with a filled plot (function adapted from the official docs)
-
-
 def show(colormaps: list):
     """
     Helper function to plot data with associated colormap.
@@ -40,28 +38,28 @@ def show(colormaps: list):
 
 
 # ** Convert a cmap to a list, i.e. a list of colors
-def 
+def cmap2colors(cmap, n=256):
     """
     cmap : cmap name
     n : number of colors to extract
     return : list of extracted colors
-    example : 
+    example : out_colors = cmap2colors('viridis', 256)
     """
     c_map = mpl.colormaps.get_cmap(cmap)
-
-    return 
+    out_colors = [c_map(i) for i in np.linspace(0, 1, n)]
+    return out_colors
 
 
 # ** Custom cmap, multi-color, with optional positions
-def 
+def create_cmap(colors: list, nodes=None, under=None, over=None):  # quick palette from a list of colors
     """
     func : custom cmap; color positions assigned automatically (evenly spaced)
     description : colors can be color names or hex color codes
     param {*} colors colors
     param {*} nodes color positions; evenly spaced when not provided
-    return {*}
-    example : 
-
+    return {*} cmap
+    example : cmap = create_cmap(['#C2B7F3','#B3BBF2','#B0CBF1','#ACDCF0','#A8EEED'])
+              cmap = create_cmap(['aliceblue','skyblue','deepskyblue'],[0.0,0.5,1.0])
     """
     if nodes is None:  # assign positions automatically
         cmap_color = mpl.colors.LinearSegmentedColormap.from_list("mycmap", colors)
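Note: `cmap2colors` now samples the colormap at `n` evenly spaced points in [0, 1]. A minimal standalone sketch of that logic ('viridis' is an arbitrary choice):

# Minimal sketch of the cmap2colors logic above.
import matplotlib as mpl
import numpy as np

c_map = mpl.colormaps.get_cmap("viridis")
out_colors = [c_map(i) for i in np.linspace(0, 1, 5)]     # 5 evenly spaced samples
assert len(out_colors) == 5 and len(out_colors[0]) == 4   # each entry is (r, g, b, a)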
@@ -74,46 +72,27 @@ def create_custom(colors: list, nodes=None, under=None, over=None):
     return cmap_color
 
 
-# ** Custom diverging cmap, white at the center by default
-
-
-def create_diverging(colors: list):
-    """
-    func : custom two-sided cmap, white at the center by default; with an even number of colors the center is white, with an odd number the middle color becomes the center
-    description : colors can be color names or hex color codes
-    param {*} colors
-    return {*}
-    example : diverging_cmap = mk_cmap_diverging(["#00c0ff", "#a1d3ff", "#DCDCDC", "#FFD39B", "#FF8247"])
-    """
-    # custom color positions
-    n = len(colors)
-    nodes = np.linspace(0.0, 1.0, n + 1 if n % 2 == 0 else n)
-    newcolors = colors
-    if n % 2 == 0:
-        newcolors.insert(int(n / 2), "#ffffff")  # even number of colors: white at the center
-    cmap_color = mpl.colors.LinearSegmentedColormap.from_list("mycmap", list(zip(nodes, newcolors)))
-    return cmap_color
-
-
 # ** Build a cmap from an RGB txt file (using a GrADS palette)
-
-
-def create_5rgb_txt(rgb_txt_filepath: str):  # build a cmap from an RGB txt file / from rgb values
+def create_cmap_rgbtxt(rgbtxt_file, split_mark=','):  # build a cmap from an RGB txt file / from rgb values
     """
     func : build a cmap from an RGB txt file
-    description : 
-    param {*} 
+    description : rgbtxt_file='E:/python/colorbar/test.txt'
+    param {*} rgbtxt_file txt file path
     return {*} cmap
-    example : 
+    example : cmap=create_cmap_rgbtxt(path,split_mark=',')  #
+
+    txt example : 251,251,253
+                  225,125,25
+                  250,205,255
     """
-    with open(
+    with open(rgbtxt_file) as fid:
         data = fid.readlines()
     n = len(data)
     rgb = np.zeros((n, 3))
     for i in np.arange(n):
-        rgb[i][0] = data[i].split(
-        rgb[i][1] = data[i].split(
-        rgb[i][2] = data[i].split(
+        rgb[i][0] = data[i].split(split_mark)[0]
+        rgb[i][1] = data[i].split(split_mark)[1]
+        rgb[i][2] = data[i].split(split_mark)[2]
     max_rgb = np.max(rgb)
     if max_rgb > 2:  # rgb values above 2 are assumed to be 0-255 and are normalized
         rgb = rgb / 255.0
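Note: `create_cmap_rgbtxt` reads one `R,G,B` triple per line and normalizes 0-255 values to 0-1. A hedged in-memory sketch of the same parsing; the `ListedColormap` construction at the end is an assumption, since the hunk shows `return icmap` but not how `icmap` is built:

# Hedged sketch of the txt parsing above, on an in-memory palette.
import matplotlib as mpl
import numpy as np

lines = ["251,251,253", "225,125,25", "250,205,255"]  # the docstring's txt example
rgb = np.array([[float(v) for v in line.split(",")] for line in lines])
if rgb.max() > 2:        # values above 2 are treated as 0-255
    rgb = rgb / 255.0
icmap = mpl.colors.ListedColormap(rgb)  # assumed construction; the diff elides it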
@@ -121,7 +100,7 @@ def create_5rgb_txt(rgb_txt_filepath: str):  # build a cmap from an RGB txt file
     return icmap
 
 
-def 
+def choose_cmap(cmap_name=None, query=False):
     """
     description: Choosing a colormap from the list of available colormaps or a custom colormap
     param {*} cmap_name:
@@ -130,9 +109,9 @@ def my_cmap(cmap_name=None, query=False):
     """
 
     my_cmap_dict = {
-        "diverging_1": 
-        "cold_1": 
-        "warm_1": 
+        "diverging_1": create_cmap(["#4e00b3", "#0000FF", "#00c0ff", "#a1d3ff", "#DCDCDC", "#FFD39B", "#FF8247", "#FF0000", "#FF5F9E"]),
+        "cold_1": create_cmap(["#4e00b3", "#0000FF", "#00c0ff", "#a1d3ff", "#DCDCDC"]),
+        "warm_1": create_cmap(["#DCDCDC", "#FFD39B", "#FF8247", "#FF0000", "#FF5F9E"]),
         # "land_1": create_custom(["#3E6436", "#678A59", "#91A176", "#B8A87D", "#D9CBB2"], under="#A6CEE3", over="#FFFFFF"),  # land colors from deep green to light brown: vegetation grading down to sand
         # "ocean_1": create_custom(["#126697", "#2D88B3", "#4EA1C9", "#78B9D8", "#A6CEE3"], under="#8470FF", over="#3E6436"),  # ocean colors from deep to light blue: deep sea grading up to shallows
         # "ocean_land_1": create_custom(
@@ -150,7 +129,7 @@ def my_cmap(cmap_name=None, query=False):
         # "#3E6436",  # deep green (high mountains)
         # ]
         # ),
-        "colorful_1": 
+        "colorful_1": create_cmap(["#6d00db", "#9800cb", "#F2003C", "#ff4500", "#ff7f00", "#FE28A2", "#FFC0CB", "#DDA0DD", "#40E0D0", "#1a66f2", "#00f7fb", "#8fff88", "#E3FF00"]),
     }
     if query:
         for key, _ in my_cmap_dict.items():
@@ -160,7 +139,7 @@ def my_cmap(cmap_name=None, query=False):
         return my_cmap_dict[cmap_name]
     else:
         try:
-            return mpl.
+            return mpl.colormaps.get_cmap(cmap_name)
         except ValueError:
             raise ValueError(f"Unknown cmap name: {cmap_name}")
 
@@ -169,16 +148,16 @@ if __name__ == "__main__":
     # ** Test the custom cmap
     colors = ["#C2B7F3", "#B3BBF2", "#B0CBF1", "#ACDCF0", "#A8EEED"]
     nodes = [0.0, 0.2, 0.4, 0.6, 1.0]
-    c_map = 
+    c_map = create_cmap(colors, nodes)
     show([c_map])
 
     # ** Test the custom diverging cmap
-    diverging_cmap = 
+    diverging_cmap = create_cmap(["#4e00b3", "#0000FF", "#00c0ff", "#a1d3ff", "#DCDCDC", "#FFD39B", "#FF8247", "#FF0000", "#FF5F9E"])
     show([diverging_cmap])
 
     # ** Test building a cmap from an RGB txt file
     file_path = "E:/python/colorbar/test.txt"
-
+    cmap_rgb = create_cmap_rgbtxt(file_path)
 
     # ** Test converting a cmap to a list of colors
-
+    out_colors = cmap2colors("viridis", 256)
{oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_down/hycom_3hourly.py

@@ -55,17 +55,17 @@ data_info["hourly"]["dataset"]["GLBy0.08"]["version"] = {"93.0": {}}
 # Submitting an out-of-range time on the website returns the dataset's actual time range, which is used to correct the ranges below
 # So far only the GLBv0.08 93.0 time range has been corrected, down to the hour
 # For the other datasets the hours default to starting at 00 and ending at 21 for now
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["time_range"] = {"time_start": "
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["56.3"]["time_range"] = {"time_start": "
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.2"]["time_range"] = {"time_start": "
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.8"]["time_range"] = {"time_start": "
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.7"]["time_range"] = {"time_start": "
-data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.9"]["time_range"] = {"time_start": "
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["53.X"]["time_range"] = {"time_start": "1994010112", "time_end": "2015123109"}
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["56.3"]["time_range"] = {"time_start": "2014070112", "time_end": "2016093009"}
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.2"]["time_range"] = {"time_start": "2016050112", "time_end": "2017020109"}
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.8"]["time_range"] = {"time_start": "2017020112", "time_end": "2017060109"}
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["57.7"]["time_range"] = {"time_start": "2017060112", "time_end": "2017100109"}
+data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["92.9"]["time_range"] = {"time_start": "2017100112", "time_end": "2018032009"}
 data_info["hourly"]["dataset"]["GLBv0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018010112", "time_end": "2020021909"}
 # GLBu0.08
-data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["time_range"] = {"time_start": "
+data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018091912", "time_end": "2018120909"}
 # GLBy0.08
-data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["time_range"] = {"time_start": "
+data_info["hourly"]["dataset"]["GLBy0.08"]["version"]["93.0"]["time_range"] = {"time_start": "2018120412", "time_end": "20300904"}
 
 # variable
 variable_info = {
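Note: the time-range stamps above are fixed-width `YYYYMMDDHH` strings; the one eight-digit value, "20300904", is reproduced verbatim from the diff. For example:

from datetime import datetime

datetime.strptime("1994010112", "%Y%m%d%H")   # 1994-01-01 12:00, the GLBv0.08 53.X start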
@@ -141,10 +141,11 @@ data_info["hourly"]["dataset"]["GLBu0.08"]["version"]["93.0"]["url"] = url_930_u
 uv3z_930_y = {}
 ts3z_930_y = {}
 ssh_930_y = {}
-for y_930_y in range(2018, 
+for y_930_y in range(2018, 2030):
     uv3z_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/uv3z/{y_930_y}?"
     ts3z_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/ts3z/{y_930_y}?"
     ssh_930_y[str(y_930_y)] = rf"https://ncss.hycom.org/thredds/ncss/GLBy0.08/expt_93.0/ssh/{y_930_y}?"
+# GLBy0.08 93.0 data time range in each year: year-01-01 12:00 to year+1-01-01 09:00
 url_930_y = {
     "uv3z": uv3z_930_y,
     "ts3z": ts3z_930_y,
@@ -372,7 +373,16 @@ def check_time_in_dataset_and_version(time_input, time_end=None):
     if have_data:
         for d, v, trange in zip(d_list, v_list, trange_list):
             print(f"[bold blue]{d} {v} {trange}")
-
+        if is_single_time:
+            return True
+        else:
+            base_url_s = get_base_url(d_list[0], v_list[0], "u", str(time_start))
+            base_url_e = get_base_url(d_list[0], v_list[0], "u", str(time_end))
+            if base_url_s == base_url_e:
+                return True
+            else:
+                print(f"[bold red]{time_start} to {time_end} is in different datasets or versions, so you can't download them together")
+                return False
     else:
         print(f"[bold red]{time_input_str} is not in any dataset and version")
         return False
@@ -456,7 +466,8 @@ def direct_choose_dataset_and_version(time_input, time_end=None):
     return dataset_name_out, version_name_out
 
 
-def get_base_url(dataset_name, version_name, var, 
+def get_base_url(dataset_name, version_name, var, ymdh_str):
+    year_str = int(ymdh_str[:4])
     url_dict = data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["url"]
     classification_method = data_info["hourly"]["dataset"][dataset_name]["version"][version_name]["classification"]
     if classification_method == "year_different":
@@ -472,6 +483,12 @@ def get_base_url(dataset_name, version_name, var, year_str):
         if base_url is None:
             print("Please ensure the var is in [u,v,temp,salt,ssh,u_b,v_b,temp_b,salt_b]")
     elif classification_method == "var_year_different":
+        if dataset_name == "GLBy0.08" and version_name == "93.0":
+            mdh_str = ymdh_str[4:]
+            # GLBy0.08 93.0
+            # data time range in each year: year-01-01 12:00 to year+1-01-01 09:00
+            if mdh_str <= "010109":
+                year_str = int(ymdh_str[:4]) - 1
         base_url = None
         for key, value in var_group.items():
             if var in value:
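Note: the new branch handles GLBy0.08/93.0's yearly files, which run from Jan 1 12:00 of one year through Jan 1 09:00 of the next, so hours up to `010109` belong to the previous year's file. A worked check (the lexicographic comparison is valid because `MMDDHH` is fixed-width):

# Worked check of the year-rollover rule above.
ymdh_str = "2020010106"        # 2020-01-01 06:00
year_str = int(ymdh_str[:4])   # 2020
if ymdh_str[4:] <= "010109":   # fixed-width MMDDHH, so a string compare is safe
    year_str -= 1              # -> 2019: this hour lives in the 2019 file
assert year_str == 2019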
@@ -482,8 +499,8 @@ def get_base_url(dataset_name, version_name, var, year_str):
     return base_url
 
 
-def get_submit_url(dataset_name, version_name, var, 
-    base_url = get_base_url(dataset_name, version_name, var, 
+def get_submit_url(dataset_name, version_name, var, ymdh_str, query_dict):
+    base_url = get_base_url(dataset_name, version_name, var, ymdh_str)
     if isinstance(query_dict["var"], str):
         query_dict["var"] = [query_dict["var"]]
     target_url = base_url + "&".join(f"var={var}" for var in query_dict["var"]) + "&" + "&".join(f"{key}={value}" for key, value in query_dict.items() if key != "var")
@@ -496,10 +513,37 @@ def clear_existing_file(file_full_path):
         print(f"{file_full_path} has been removed")
 
 
+def _get_file_size(file_path, unit="KB"):
+    # check that the file exists
+    if not os.path.exists(file_path):
+        return "File does not exist"
+
+    # get the file size in bytes
+    file_size = os.path.getsize(file_path)
+
+    # unit conversion table
+    unit_dict = {"PB": 1024**5, "TB": 1024**4, "GB": 1024**3, "MB": 1024**2, "KB": 1024}
+
+    # check that the requested unit is valid
+    if unit not in unit_dict:
+        return "Invalid unit; choose one of PB, TB, GB, MB, KB"
+
+    # convert the file size to the requested unit
+    converted_size = file_size / unit_dict[unit]
+
+    return converted_size
+
+
 def check_existing_file(file_full_path):
     if os.path.exists(file_full_path):
         print(f"[bold #FFA54F]{file_full_path} exists")
-
+        fsize = _get_file_size(file_full_path)
+        if fsize < 5:
+            print(f"[bold #FFA54F]{file_full_path} may be incomplete\nFile size: {fsize:.2f} KB")
+            # clear_existing_file(file_full_path)
+            return False
+        else:
+            return True
     else:
         # print(f'{file_full_path} does not exist')
         return False
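Note: a standalone restatement of the new incomplete-file heuristic: anything under 5 KB is reported as suspect so the caller can re-download it. `_get_file_size` returns an error string rather than a number when the file is missing or the unit is invalid, so `check_existing_file` relies on its own `os.path.exists` check having passed first.

# Standalone sketch of the 5 KB incomplete-file heuristic above.
import os

def looks_complete(path: str, min_kb: float = 5.0) -> bool:
    # same size math as _get_file_size(unit="KB"), but returning a bool
    return os.path.exists(path) and os.path.getsize(path) / 1024 >= min_kb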
@@ -567,11 +611,12 @@ def scrape_and_categorize_proxies(choose_protocol="http"):
 
     return proxies_list
 
+
 def get_proxy():
     ip_list = scrape_and_categorize_proxies(choose_protocol="http")
     choose_ip = random.choice(ip_list)
     proxies = {"http": f"http://{choose_ip}", "https": f"http://{choose_ip}"}
-    print(f
+    print(f"Using proxy: {proxies}")
     return proxies
 
 
@@ -643,7 +688,7 @@ def download_file(target_url, store_path, file_name, check=False):
     # save the file
     with open(filename, 'wb') as f:
         f.write(response.content) """
-
+
     if find_proxy:
         proxies = get_proxy()
         response = s.get(target_url, headers=headers, proxies=proxies, stream=True, timeout=random.randint(5, max_timeout))
@@ -726,7 +771,8 @@ def check_dataset_version(dataset_name, version_name, download_time, download_ti
 
 
 def get_submit_url_var(var, depth, level_num, lon_min, lon_max, lat_min, lat_max, dataset_name, version_name, download_time, download_time_end=None):
-    year_str = str(download_time)[:4]
+    # year_str = str(download_time)[:4]
+    ymdh_str = str(download_time)
     if depth is not None and level_num is not None:
         print("Please ensure the depth or level_num is None")
         print("Progress will use the depth")
@@ -738,10 +784,10 @@ def get_submit_url_var(var, depth, level_num, lon_min, lon_max, lat_min, lat_max
         print(f"Data of single level ({level_num}) will be downloaded...")
         which_mode = "level"
     else:
-        print("Full depth or full level data will be downloaded...")
+        # print("Full depth or full level data will be downloaded...")
         which_mode = "full"
     query_dict = get_query_dict(var, lon_min, lon_max, lat_min, lat_max, download_time, download_time_end, which_mode, depth, level_num)
-    submit_url = get_submit_url(dataset_name, version_name, var, 
+    submit_url = get_submit_url(dataset_name, version_name, var, ymdh_str, query_dict)
     return submit_url
 
 
@@ -992,7 +1038,7 @@ def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, l
 
     """ global current_platform
     current_platform = platform.system() """
-
+
     global find_proxy
     find_proxy = False
 
@@ -1065,7 +1111,7 @@ def how_to_use():
 
 if __name__ == "__main__":
     # help(hycom3h.download)
-    time_s, time_e = "
+    time_s, time_e = "2023010100", "2023123121"
     merge_name = f"{time_s}_{time_e}"  # name of the merged file
     root_path = r"G:\Data\HYCOM\3hourly"
     location_dict = {"west": 105, "east": 130, "south": 15, "north": 45}
@@ -1081,10 +1127,7 @@ if __name__ == "__main__":
         "salinity_bottom": {"simple_name": "salt_b", "download": 0},
     }
 
-    var_list = []
-    for var_name in download_dict.keys():
-        if download_dict[var_name]["download"] == 1:
-            var_list.append(var_name)
+    var_list = [var_name for var_name in download_dict.keys() if download_dict[var_name]["download"]]
 
     # set depth or level, only one can be True
     # if you wanna download all depth or level, set both False
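Note: one behavioral nuance in this rewrite: the old loop tested `== 1`, while the comprehension tests truthiness, so any nonzero `download` value now selects the variable. A quick check:

# Equivalence check for the comprehension above; truthiness replaces "== 1".
download_dict = {"u": {"download": 1}, "v": {"download": 0}}
var_list = [name for name in download_dict.keys() if download_dict[name]["download"]]
assert var_list == ["u"]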
{oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs/oa_nc.py

@@ -19,7 +19,7 @@ import netCDF4 as nc
 import numpy as np
 import xarray as xr
 
-__all__ = ["get_var", "extract5nc", "write2nc", "merge5nc", "modify_var_value", "modify_var_attr", "rename_var_or_dim", "check_ncfile"]
+__all__ = ["get_var", "extract5nc", "write2nc", "merge5nc", "modify_var_value", "modify_var_attr", "rename_var_or_dim", "check_ncfile", "longitude_change", "nc_isel"]
 
 
 def get_var(file, *vars):
@@ -38,7 +38,7 @@ def get_var(file, *vars):
     return datas
 
 
-def extract5nc(file, varname):
+def extract5nc(file, varname, only_value=True):
     """
     Description:
     1. Extract a variable from an nc file
@@ -47,16 +47,22 @@ def extract5nc(file, varname):
     Parameters:
     file: file path
     varname: variable name
+    only_value: whether to keep only the values of the variable and its dimensions
     example: data, dimdict = extract5nc(file_ecm, 'h')
     """
     ds = xr.open_dataset(file)
     vardata = ds[varname]
+    ds.close()
     dims = vardata.dims
     dimdict = {}
     for dim in dims:
-
-
-
+        if only_value:
+            dimdict[dim] = vardata[dim].values
+        else:
+            dimdict[dim] = ds[dim]
+    if only_value:
+        vardata = np.array(vardata)
+    return vardata, dimdict
 
 
 def _numpy_to_nc_type(numpy_type):
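Note: a hedged usage sketch of the new flag (the file name is hypothetical):

# Hedged usage sketch; "ecmwf.nc" is a hypothetical file.
from oafuncs.oa_nc import extract5nc

data, dimdict = extract5nc("ecmwf.nc", "h")                      # numpy array + value-only dims
da, dimdict_xr = extract5nc("ecmwf.nc", "h", only_value=False)   # keeps the xarray objects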
@@ -76,15 +82,27 @@ def _numpy_to_nc_type(numpy_type):
     return numpy_to_nc.get(str(numpy_type), "f4")  # default to 'float32'
 
 
-def 
+def _calculate_scale_and_offset(data, n=16):
+    data_min, data_max = np.nanmin(data), np.nanmax(data)
+    scale_factor = (data_max - data_min) / (2 ** n - 1)
+    add_offset = data_min + 2 ** (n - 1) * scale_factor
+    # S = Q * scale_factor + add_offset
+    return scale_factor, add_offset
+
+
+def write2nc(file, data, varname=None, coords=None, mode='w', scale_offset_switch=True, compile_switch=True):
     """
     description: write data to an nc file
+
     Parameters:
     file: file path
     data: data
     varname: variable name
     coords: coordinates, a dict mapping dimension names to coordinate data
     mode: write mode, 'w' to write, 'a' to append
+    scale_offset_switch: whether to use scale_factor and add_offset, default True
+    compile_switch: whether to use compression parameters, default True
+
     example: write2nc(r'test.nc', data, 'data', {'time': np.linspace(0, 120, 100), 'lev': np.linspace(0, 120, 50)}, 'a')
     """
     # decide whether mode means write or append
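Note: `_calculate_scale_and_offset` is standard linear packing into n-bit integers: with n=16, `scale_factor` maps the data range onto 65535 steps and `add_offset` centers it so the quantized values span the signed int16 range. A worked check of the math:

# Worked example of the packing formula above (n=16), on data in [0, 100].
import numpy as np

data = np.array([0.0, 50.0, 100.0])
n = 16
scale_factor = (data.max() - data.min()) / (2 ** n - 1)   # 100 / 65535
add_offset = data.min() + 2 ** (n - 1) * scale_factor     # centers the range for int16
packed = np.round((data - add_offset) / scale_factor).astype(np.int16)  # [-32768, 0, 32767]
unpacked = packed * scale_factor + add_offset             # S = Q * scale_factor + add_offset
assert np.allclose(unpacked, data, atol=scale_factor)     # exact at the ends, <1 step inside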
@@ -96,6 +114,21 @@ def write2nc(file, data, varname, coords, mode):
     if not os.path.exists(file):
         print("Warning: File doesn't exist. Creating a new file.")
         mode = "w"
+
+    complete = False
+    if varname is None and coords is None:
+        try:
+            data.to_netcdf(file)
+            complete = True
+            # cannot return here
+        except AttributeError:
+            raise ValueError("If varname and coords are None, data must be a DataArray.")
+
+    if complete:
+        return
+
+    kwargs = {'zlib': True, 'complevel': 4}  # compression parameters
+    # kwargs = {"compression": 'zlib', "complevel": 4}  # compression parameters
 
     # open the NetCDF file
     with nc.Dataset(file, mode, format="NETCDF4") as ncfile:
@@ -116,8 +149,17 @@ def write2nc(file, data, varname, coords, mode):
         if add_coords:
             # create the new coordinate
             ncfile.createDimension(dim, len(coord_data))
-
+            if compile_switch:
+                ncfile.createVariable(dim, _numpy_to_nc_type(coord_data.dtype), (dim,), **kwargs)
+            else:
+                ncfile.createVariable(dim, _numpy_to_nc_type(coord_data.dtype), (dim,))
             ncfile.variables[dim][:] = np.array(coord_data)
+
+            if isinstance(coord_data, xr.DataArray):
+                current_var = ncfile.variables[dim]
+                if coord_data.attrs:
+                    for key, value in coord_data.attrs.items():
+                        current_var.setncattr(key, value)
 
         # check whether the variable exists; if it does, remove the original variable
         add_var = True
@@ -127,22 +169,48 @@ def write2nc(file, data, varname, coords, mode):
                 raise ValueError("Shape of data does not match the variable shape.")
             else:
                 # write the data
-                ncfile.variables[varname][:] = data
+                ncfile.variables[varname][:] = np.array(data)
                 add_var = False
                 print(f"Warning: Variable '{varname}' already exists. Replacing it.")
 
         if add_var:
             # create the variable and its dimensions
             dim_names = tuple(coords.keys())  # use the dimension names passed in via coords
-
+            if scale_offset_switch:
+                scale_factor, add_offset = _calculate_scale_and_offset(np.array(data))
+                _FillValue = -32767
+                missing_value = -32767
+                dtype = 'i2'  # short type
+            else:
+                dtype = _numpy_to_nc_type(data.dtype)
+
+            if compile_switch:
+                ncfile.createVariable(varname, dtype, dim_names, **kwargs)
+            else:
+                ncfile.createVariable(varname, dtype, dim_names)
+
+            if scale_offset_switch:  # scale_factor and add_offset must be set before the data is written
+                ncfile.variables[varname].setncattr('scale_factor', scale_factor)
+                ncfile.variables[varname].setncattr('add_offset', add_offset)
+                ncfile.variables[varname].setncattr('_FillValue', _FillValue)
+                ncfile.variables[varname].setncattr('missing_value', missing_value)
+
             # ncfile.createVariable('data', 'f4', ('time','lev'))
 
             # write the data
-            ncfile.variables[varname][:] = data
+            ncfile.variables[varname][:] = np.array(data)
 
         # check that the number of dimensions matches
         if len(data.shape) != len(coords):
             raise ValueError("Number of dimensions does not match the data shape.")
+        # if data carries attribute info, write the attributes too
+        if isinstance(data, xr.DataArray):
+            current_var = ncfile.variables[varname]
+            if data.attrs:
+                for key, value in data.attrs.items():
+                    if key in ["scale_factor", "add_offset", "_FillValue", "missing_value"] and scale_offset_switch:
+                        continue
+                    current_var.setncattr(key, value)
 
 
 def merge5nc(file_list, var_name=None, dim_name=None, target_filename=None):
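Note: a hedged usage sketch of the extended `write2nc` signature, mirroring the docstring example with the new switches spelled out:

import numpy as np
from oafuncs.oa_nc import write2nc

data = np.random.rand(100, 50)
write2nc("test.nc", data, "data",
         {"time": np.linspace(0, 120, 100), "lev": np.linspace(0, 120, 50)},
         mode="w", scale_offset_switch=True, compile_switch=True)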
@@ -330,6 +398,48 @@ def check_ncfile(ncfile, if_delete=False):
         return False
 
 
+def longitude_change(ds, lon_name="longitude", to_which="180"):
+    """
+    Convert longitudes to the -180 to 180 range
+
+    Parameters:
+    lon (numpy.ndarray): longitude array
+
+    Returns:
+    numpy.ndarray: converted longitude array
+    """
+    # return (lon + 180) % 360 - 180
+    # ds = ds.assign_coords(longitude=(((ds.longitude + 180) % 360) - 180)).sortby("longitude")
+    if to_which == "180":
+        # ds = ds.assign_coords(**{lon_name: (((ds[lon_name] + 180) % 360) - 180)}).sortby(lon_name)
+        ds = ds.assign_coords(**{lon_name: (ds[lon_name] + 180) % 360 - 180}).sortby(lon_name)
+    elif to_which == "360":
+        # -180 to 180 -> 0 to 360
+        ds = ds.assign_coords(**{lon_name: (ds[lon_name] + 360) % 360}).sortby(lon_name)
+    return ds
+
+
+def nc_isel(ncfile, dim_name, slice_list):
+    """
+    Description: Choose the data by the index of the dimension
+
+    Parameters:
+    ncfile: str, the path of the netCDF file
+    dim_name: str, the name of the dimension
+    slice_list: list, the index of the dimension
+
+    slice_list example: slice_list = [[y*12+m for m in range(11,14)] for y in range(84)]
+                        or
+                        slice_list = [y * 12 + m for y in range(84) for m in range(11, 14)]
+    """
+    ds = xr.open_dataset(ncfile)
+    slice_list = np.array(slice_list).flatten()
+    slice_list = [int(i) for i in slice_list]
+    ds_new = ds.isel(**{dim_name: slice_list})
+    ds.close()
+    return ds_new
+
+
 if __name__ == "__main__":
     data = np.random.rand(100, 50)
     write2nc(r"test.nc", data, "data", {"time": np.linspace(0, 120, 100), "lev": np.linspace(0, 120, 50)}, "a")
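Note: a quick check of the longitude arithmetic used by `longitude_change`, in both directions:

# Worked check of the modulo conversion above.
import numpy as np

lon = np.array([0.0, 90.0, 180.0, 270.0, 359.92])
to180 = (lon + 180) % 360 - 180   # -> [0., 90., -180., -90., -0.08]
back = (to180 + 360) % 360        # -> [0., 90., 180., 270., 359.92]
assert np.allclose(back, lon)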
{oafuncs-0.0.79 → oafuncs-0.0.81/oafuncs.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: oafuncs
-Version: 0.0.79
+Version: 0.0.81
 Summary: My short description for my project.
 Home-page: https://github.com/Industry-Pays/OAFuncs
 Author: Kun Liu
@@ -13,7 +13,6 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.9.0
{oafuncs-0.0.79 → oafuncs-0.0.81}/oafuncs.egg-info/SOURCES.txt

@@ -19,7 +19,7 @@ oafuncs/oa_down/User_Agent-list.txt
 oafuncs/oa_down/__init__.py
 oafuncs/oa_down/hycom_3hourly.py
 oafuncs/oa_down/literature.py
-oafuncs/oa_down/test.py
+oafuncs/oa_down/test_ua.py
 oafuncs/oa_sign/__init__.py
 oafuncs/oa_sign/meteorological.py
 oafuncs/oa_sign/ocean.py
{oafuncs-0.0.79 → oafuncs-0.0.81}/setup.py

@@ -18,7 +18,7 @@ URL = 'https://github.com/Industry-Pays/OAFuncs'
 EMAIL = 'liukun0312@stu.ouc.edu.cn'
 AUTHOR = 'Kun Liu'
 REQUIRES_PYTHON = '>=3.9.0'
-VERSION = '0.0.79'
+VERSION = '0.0.81'
 
 # What packages are required for this module to be executed?
 REQUIRED = [
@@ -122,7 +122,7 @@ setup(
     python_requires=REQUIRES_PYTHON,
     url=URL,
     packages=find_packages(
-        exclude=["
+        exclude=["oa_*", "oa_down", "oa_sign", "oa_tool"]),
     # packages=find_packages(exclude=["nc", "file", "*.tests.*", "tests.*"]),
     # If your package is a single module, use this instead of 'packages':
     # py_modules=['mypackage'],
@@ -144,7 +144,6 @@ setup(
         'Programming Language :: Python :: 3.10',
         'Programming Language :: Python :: 3.11',
         'Programming Language :: Python :: 3.12',
-        'Programming Language :: Python :: 3.13',
         'Programming Language :: Python :: Implementation :: CPython',
         'Programming Language :: Python :: Implementation :: PyPy'
     ],
oafuncs-0.0.79/oafuncs/__init__.py (removed)

@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-'''
-Author: Liu Kun && 16031215@qq.com
-Date: 2024-09-17 16:09:20
-LastEditors: Liu Kun && 16031215@qq.com
-LastEditTime: 2024-10-14 17:08:57
-FilePath: \\Python\\My_Funcs\\OAFuncs\\OAFuncs\\__init__.py
-Description:
-EditPlatform: vscode
-ComputerInfo: XPS 15 9510
-SystemInfo: Windows 11
-Python Version: 3.11
-'''
-
-# Importing everything into OAFuncs directly would break the modular design
-from .oa_cmap import *
-from .oa_data import *
-from .oa_down import *
-from .oa_draw import *
-from .oa_file import *
-from .oa_help import *
-from .oa_nc import *
-from .oa_python import *
-from .oa_sign import *
-from .oa_tool import *
|