oafuncs 0.0.98.42__tar.gz → 0.0.98.44__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. {oafuncs-0.0.98.42/oafuncs.egg-info → oafuncs-0.0.98.44}/PKG-INFO +9 -3
  2. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/README.md +8 -2
  3. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/__init__.py +3 -1
  4. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/netcdf_write.py +149 -178
  5. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/plot_dataset.py +8 -3
  6. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/replace_file_content.py +2 -2
  7. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_data.py +2 -123
  8. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_file.py +3 -0
  9. oafuncs-0.0.98.44/oafuncs/oa_geo.py +148 -0
  10. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_nc.py +9 -5
  11. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44/oafuncs.egg-info}/PKG-INFO +9 -3
  12. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs.egg-info/SOURCES.txt +1 -0
  13. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/setup.py +1 -1
  14. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/LICENSE.txt +0 -0
  15. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/MANIFEST.in +0 -0
  16. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_data/hycom.png +0 -0
  17. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_data/oafuncs.png +0 -0
  18. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/cprogressbar.py +0 -0
  19. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/data_interp.py +0 -0
  20. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/email.py +0 -0
  21. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/netcdf_merge.py +0 -0
  22. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/netcdf_modify.py +0 -0
  23. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/parallel.py +0 -0
  24. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/_script/parallel_bak.py +0 -0
  25. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_cmap.py +0 -0
  26. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_date.py +0 -0
  27. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/User_Agent-list.txt +0 -0
  28. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/__init__.py +0 -0
  29. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/hycom_3hourly.py +0 -0
  30. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/idm.py +0 -0
  31. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/literature.py +0 -0
  32. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/read_proxy.py +0 -0
  33. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/test_ua.py +0 -0
  34. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_down/user_agent.py +0 -0
  35. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_draw.py +0 -0
  36. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_help.py +0 -0
  37. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_model/__init__.py +0 -0
  38. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_model/roms/__init__.py +0 -0
  39. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_model/roms/test.py +0 -0
  40. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_model/wrf/__init__.py +0 -0
  41. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_model/wrf/little_r.py +0 -0
  42. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_python.py +0 -0
  43. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_sign/__init__.py +0 -0
  44. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_sign/meteorological.py +0 -0
  45. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_sign/ocean.py +0 -0
  46. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_sign/scientific.py +0 -0
  47. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs/oa_tool.py +0 -0
  48. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs.egg-info/dependency_links.txt +0 -0
  49. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs.egg-info/requires.txt +0 -0
  50. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/oafuncs.egg-info/top_level.txt +0 -0
  51. {oafuncs-0.0.98.42 → oafuncs-0.0.98.44}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: oafuncs
3
- Version: 0.0.98.42
3
+ Version: 0.0.98.44
4
4
  Summary: Oceanic and Atmospheric Functions
5
5
  Home-page: https://github.com/Industry-Pays/OAFuncs
6
6
  Author: Kun Liu
@@ -65,8 +65,6 @@ Just for the convenience of daily use, some complex operations are integrated in
65
65
  The code will be optimized and updated from time to time, with additions, deletions, or modifications…
66
66
 
67
67
  Existing functions will not be completely removed, they might just have a different function name, or the parameter passing might have been optimized…
68
-
69
- Note: If there are any requirements, you can email to liukun0312@stu.ouc.edu.cn. Within my capabilities, I can consider implementing them.
70
68
  ```
71
69
 
72
70
  ## PyPI
@@ -81,6 +79,11 @@ https://pypi.org/project/oafuncs
81
79
  https://github.com/Industry-Pays/OAFuncs
82
80
  ```
83
81
 
82
+ ## Download information
83
+ ```html
84
+ https://pypistats.org/packages/oafuncs
85
+ ```
86
+
84
87
  ## Example
85
88
 
86
89
  ```python
@@ -182,3 +185,6 @@ query()
182
185
  <img title="" src="./oafuncs/data_store/OAFuncs.png" alt="">
183
186
 
184
187
  <img title="OAFuncs" src="https://raw.githubusercontent.com/Industry-Pays/OAFuncs/main/oafuncs/_data/oafuncs.png" alt="OAFuncs">
188
+
189
+ ## Wiki
190
+ 更多内容,查看[wiki](https://opendeep.wiki/Industry-Pays/OAFuncs/introduction)
@@ -12,8 +12,6 @@ Just for the convenience of daily use, some complex operations are integrated in
12
12
  The code will be optimized and updated from time to time, with additions, deletions, or modifications…
13
13
 
14
14
  Existing functions will not be completely removed, they might just have a different function name, or the parameter passing might have been optimized…
15
-
16
- Note: If there are any requirements, you can email to liukun0312@stu.ouc.edu.cn. Within my capabilities, I can consider implementing them.
17
15
  ```
18
16
 
19
17
  ## PyPI
@@ -28,6 +26,11 @@ https://pypi.org/project/oafuncs
28
26
  https://github.com/Industry-Pays/OAFuncs
29
27
  ```
30
28
 
29
+ ## Download information
30
+ ```html
31
+ https://pypistats.org/packages/oafuncs
32
+ ```
33
+
31
34
  ## Example
32
35
 
33
36
  ```python
@@ -129,3 +132,6 @@ query()
129
132
  <img title="" src="./oafuncs/data_store/OAFuncs.png" alt="">
130
133
 
131
134
  <img title="OAFuncs" src="https://raw.githubusercontent.com/Industry-Pays/OAFuncs/main/oafuncs/_data/oafuncs.png" alt="OAFuncs">
135
+
136
+ ## Wiki
137
+ 更多内容,查看[wiki](https://opendeep.wiki/Industry-Pays/OAFuncs/introduction)
@@ -40,4 +40,6 @@ from .oa_tool import *
40
40
  # from ._script import *
41
41
  # ------------------- 2025-03-16 15:56:01 -------------------
42
42
  from .oa_date import *
43
- # ------------------- 2025-03-27 16:56:57 -------------------
43
+ # ------------------- 2025-03-27 16:56:57 -------------------
44
+ from .oa_geo import *
45
+ # ------------------- 2025-09-04 14:08:26 -------------------
@@ -1,55 +1,28 @@
1
1
  import os
2
2
  import warnings
3
-
4
- import netCDF4 as nc
5
3
  import numpy as np
6
4
  import xarray as xr
5
+ import netCDF4 as nc
7
6
 
8
7
  warnings.filterwarnings("ignore", category=RuntimeWarning)
9
8
 
10
-
11
-
12
- def _nan_to_fillvalue(ncfile,set_fill_value):
9
+ def _get_dtype_info(dtype):
13
10
  """
14
- NetCDF 文件中所有变量的 NaN 和掩码值替换为其 _FillValue 属性(若无则自动添加 _FillValue=-32767 并替换)。
15
- 同时处理掩码数组中的无效值。
16
- 仅对数值型变量(浮点型、整型)生效。
11
+ 根据输入的 dtype 返回其 numpy_type, clip_min, clip_max。
12
+ 支持 int8, int16, int32, int64 四种整数类型。
13
+ 简化处理:不使用fill_value,所有特殊值统一为NaN。
14
+ 使用完整的数据类型范围,不预留填充值空间。
17
15
  """
18
- with nc.Dataset(ncfile, "r+") as ds:
19
- for var_name in ds.variables:
20
- var = ds.variables[var_name]
21
- # 只处理数值类型变量 (f:浮点型, i:有符号整型, u:无符号整型)
22
- if var.dtype.kind not in ["f", "i", "u"]:
23
- continue
24
-
25
- # 读取数据
26
- arr = var[:]
27
-
28
- # 确定填充值
29
- if "_FillValue" in var.ncattrs():
30
- fill_value = var.getncattr("_FillValue")
31
- elif hasattr(var, "missing_value"):
32
- fill_value = var.getncattr("missing_value")
33
- else:
34
- fill_value = set_fill_value
35
- try:
36
- var.setncattr("_FillValue", fill_value)
37
- except Exception:
38
- # 某些变量可能不允许动态添加 _FillValue
39
- continue
40
-
41
- # 处理掩码数组
42
- if hasattr(arr, "mask"):
43
- # 如果是掩码数组,将掩码位置的值设为 fill_value
44
- if np.any(arr.mask):
45
- arr = np.where(arr.mask, fill_value, arr.data if hasattr(arr, "data") else arr)
46
-
47
- # 处理剩余 NaN 和无穷值
48
- if arr.dtype.kind in ["f", "i", "u"] and np.any(~np.isfinite(arr)):
49
- arr = np.nan_to_num(arr, nan=fill_value, posinf=fill_value, neginf=fill_value)
50
-
51
- # 写回变量
52
- var[:] = arr
16
+ dtype_map = {
17
+ "int8": (np.int8, np.iinfo(np.int8).min, np.iinfo(np.int8).max),
18
+ "int16": (np.int16, np.iinfo(np.int16).min, np.iinfo(np.int16).max),
19
+ "int32": (np.int32, np.iinfo(np.int32).min, np.iinfo(np.int32).max),
20
+ "int64": (np.int64, np.iinfo(np.int64).min, np.iinfo(np.int64).max),
21
+ }
22
+ if dtype not in dtype_map:
23
+ raise ValueError(f"Unsupported dtype: {dtype}. Supported types are 'int8', 'int16', 'int32', and 'int64'.")
24
+
25
+ return dtype_map[dtype]
53
26
 
54
27
 
55
28
  def _numpy_to_nc_type(numpy_type):
@@ -72,109 +45,89 @@ def _numpy_to_nc_type(numpy_type):
72
45
 
73
46
  def _calculate_scale_and_offset(data, dtype="int32"):
74
47
  """
75
- 只对有效数据(非NaN、非填充值、非自定义缺失值)计算scale_factor和add_offset。
76
- 使用 int32 类型,n=32
48
+ 只对有效数据(非NaN、非无穷值、非自定义缺失值)计算scale_factor和add_offset。
49
+ 为填充值保留最小值位置,有效数据范围为 [clip_min+1, clip_max]。
77
50
  """
78
51
  if not isinstance(data, np.ndarray):
79
52
  raise ValueError("Input data must be a NumPy array.")
80
-
81
- if dtype == "int32":
82
- n = 32
83
- fill_value = np.iinfo(np.int32).min # -2147483648
84
- max_packed_value = np.iinfo(np.int32).max # 2147483647
85
- min_packed_value = np.iinfo(np.int32).min + 1 # -2147483647 (保留最小值作为填充值)
86
- elif dtype == "int16":
87
- n = 16
88
- fill_value = np.iinfo(np.int16).min # -32768
89
- max_packed_value = np.iinfo(np.int16).max # 32767
90
- min_packed_value = np.iinfo(np.int16).min + 1 # -32767 (保留最小值作为填充值)
91
- else:
92
- raise ValueError("Unsupported dtype. Supported types are 'int16' and 'int32'.")
93
53
 
94
- # 有效掩码:非NaN、非inf、非fill_value
95
- valid_mask = np.isfinite(data) & (data != fill_value)
54
+ np_dtype, clip_min, clip_max = _get_dtype_info(dtype)
55
+
56
+ # 创建有效数据掩码,只排除NaN和无穷值
57
+ valid_mask = np.isfinite(data)
96
58
  if hasattr(data, "mask") and np.ma.is_masked(data):
97
59
  valid_mask &= ~data.mask
98
60
 
99
- if np.any(valid_mask):
100
- data_min = np.min(data[valid_mask])
101
- data_max = np.max(data[valid_mask])
102
-
103
- # 添加一个小的缓冲以确保所有值都在范围内,但不要过大
104
- data_range = data_max - data_min
105
- if data_range > 0:
106
- buffer = data_range * 1e-6 # 使用相对缓冲而不是绝对值1
107
- data_min -= buffer
108
- data_max += buffer
109
- else:
110
- data_min, data_max = 0, 1
61
+ # 如果没有有效数据,返回默认值
62
+ if not np.any(valid_mask):
63
+ return 1.0, 0.0
64
+
65
+ # 基于有效数据计算最小值和最大值
66
+ data_min = np.min(data[valid_mask])
67
+ data_max = np.max(data[valid_mask])
111
68
 
112
- # 防止scale为0,且保证scale/offset不会影响缺省值
69
+ # 防止 scale 0
113
70
  if data_max == data_min:
114
71
  scale_factor = 1.0
115
72
  add_offset = data_min
116
73
  else:
117
- # 使用可用的打包值范围计算scale_factor
118
- packed_range = max_packed_value - min_packed_value
119
- scale_factor = (data_max - data_min) / packed_range
74
+ # 使用数据中心点作为offset
120
75
  add_offset = (data_max + data_min) / 2.0
76
+
77
+ # 计算数据范围相对于中心点的最大偏移
78
+ max_deviation = max(abs(data_max - add_offset), abs(data_min - add_offset))
79
+
80
+ # 可用的整数范围(为填充值保留最小值)
81
+ available_range = min(abs(clip_min + 1), abs(clip_max))
82
+ scale_factor = max_deviation / available_range
83
+
121
84
  return scale_factor, add_offset
122
85
 
123
86
 
124
- def _data_to_scale_offset(data, scale, offset, dtype='int32'):
87
+ def _data_to_scale_offset(data, scale, offset, dtype="int32"):
125
88
  """
126
- 只对有效数据做缩放,NaN/inf/填充值直接赋为fill_value。
127
- 掩码区域的值会被保留并进行缩放,除非掩码本身标记为无效。
128
- 使用 int32 类型
89
+ 将数据应用 scale 和 offset 转换,转换为整型以实现压缩。
90
+ NaN、inf 和掩码值将被转换为指定数据类型的最小值作为填充值。
91
+
92
+ 转换公式:scaled_value = (original_value - add_offset) / scale_factor
93
+ 返回整型数组,用最小值表示无效数据
129
94
  """
130
95
  if not isinstance(data, np.ndarray):
131
96
  raise ValueError("Input data must be a NumPy array.")
132
-
133
- if dtype == "int32":
134
- # n = 32
135
- np_dtype = np.int32
136
- fill_value = np.iinfo(np.int32).min # -2147483648
137
- clip_min = np.iinfo(np.int32).min + 1 # -2147483647
138
- clip_max = np.iinfo(np.int32).max # 2147483647
139
- elif dtype == "int16":
140
- # n = 16
141
- np_dtype = np.int16
142
- fill_value = np.iinfo(np.int16).min # -32768
143
- clip_min = np.iinfo(np.int16).min + 1 # -32767
144
- clip_max = np.iinfo(np.int16).max # 32767
145
- else:
146
- raise ValueError("Unsupported dtype. Supported types are 'int16' and 'int32'.")
147
97
 
148
- # 创建掩码,只排除 NaN/inf 和显式的填充值
98
+ np_dtype, clip_min, clip_max = _get_dtype_info(dtype)
99
+ fill_value = clip_min # 使用数据类型的最小值作为填充值
100
+
101
+ # 创建输出数组,初始化为填充值
102
+ result = np.full(data.shape, fill_value, dtype=np_dtype)
103
+
104
+ # 只对有限值进行转换
149
105
  valid_mask = np.isfinite(data)
150
- valid_mask &= data != fill_value
151
-
152
- # 如果数据有掩码属性,还需考虑掩码
106
+
107
+ # 对于掩码数组,排除掩码区域
153
108
  if hasattr(data, "mask") and np.ma.is_masked(data):
154
- # 只有掩码标记的区域视为无效
155
109
  valid_mask &= ~data.mask
156
-
157
- # 初始化结果数组为填充值
158
- result = np.full_like(data, fill_value, dtype=np_dtype)
159
110
 
160
111
  if np.any(valid_mask):
161
- # 标准的scale/offset转换公式:packed_value = (unpacked_value - add_offset) / scale_factor
112
+ # 进行scale/offset转换
162
113
  scaled = (data[valid_mask] - offset) / scale
163
- # 四舍五入到最近的整数
164
- scaled = np.round(scaled).astype(np_dtype)
165
- # clip到整数范围,保留最大范围供转换
166
- scaled = np.clip(scaled, clip_min, clip_max) # 不使用最小值,保留做 _FillValue
167
- result[valid_mask] = scaled
114
+ # 四舍五入并转换为目标整型,同时确保在有效范围内
115
+ scaled_int = np.round(scaled).astype(np_dtype)
116
+ # 由于我们使用了最小值作为填充值,所以有效数据范围是 [clip_min+1, clip_max]
117
+ scaled_int = np.clip(scaled_int, clip_min + 1, clip_max)
118
+ result[valid_mask] = scaled_int
168
119
 
169
- return result
120
+ return result, fill_value
170
121
 
171
122
 
172
- def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='int32',scale_offset_switch=True, compile_switch=True, preserve_mask_values=True):
123
+ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='int16', scale_offset_switch=True, compile_switch=True, preserve_mask_values=True, missing_value=None):
173
124
  """
174
125
  保存数据到 NetCDF 文件,支持 xarray 对象(DataArray 或 Dataset)和 numpy 数组。
175
126
 
176
- 仅对数据变量中数值型数据进行压缩转换(利用 scale_factor/add_offset 转换后转为 int32),
127
+ 仅对数据变量中数值型数据进行压缩转换(利用 scale_factor/add_offset 转换后转为指定整数类型),
177
128
  非数值型数据以及所有坐标变量将禁用任何压缩,直接保存原始数据。
129
+
130
+ 简化处理:所有特殊值(missing_value、掩码、无穷值等)统一转换为NaN处理。
178
131
 
179
132
  参数:
180
133
  - file: 保存文件的路径
@@ -182,72 +135,80 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
182
135
  - varname: 变量名(仅适用于传入 numpy 数组或 DataArray 时)
183
136
  - coords: 坐标字典(numpy 数组分支时使用),所有坐标变量均不压缩
184
137
  - mode: "w"(覆盖)或 "a"(追加)
185
- - convert_dtype: 转换为的数值类型("int16" "int32"),默认为 "int32"
138
+ - convert_dtype: 转换为的数值类型("int8", "int16", "int32", "int64"),默认为 "int32"
186
139
  - scale_offset_switch: 是否对数值型数据变量进行压缩转换
187
140
  - compile_switch: 是否启用 NetCDF4 的 zlib 压缩(仅针对数值型数据有效)
188
- - missing_value: 自定义缺失值,将被替换为 fill_value
189
141
  - preserve_mask_values: 是否保留掩码区域的原始值(True)或将其替换为缺省值(False)
142
+ - missing_value: 自定义缺失值,将被替换为 NaN
190
143
  """
191
- if convert_dtype not in ["int16", "int32"]:
144
+ if convert_dtype not in ["int8", "int16", "int32", "int64"]:
192
145
  convert_dtype = "int32"
193
146
  nc_dtype = _numpy_to_nc_type(convert_dtype)
194
- # fill_value = np.iinfo(np.convert_dtype).min # -2147483648 或 -32768
195
- # fill_value = np.iinfo(eval('np.' + convert_dtype)).min # -2147483648 或 -32768
196
- np_dtype = getattr(np, convert_dtype) # 更安全的类型获取方式
197
- fill_value = np.iinfo(np_dtype).min
147
+
198
148
  # ----------------------------------------------------------------------------
199
- # 处理 xarray 对象(DataArray 或 Dataset)的情况
149
+ # 处理 xarray 对象(DataArray 或 Dataset
200
150
  if isinstance(data, (xr.DataArray, xr.Dataset)):
201
151
  encoding = {}
202
-
203
152
  if isinstance(data, xr.DataArray):
204
153
  if data.name is None:
205
154
  data = data.rename("data")
206
155
  varname = data.name if varname is None else varname
207
156
  arr = np.array(data.values)
208
- try:
209
- data_missing_val = data.attrs.get("missing_value")
210
- except AttributeError:
211
- data_missing_val = data.attrs.get("_FillValue", None)
212
- # 只对有效数据计算scale/offset
213
- valid_mask = np.ones(arr.shape, dtype=bool) # 默认所有值都有效
214
- if arr.dtype.kind in ["f", "i", "u"]: # 仅对数值数据应用isfinite
157
+ data_missing_val = data.attrs.get("missing_value", None)
158
+
159
+ valid_mask = np.ones(arr.shape, dtype=bool)
160
+ if arr.dtype.kind in ["f", "i", "u"]:
215
161
  valid_mask = np.isfinite(arr)
216
162
  if data_missing_val is not None:
217
163
  valid_mask &= arr != data_missing_val
218
164
  if hasattr(arr, "mask"):
219
- valid_mask &= ~getattr(arr, "mask", False)
165
+ valid_mask &= ~arr.mask
166
+
220
167
  if np.issubdtype(arr.dtype, np.number) and scale_offset_switch:
168
+ # 确保有有效数据用于计算scale/offset
169
+ if not np.any(valid_mask):
170
+ # 如果没有有效数据,不进行压缩转换
171
+ for k in ["_FillValue", "missing_value"]:
172
+ if k in data.attrs:
173
+ del data.attrs[k]
174
+ data.to_dataset(name=varname).to_netcdf(file, mode=mode)
175
+ return
176
+
221
177
  arr_valid = arr[valid_mask]
222
178
  scale, offset = _calculate_scale_and_offset(arr_valid, convert_dtype)
223
- # 写入前处理无效值(只在这里做!)
179
+
180
+ # 创建需要转换的数据副本,但不修改特殊值
224
181
  arr_to_save = arr.copy()
225
- # 处理自定义缺失值
182
+
183
+ # 只处理自定义缺失值,转换为NaN(让后面统一处理)
226
184
  if data_missing_val is not None:
227
- arr_to_save[arr == data_missing_val] = fill_value
228
- # 处理 NaN/inf
229
- arr_to_save[~np.isfinite(arr_to_save)] = fill_value
230
- new_values = _data_to_scale_offset(arr_to_save, scale, offset)
185
+ arr_to_save[arr == data_missing_val] = np.nan
186
+
187
+ # 进行压缩转换(_data_to_scale_offset会正确处理NaN和掩码)
188
+ new_values, fill_value = _data_to_scale_offset(arr_to_save, scale, offset, convert_dtype)
231
189
  new_da = data.copy(data=new_values)
232
- # 移除 _FillValue 和 missing_value 属性
190
+
191
+ # 清除原有的填充值属性,设置新的压缩属性
233
192
  for k in ["_FillValue", "missing_value"]:
234
193
  if k in new_da.attrs:
235
194
  del new_da.attrs[k]
195
+
236
196
  new_da.attrs["scale_factor"] = float(scale)
237
197
  new_da.attrs["add_offset"] = float(offset)
198
+
238
199
  encoding[varname] = {
239
200
  "zlib": compile_switch,
240
201
  "complevel": 4,
241
202
  "dtype": nc_dtype,
242
- # "_FillValue": -2147483648,
203
+ "_FillValue": fill_value, # 使用计算出的填充值
243
204
  }
244
205
  new_da.to_dataset(name=varname).to_netcdf(file, mode=mode, encoding=encoding)
245
206
  else:
207
+ # 对于非数值数据或不压缩的情况,移除填充值属性防止冲突
246
208
  for k in ["_FillValue", "missing_value"]:
247
209
  if k in data.attrs:
248
210
  del data.attrs[k]
249
211
  data.to_dataset(name=varname).to_netcdf(file, mode=mode)
250
- _nan_to_fillvalue(file, fill_value)
251
212
  return
252
213
 
253
214
  else: # Dataset 情况
@@ -256,19 +217,16 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
256
217
  for var in data.data_vars:
257
218
  da = data[var]
258
219
  arr = np.array(da.values)
259
- try:
260
- data_missing_val = da.attrs.get("missing_value")
261
- except AttributeError:
262
- data_missing_val = da.attrs.get("_FillValue", None)
263
- valid_mask = np.ones(arr.shape, dtype=bool) # 默认所有值都有效
264
- if arr.dtype.kind in ["f", "i", "u"]: # 仅对数值数据应用isfinite
220
+ data_missing_val = da.attrs.get("missing_value", None)
221
+
222
+ valid_mask = np.ones(arr.shape, dtype=bool)
223
+ if arr.dtype.kind in ["f", "i", "u"]:
265
224
  valid_mask = np.isfinite(arr)
266
225
  if data_missing_val is not None:
267
226
  valid_mask &= arr != data_missing_val
268
227
  if hasattr(arr, "mask"):
269
- valid_mask &= ~getattr(arr, "mask", False)
228
+ valid_mask &= ~arr.mask
270
229
 
271
- # 创建属性的副本以避免修改原始数据集
272
230
  attrs = da.attrs.copy()
273
231
  for k in ["_FillValue", "missing_value"]:
274
232
  if k in attrs:
@@ -285,23 +243,21 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
285
243
  scale, offset = _calculate_scale_and_offset(arr_valid, convert_dtype)
286
244
  arr_to_save = arr.copy()
287
245
 
288
- # 使用与DataArray相同的逻辑,使用_data_to_scale_offset处理数据
289
- # 处理自定义缺失值
246
+ # 只处理自定义缺失值,转换为NaN(让后面统一处理)
290
247
  if data_missing_val is not None:
291
- arr_to_save[arr == data_missing_val] = fill_value
292
- # 处理 NaN/inf
293
- arr_to_save[~np.isfinite(arr_to_save)] = fill_value
294
- new_values = _data_to_scale_offset(arr_to_save, scale, offset)
248
+ arr_to_save[arr == data_missing_val] = np.nan
249
+
250
+ # 进行压缩转换(_data_to_scale_offset会正确处理NaN和掩码)
251
+ new_values, fill_value = _data_to_scale_offset(arr_to_save, scale, offset, convert_dtype)
295
252
  new_da = xr.DataArray(new_values, dims=da.dims, coords=da.coords, attrs=attrs)
296
253
  new_da.attrs["scale_factor"] = float(scale)
297
254
  new_da.attrs["add_offset"] = float(offset)
298
- # 不设置_FillValue属性,改为使用missing_value
299
- # new_da.attrs["missing_value"] = -2147483648
300
255
  new_vars[var] = new_da
301
256
  encoding[var] = {
302
257
  "zlib": compile_switch,
303
258
  "complevel": 4,
304
259
  "dtype": nc_dtype,
260
+ "_FillValue": fill_value, # 使用计算出的填充值
305
261
  }
306
262
  else:
307
263
  new_vars[var] = xr.DataArray(arr, dims=da.dims, coords=da.coords, attrs=attrs)
@@ -309,7 +265,6 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
309
265
  # 确保坐标变量被正确复制
310
266
  new_ds = xr.Dataset(new_vars, coords=data.coords.copy())
311
267
  new_ds.to_netcdf(file, mode=mode, encoding=encoding if encoding else None)
312
- _nan_to_fillvalue(file, fill_value)
313
268
  return
314
269
 
315
270
  # 处理纯 numpy 数组情况
@@ -320,12 +275,12 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
320
275
  data = np.asarray(data)
321
276
  is_numeric = np.issubdtype(data.dtype, np.number)
322
277
 
278
+ # 处理缺失值
323
279
  if hasattr(data, "mask") and np.ma.is_masked(data):
324
280
  # 处理掩码数组,获取缺失值
325
281
  data = data.data
326
- missing_value = getattr(data, "missing_value", None)
327
- else:
328
- missing_value = None
282
+ if missing_value is None:
283
+ missing_value = getattr(data, "missing_value", None)
329
284
 
330
285
  try:
331
286
  with nc.Dataset(file, mode, format="NETCDF4") as ncfile:
@@ -353,44 +308,60 @@ def save_to_nc(file, data, varname=None, coords=None, mode="w", convert_dtype='i
353
308
 
354
309
  # 确保有有效数据
355
310
  if not np.any(valid_mask):
356
- # 如果没有有效数据,不进行压缩,直接保存原始数据类型
311
+ # 如果没有有效数据,不进行压缩,直接保存原始数据类型
357
312
  dtype = _numpy_to_nc_type(data.dtype)
358
313
  var = ncfile.createVariable(varname, dtype, dims, zlib=False)
359
- # 确保没有 NaN
360
- clean_data = np.nan_to_num(data, nan=missing_value if missing_value is not None else fill_value)
314
+ # 确保没有 NaN,直接用0替换
315
+ clean_data = np.nan_to_num(data, nan=0.0)
361
316
  var[:] = clean_data
362
- return
363
-
364
- # 计算 scale 和 offset 仅使用有效区域数据
317
+ return # 计算 scale 和 offset 仅使用有效区域数据
365
318
  arr_valid = arr_to_save[valid_mask]
366
319
  scale, offset = _calculate_scale_and_offset(arr_valid, convert_dtype)
367
320
 
368
- # 执行压缩转换
369
- new_data = _data_to_scale_offset(arr_to_save, scale, offset)
321
+ # 只处理自定义缺失值,转换为NaN
322
+ if missing_value is not None:
323
+ arr_to_save[arr == missing_value] = np.nan
324
+
325
+ # 执行压缩转换(_data_to_scale_offset会正确处理NaN和掩码)
326
+ new_data, fill_value = _data_to_scale_offset(arr_to_save, scale, offset, convert_dtype)
370
327
 
371
328
  # 创建变量并设置属性
372
- var = ncfile.createVariable(varname, nc_dtype, dims, zlib=compile_switch)
329
+ var = ncfile.createVariable(varname, nc_dtype, dims, zlib=compile_switch, fill_value=fill_value)
373
330
  var.scale_factor = scale
374
331
  var.add_offset = offset
375
- var._FillValue = fill_value # 明确设置填充值
376
332
  var[:] = new_data
377
333
  else:
334
+ # 非压缩情况,直接保存但要处理特殊值
378
335
  dtype = _numpy_to_nc_type(data.dtype)
336
+
337
+ clean_data = data.copy()
338
+
339
+ # 处理自定义缺失值(转换为NaN)
340
+ if missing_value is not None:
341
+ clean_data[data == missing_value] = np.nan
342
+
343
+ # 对于整数类型,处理NaN和无穷值 - 用0替换
344
+ if not np.issubdtype(data.dtype, np.floating):
345
+ finite_mask = np.isfinite(clean_data)
346
+ if not np.all(finite_mask):
347
+ clean_data = clean_data.astype(float) # 转换为浮点型保持NaN
348
+
349
+ # 处理掩码(统一转换为NaN)
350
+ if hasattr(data, "mask") and np.ma.is_masked(data):
351
+ clean_data[data.mask] = np.nan
352
+
353
+ # 创建变量
379
354
  var = ncfile.createVariable(varname, dtype, dims, zlib=False)
380
- # 确保不写入 NaN
381
- if np.issubdtype(data.dtype, np.floating) and np.any(~np.isfinite(data)):
382
- fill_val = missing_value if missing_value is not None else fill_value
383
- var._FillValue = fill_val
384
- clean_data = np.nan_to_num(data, nan=fill_val)
385
- var[:] = clean_data
386
- else:
387
- var[:] = data
388
- # 最后确保所有 NaN 值被处理
389
- _nan_to_fillvalue(file, fill_value)
355
+ var[:] = clean_data
356
+ # 只对压缩数据调用_nan_to_fillvalue,处理掩码但保持NaN
357
+ if is_numeric and scale_offset_switch:
358
+ pass # 简化策略:不再需要后处理
390
359
  except Exception as e:
391
360
  raise RuntimeError(f"netCDF4 保存失败: {str(e)}") from e
392
361
 
393
362
 
363
+
364
+
394
365
  # 测试用例
395
366
  if __name__ == "__main__":
396
367
  # 示例文件路径,需根据实际情况修改
@@ -197,12 +197,12 @@ def select_colormap_and_levels(data_range: Tuple[float, float], plot_type: str)
197
197
  num_levels = 128
198
198
 
199
199
  if data_range[0] * data_range[1] < 0:
200
- cmap = oafuncs.oa_cmap.get("diverging_1")
200
+ cmap = oafuncs.oa_cmap.get(diverging_cmap)
201
201
  bdy = max(abs(data_range[0]), abs(data_range[1]))
202
202
  norm = mpl.colors.TwoSlopeNorm(vmin=-bdy, vcenter=0, vmax=bdy)
203
203
  levels = np.linspace(-bdy, bdy, num_levels)
204
204
  else:
205
- cmap = oafuncs.oa_cmap.get("cool_1") if data_range[0] < 0 else oafuncs.oa_cmap.get("warm_1")
205
+ cmap = oafuncs.oa_cmap.get(negative_cmap) if data_range[0] < 0 else oafuncs.oa_cmap.get(positive_cmap)
206
206
  norm = mpl.colors.Normalize(vmin=data_range[0], vmax=data_range[1])
207
207
  levels = np.linspace(data_range[0], data_range[1], num_levels)
208
208
 
@@ -320,9 +320,14 @@ def get_xyzt_names(ds_in, xyzt_dims):
320
320
  return x_dim, y_dim, z_dim, t_dim
321
321
 
322
322
 
323
- def func_plot_dataset(ds_in: Union[xr.Dataset, xr.DataArray], output_dir: str, xyzt_dims: Tuple[str, str, str, str] = None, plot_type: str = "contourf", fixed_colorscale: bool = False) -> None:
323
+ def func_plot_dataset(ds_in: Union[xr.Dataset, xr.DataArray], output_dir: str, cmap='diverging_3', pcmap='warm_3', ncmap='cool_3', xyzt_dims: Tuple[str, str, str, str] = None, plot_type: str = "contourf", fixed_colorscale: bool = False) -> None:
324
324
  """Plot variables from a NetCDF file and save the plots to the specified directory."""
325
325
  os.makedirs(output_dir, exist_ok=True)
326
+
327
+ global diverging_cmap, positive_cmap, negative_cmap
328
+ diverging_cmap = cmap
329
+ positive_cmap = pcmap
330
+ negative_cmap = ncmap
326
331
 
327
332
  # Main processing function
328
333
  try:
@@ -106,10 +106,10 @@ def replace_direct_content(source_file, target_dir, content_dict, key_value=Fals
106
106
  with open(target_file, "w") as f:
107
107
  f.write(content)
108
108
 
109
- print(f"[green]已将内容替换到新文件:{target_file}[/green]")
109
+ print(f"[green]Content replaced and saved to new file: {target_file}[/green]")
110
110
  return True
111
111
  except Exception as e:
112
- print(f"[red]替换内容时出错:{str(e)}[/red]")
112
+ print(f"[red]Error replacing content: {str(e)}[/red]")
113
113
  return False
114
114
 
115
115
 
@@ -1,11 +1,9 @@
1
- from typing import Any, List, Union, Literal
1
+ from typing import Any, List, Union
2
2
 
3
3
  import numpy as np
4
- import xarray as xr
5
- from rich import print
6
4
 
7
5
 
8
- __all__ = ["interp_along_dim", "interp_2d", "ensure_list", "mask_shapefile", "mask_land_ocean"]
6
+ __all__ = ["interp_along_dim", "interp_2d", "ensure_list"]
9
7
 
10
8
 
11
9
  def ensure_list(input_value: Any) -> List[str]:
@@ -146,125 +144,6 @@ def interp_2d(
146
144
  )
147
145
 
148
146
 
149
- def mask_shapefile(
150
- data_array: np.ndarray,
151
- longitudes: np.ndarray,
152
- latitudes: np.ndarray,
153
- shapefile_path: str,
154
- ) -> Union[xr.DataArray, None]:
155
- """
156
- Mask a 2D data array using a shapefile.
157
-
158
- Args:
159
- data_array (np.ndarray): 2D array of data to be masked.
160
- longitudes (np.ndarray): 1D array of longitudes.
161
- latitudes (np.ndarray): 1D array of latitudes.
162
- shapefile_path (str): Path to the shapefile used for masking.
163
-
164
- Returns:
165
- Union[xr.DataArray, None]: Masked xarray DataArray or None if an error occurs.
166
-
167
- Raises:
168
- FileNotFoundError: If the shapefile does not exist.
169
- ValueError: If the data dimensions do not match the coordinates.
170
-
171
- Examples:
172
- >>> data_array = np.random.rand(10, 10)
173
- >>> longitudes = np.linspace(-180, 180, 10)
174
- >>> latitudes = np.linspace(-90, 90, 10)
175
- >>> shapefile_path = "path/to/shapefile.shp"
176
- >>> masked_data = mask_shapefile(data_array, longitudes, latitudes, shapefile_path)
177
- >>> print(masked_data) # Expected output: Masked DataArray
178
-
179
- """
180
- import salem
181
- try:
182
- shp_f = salem.read_shapefile(shapefile_path)
183
- data_da = xr.DataArray(data_array, coords=[("latitude", latitudes), ("longitude", longitudes)])
184
- masked_data = data_da.salem.roi(shape=shp_f)
185
- return masked_data
186
- except Exception as e:
187
- print(f"[red]An error occurred: {e}[/red]")
188
- return None
189
-
190
-
191
-
192
- def _normalize_lon(lon: np.ndarray) -> np.ndarray:
193
- """将经度转换到 [-180, 180)。"""
194
- lon = np.asarray(lon, dtype=float)
195
- return np.where(lon >= 180, lon - 360, lon)
196
-
197
-
198
- def _land_sea_mask(
199
- lon: np.ndarray,
200
- lat: np.ndarray,
201
- keep: Literal["land", "ocean"],
202
- ) -> np.ndarray:
203
- """
204
- 根据 1-D 或 2-D 经纬度返回布尔掩膜。
205
- True 表示该位置 *保留*,False 表示该位置将被掩掉。
206
- """
207
- from global_land_mask import globe
208
-
209
- lon = _normalize_lon(lon)
210
- lat = np.asarray(lat, dtype=float)
211
-
212
- # 如果输入是 1-D,则网格化;2-D 则直接使用
213
- if lon.ndim == 1 and lat.ndim == 1:
214
- lon_2d, lat_2d = np.meshgrid(lon, lat)
215
- elif lon.ndim == 2 and lat.ndim == 2:
216
- lon_2d, lat_2d = lon, lat
217
- else:
218
- raise ValueError("经纬度必须是同维度的 1-D 或 2-D 数组")
219
-
220
- is_ocean = globe.is_ocean(lat_2d, lon_2d)
221
-
222
- if keep == "land":
223
- mask = ~is_ocean
224
- elif keep == "ocean":
225
- mask = is_ocean
226
- else:
227
- raise ValueError("keep 只能是 'land' 或 'ocean'")
228
-
229
- return mask
230
-
231
-
232
- def mask_land_ocean(
233
- data: xr.DataArray | xr.Dataset,
234
- lon: np.ndarray,
235
- lat: np.ndarray,
236
- *, # 强制关键字参数
237
- keep: Literal["land", "ocean"] = "land",
238
- ) -> xr.DataArray | xr.Dataset:
239
- """
240
- 根据海陆分布掩膜 xarray 对象。
241
-
242
- Parameters
243
- ----------
244
- data : xr.DataArray 或 xr.Dataset
245
- 至少包含 'lat' 和 'lon' 维度/坐标的数组。
246
- lon : array_like
247
- 经度,可以是 1-D 或 2-D。
248
- lat : array_like
249
- 纬度,可以是 1-D 或 2-D。
250
- keep : {'land', 'ocean'}, optional
251
- 指定要保留的部分,默认为 'land'。
252
-
253
- Returns
254
- -------
255
- 掩膜后的 xr.DataArray / xr.Dataset
256
- """
257
- mask = _land_sea_mask(lon, lat, keep)
258
-
259
- # 用 apply_ufunc 自动对齐并广播掩膜
260
- return xr.apply_ufunc(
261
- lambda x, m: x.where(m),
262
- data,
263
- xr.DataArray(mask, dims=("lat", "lon")),
264
- dask="parallelized",
265
- keep_attrs=True,
266
- )
267
-
268
147
 
269
148
  if __name__ == "__main__":
270
149
  pass
@@ -418,6 +418,9 @@ def replace_content(source_file: Union[str, os.PathLike], replacements: Dict[str
418
418
 
419
419
  if target_dir is None:
420
420
  target_dir = os.path.dirname(source_file)
421
+ # If source_file is just a filename without path, use current working directory
422
+ if not target_dir:
423
+ target_dir = os.getcwd()
421
424
  replace_direct_content(source_file, target_dir, replacements, key_value=use_key_value, new_name=new_filename)
422
425
 
423
426
 
@@ -0,0 +1,148 @@
1
+ from typing import Union, Literal
2
+
3
+ import numpy as np
4
+ import xarray as xr
5
+ from rich import print
6
+
7
+
8
+ __all__ = ["earth_distance", "mask_shapefile", "mask_land_ocean"]
9
+
10
+
11
def earth_distance(lon1, lat1, lon2, lat2):
    """
    Great-circle distance between two points in kilometres (haversine formula).
    """
    from math import asin, cos, radians, sin, sqrt

    # Work in radians throughout.
    lam1, phi1 = radians(lon1), radians(lat1)
    lam2, phi2 = radians(lon2), radians(lat2)

    # Haversine of the central angle between the two points.
    half_dphi = (phi2 - phi1) / 2.0
    half_dlam = (lam2 - lam1) / 2.0
    hav = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2

    earth_radius_km = 6371  # mean Earth radius in kilometres
    return 2 * earth_radius_km * asin(sqrt(hav))
26
+
27
+
28
def mask_shapefile(
    data_array: np.ndarray,
    longitudes: np.ndarray,
    latitudes: np.ndarray,
    shapefile_path: str,
) -> Union[xr.DataArray, None]:
    """
    Mask a 2D data array using a shapefile.

    Args:
        data_array (np.ndarray): 2D array of data to be masked.
        longitudes (np.ndarray): 1D array of longitudes.
        latitudes (np.ndarray): 1D array of latitudes.
        shapefile_path (str): Path to the shapefile used for masking.

    Returns:
        Union[xr.DataArray, None]: Masked xarray DataArray, or None if an error occurs.

    Examples:
        >>> data_array = np.random.rand(10, 10)
        >>> longitudes = np.linspace(-180, 180, 10)
        >>> latitudes = np.linspace(-90, 90, 10)
        >>> masked = mask_shapefile(data_array, longitudes, latitudes, "path/to/shapefile.shp")
        >>> print(masked)  # Expected output: Masked DataArray
    """
    import salem

    try:
        region = salem.read_shapefile(shapefile_path)
        # Wrap the raw array with coordinates so salem can clip it spatially.
        wrapped = xr.DataArray(
            data_array,
            coords=[("latitude", latitudes), ("longitude", longitudes)],
        )
        # roi() sets every point outside the shapefile region to NaN.
        return wrapped.salem.roi(shape=region)
    except Exception as e:
        # Best-effort: report and return None instead of propagating.
        print(f"[red]An error occurred: {e}[/red]")
        return None
68
+
69
+
70
+
71
+ def _normalize_lon(lon: np.ndarray) -> np.ndarray:
72
+ """将经度转换到 [-180, 180)。"""
73
+ lon = np.asarray(lon, dtype=float)
74
+ return np.where(lon >= 180, lon - 360, lon)
75
+
76
+
77
def _land_sea_mask(
    lon: np.ndarray,
    lat: np.ndarray,
    keep: Literal["land", "ocean"],
) -> np.ndarray:
    """Build a boolean keep-mask from 1-D or 2-D longitude/latitude arrays.

    True marks positions that are *kept*; False marks positions to be masked.
    """
    from global_land_mask import globe

    lon = _normalize_lon(lon)
    lat = np.asarray(lat, dtype=float)

    # 1-D coordinates are expanded to a grid; 2-D coordinates are used as-is.
    if lon.ndim == 1 and lat.ndim == 1:
        grid_lon, grid_lat = np.meshgrid(lon, lat)
    elif lon.ndim == 2 and lat.ndim == 2:
        grid_lon, grid_lat = lon, lat
    else:
        raise ValueError("经纬度必须是同维度的 1-D 或 2-D 数组")

    ocean_flags = globe.is_ocean(grid_lat, grid_lon)

    if keep == "land":
        return ~ocean_flags
    if keep == "ocean":
        return ocean_flags
    raise ValueError("keep 只能是 'land' 或 'ocean'")
109
+
110
+
111
def mask_land_ocean(
    data: xr.DataArray | xr.Dataset,
    lon: np.ndarray,
    lat: np.ndarray,
    *,  # keyword-only from here on
    keep: Literal["land", "ocean"] = "land",
) -> xr.DataArray | xr.Dataset:
    """Mask an xarray object according to the land/sea distribution.

    Parameters
    ----------
    data : xr.DataArray or xr.Dataset
        Object with at least 'lat' and 'lon' dimensions/coordinates.
    lon : array_like
        Longitudes, 1-D or 2-D.
    lat : array_like
        Latitudes, 1-D or 2-D.
    keep : {'land', 'ocean'}, optional
        Which part to keep; defaults to 'land'.

    Returns
    -------
    Masked xr.DataArray / xr.Dataset, with the discarded part set to NaN.
    """
    mask = _land_sea_mask(lon, lat, keep)

    # BUG FIX: the previous implementation pushed ``lambda x, m: x.where(m)``
    # through ``xr.apply_ufunc``, but apply_ufunc hands the lambda raw
    # numpy/dask arrays, which have no ``.where`` method -> AttributeError
    # at runtime.  Calling ``.where`` on the xarray object itself broadcasts
    # the ('lat', 'lon') mask over any extra dimensions (time, depth, ...),
    # keeps attributes, and stays lazy for dask-backed data.
    mask_da = xr.DataArray(mask, dims=("lat", "lon"))
    return data.where(mask_da)
146
+
147
if __name__ == "__main__":
    # No CLI entry point; this module is intended to be imported.
    pass
@@ -15,10 +15,11 @@ def save(
15
15
  variable_name: Optional[str] = None,
16
16
  coordinates: Optional[dict] = None,
17
17
  write_mode: str = "w",
18
- convert_dtype: str = "int32",
18
+ convert_dtype: str = "int16",
19
19
  use_scale_offset: bool = True,
20
20
  use_compression: bool = True,
21
21
  preserve_mask_values: bool = True,
22
+ missing_value: Optional[Union[float, int]] = None,
22
23
  ) -> None:
23
24
  """
24
25
  Write data to a NetCDF file.
@@ -42,7 +43,7 @@ def save(
42
43
  """
43
44
  from ._script.netcdf_write import save_to_nc
44
45
 
45
- save_to_nc(file_path, data, variable_name, coordinates, write_mode, convert_dtype,use_scale_offset, use_compression, preserve_mask_values)
46
+ save_to_nc(file_path, data, variable_name, coordinates, write_mode, convert_dtype,use_scale_offset, use_compression, preserve_mask_values, missing_value)
46
47
  print(f"[green]Data successfully saved to {file_path}[/green]")
47
48
 
48
49
 
@@ -270,6 +271,9 @@ def draw(
270
271
  output_directory: Optional[str] = None,
271
272
  dataset: Optional[xr.Dataset] = None,
272
273
  file_path: Optional[str] = None,
274
+ cmap='diverging_3',
275
+ pcmap='warm_3',
276
+ ncmap='cool_3',
273
277
  dims_xyzt: Union[List[str], Tuple[str, str, str, str]] = None,
274
278
  plot_style: str = "contourf",
275
279
  use_fixed_colorscale: bool = False,
@@ -296,18 +300,18 @@ def draw(
296
300
  raise ValueError("dimensions must be a list or tuple")
297
301
 
298
302
  if dataset is not None:
299
- func_plot_dataset(dataset, output_directory, dims_xyzt, plot_style, use_fixed_colorscale)
303
+ func_plot_dataset(dataset, output_directory, cmap, pcmap, ncmap, dims_xyzt, plot_style, use_fixed_colorscale)
300
304
  elif file_path is not None:
301
305
  if check(file_path):
302
306
  ds = xr.open_dataset(file_path)
303
- func_plot_dataset(ds, output_directory, dims_xyzt, plot_style, use_fixed_colorscale)
307
+ func_plot_dataset(ds, output_directory, cmap, pcmap, ncmap, dims_xyzt, plot_style, use_fixed_colorscale)
304
308
  else:
305
309
  print(f"[red]Invalid file: {file_path}[/red]")
306
310
  else:
307
311
  print("[red]No dataset or file provided.[/red]")
308
312
 
309
313
 
310
- def compress(src_path, dst_path=None,convert_dtype='int16'):
314
+ def compress(src_path, dst_path=None, convert_dtype='int16'):
311
315
  """
312
316
  压缩 NetCDF 文件,使用 scale_factor/add_offset 压缩数据。
313
317
  若 dst_path 省略,则自动生成新文件名,写出后删除原文件并将新文件改回原名。
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: oafuncs
3
- Version: 0.0.98.42
3
+ Version: 0.0.98.44
4
4
  Summary: Oceanic and Atmospheric Functions
5
5
  Home-page: https://github.com/Industry-Pays/OAFuncs
6
6
  Author: Kun Liu
@@ -65,8 +65,6 @@ Just for the convenience of daily use, some complex operations are integrated in
65
65
  The code will be optimized and updated from time to time, with additions, deletions, or modifications…
66
66
 
67
67
  Existing functions will not be completely removed, they might just have a different function name, or the parameter passing might have been optimized…
68
-
69
- Note: If there are any requirements, you can email to liukun0312@stu.ouc.edu.cn. Within my capabilities, I can consider implementing them.
70
68
  ```
71
69
 
72
70
  ## PyPI
@@ -81,6 +79,11 @@ https://pypi.org/project/oafuncs
81
79
  https://github.com/Industry-Pays/OAFuncs
82
80
  ```
83
81
 
82
+ ## Download information
83
+ ```html
84
+ https://pypistats.org/packages/oafuncs
85
+ ```
86
+
84
87
  ## Example
85
88
 
86
89
  ```python
@@ -182,3 +185,6 @@ query()
182
185
  <img title="" src="./oafuncs/data_store/OAFuncs.png" alt="">
183
186
 
184
187
  <img title="OAFuncs" src="https://raw.githubusercontent.com/Industry-Pays/OAFuncs/main/oafuncs/_data/oafuncs.png" alt="OAFuncs">
188
+
189
+ ## Wiki
190
+ For more details, see the [wiki](https://opendeep.wiki/Industry-Pays/OAFuncs/introduction).
@@ -8,6 +8,7 @@ oafuncs/oa_data.py
8
8
  oafuncs/oa_date.py
9
9
  oafuncs/oa_draw.py
10
10
  oafuncs/oa_file.py
11
+ oafuncs/oa_geo.py
11
12
  oafuncs/oa_help.py
12
13
  oafuncs/oa_nc.py
13
14
  oafuncs/oa_python.py
@@ -18,7 +18,7 @@ URL = "https://github.com/Industry-Pays/OAFuncs"
18
18
  EMAIL = "liukun0312@stu.ouc.edu.cn"
19
19
  AUTHOR = "Kun Liu"
20
20
  REQUIRES_PYTHON = ">=3.10.0" # 2025/03/13
21
- VERSION = "0.0.98.42"
21
+ VERSION = "0.0.98.44"
22
22
 
23
23
  # What packages are required for this module to be executed?
24
24
  REQUIRED = [
File without changes
File without changes
File without changes