oafuncs 0.0.97.6__py3-none-any.whl → 0.0.97.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
oafuncs/_script/netcdf_merge.py ADDED
@@ -0,0 +1,339 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ """
+ Author: Liu Kun && 16031215@qq.com
+ Date: 2025-03-30 11:16:29
+ LastEditors: Liu Kun && 16031215@qq.com
+ LastEditTime: 2025-03-30 11:16:31
+ FilePath: \\Python\\My_Funcs\\OAFuncs\\oafuncs\\_script\\netcdf_merge.py
+ Description:
+ EditPlatform: vscode
+ ComputerInfo: XPS 15 9510
+ SystemInfo: Windows 11
+ Python Version: 3.12
+ """
+
+ import logging
+ import os
+ from typing import Dict, List, Union
+
+ import numpy as np
+ import xarray as xr
+ from dask.diagnostics import ProgressBar
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def merge(file_list: Union[str, List[str]], var_name: Union[str, List[str], None] = None, dim_name: str = "time", target_filename: str = "merged.nc", chunk_config: Dict = {"time": 1000}, compression: Union[bool, Dict] = True, sanity_check: bool = True, overwrite: bool = True, parallel: bool = True) -> None:
+     """
+     Merge multiple NetCDF files along a specified dimension.
+
+     Parameters:
+         file_list: List of file paths or a single file path
+         var_name: Variable(s) to merge (a single name, a list of names, or None for all variables)
+         dim_name: Dimension to merge along, defaults to 'time'
+         target_filename: Output file path
+         chunk_config: Dask chunking configuration, e.g. {"time": 1000}
+         compression: Compression settings (True enables the defaults, or pass a custom encoding dict)
+         sanity_check: Whether to run data-integrity checks
+         overwrite: Whether to overwrite an existing target file
+         parallel: Whether to enable parallel processing
+
+     Example:
+         merge(["data1.nc", "data2.nc"],
+               var_name=["temp", "salt"],
+               target_filename="result.nc",
+               chunk_config={"time": 500})
+     """
+     # ------------------------ Parameter preprocessing ------------------------#
+     file_list = _validate_and_preprocess_inputs(file_list, target_filename, overwrite)
+     all_vars, var_names = _determine_variables(file_list, var_name)
+     static_vars = _identify_static_vars(file_list[0], var_names, dim_name)
+
+     # Estimate the memory needed for processing
+     _estimate_memory_usage(file_list, var_names, chunk_config)
+
+     # ------------------------ Data validation stage ------------------------#
+     if sanity_check:
+         _perform_sanity_checks(file_list, var_names, dim_name, static_vars)
+
+     # ------------------------ Core merge logic ------------------------#
+     with xr.set_options(keep_attrs=True):  # keep metadata attributes
+         # Merge the dynamic variables
+         merged_ds = xr.open_mfdataset(
+             file_list,
+             combine="nested",
+             concat_dim=dim_name,
+             data_vars=[var for var in var_names if var not in static_vars],
+             chunks=chunk_config,
+             parallel=parallel,
+             preprocess=lambda ds: ds[var_names],  # load only the target variables
+         )
+
+         # Handle the static variables
+         if static_vars:
+             with xr.open_dataset(file_list[0], chunks=chunk_config) as ref_ds:
+                 merged_ds = merged_ds.assign({var: ref_ds[var] for var in static_vars})
+
+     # ------------------------ Time dimension handling ------------------------#
+     if dim_name == "time":
+         merged_ds = _process_time_dimension(merged_ds)
+
+     # ------------------------ File output ------------------------#
+     encoding = _generate_encoding_config(merged_ds, compression)
+     _write_to_netcdf(merged_ds, target_filename, encoding)
+
+
+ # ------------------------ Helper functions ------------------------#
+ def _validate_and_preprocess_inputs(file_list: Union[str, List[str]], target_filename: str, overwrite: bool) -> List[str]:
+     """Validate and preprocess the input arguments"""
+     if not file_list:
+         raise ValueError("The file list cannot be empty")
+
+     file_list = [file_list] if isinstance(file_list, str) else file_list
+     for f in file_list:
+         if not os.path.exists(f):
+             raise FileNotFoundError(f"Input file does not exist: {f}")
+
+     target_dir = os.path.dirname(os.path.abspath(target_filename))
+     os.makedirs(target_dir, exist_ok=True)
+
+     if os.path.exists(target_filename):
+         if overwrite:
+             logger.warning(f"Overwriting existing file: {target_filename}")
+             os.remove(target_filename)
+         else:
+             raise FileExistsError(f"Target file already exists: {target_filename}")
+
+     return file_list
+
+
+ def _determine_variables(file_list: List[str], var_name: Union[str, List[str], None]) -> tuple:
+     """Determine the list of variables to process"""
+     with xr.open_dataset(file_list[0]) as ds:
+         all_vars = list(ds.data_vars.keys())
+
+         if var_name is None:
+             return all_vars, all_vars
+         elif isinstance(var_name, str):
+             if var_name not in all_vars:
+                 raise ValueError(f"Invalid variable name: {var_name}")
+             return all_vars, [var_name]
+         elif isinstance(var_name, list):
+             if not var_name:  # handle the empty-list case
+                 logger.warning("An empty variable list was provided; all variables will be used")
+                 return all_vars, all_vars
+             invalid_vars = set(var_name) - set(all_vars)
+             if invalid_vars:
+                 raise ValueError(f"Invalid variable names: {invalid_vars}")
+             return all_vars, var_name
+         else:
+             raise TypeError("var_name must be a str, a list, or None")
+
+
+ def _identify_static_vars(sample_file: str, var_names: List[str], dim_name: str) -> List[str]:
+     """Identify static variables (those without the merge dimension)"""
+     with xr.open_dataset(sample_file) as ds:
+         return [var for var in var_names if dim_name not in ds[var].dims]
+
+
+ def _perform_sanity_checks(file_list: List[str], var_names: List[str], dim_name: str, static_vars: List[str]) -> None:
+     """Run data-integrity checks"""
+     logger.info("Running data-integrity checks...")
+
+     # Consistency check for static variables
+     with xr.open_dataset(file_list[0]) as ref_ds:
+         for var in static_vars:
+             ref = ref_ds[var]
+             for f in file_list[1:]:
+                 with xr.open_dataset(f) as ds:
+                     if not ref.equals(ds[var]):
+                         raise ValueError(f"Static variable {var} is inconsistent\nReference file: {file_list[0]}\nProblem file: {f}")
+
+     # Dimension check for dynamic variables
+     dim_sizes = {}
+     for f in file_list:
+         with xr.open_dataset(f) as ds:
+             for var in var_names:
+                 if var not in static_vars:
+                     dims = ds[var].dims
+                     if dim_name not in dims:
+                         raise ValueError(f"Variable {var} in file {f} is missing the merge dimension {dim_name}")
+                     dim_sizes.setdefault(var, []).append(ds[var].sizes[dim_name])
+
+     # Check dimension-length consistency
+     for var, sizes in dim_sizes.items():
+         if len(set(sizes[1:])) > 1:
+             raise ValueError(f"Variable {var} has inconsistent {dim_name} dimension lengths: {sizes}")
+
+
+ def _process_time_dimension(ds: xr.Dataset) -> xr.Dataset:
+     """Special handling for the time dimension"""
+     if "time" not in ds.dims:
+         return ds
+
+     # Sort and de-duplicate
+     ds = ds.sortby("time")
+     # Find the indices of the unique timestamps
+     _, index = np.unique(ds["time"], return_index=True)
+     # The indices do not need to be sorted again, because the dataset is already sorted by time
+     return ds.isel(time=index)
+
+
+ def _generate_encoding_config(ds: xr.Dataset, compression: Union[bool, Dict]) -> Dict:
+     """Generate the compression/encoding configuration"""
+     if not compression:
+         return {}
+
+     # Base default compression settings
+     def _get_default_encoding(var):
+         return {"zlib": True, "complevel": 3, "dtype": "float32" if ds[var].dtype == "float64" else ds[var].dtype}
+
+     # Apply custom compression settings
+     encoding = {}
+     if isinstance(compression, dict):
+         for var in ds.data_vars:
+             encoding[var] = _get_default_encoding(var)
+             encoding[var].update(compression.get(var, {}))  # merge the dicts with dict.update()
+     else:
+         for var in ds.data_vars:
+             encoding[var] = _get_default_encoding(var)
+
+     return encoding
+
+
+ def _write_to_netcdf(ds: xr.Dataset, filename: str, encoding: Dict) -> None:
+     """Safely write the dataset to a NetCDF file"""
+     logger.info("Writing output file...")
+     unlimited_dims = [dim for dim in ds.dims if ds[dim].encoding.get("unlimited", False)]
+
+     delayed = ds.to_netcdf(filename, encoding=encoding, compute=False, unlimited_dims=unlimited_dims)
+
+     try:
+         with ProgressBar():
+             delayed.compute()
+
+         logger.info(f"Merge complete → {filename}")
+         logger.info(f"File size: {os.path.getsize(filename) / 1e9:.2f}GB")
+     except MemoryError as e:
+         _handle_write_error(filename, "Insufficient memory to finish writing the file. Try adjusting chunk_config to reduce memory usage", e)
+     except Exception as e:
+         _handle_write_error(filename, f"Failed to write file: {str(e)}", e)
+
+
+ def _handle_write_error(filename: str, message: str, exception: Exception) -> None:
+     """Unified handling of file-write errors"""
+     logger.error(message)
+     if os.path.exists(filename):
+         os.remove(filename)
+     raise exception
+
+
+ def _estimate_memory_usage(file_list: List[str], var_names: List[str], chunk_config: Dict) -> None:
+     """Estimate memory usage"""
+     try:
+         total_size = 0
+         sample_file = file_list[0]
+         with xr.open_dataset(sample_file) as ds:
+             for var in var_names:
+                 if var in ds:
+                     # Account for the variable's dimension sizes
+                     var_size = np.prod([ds[var].sizes[dim] for dim in ds[var].dims]) * ds[var].dtype.itemsize
+                     total_size += var_size * len(file_list)
+
+         # Estimate memory use during Dask processing (typically 2-3x the raw data size)
+         estimated_memory = total_size * 3
+
+         if estimated_memory > 8e9:
+             logger.warning(f"Estimated memory usage may be high (about {estimated_memory / 1e9:.1f}GB). If you run into memory problems, adjust chunk_config: {chunk_config}")
+     except Exception as e:
+         logger.debug(f"Memory estimation failed: {str(e)}")
+
+
+ if __name__ == "__main__":
+     # Example file list (replace with real file paths)
+     sample_files = ["data/file1.nc", "data/file2.nc", "data/file3.nc"]
+
+     # Example 1: basic usage - merge all variables
+     print("\n" + "=" * 40)
+     print("Example 1: merge all variables (default settings)")
+     merge(file_list=sample_files, target_filename="merged_all_vars.nc")
+
+     # Example 2: merge selected variables
+     print("\n" + "=" * 40)
+     print("Example 2: merge selected variables (temperature, salinity)")
+     merge(
+         file_list=sample_files,
+         var_name=["temperature", "salinity"],
+         target_filename="merged_selected_vars.nc",
+         chunk_config={"time": 500},  # more conservative memory allocation
+     )
+
+     # Example 3: custom compression settings
+     print("\n" + "=" * 40)
+     print("Example 3: custom compression parameters")
+     merge(file_list=sample_files, var_name="chlorophyll", compression={"chlorophyll": {"zlib": True, "complevel": 5, "dtype": "float32"}}, target_filename="merged_compressed.nc")
+
+     # Example 4: handling large datasets
+     print("\n" + "=" * 40)
+     print("Example 4: chunking strategy for large files")
+     merge(file_list=sample_files, chunk_config={"time": 2000, "lat": 100, "lon": 100}, target_filename="merged_large_dataset.nc", parallel=True)
+
+     # Example 5: special handling of the time dimension
+     print("\n" + "=" * 40)
+     print("Example 5: sort and de-duplicate the time dimension")
+     merge(
+         file_list=sample_files,
+         dim_name="time",
+         target_filename="merged_time_processed.nc",
+         sanity_check=True,  # force data validation
+     )
+
+     # Example 6: overwrite an existing file
+     print("\n" + "=" * 40)
+     print("Example 6: force overwriting of an existing file")
+     try:
+         merge(
+             file_list=sample_files,
+             target_filename="merged_all_vars.nc",  # same filename as Example 1
+             overwrite=True,  # explicitly enable overwriting
+         )
+     except FileExistsError as e:
+         print(f"Caught unexpected exception: {str(e)}")
+
+     # Example 7: disable parallel processing
+     print("\n" + "=" * 40)
+     print("Example 7: run in single-threaded mode")
+     merge(file_list=sample_files, target_filename="merged_single_thread.nc", parallel=False)
+
+     # Example 8: merging along a different dimension
+     print("\n" + "=" * 40)
+     print("Example 8: merge along the depth dimension")
+     merge(file_list=sample_files, dim_name="depth", var_name=["density", "oxygen"], target_filename="merged_by_depth.nc")
+
+     # Example 9: handling mixed variable types
+     print("\n" + "=" * 40)
+     print("Example 9: mixed static/dynamic variables")
+     merge(
+         file_list=sample_files,
+         var_name=["bathymetry", "temperature"],  # bathymetry is a static variable
+         target_filename="merged_mixed_vars.nc",
+         sanity_check=True,  # verify static-variable consistency
+     )
+
+     # Example 10: full configuration demo
+     print("\n" + "=" * 40)
+     print("Example 10: demo with all parameters configured")
+     merge(
+         file_list=sample_files,
+         var_name=None,  # all variables
+         dim_name="time",
+         target_filename="merged_full_config.nc",
+         chunk_config={"time": 1000, "lat": 500, "lon": 500},
+         compression={"temperature": {"complevel": 4}, "salinity": {"zlib": False}},
+         sanity_check=True,
+         overwrite=True,
+         parallel=True,
+     )
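
For orientation: once the wheel is installed, the new module can be driven directly (it also backs the oa_nc.merge wrapper shown further down). A minimal sketch, assuming oafuncs._script is importable as a subpackage; the data/*.nc paths and the temp/salt variable names are placeholders, not files shipped with the package:

    import glob

    from oafuncs._script.netcdf_merge import merge

    # Gather the input files and merge them along the time dimension,
    # letting Dask chunk the time axis in blocks of 500 records.
    files = sorted(glob.glob("data/*.nc"))
    merge(files, var_name=["temp", "salt"], dim_name="time",
          target_filename="result.nc", chunk_config={"time": 500})
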
@@ -35,8 +35,8 @@ import xarray as xr
 from rich import print
 from rich.progress import Progress

- from oafuncs._oa_down.idm import downloader as idm_downloader
- from oafuncs._oa_down.user_agent import get_ua
+ from oafuncs.oa_down.idm import downloader as idm_downloader
+ from oafuncs.oa_down.user_agent import get_ua
 from oafuncs.oa_file import file_size, mean_size
 from oafuncs.oa_nc import check as check_nc
 from oafuncs.oa_nc import modify as modify_nc
@@ -22,7 +22,7 @@ import pandas as pd
 import requests
 from rich import print
 from rich.progress import track
- from oafuncs._oa_down.user_agent import get_ua
+ from oafuncs.oa_down.user_agent import get_ua
 from oafuncs.oa_file import remove
 from oafuncs.oa_data import ensure_list

oafuncs/oa_nc.py CHANGED
@@ -19,6 +19,7 @@ import netCDF4 as nc
 import numpy as np
 import xarray as xr
 from rich import print
+ from typing import Dict, List, Union

 __all__ = ["get_var", "extract", "save", "merge", "modify", "rename", "check", "convert_longitude", "isel", "draw"]

@@ -181,86 +182,29 @@ def save(file, data, varname=None, coords=None, mode="w", scale_offset_switch=Tr
         var.setncattr(key, value)


- def merge(file_list, var_name=None, dim_name=None, target_filename=None):
+ def merge(file_list: Union[str, List[str]], var_name: Union[str, List[str], None] = None, dim_name: str = "time", target_filename: str = "merged.nc", chunk_config: Dict = {"time": 1000}, compression: Union[bool, Dict] = True, sanity_check: bool = True, overwrite: bool = True, parallel: bool = True) -> None:
     """
-     Description:
-         Merge variables from multiple NetCDF files along a specified dimension and write to a new file.
-         If var_name is a string, it is considered a single variable; if it is a list with only one element, it is also a single variable;
-         if the list has more than one element, multiple variables are merged; if var_name is None, all variables are merged.
+     Merge multiple NetCDF files along a specified dimension.

     Parameters:
-         file_list: List of NetCDF file paths
-         var_name: Name of the variable to be extracted or a list of variable names; default is None, which means all variables are extracted
-         dim_name: Dimension name used for merging
-         target_filename: Target file name after merging
+         file_list: List of file paths or a single file path
+         var_name: Variable(s) to merge (a single name, a list of names, or None for all variables)
+         dim_name: Dimension to merge along, defaults to 'time'
+         target_filename: Output file path
+         chunk_config: Dask chunking configuration, e.g. {"time": 1000}
+         compression: Compression settings (True enables the defaults, or pass a custom encoding dict)
+         sanity_check: Whether to run data-integrity checks
+         overwrite: Whether to overwrite an existing target file
+         parallel: Whether to enable parallel processing

     Example:
-         merge(file_list, var_name='u', dim_name='time', target_filename='merged.nc')
-         merge(file_list, var_name=['u', 'v'], dim_name='time', target_filename='merged.nc')
-         merge(file_list, var_name=None, dim_name='time', target_filename='merged.nc')
+         merge(["data1.nc", "data2.nc"],
+               var_name=["temp", "salt"],
+               target_filename="result.nc",
+               chunk_config={"time": 500})
     """
-     # Check whether the target file is a bare filename or includes a path; if it includes a path, make sure the directory exists
-     if target_filename is None:
-         target_filename = "merged.nc"
-     if not os.path.exists(os.path.dirname(str(target_filename))):
-         os.makedirs(os.path.dirname(str(target_filename)))
-
-     if isinstance(file_list, str):
-         file_list = [file_list]
-
-     # Initialize the list of variable names
-     var_names = None
-
-     # Determine whether var_name is a single variable, multiple variables, or all variables
-     if var_name is None:
-         # Get all variable names from the first file
-         ds = xr.open_dataset(file_list[0])
-         var_names = list(ds.variables.keys())
-         ds.close()
-     elif isinstance(var_name, str):
-         var_names = [var_name]
-     elif isinstance(var_name, list):
-         var_names = var_name
-     else:
-         raise ValueError("var_name must be a string, a list of strings, or None")
-
-     # Initialize the merged-data dictionary
-     merged_data = {}
-
-     # Iterate over the file list
-     print("Reading file ...")
-     for i, file in enumerate(file_list):
-         # Update the progress description
-         # print(f"\rReading file {i + 1}/{len(file_list)}...", end="")
-         ds = xr.open_dataset(file)
-         for var_name in var_names:
-             var = ds[var_name]
-             # If the variable contains the merge dimension, concatenate it across files
-             if dim_name in var.dims:
-                 if var_name not in merged_data:
-                     merged_data[var_name] = [var]
-                 else:
-                     merged_data[var_name].append(var)
-             # If the variable does not contain the merge dimension, keep only the value from the first file
-             else:
-                 if var_name not in merged_data:
-                     merged_data[var_name] = var
-         ds.close()
-
-     print("\nMerging data ...")
-     for var_name in merged_data:
-         if isinstance(merged_data[var_name], list):
-             merged_data[var_name] = xr.concat(merged_data[var_name], dim=dim_name)
-
-     merged_data = xr.Dataset(merged_data)
-
-     print("\nWriting data to file ...")
-     if os.path.exists(target_filename):
-         print("Warning: The target file already exists.")
-         print("Removing existing file ...")
-         os.remove(target_filename)
-     merged_data.to_netcdf(target_filename)
-     print(f'\nFile "{target_filename}" has been created.')
+     from _script.netcdf_merge import merge as nc_merge
+     nc_merge(file_list, var_name, dim_name, target_filename, chunk_config, compression, sanity_check, overwrite, parallel)


 def _modify_var(nc_file_path, variable_name, new_value):
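
With this change, oa_nc.merge simply forwards its arguments to the Dask-based implementation above. A quick way to sanity-check the merged output might be the sketch below; merged.nc is a placeholder filename:

    import xarray as xr

    # Open the merged file lazily and confirm the concatenated dimension
    # and the expected variables are present.
    with xr.open_dataset("merged.nc") as ds:
        print(ds.sizes)
        print(list(ds.data_vars))
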
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: oafuncs
- Version: 0.0.97.6
+ Version: 0.0.97.8
 Summary: Oceanic and Atmospheric Functions
 Home-page: https://github.com/Industry-Pays/OAFuncs
 Author: Kun Liu
@@ -33,6 +33,7 @@ Requires-Dist: Cartopy
 Requires-Dist: rasterio
 Requires-Dist: salem
 Requires-Dist: psutil
+ Requires-Dist: dask
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
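
dask is declared as a dependency here because the rewritten merge opens its inputs as dask-backed xarray datasets and reports write progress with dask.diagnostics.ProgressBar. A minimal sketch (not part of the package) to confirm those pieces import in a fresh environment:

    import dask
    import xarray as xr
    from dask.diagnostics import ProgressBar  # used by the new merge for write progress

    print("dask", dask.__version__, "| xarray", xr.__version__)
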
@@ -5,20 +5,20 @@ oafuncs/oa_date.py,sha256=--3uXYq3_n9HBJbY1io1H1PE-FyiVxbQCyFUcudxQks,3210
 oafuncs/oa_draw.py,sha256=d23R6OEco7EbvcrL5YsnrSupnKKUn55hLw1B_d-kxOg,12308
 oafuncs/oa_file.py,sha256=nYTsMataaC790lSqxjuMB4uVqUJz_tMm_kRve4CHql4,17235
 oafuncs/oa_help.py,sha256=loyzTbjU_0VpSIBvAEUA_tqxG8MVsO0xFE_2hgQ3zMw,4188
- oafuncs/oa_nc.py,sha256=eTioHFT8eeg3T7S5wFUiFm1zU1w8Fo11Un8fjBaPhuY,20860
+ oafuncs/oa_nc.py,sha256=ZNcbQBRF2hT22JEt8cINb3irJIiTPf7YUZ4R5sMsfCI,18717
 oafuncs/oa_python.py,sha256=Q-6UGGw_dJff7Ef8i87fsLPoGeHV5jBzfb-7HP4THR0,4018
 oafuncs/_data/OAFuncs.png,sha256=y1_x-mUP3gFjcy6m8FqfvQO_HgjzPhQKfXjnSHjslZE,3436152
 oafuncs/_data/hycom_3hourly.png,sha256=azt_uPcXtl_8CSKRLLPCIf5pPrcxMiOzvoFQnwb0zUo,12411415
 oafuncs/_script/auto_optimized_parallel_executor.py,sha256=4QaEk9AM-IneHm8KKSQ6MjSLNSaAWM4AQ-8OWXYdsaI,17300
+ oafuncs/_script/netcdf_merge.py,sha256=kZV9bKChcWppoZfZbMdIgWZuUlmFx5IYtLmO1rMioGk,13400
 oafuncs/_script/parallel_example_usage.py,sha256=uLvE7iwkMn9Cyq6-wk5_RpbQk7PXM9d16-26lTknW9s,2646
 oafuncs/_script/plot_dataset.py,sha256=4PEMXI7NUgLMKwo-33y_AUrmUYS7BzmInCDyFalaZSk,13136
 oafuncs/_script/replace_file_concent.py,sha256=eCFZjnZcwyRvy6b4mmIfBna-kylSZTyJRfgXd6DdCjk,5982
 oafuncs/oa_down/User_Agent-list.txt,sha256=pazxSip8_lphEBOPHG902zmIBUg8sBKXgmqp_g6j_E4,661062
 oafuncs/oa_down/__init__.py,sha256=kRX5eTUCbAiz3zTaQM1501paOYS_3fizDN4Pa0mtNUA,585
- oafuncs/oa_down/hycom_3hourly.py,sha256=Lw7BgK-6hHp93S2D1nf_k-7oKwDxTV8yYP7ZosfZRh0,65309
- oafuncs/oa_down/hycom_3hourly_20250129.py,sha256=wVOSR-n-7OZYNsLujV0XeMrNwZFuF-g6d7cSrGIReBs,65555
+ oafuncs/oa_down/hycom_3hourly.py,sha256=lCdbYQd7o_2jMgwmbrClNo5omrj5b5cnWnp6lnXMloQ,65307
 oafuncs/oa_down/idm.py,sha256=XfYCNnQWADxOhhJd-T8sNYN0nGiRrAs7zbQcsB5-UmI,1668
- oafuncs/oa_down/literature.py,sha256=D1ZpHULQ4OJ2WXG2TWgBovuucPrfSeyC0rr3P2vSKjo,11332
+ oafuncs/oa_down/literature.py,sha256=2bF9gSKQbzcci9LcKE81j8JEjIJwON7jbwQB3gDDA3E,11331
 oafuncs/oa_down/test_ua.py,sha256=0IQq3NjqfNr7KkyjS_U-a4mYu-r-E7gzawwo4IfEa6Y,10851
 oafuncs/oa_down/user_agent.py,sha256=TsPcAxFmMTYAEHRFjurI1bQBJfDhcA70MdHoUPwQmks,785
 oafuncs/oa_model/__init__.py,sha256=__ImltHkP1bSsIpsmKpDE8QwwA-2Z8K7mZUHGGcRdro,484
@@ -33,8 +33,8 @@ oafuncs/oa_sign/scientific.py,sha256=a4JxOBgm9vzNZKpJ_GQIQf7cokkraV5nh23HGbmTYKw
 oafuncs/oa_tool/__init__.py,sha256=AvrCNR2-xad9ZRjthIdAoSk8UX4vOpEWLg6CV1NQNKc,161
 oafuncs/oa_tool/email.py,sha256=4lJxV_KUzhxgLYfVwYTqp0qxRugD7fvsZkXDe5WkUKo,3052
 oafuncs/oa_tool/parallel.py,sha256=LBFWEKPcILVCbfSulETJE4wGPiOw1P_Fl9DzjYoCqgk,21844
- oafuncs-0.0.97.6.dist-info/licenses/LICENSE.txt,sha256=rMtLpVg8sKiSlwClfR9w_Dd_5WubTQgoOzE2PDFxzs4,1074
- oafuncs-0.0.97.6.dist-info/METADATA,sha256=3S9GNpCs63Ef6sJ_IcIoizHyQt10hprfwrv2YwxkmZs,4204
- oafuncs-0.0.97.6.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- oafuncs-0.0.97.6.dist-info/top_level.txt,sha256=bgC35QkXbN4EmPHEveg_xGIZ5i9NNPYWqtJqaKqTPsQ,8
- oafuncs-0.0.97.6.dist-info/RECORD,,
+ oafuncs-0.0.97.8.dist-info/licenses/LICENSE.txt,sha256=rMtLpVg8sKiSlwClfR9w_Dd_5WubTQgoOzE2PDFxzs4,1074
+ oafuncs-0.0.97.8.dist-info/METADATA,sha256=7uztVgipsZwxNit00ccUhDXROuM9uVPmYRxkgBr1pOc,4225
+ oafuncs-0.0.97.8.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ oafuncs-0.0.97.8.dist-info/top_level.txt,sha256=bgC35QkXbN4EmPHEveg_xGIZ5i9NNPYWqtJqaKqTPsQ,8
+ oafuncs-0.0.97.8.dist-info/RECORD,,