oafuncs 0.0.97.9__py3-none-any.whl → 0.0.97.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,25 +21,25 @@ import numpy as np
  import xarray as xr
  from dask.diagnostics import ProgressBar

- # 配置日志
+ # Configure logging
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)


  def merge(file_list: Union[str, List[str]], var_name: Union[str, List[str], None] = None, dim_name: str = "time", target_filename: str = "merged.nc", chunk_config: Dict = {"time": 1000}, compression: Union[bool, Dict] = True, sanity_check: bool = True, overwrite: bool = True, parallel: bool = True) -> None:
  """
- 终极版NetCDF合并函数
+ Ultimate NetCDF merge function

  Parameters:
- file_list: 文件路径列表或单个文件路径
- var_name: 需要合并的变量(单个变量名/变量列表/None表示全部)
- dim_name: 合并维度,默认为'time'
- target_filename: 输出文件路径
- chunk_config: Dask分块配置,如{"time": 1000}
- compression: 压缩配置(True启用默认压缩,或自定义编码字典)
- sanity_check: 是否执行数据完整性校验
- overwrite: 是否覆盖已存在文件
- parallel: 是否启用并行处理
+ file_list: List of file paths or single file path
+ var_name: Variables to merge (single variable name/list of variables/None means all)
+ dim_name: Dimension to merge along, default is 'time'
+ target_filename: Output file path
+ chunk_config: Dask chunking configuration, e.g. {"time": 1000}
+ compression: Compression configuration (True enables default compression, or custom encoding dictionary)
+ sanity_check: Whether to perform data integrity validation
+ overwrite: Whether to overwrite existing files
+ parallel: Whether to enable parallel processing

  Example:
  merge(["data1.nc", "data2.nc"],
@@ -47,21 +47,21 @@ def merge(file_list: Union[str, List[str]], var_name: Union[str, List[str], None
  target_filename="result.nc",
  chunk_config={"time": 500})
  """
- # ------------------------ 参数预处理 ------------------------#
+ # ------------------------ Parameter preprocessing ------------------------#
  file_list = _validate_and_preprocess_inputs(file_list, target_filename, overwrite)
  all_vars, var_names = _determine_variables(file_list, var_name)
  static_vars = _identify_static_vars(file_list[0], var_names, dim_name)

- # 估计处理所需的内存
+ # Estimate required memory for processing
  _estimate_memory_usage(file_list, var_names, chunk_config)

- # ------------------------ 数据校验阶段 ------------------------#
+ # ------------------------ Data validation phase ------------------------#
  if sanity_check:
  _perform_sanity_checks(file_list, var_names, dim_name, static_vars)

- # ------------------------ 核心合并逻辑 ------------------------#
- with xr.set_options(keep_attrs=True): # 保留元数据属性
- # 动态变量合并
+ # ------------------------ Core merging logic ------------------------#
+ with xr.set_options(keep_attrs=True): # Preserve metadata attributes
+ # Merge dynamic variables
  merged_ds = xr.open_mfdataset(
  file_list,
  combine="nested",

@@ -69,49 +69,49 @@ def merge(file_list: Union[str, List[str]], var_name: Union[str, List[str], None
  data_vars=[var for var in var_names if var not in static_vars],
  chunks=chunk_config,
  parallel=parallel,
- preprocess=lambda ds: ds[var_names], # 仅加载目标变量
+ preprocess=lambda ds: ds[var_names], # Only load target variables
  )

- # 静态变量处理
+ # Process static variables
  if static_vars:
  with xr.open_dataset(file_list[0], chunks=chunk_config) as ref_ds:
  merged_ds = merged_ds.assign({var: ref_ds[var] for var in static_vars})

- # ------------------------ 时间维度处理 ------------------------#
+ # ------------------------ Time dimension processing ------------------------#
  if dim_name == "time":
  merged_ds = _process_time_dimension(merged_ds)

- # ------------------------ 文件输出 ------------------------#
+ # ------------------------ File output ------------------------#
  encoding = _generate_encoding_config(merged_ds, compression)
  _write_to_netcdf(merged_ds, target_filename, encoding)


- # ------------------------ 辅助函数 ------------------------#
+ # ------------------------ Helper functions ------------------------#
  def _validate_and_preprocess_inputs(file_list: Union[str, List[str]], target_filename: str, overwrite: bool) -> List[str]:
- """输入参数校验与预处理"""
+ """Input parameter validation and preprocessing"""
  if not file_list:
- raise ValueError("文件列表不能为空")
+ raise ValueError("File list cannot be empty")

  file_list = [file_list] if isinstance(file_list, str) else file_list
  for f in file_list:
  if not os.path.exists(f):
- raise FileNotFoundError(f"输入文件不存在: {f}")
+ raise FileNotFoundError(f"Input file does not exist: {f}")

  target_dir = os.path.dirname(os.path.abspath(target_filename))
  os.makedirs(target_dir, exist_ok=True)

  if os.path.exists(target_filename):
  if overwrite:
- logger.warning(f"覆盖已存在文件: {target_filename}")
+ logger.warning(f"Overwriting existing file: {target_filename}")
  os.remove(target_filename)
  else:
- raise FileExistsError(f"目标文件已存在: {target_filename}")
+ raise FileExistsError(f"Target file already exists: {target_filename}")

  return file_list


  def _determine_variables(file_list: List[str], var_name: Union[str, List[str], None]) -> tuple:
- """确定需要处理的变量列表"""
+ """Determine the list of variables to process"""
  with xr.open_dataset(file_list[0]) as ds:
  all_vars = list(ds.data_vars.keys())

@@ -119,40 +119,40 @@ def _determine_variables(file_list: List[str], var_name: Union[str, List[str], N
  return all_vars, all_vars
  elif isinstance(var_name, str):
  if var_name not in all_vars:
- raise ValueError(f"无效变量名: {var_name}")
+ raise ValueError(f"Invalid variable name: {var_name}")
  return all_vars, [var_name]
  elif isinstance(var_name, list):
- if not var_name: # 处理空列表情况
- logger.warning("提供了空的变量列表,将使用所有变量")
+ if not var_name: # Handle empty list case
+ logger.warning("Empty variable list provided, will use all variables")
  return all_vars, all_vars
  invalid_vars = set(var_name) - set(all_vars)
  if invalid_vars:
- raise ValueError(f"无效变量名: {invalid_vars}")
+ raise ValueError(f"Invalid variable names: {invalid_vars}")
  return all_vars, var_name
  else:
- raise TypeError("var_name参数类型必须是str/list/None")
+ raise TypeError("var_name parameter must be of type str/list/None")


  def _identify_static_vars(sample_file: str, var_names: List[str], dim_name: str) -> List[str]:
- """识别静态变量"""
+ """Identify static variables"""
  with xr.open_dataset(sample_file) as ds:
  return [var for var in var_names if dim_name not in ds[var].dims]


  def _perform_sanity_checks(file_list: List[str], var_names: List[str], dim_name: str, static_vars: List[str]) -> None:
- """执行数据完整性校验"""
- logger.info("正在执行数据完整性校验...")
+ """Perform data integrity validation"""
+ logger.info("Performing data integrity validation...")

- # 静态变量一致性检查
+ # Check consistency of static variables
  with xr.open_dataset(file_list[0]) as ref_ds:
  for var in static_vars:
  ref = ref_ds[var]
  for f in file_list[1:]:
  with xr.open_dataset(f) as ds:
  if not ref.equals(ds[var]):
- raise ValueError(f"静态变量 {var} 不一致\n参考文件: {file_list[0]}\n问题文件: {f}")
+ raise ValueError(f"Static variable {var} inconsistent\nReference file: {file_list[0]}\nProblem file: {f}")

- # 动态变量维度检查
+ # Check dimensions of dynamic variables
  dim_sizes = {}
  for f in file_list:
  with xr.open_dataset(f) as ds:
@@ -160,53 +160,67 @@ def _perform_sanity_checks(file_list: List[str], var_names: List[str], dim_name:
  if var not in static_vars:
  dims = ds[var].dims
  if dim_name not in dims:
- raise ValueError(f"变量 {var} 在文件 {f} 中缺少合并维度 {dim_name}")
+ raise ValueError(f"Variable {var} in file {f} missing merge dimension {dim_name}")
  dim_sizes.setdefault(var, []).append(ds[var].sizes[dim_name])

- # 检查维度连续性
+ # Check dimension continuity
  for var, sizes in dim_sizes.items():
  if len(set(sizes[1:])) > 1:
- raise ValueError(f"变量 {var} {dim_name} 维度长度不一致: {sizes}")
+ raise ValueError(f"Variable {var} has inconsistent {dim_name} dimension lengths: {sizes}")


  def _process_time_dimension(ds: xr.Dataset) -> xr.Dataset:
- """时间维度特殊处理"""
+ """Special processing for time dimension"""
  if "time" not in ds.dims:
  return ds

- # 排序并去重
+ # Sort and deduplicate
  ds = ds.sortby("time")
- # 找到唯一时间戳的索引
+ # Find indices of unique timestamps
  _, index = np.unique(ds["time"], return_index=True)
- # 无需再次排序索引,因为我们需要保持时间的原始顺序
+ # No need to sort indices again as we want to keep original time order
  return ds.isel(time=index)


  def _generate_encoding_config(ds: xr.Dataset, compression: Union[bool, Dict]) -> Dict:
- """生成压缩编码配置"""
+ """Generate compression encoding configuration"""
  if not compression:
  return {}

- # 默认压缩设置基础
+ # Default compression settings base
  def _get_default_encoding(var):
  return {"zlib": True, "complevel": 3, "dtype": "float32" if ds[var].dtype == "float64" else ds[var].dtype}

- # 处理自定义压缩配置
+ # Handle custom compression configuration
  encoding = {}
  if isinstance(compression, dict):
  for var in ds.data_vars:
  encoding[var] = _get_default_encoding(var)
- encoding[var].update(compression.get(var, {})) # 使用 dict.update() 合并字典
+ encoding[var].update(compression.get(var, {})) # Use dict.update() to merge dictionaries
  else:
  for var in ds.data_vars:
  encoding[var] = _get_default_encoding(var)

  return encoding

+ def _calculate_file_size(filepath: str) -> str:
+ """Calculate file size with adaptive unit conversion"""
+ if os.path.exists(filepath):
+ size_in_bytes = os.path.getsize(filepath)
+ if size_in_bytes < 1e3:
+ return f"{size_in_bytes:.2f} B"
+ elif size_in_bytes < 1e6:
+ return f"{size_in_bytes / 1e3:.2f} KB"
+ elif size_in_bytes < 1e9:
+ return f"{size_in_bytes / 1e6:.2f} MB"
+ else:
+ return f"{size_in_bytes / 1e9:.2f} GB"
+ else:
+ raise FileNotFoundError(f"File not found: {filepath}")

  def _write_to_netcdf(ds: xr.Dataset, filename: str, encoding: Dict) -> None:
- """改进后的安全写入NetCDF文件"""
- logger.info("开始写入文件...")
+ """Improved safe writing to NetCDF file"""
+ logger.info("Starting file write...")
  unlimited_dims = [dim for dim in ds.dims if ds[dim].encoding.get("unlimited", False)]

  delayed = ds.to_netcdf(filename, encoding=encoding, compute=False, unlimited_dims=unlimited_dims)
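In the hunk above, `_generate_encoding_config` fills in zlib/complevel/dtype defaults for every variable and then overlays any per-variable overrides supplied through `merge`'s `compression` argument via `dict.update()`. A minimal sketch of such a custom configuration follows; the variable names "sst" and "ssh" and the file names are placeholders, not part of the package:

    # Illustrative only: per-variable overrides for merge()'s `compression` argument.
    # Keys that are omitted keep the defaults generated above
    # ({"zlib": True, "complevel": 3, "dtype": ...}).
    custom_compression = {
        "sst": {"complevel": 5},     # compress this variable harder
        "ssh": {"dtype": "float32"}  # force single precision for this variable
    }
    merge(["data1.nc", "data2.nc"], var_name=["sst", "ssh"], target_filename="result.nc", compression=custom_compression)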
@@ -215,16 +229,17 @@ def _write_to_netcdf(ds: xr.Dataset, filename: str, encoding: Dict) -> None:
  with ProgressBar():
  delayed.compute()

- logger.info(f"合并完成 → {filename}")
- logger.info(f"文件大小: {os.path.getsize(filename) / 1e9:.2f}GB")
+ logger.info(f"Merge completed → {filename}")
+ # logger.info(f"File size: {os.path.getsize(filename) / 1e9:.2f}GB")
+ logger.info(f"File size: {_calculate_file_size(filename)}")
  except MemoryError as e:
- _handle_write_error(filename, "内存不足,无法完成文件写入。请尝试调整chunk_config参数减少内存使用", e)
+ _handle_write_error(filename, "Insufficient memory to complete file write. Try adjusting chunk_config parameter to reduce memory usage", e)
  except Exception as e:
- _handle_write_error(filename, f"写入文件失败: {str(e)}", e)
+ _handle_write_error(filename, f"Failed to write file: {str(e)}", e)


  def _handle_write_error(filename: str, message: str, exception: Exception) -> None:
- """统一处理写入文件的异常"""
+ """Unified handling of file write exceptions"""
  logger.error(message)
  if os.path.exists(filename):
  os.remove(filename)
@@ -232,24 +247,24 @@ def _handle_write_error(filename: str, message: str, exception: Exception) -> No


  def _estimate_memory_usage(file_list: List[str], var_names: List[str], chunk_config: Dict) -> None:
- """改进内存使用量估算"""
+ """Improved memory usage estimation"""
  try:
  total_size = 0
  sample_file = file_list[0]
  with xr.open_dataset(sample_file) as ds:
  for var in var_names:
  if var in ds:
- # 考虑变量的维度大小
+ # Consider variable dimension sizes
  var_size = np.prod([ds[var].sizes[dim] for dim in ds[var].dims]) * ds[var].dtype.itemsize
  total_size += var_size * len(file_list)

- # 估算Dask处理时的内存使用量 (通常是原始数据的2-3倍)
+ # Estimate memory usage during Dask processing (typically 2-3x original data)
  estimated_memory = total_size * 3

  if estimated_memory > 8e9:
- logger.warning(f"预计内存使用可能较高 ( {estimated_memory / 1e9:.1f}GB)。如果遇到内存问题,请调整chunk_config参数: {chunk_config}")
+ logger.warning(f"Estimated memory usage may be high (approx. {estimated_memory / 1e9:.1f}GB). If memory issues occur, adjust chunk_config parameter: {chunk_config}")
  except Exception as e:
- logger.debug(f"内存估计失败: {str(e)}")
+ logger.debug(f"Memory estimation failed: {str(e)}")


  if __name__ == "__main__":
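Putting the pieces of this module together, a typical call might look like the sketch below. It is illustrative only: the file names, variable name, and chunk size are placeholders, and the import path simply mirrors the module location listed in this wheel (the package may expose a different public entry point). If `_estimate_memory_usage` warns about high memory use, a smaller "time" chunk in `chunk_config` reduces how much data each Dask task holds at once.

    from oafuncs._script.netcdf_merge import merge  # path as laid out in this wheel

    # Merge one variable from several hypothetical daily files along the time dimension.
    merge(
        ["day1.nc", "day2.nc", "day3.nc"],
        var_name="temperature",
        dim_name="time",
        target_filename="merged.nc",
        chunk_config={"time": 500},  # lower this value if the memory warning appears
    )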
@@ -1,18 +1,17 @@
  import os
- from typing import Optional, Tuple
+ from typing import Optional, Tuple, Union

  import matplotlib as mpl

  mpl.use("Agg") # Use non-interactive backend

+ import cartopy.crs as ccrs
  import cftime
  import matplotlib.pyplot as plt
  import numpy as np
- from rich import print
- import cartopy.crs as ccrs
- import xarray as xr
-
  import oafuncs
+ import xarray as xr
+ from rich import print


  def plot_1d(data: xr.DataArray, output_path: str, x_dim: str, y_dim: str, z_dim: str, t_dim: str) -> None:
@@ -264,33 +263,48 @@ def process_variable(var: str, data: xr.DataArray, dims: int, dims_name: Tuple[s
  print(f"Error processing {var}_{dims_name[0]}-{i}_{dims_name[1]}-{j}: {e}")


- def func_plot_dataset(ds_in: xr.Dataset, output_dir: str, xyzt_dims: Tuple[str, str, str, str] = ("longitude", "latitude", "level", "time"), plot_type: str = "contourf", fixed_colorscale: bool = False) -> None:
+ def func_plot_dataset(ds_in: Union[xr.Dataset, xr.DataArray], output_dir: str, xyzt_dims: Tuple[str, str, str, str] = ("longitude", "latitude", "level", "time"), plot_type: str = "contourf", fixed_colorscale: bool = False) -> None:
  """Plot variables from a NetCDF file and save the plots to the specified directory."""
  os.makedirs(output_dir, exist_ok=True)
  x_dim, y_dim, z_dim, t_dim = xyzt_dims

  # Main processing function
  try:
- ds = ds_in
- varlist = list(ds.data_vars)
- print(f"Found {len(varlist)} variables in dataset")
-
- for var in varlist:
+ # Check whether the input is a DataArray or a Dataset
+ if isinstance(ds_in, xr.DataArray):
+ # Handle a single DataArray
+ print("Processing a single DataArray")
+ var = ds_in.name if ds_in.name is not None else "unnamed_variable"
  print("=" * 120)
  print(f"Processing: {var}")
- data = ds[var]
- dims = len(data.shape)
- dims_name = data.dims
  try:
- process_variable(var, data, dims, dims_name, output_dir, x_dim, y_dim, z_dim, t_dim, fixed_colorscale, plot_type)
+ dims = len(ds_in.shape)
+ dims_name = ds_in.dims
+ process_variable(var, ds_in, dims, dims_name, output_dir, x_dim, y_dim, z_dim, t_dim, fixed_colorscale, plot_type)
  except Exception as e:
  print(f"Error processing variable {var}: {e}")
+ else:
+ # Handle a Dataset containing multiple variables
+ ds = ds_in
+ varlist = list(ds.data_vars)
+ print(f"Found {len(varlist)} variables in dataset")
+
+ for var in varlist:
+ print("=" * 120)
+ print(f"Processing: {var}")
+ data = ds[var]
+ dims = len(data.shape)
+ dims_name = data.dims
+ try:
+ process_variable(var, data, dims, dims_name, output_dir, x_dim, y_dim, z_dim, t_dim, fixed_colorscale, plot_type)
+ except Exception as e:
+ print(f"Error processing variable {var}: {e}")

  except Exception as e:
  print(f"Error processing dataset: {e}")
  finally:
- if "ds" in locals():
- ds.close()
+ if isinstance(ds_in, xr.Dataset) and "ds_in" in locals():
+ ds_in.close()
  print("Dataset closed")

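Because `func_plot_dataset` now accepts either an `xr.Dataset` or a single `xr.DataArray`, both call styles below should work. This is a hedged sketch: the input file, variable name, and output directory are placeholders, and the import path simply mirrors the file layout listed in this wheel's RECORD.

    import xarray as xr
    from oafuncs._script.plot_dataset import func_plot_dataset  # path as listed in RECORD

    ds = xr.open_dataset("sample.nc")               # placeholder input file
    func_plot_dataset(ds, "plots/")                 # plot every data variable in the Dataset
    func_plot_dataset(ds["temperature"], "plots/")  # plot a single DataArray (new in 0.0.97.10)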
@@ -1000,7 +1000,7 @@ def _done_callback(future, progress, task, total, counter_lock):
  progress.update(task, advance=1, description=f"[cyan]Downloading... {parallel_counter}/{total}")


- def _download_hourly_func(var, time_s, time_e, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1):
+ def _download_hourly_func(var, time_s, time_e, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1, download_interval=3):
  """
  Description:
  Download the data of single time or a series of time

@@ -1032,7 +1032,7 @@ def _download_hourly_func(var, time_s, time_e, lon_min=0, lon_max=359.92, lat_mi
  _prepare_url_to_download(var, lon_min, lon_max, lat_min, lat_max, ymdh_time_s, None, depth, level, store_path, dataset_name, version_name, check)
  elif int(ymdh_time_s) < int(ymdh_time_e):
  print("Downloading a series of files...")
- time_list = get_time_list(ymdh_time_s, ymdh_time_e, 3, "hour")
+ time_list = get_time_list(ymdh_time_s, ymdh_time_e, int(download_interval), "hour")
  with Progress() as progress:
  task = progress.add_task("[cyan]Downloading...", total=len(time_list))
  if ftimes == 1:

@@ -1071,7 +1071,7 @@ def _download_hourly_func(var, time_s, time_e, lon_min=0, lon_max=359.92, lat_mi
  print("[bold red]Please ensure the time_s is no more than time_e")


- def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1, idm_engine=None, fill_time=None):
+ def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, lat_max=90, depth=None, level=None, store_path=None, dataset_name=None, version_name=None, num_workers=None, check=False, ftimes=1, idm_engine=None, fill_time=None, download_interval_hour=3):
  """
  Description:
  Download the data of single time or a series of time

@@ -1094,6 +1094,7 @@ def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, l
  ftimes: int, the number of time in one file, default is 1, if set to 1, the data of single time will be downloaded; the maximum is 8, if set to 8, the data of 8 times will be downloaded in one file
  idm_engine: str, the IDM engine, default is None, if set, the IDM will be used to download the data; example: "D:\\Programs\\Internet Download Manager\\IDMan.exe"
  fill_time: bool or None, the mode to fill the time, default is None. None: only download the data; True: modify the real time of data to the time in the file name; False: check the time in the file name and the real time of data, if not match, delete the file
+ download_interval_hour: int, the interval time to download the data, default is 3, if set, the interval time will be used to download the data; example: 3, 6, ...

  Returns:
  None

@@ -1180,7 +1181,7 @@ def download(var, time_s, time_e=None, lon_min=0, lon_max=359.92, lat_min=-80, l
  global match_time
  match_time = fill_time

- _download_hourly_func(var, time_s, time_e, lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, num_workers, check, ftimes)
+ _download_hourly_func(var, time_s, time_e, lon_min, lon_max, lat_min, lat_max, depth, level, store_path, dataset_name, version_name, num_workers, check, ftimes, download_interval_hour)

  if idm_engine is not None:
  if idm_download_list:
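The new `download_interval_hour` argument is threaded through `_download_hourly_func` to `get_time_list`, so a series download can step through the requested window at a coarser spacing than the 3-hourly default. A hedged usage sketch follows; the variable name, time strings, region, and store path are illustrative placeholders and should be checked against the module's docstring:

    from oafuncs.oa_down.hycom_3hourly import download  # module path as listed in RECORD

    # Download a series at 6-hour spacing instead of the default 3 hours.
    download(
        var="u",                   # placeholder variable name
        time_s="2024010100",       # yyyymmddhh, matching the ymdh naming used in this module
        time_e="2024011000",
        lon_min=100, lon_max=160, lat_min=0, lat_max=45,
        store_path="./hycom_data",
        download_interval_hour=6,
    )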
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: oafuncs
- Version: 0.0.97.9
+ Version: 0.0.97.10
  Summary: Oceanic and Atmospheric Functions
  Home-page: https://github.com/Industry-Pays/OAFuncs
  Author: Kun Liu

@@ -10,13 +10,13 @@ oafuncs/oa_python.py,sha256=Q-6UGGw_dJff7Ef8i87fsLPoGeHV5jBzfb-7HP4THR0,4018
  oafuncs/_data/OAFuncs.png,sha256=y1_x-mUP3gFjcy6m8FqfvQO_HgjzPhQKfXjnSHjslZE,3436152
  oafuncs/_data/hycom_3hourly.png,sha256=azt_uPcXtl_8CSKRLLPCIf5pPrcxMiOzvoFQnwb0zUo,12411415
  oafuncs/_script/auto_optimized_parallel_executor.py,sha256=4QaEk9AM-IneHm8KKSQ6MjSLNSaAWM4AQ-8OWXYdsaI,17300
- oafuncs/_script/netcdf_merge.py,sha256=kZV9bKChcWppoZfZbMdIgWZuUlmFx5IYtLmO1rMioGk,13400
+ oafuncs/_script/netcdf_merge.py,sha256=mKHnjStCqP7bwUMVA5k9ZKwT1ZVv1PR-ehMKdpHMJ4s,14439
  oafuncs/_script/parallel_example_usage.py,sha256=uLvE7iwkMn9Cyq6-wk5_RpbQk7PXM9d16-26lTknW9s,2646
- oafuncs/_script/plot_dataset.py,sha256=4PEMXI7NUgLMKwo-33y_AUrmUYS7BzmInCDyFalaZSk,13136
+ oafuncs/_script/plot_dataset.py,sha256=zkSEnO_-biyagorwWXPoihts_cwuvripzEt-l9bHJ2E,13989
  oafuncs/_script/replace_file_concent.py,sha256=eCFZjnZcwyRvy6b4mmIfBna-kylSZTyJRfgXd6DdCjk,5982
  oafuncs/oa_down/User_Agent-list.txt,sha256=pazxSip8_lphEBOPHG902zmIBUg8sBKXgmqp_g6j_E4,661062
  oafuncs/oa_down/__init__.py,sha256=kRX5eTUCbAiz3zTaQM1501paOYS_3fizDN4Pa0mtNUA,585
- oafuncs/oa_down/hycom_3hourly.py,sha256=lCdbYQd7o_2jMgwmbrClNo5omrj5b5cnWnp6lnXMloQ,65307
+ oafuncs/oa_down/hycom_3hourly.py,sha256=RlNJe5TLth1YjbnlJPgWtxkNKTy7rSYCo9Cwb9iID1s,65571
  oafuncs/oa_down/idm.py,sha256=XfYCNnQWADxOhhJd-T8sNYN0nGiRrAs7zbQcsB5-UmI,1668
  oafuncs/oa_down/literature.py,sha256=2bF9gSKQbzcci9LcKE81j8JEjIJwON7jbwQB3gDDA3E,11331
  oafuncs/oa_down/test_ua.py,sha256=0IQq3NjqfNr7KkyjS_U-a4mYu-r-E7gzawwo4IfEa6Y,10851

@@ -33,8 +33,8 @@ oafuncs/oa_sign/scientific.py,sha256=a4JxOBgm9vzNZKpJ_GQIQf7cokkraV5nh23HGbmTYKw
  oafuncs/oa_tool/__init__.py,sha256=AvrCNR2-xad9ZRjthIdAoSk8UX4vOpEWLg6CV1NQNKc,161
  oafuncs/oa_tool/email.py,sha256=4lJxV_KUzhxgLYfVwYTqp0qxRugD7fvsZkXDe5WkUKo,3052
  oafuncs/oa_tool/parallel.py,sha256=LBFWEKPcILVCbfSulETJE4wGPiOw1P_Fl9DzjYoCqgk,21844
- oafuncs-0.0.97.9.dist-info/licenses/LICENSE.txt,sha256=rMtLpVg8sKiSlwClfR9w_Dd_5WubTQgoOzE2PDFxzs4,1074
- oafuncs-0.0.97.9.dist-info/METADATA,sha256=8r2g8722d1jSwxy186xE7zP2arFfsZpgZ7omxoHDYSc,4225
- oafuncs-0.0.97.9.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- oafuncs-0.0.97.9.dist-info/top_level.txt,sha256=bgC35QkXbN4EmPHEveg_xGIZ5i9NNPYWqtJqaKqTPsQ,8
- oafuncs-0.0.97.9.dist-info/RECORD,,
+ oafuncs-0.0.97.10.dist-info/licenses/LICENSE.txt,sha256=rMtLpVg8sKiSlwClfR9w_Dd_5WubTQgoOzE2PDFxzs4,1074
+ oafuncs-0.0.97.10.dist-info/METADATA,sha256=_nG65EsnOtswb-cfau1QhHjKq9iuhBPwUpYqp-L7_pw,4226
+ oafuncs-0.0.97.10.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ oafuncs-0.0.97.10.dist-info/top_level.txt,sha256=bgC35QkXbN4EmPHEveg_xGIZ5i9NNPYWqtJqaKqTPsQ,8
+ oafuncs-0.0.97.10.dist-info/RECORD,,