oafuncs 0.0.97.4__py3-none-any.whl → 0.0.97.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oafuncs/_script/auto_optimized_parallel_executor.py +459 -0
- oafuncs/_script/parallel_example_usage.py +83 -0
- oafuncs/_script/replace_file_concent.py +151 -0
- oafuncs/oa_file.py +30 -10
- oafuncs/oa_tool/parallel.py +479 -6
- {oafuncs-0.0.97.4.dist-info → oafuncs-0.0.97.5.dist-info}/METADATA +3 -2
- {oafuncs-0.0.97.4.dist-info → oafuncs-0.0.97.5.dist-info}/RECORD +10 -7
- {oafuncs-0.0.97.4.dist-info → oafuncs-0.0.97.5.dist-info}/WHEEL +1 -1
- {oafuncs-0.0.97.4.dist-info → oafuncs-0.0.97.5.dist-info/licenses}/LICENSE.txt +0 -0
- {oafuncs-0.0.97.4.dist-info → oafuncs-0.0.97.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,459 @@
|
|
1
|
+
import contextlib
|
2
|
+
import logging
|
3
|
+
import multiprocessing as mp
|
4
|
+
import os
|
5
|
+
import platform
|
6
|
+
import time
|
7
|
+
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
|
8
|
+
|
9
|
+
import psutil
|
10
|
+
|
11
|
+
|
12
|
+
class ParallelExecutor:
    """
    Auto-optimized parallel executor that picks the best execution mode
    (process vs. thread) and worker count for the current platform.

    Features:
    - Detects the platform and chooses the best execution mode
    - Sizes the worker pool from CPU cores and available memory
    - Linux- and Windows-specific tuning
    - Task chunking to amortize overhead for many small tasks
    - Automatic failover to the other mode if execution fails
    """

    def __init__(self):
        # Detect the platform first; all other tuning derives from it.
        self.platform = self._detect_platform()
        # Pick the best execution mode and worker count automatically.
        self.mode, self.max_workers = self._determine_optimal_settings()
        # The pool is created lazily (see the `executor` property).
        self._executor = None
        self.executor_class = ProcessPoolExecutor if self.mode == "process" else ThreadPoolExecutor
        # Reuse the pool in process mode on POSIX; Windows process startup
        # is expensive and pools there are created per batch instead.
        self.reuse_pool = self.mode == "process" and self.platform != "windows"

        # Platform-specific tuning knobs.
        self.mp_context = None
        self.chunk_size = self._get_default_chunk_size()
        self.timeout_per_task = 3600  # default per-task timeout (seconds)
        self.worker_init_func = None

        if self.platform == "linux":
            self._setup_linux_optimizations()
        elif self.platform == "windows":
            self._setup_windows_optimizations()

        logging.info(f"Initialized {self.__class__.__name__} with mode={self.mode}, max_workers={self.max_workers} on {self.platform} platform")

    def _detect_platform(self):
        """Return one of 'linux', 'windows', 'macos' or 'unknown'."""
        system = platform.system().lower()
        if system == "linux":
            return "linux"
        elif system == "windows":
            return "windows"
        elif system == "darwin":
            return "macos"
        return "unknown"

    def _determine_optimal_settings(self):
        """Determine the best execution mode and worker count for this host."""
        mode = "process"  # default to process mode

        # Linux tuning.
        if self.platform == "linux":
            mode = "process"
            in_container = self._is_in_container()

            physical_cores = psutil.cpu_count(logical=False) or 1
            logical_cores = psutil.cpu_count(logical=True) or 1

            mem = psutil.virtual_memory()
            available_mem_gb = mem.available / (1024**3)

            # Estimated memory footprint per worker process; tune per workload.
            est_mem_per_process_gb = 0.5
            mem_limited_workers = max(1, int(available_mem_gb / est_mem_per_process_gb))

            if in_container:
                # Be conservative inside containers (cgroup limits may apply).
                max_workers = min(physical_cores, mem_limited_workers, 4)
            else:
                max_workers = min(logical_cores, mem_limited_workers)

        # Windows tuning: processes are reliable but costly; leave one core free.
        elif self.platform == "windows":
            mode = "process"
            logical_cores = psutil.cpu_count(logical=True) or 1
            if logical_cores > 4:
                max_workers = logical_cores - 1
            else:
                max_workers = max(1, logical_cores)

        # macOS tuning.
        elif self.platform == "macos":
            mode = "process"
            logical_cores = psutil.cpu_count(logical=True) or 1
            max_workers = max(1, logical_cores - 1)

        # Conservative defaults for unknown platforms.
        else:
            mode = "process"
            max_workers = max(1, (psutil.cpu_count(logical=True) or 2) - 1)

        return mode, max_workers

    def _is_in_container(self):
        """Best-effort detection of a container (Docker/Kubernetes) environment."""
        if os.path.exists("/.dockerenv"):
            return True

        try:
            with open("/proc/1/cgroup", "rt") as f:
                return any(("docker" in line or "kubepods" in line) for line in f)
        except OSError:
            # /proc may not exist (non-Linux) or be unreadable; assume no container.
            pass

        return False

    def _setup_linux_optimizations(self):
        """Configure Linux-specific multiprocessing options."""
        try:
            # fork: fastest but can misbehave with threads; spawn: safest but
            # slow; forkserver: middle ground. fork fits this executor's usage.
            self.mp_context = mp.get_context("fork")

            # Worker initializer lowers priority so workers don't starve the host.
            self.worker_init_func = self._linux_worker_init

        except Exception as e:
            logging.warning(f"Failed to set Linux optimizations: {e}")
            self.mp_context = None

    def _setup_windows_optimizations(self):
        """Configure Windows-specific options."""
        # Process creation/startup is expensive on Windows, so batch more
        # tasks per chunk.
        self.chunk_size = 10
        # No special worker initialization is needed on Windows.
        self.worker_init_func = None

    def _linux_worker_init(self):
        """Initializer run inside each Linux worker process (best effort)."""
        try:
            p = psutil.Process()

            # Slightly below-normal priority to avoid starving the system.
            p.nice(10)

            # Best-effort IO-priority tweak; may need privileges, so failures
            # are silently ignored.
            try:
                os.system(f"ionice -c 2 -n 4 -p {os.getpid()} > /dev/null 2>&1")
            except OSError:
                pass

        except Exception as e:
            logging.debug(f"Worker initialization warning (non-critical): {e}")
            # Never let initialization problems break the worker.

    def _get_default_chunk_size(self):
        """Default task chunk size for this platform."""
        # Windows process startup is slow, so use bigger chunks there;
        # 5 is a good default elsewhere.
        return 10 if self.platform == "windows" else 5

    def _executor_kwargs(self):
        """Build the keyword arguments shared by all executor constructions."""
        kwargs = {}
        if self.mode == "process" and self.mp_context:
            kwargs["mp_context"] = self.mp_context
        if self.worker_init_func and self.mode == "process":
            kwargs["initializer"] = self.worker_init_func
        return kwargs

    @property
    def executor(self):
        """Lazily create (and cache) the reusable executor."""
        if self._executor is None and self.reuse_pool:
            self._executor = self.executor_class(max_workers=self.max_workers, **self._executor_kwargs())
        return self._executor

    @contextlib.contextmanager
    def get_executor(self):
        """Context manager yielding an executor (reused when possible)."""
        if self.reuse_pool:
            # BUGFIX: the original only yielded a cached pool if it already
            # existed, so the lazy `executor` property was never triggered and
            # reuse never actually happened. Route through the property so the
            # pool is created once and kept alive (shutdown() releases it).
            yield self.executor
        else:
            with self.executor_class(max_workers=self.max_workers, **self._executor_kwargs()) as executor:
                yield executor

    def run(self, func, param_list, chunk_size=None, fallback_on_failure=True):
        """
        Execute `func` over `param_list` in parallel.

        Args:
            func (callable): function to execute
            param_list (list): list of argument tuples, one per task
            chunk_size (int, optional): batch size; None uses the platform default
            fallback_on_failure (bool): retry in the other mode if this one fails

        Returns:
            list: results in input order (a failed task holds its exception)
        """
        if not callable(func):
            raise ValueError("func must be callable.")
        if not isinstance(param_list, list):
            raise ValueError("param_list must be a list.")

        # Nothing to do for an empty task list.
        if not param_list:
            return []

        effective_chunk_size = chunk_size or self.chunk_size

        # Chunk only when there are clearly more tasks than chunks.
        if effective_chunk_size and len(param_list) > effective_chunk_size * 2:
            return self._run_chunked(func, param_list, effective_chunk_size)

        try:
            return self._execute(func, param_list)
        except Exception as e:
            if not fallback_on_failure:
                raise
            logging.warning(f"Execution failed with {self.mode} mode: {e}. Trying fallback...")
            # Flip to the other mode and retry once.
            old_mode = self.mode
            self._switch_mode("thread" if old_mode == "process" else "process")
            try:
                results = self._execute(func, param_list)
                logging.info(f"Fallback to {self.mode} mode succeeded.")
                return results
            except Exception as e2:
                logging.error(f"Fallback also failed: {e2}")
                # Restore the original mode before propagating the error.
                self._switch_mode(old_mode)
                raise

    def _switch_mode(self, mode):
        """Switch execution mode and reset the cached executor."""
        self.mode = mode
        self.executor_class = ProcessPoolExecutor if mode == "process" else ThreadPoolExecutor
        self._executor = None

    def _execute(self, func, param_list):
        """Submit all tasks and collect results in input order."""
        results = [None] * len(param_list)
        logging.info("Starting parallel execution in %s mode with %d workers.", self.mode, self.max_workers)

        start_time = time.time()

        with self.get_executor() as executor:
            future_to_index = {executor.submit(func, *params): idx for idx, params in enumerate(param_list)}

            for future in as_completed(future_to_index):
                idx = future_to_index[future]
                try:
                    # Guard each result with the per-task timeout.
                    results[idx] = future.result(timeout=self.timeout_per_task)
                except Exception as e:
                    logging.error("Task %d failed with error: %s", idx, e)
                    results[idx] = e

        elapsed = time.time() - start_time
        logging.info("Parallel execution completed in %.2f seconds.", elapsed)
        return results

    def _run_chunked(self, func, param_list, chunk_size):
        """Batch many small tasks into chunks and run the chunks in parallel."""

        # NOTE(review): this closure is not picklable, so in process mode the
        # first attempt fails and `run`'s fallback switches to threads — the
        # original had the same limitation.
        def process_chunk(chunk):
            return [func(*params) for params in chunk]

        # Split the parameter list into chunks of at most `chunk_size` tasks.
        chunks = [param_list[i : i + chunk_size] for i in range(0, len(param_list), chunk_size)]

        logging.info(f"Processing {len(param_list)} tasks in {len(chunks)} chunks of size ~{chunk_size}")

        chunk_results = self._execute(process_chunk, [(chunk,) for chunk in chunks])

        # Flatten chunk results; a chunk that failed outright is stored as an
        # exception and is dropped here, so the output may be shorter than the
        # input (preserved from the original behavior).
        return [result for sublist in chunk_results if isinstance(sublist, list) for result in sublist]

    def map(self, func, *iterables, timeout=None, chunk_size=None):
        """
        Parallel analogue of the built-in map().

        BUGFIX: the zipped argument tuples were previously wrapped in an extra
        tuple and passed whole to `func` (so `func` received e.g. ``(1,)``
        instead of ``1``); arguments are now applied as ``func(*args)``,
        matching ``concurrent.futures.Executor.map`` semantics.

        Args:
            func: function applied to each element
            *iterables: one or more iterables, consumed in lockstep
            timeout: per-task timeout override
            chunk_size: task chunk size

        Returns:
            generator yielding results in input order
        """
        # zip(*iterables) already yields one argument tuple per task.
        param_list = list(zip(*iterables))

        # Temporarily override the per-task timeout, restoring it afterwards.
        original_timeout = self.timeout_per_task
        if timeout:
            self.timeout_per_task = timeout

        try:
            for result in self.run(func, param_list, chunk_size=chunk_size):
                yield result
        finally:
            self.timeout_per_task = original_timeout

    def __del__(self):
        """Release pool resources on garbage collection."""
        # getattr guards against __init__ having failed before _executor existed.
        if getattr(self, "_executor", None) is not None:
            self.shutdown()

    def shutdown(self):
        """Explicitly shut down the cached executor, if any."""
        if self._executor:
            try:
                self._executor.shutdown(wait=True)
            except Exception:
                # Shutdown is best-effort; the reference is dropped regardless.
                pass
            self._executor = None

    def imap(self, func, *iterables, timeout=None, chunk_size=None):
        """concurrent.futures.Executor.map-style interface returning an iterator."""
        return self.map(func, *iterables, timeout=timeout, chunk_size=chunk_size)

    def imap_unordered(self, func, *iterables, timeout=None, chunk_size=None):
        """
        multiprocessing.Pool.imap_unordered-style interface; results may be
        yielded out of order.

        BUGFIX: arguments are now unpacked into `func` (``func(*args)``)
        instead of being passed as a single wrapped tuple.
        """
        # One argument tuple per task, straight from zip.
        param_list = list(zip(*iterables))

        # Nothing to yield for empty input.
        if not param_list:
            return

        # Temporarily override the per-task timeout, restoring it afterwards.
        original_timeout = self.timeout_per_task
        if timeout:
            self.timeout_per_task = timeout

        try:
            effective_chunk_size = chunk_size or self.chunk_size

            if effective_chunk_size and len(param_list) > effective_chunk_size * 2:
                # Chunked path: each future computes a whole chunk.
                chunks = [param_list[i : i + effective_chunk_size] for i in range(0, len(param_list), effective_chunk_size)]

                with self.get_executor() as executor:
                    futures = [executor.submit(self._process_chunk_for_imap, func, chunk) for chunk in chunks]

                    for future in as_completed(futures):
                        try:
                            for result in future.result(timeout=self.timeout_per_task):
                                yield result
                        except Exception as e:
                            logging.error(f"Chunk processing failed: {e}")
            else:
                # Direct path: one future per task.
                with self.get_executor() as executor:
                    futures = [executor.submit(func, *params) for params in param_list]

                    for future in as_completed(futures):
                        try:
                            yield future.result(timeout=self.timeout_per_task)
                        except Exception as e:
                            logging.error(f"Task failed: {e}")
                            yield e
        finally:
            self.timeout_per_task = original_timeout

    def _process_chunk_for_imap(self, func, chunk):
        """Run every task of a chunk sequentially (helper for imap_unordered)."""
        return [func(*params) for params in chunk]

    def starmap(self, func, iterable, timeout=None, chunk_size=None):
        """
        Parallel analogue of itertools.starmap.

        Args:
            func: function applied to each argument tuple
            iterable: iterable of argument tuples
            timeout: per-task timeout override
            chunk_size: task chunk size

        Returns:
            generator yielding results in input order
        """

        # Adapt each argument tuple into a single-argument call for map().
        def wrapper(args):
            return func(*args)

        return self.map(wrapper, iterable, timeout=timeout, chunk_size=chunk_size)

    def gather(self, funcs_and_args):
        """
        Run several different functions in parallel, like asyncio.gather.

        Args:
            funcs_and_args: list of (func, args) tuples, where args is the
                argument tuple passed to func

        Returns:
            list: results in input order
        """
        if not isinstance(funcs_and_args, list):
            raise ValueError("funcs_and_args must be a list of (func, args) tuples")

        def wrapper(func_and_args):
            func, args = func_and_args
            return func(*args)

        return self.run(wrapper, [(item,) for item in funcs_and_args])
|
@@ -0,0 +1,83 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# coding=utf-8
|
3
|
+
"""
|
4
|
+
Author: Liu Kun && 16031215@qq.com
|
5
|
+
Date: 2025-03-18 19:14:19
|
6
|
+
LastEditors: Liu Kun && 16031215@qq.com
|
7
|
+
LastEditTime: 2025-03-18 19:18:38
|
8
|
+
FilePath: \\Python\\My_Funcs\\OAFuncs\\oafuncs\\_script\\parallel_example_usage.py
|
9
|
+
Description:
|
10
|
+
EditPlatform: vscode
|
11
|
+
ComputerInfo: XPS 15 9510
|
12
|
+
SystemInfo: Windows 11
|
13
|
+
Python Version: 3.12
|
14
|
+
"""
|
15
|
+
|
16
|
+
import logging
|
17
|
+
import time
|
18
|
+
from auto_optimized_parallel_executor import ParallelExecutor
|
19
|
+
|
20
|
+
# Configure logging for the example run.
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
|
22
|
+
|
23
|
+
|
24
|
+
# 示例函数
|
25
|
+
def compute_intensive_task(n):
    """CPU-bound example task: accumulate the square roots of 0..n-1."""
    return sum(i**0.5 for i in range(n))
|
31
|
+
|
32
|
+
|
33
|
+
def io_intensive_task(seconds, value):
    """IO-bound example task: sleep to simulate blocking IO, then report."""
    time.sleep(seconds)  # simulated IO wait
    return "Processed {}".format(value)
|
37
|
+
|
38
|
+
|
39
|
+
def main():
    # Build the auto-tuned executor and show what it chose.
    runner = ParallelExecutor()

    print(f"自动选择的执行模式: {runner.mode}")
    print(f"自动选择的工作线程/进程数: {runner.max_workers}")
    print(f"运行平台: {runner.platform}")

    # Example 1: CPU-bound tasks.
    print("\n运行计算密集型任务...")
    cpu_params = [(1000000,) for _ in range(20)]
    cpu_results = runner.run(compute_intensive_task, cpu_params)
    print(f"完成计算密集型任务,结果数量: {len(cpu_results)}")

    # Example 2: IO-bound tasks.
    print("\n运行IO密集型任务...")
    io_params = [(0.1, f"item-{i}") for i in range(30)]
    io_results = runner.run(io_intensive_task, io_params)
    print(f"完成IO密集型任务,结果示例: {io_results[:3]}")

    # Example 3: map interface.
    print("\n使用map接口...")
    numbers = list(range(1, 11))
    squared = list(runner.map(lambda x: x * x, numbers))
    print(f"Map结果: {squared}")

    # Example 4: imap_unordered (results may arrive out of order).
    print("\n使用imap_unordered接口...")
    for i, result in enumerate(runner.imap_unordered(lambda x: x * x * x, range(1, 11))):
        print(f"收到结果 #{i}: {result}")

    # Example 5: gather different functions in one batch.
    print("\n使用gather接口执行不同函数...")
    tasks = [
        (compute_intensive_task, (500000,)),
        (io_intensive_task, (0.2, "task1")),
        (io_intensive_task, (0.1, "task2")),
        (compute_intensive_task, (300000,)),
    ]
    gather_results = runner.gather(tasks)
    print(f"Gather结果: {gather_results}")

    # Shut the executor down explicitly.
    runner.shutdown()
    print("\n执行器已关闭")


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,151 @@
|
|
1
|
+
#!/usr/bin/env python
|
2
|
+
# coding=utf-8
|
3
|
+
"""
|
4
|
+
Author: Liu Kun && 16031215@qq.com
|
5
|
+
Date: 2025-03-21 10:02:32
|
6
|
+
LastEditors: Liu Kun && 16031215@qq.com
|
7
|
+
LastEditTime: 2025-03-21 10:02:33
|
8
|
+
FilePath: \\Python\\My_Funcs\\OAFuncs\\oafuncs\\_script\\replace_file_concent.py
|
9
|
+
Description:
|
10
|
+
EditPlatform: vscode
|
11
|
+
ComputerInfo: XPS 15 9510
|
12
|
+
SystemInfo: Windows 11
|
13
|
+
Python Version: 3.12
|
14
|
+
"""
|
15
|
+
|
16
|
+
import datetime
|
17
|
+
import os
|
18
|
+
import re
|
19
|
+
from pathlib import Path
|
20
|
+
|
21
|
+
from rich import print
|
22
|
+
|
23
|
+
|
24
|
+
def _prepare_file_operation(source_file, target_dir, new_name=None):
|
25
|
+
"""
|
26
|
+
准备文件操作的公共逻辑
|
27
|
+
|
28
|
+
参数:
|
29
|
+
source_file: 源文件路径
|
30
|
+
target_dir: 目标目录路径
|
31
|
+
new_name: 新文件名,如果为None则使用原文件名
|
32
|
+
|
33
|
+
返回:
|
34
|
+
target_file: 目标文件路径
|
35
|
+
"""
|
36
|
+
os.makedirs(target_dir, exist_ok=True)
|
37
|
+
if new_name is None:
|
38
|
+
return os.path.join(target_dir, os.path.basename(source_file))
|
39
|
+
else:
|
40
|
+
return os.path.join(target_dir, new_name)
|
41
|
+
|
42
|
+
|
43
|
+
def replace_config_values(source_file, target_dir, param_dict, new_name=None):
|
44
|
+
"""
|
45
|
+
批量修改配置参数并保存到新路径(适用于等号赋值格式的参数)
|
46
|
+
|
47
|
+
参数:
|
48
|
+
source_file: 源文件路径
|
49
|
+
target_dir: 目标目录路径
|
50
|
+
param_dict: 要修改的参数字典 {参数名: 新值}
|
51
|
+
new_name: 新文件名,如果为None则使用原文件名
|
52
|
+
|
53
|
+
返回:
|
54
|
+
set: 成功修改的参数集合
|
55
|
+
"""
|
56
|
+
try:
|
57
|
+
target_file = _prepare_file_operation(source_file, target_dir, new_name)
|
58
|
+
|
59
|
+
with open(source_file, "r") as f:
|
60
|
+
lines = f.readlines()
|
61
|
+
|
62
|
+
modified = set()
|
63
|
+
for i in range(len(lines)):
|
64
|
+
line = lines[i]
|
65
|
+
stripped = line.lstrip()
|
66
|
+
|
67
|
+
# 跳过注释行和空行
|
68
|
+
if stripped.startswith(("!", "#", ";", "%")) or not stripped.strip():
|
69
|
+
continue
|
70
|
+
|
71
|
+
# 匹配所有参数
|
72
|
+
for param, new_val in param_dict.items():
|
73
|
+
# 构造动态正则表达式
|
74
|
+
pattern = re.compile(r'^(\s*{})(\s*=\s*)([\'"]?)(.*?)(\3)(\s*(!.*)?)$'.format(re.escape(param)), flags=re.IGNORECASE)
|
75
|
+
|
76
|
+
match = pattern.match(line.rstrip("\n"))
|
77
|
+
if match and param not in modified:
|
78
|
+
# 构造新行(保留原始格式)
|
79
|
+
new_line = f"{match.group(1)}{match.group(2)}{match.group(3)}{new_val}{match.group(5)}{match.group(6) or ''}\n"
|
80
|
+
lines[i] = new_line
|
81
|
+
modified.add(param)
|
82
|
+
break # 每行最多处理一个参数
|
83
|
+
|
84
|
+
with open(target_file, "w") as f:
|
85
|
+
f.writelines(lines)
|
86
|
+
|
87
|
+
print(f"[green]已将参数替换到新文件:{target_file}[/green]")
|
88
|
+
return modified
|
89
|
+
except Exception as e:
|
90
|
+
print(f"[red]替换参数时出错:{str(e)}[/red]")
|
91
|
+
return set()
|
92
|
+
|
93
|
+
|
94
|
+
def replace_direct_content(source_file, target_dir, content_dict, key_value=False, new_name=None):
|
95
|
+
"""
|
96
|
+
直接替换文件中的指定内容并保存到新路径
|
97
|
+
|
98
|
+
参数:
|
99
|
+
source_file: 源文件路径
|
100
|
+
target_dir: 目标目录路径
|
101
|
+
content_dict: 要替换的内容字典 {旧内容: 新内容}
|
102
|
+
key_value: 是否按键值对方式替换参数
|
103
|
+
new_name: 新文件名,如果为None则使用原文件名
|
104
|
+
|
105
|
+
返回:
|
106
|
+
bool: 替换是否成功
|
107
|
+
"""
|
108
|
+
try:
|
109
|
+
if key_value:
|
110
|
+
return len(replace_config_values(source_file, target_dir, content_dict, new_name)) > 0
|
111
|
+
|
112
|
+
target_file = _prepare_file_operation(source_file, target_dir, new_name)
|
113
|
+
|
114
|
+
with open(source_file, "r") as f:
|
115
|
+
content = f.read()
|
116
|
+
|
117
|
+
# 直接替换指定内容
|
118
|
+
for old_content, new_content in content_dict.items():
|
119
|
+
content = content.replace(old_content, new_content)
|
120
|
+
|
121
|
+
with open(target_file, "w") as f:
|
122
|
+
f.write(content)
|
123
|
+
|
124
|
+
print(f"[green]已将内容替换到新文件:{target_file}[/green]")
|
125
|
+
return True
|
126
|
+
except Exception as e:
|
127
|
+
print(f"[red]替换内容时出错:{str(e)}[/red]")
|
128
|
+
return False
|
129
|
+
|
130
|
+
|
131
|
+
if __name__ == "__main__":
    control_file = Path(r"/data/hejx/liukun/Work/Model/cas_esm/data/control_file")
    target_dir = r"/data/hejx/liukun/Work/Model/cas_esm/run"

    force_time = 2023072900
    ini_time = datetime.datetime.strptime(str(force_time), "%Y%m%d%H")
    # The OISST forcing stream is dated one day before the initial time.
    oisst_time = ini_time - datetime.timedelta(days=1)

    # Date stamps reused across several configuration files below.
    ymd = ini_time.strftime("%Y%m%d")
    ymd_dashed = ini_time.strftime("%Y-%m-%d")

    replace_config_values(
        source_file=control_file / "atm_in",
        target_dir=target_dir,
        param_dict={"ncdata": f"/data/hejx/liukun/Work/Model/cas_esm/data/IAP_ncep2_181x360_{ymd}_00_00_L35.nc"},
    )

    replace_direct_content(
        source_file=control_file / "docn.stream.txt",
        target_dir=target_dir,
        content_dict={"oisst.forecast.20230727.nc": f"oisst.forecast.{oisst_time.strftime('%Y%m%d')}.nc"},
    )

    replace_config_values(
        source_file=control_file / "drv_in",
        target_dir=target_dir,
        param_dict={"start_ymd": f"{ymd}"},
    )

    replace_config_values(
        source_file=control_file / "ice_in",
        target_dir=target_dir,
        param_dict={"stream_fldfilename": f"/data/hejx/liukun/Work/Model/cas_esm/data/oisst.forecast.{ymd}.nc"},
    )

    replace_config_values(
        source_file=control_file / "lnd_in",
        target_dir=target_dir,
        param_dict={
            "fini": f"/data/hejx/liukun/Work/Model/cas_esm/run_p1x1/colm-spinup-colm-restart-{ymd_dashed}-00000",
            "fsbc": f"/data/hejx/liukun/Work/Model/cas_esm/run_p1x1/colm-spinup-colm-restart-{ymd_dashed}-00000-sbc",
        },
    )