MeUtils 2025.3.14.8.43.3__py3-none-any.whl → 2025.3.19.19.13.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/METADATA +264 -264
  2. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/RECORD +43 -45
  3. apps/xfPPT_demo.py +251 -0
  4. examples/_openaisdk/4v.py +11 -6
  5. examples/_openaisdk/openai_chatfire.py +4 -3
  6. examples/_openaisdk/openai_embeddings.py +25 -7
  7. examples/_openaisdk/openai_siliconflow.py +1 -1
  8. examples/_openaisdk/zhipu_智能体.py +76 -13
  9. meutils/apis/jimeng/common.py +2 -0
  10. meutils/apis/jimeng/images.py +6 -6
  11. meutils/apis/jina/__init__.py +11 -0
  12. meutils/apis/jina/common.py +43 -0
  13. meutils/apis/oneapi/channel.py +3 -2
  14. meutils/apis/oneapi/user.py +1 -1
  15. meutils/apis/search/_web_search.py +87 -0
  16. meutils/apis/search/metaso.py +9 -2
  17. meutils/apis/search/web_search.py +132 -0
  18. meutils/apis/siliconflow/image_to_image.py +3 -3
  19. meutils/apis/siliconflow/images.py +4 -2
  20. meutils/apis/siliconflow/text_to_image.py +1 -1
  21. meutils/apis/siliconflow/utils.py +1 -1
  22. meutils/config_utils/lark_utils/common.py +6 -2
  23. meutils/data/VERSION +1 -1
  24. meutils/data/oneapi/index.html +9 -0
  25. meutils/io/files_utils.py +12 -1
  26. meutils/io/openai_files.py +26 -1
  27. meutils/llm/check_api.py +1 -1
  28. meutils/llm/check_utils.py +13 -4
  29. meutils/llm/clients.py +23 -0
  30. meutils/llm/completions/{oi.py → assistants/__init__.py} +2 -7
  31. meutils/llm/completions/assistants/ppt.py +11 -0
  32. meutils/llm/completions/chat_gemini.py +1 -0
  33. meutils/llm/completions/chat_plus.py +162 -49
  34. meutils/llm/completions/chat_spark.py +3 -10
  35. meutils/llm/completions/qwenllm.py +11 -6
  36. meutils/request_utils/crawler.py +11 -11
  37. meutils/schemas/oneapi/common.py +9 -1
  38. meutils/schemas/openai_types.py +26 -4
  39. meutils/schemas/siliconflow_types.py +1 -1
  40. meutils/apis/search/zhipu.py +0 -80
  41. meutils/llm/completions/qwen_demo.py +0 -26
  42. meutils/other/aiomultiprocess/__init__.py +0 -14
  43. meutils/other/aiomultiprocess/__version__.py +0 -1
  44. meutils/other/aiomultiprocess/core.py +0 -241
  45. meutils/other/aiomultiprocess/pool.py +0 -379
  46. meutils/other/aiomultiprocess/scheduler.py +0 -83
  47. meutils/other/aiomultiprocess/types.py +0 -48
  48. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/LICENSE +0 -0
  49. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/WHEEL +0 -0
  50. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/entry_points.txt +0 -0
  51. {MeUtils-2025.3.14.8.43.3.dist-info → MeUtils-2025.3.19.19.13.35.dist-info}/top_level.txt +0 -0
meutils/llm/completions/qwen_demo.py (deleted):
@@ -1,26 +0,0 @@
-
-from openai import OpenAI
-cookies={'ssxmod_itna': 'euitD57IejxAo=DhODcmmv30j=DKDODl4BtGRDeq7tDRDFqAPiDHA17P5DU2OY2MGXxGDaieAqzRxGXxaexiNDAxq0iDCbeQ5ou2ivv3e3nemU4qRhnub1G7NQgR3+H3utAR5RMFgYDHxi8DBF4wCGYDenaDCeDQxirDD4DADibq4D1IDDkD0+m7UovW4GWDmbADYHGf78fDGpobRAfbDDCbPYDwEbpzDDzzQj5PmAqPm3DePAfIjkIoAQxb4G1fD0HaG65bzk9kMxA3d=xvUewDlFSDCKvu+gUuWEA1zSrtjeYtY+sYFrT+0x3GqliqB5Y7Di7i4nhhlwNf0xWA5DTttRPa4DG35zY=eBY3E5QiefL1XdWqbz+DreA=dh=2WeKR58GYxEwgG5/DYMADaA5+A4PADN0=a7eziDD',
-         'ssxmod_itna2': 'euitD57IejxAo=DhODcmmv30j=DKDODl4BtGRDeq7tDRDFqAPiDHA17P5DU2OY2MGXxGDaieAqzexD3raKY3rh4DFxnDKW1Bh0xDlxkDasWVgcuBepTBPecj=tk1QFgt3HxjB3pxx9=sdl5aSW9DG1yipgx+wgIO0mcDAoYQfrF4/xT++xEk02A7+4mHPbOA+oTOQBaahcHEWZfG3bok7rP8PcI8uRYYIUAHo0yE6aPp54bX0EQ8rUIHKYdC/1pjy6d0WqM90CG0PoMmhv3xrXwjP37hf4EgwgGqSxAhSaPAVxzGQUop4MXgBE+OX0DUmE5/no9eKynE/hjA6ECEP=oKnQo1DIjDq/YN00voKx0nLPbb3SG4DI4ZA/25ynwKc2a6rOEIQrN3+u0AiDbI5O2MCTK+LFfvq40LOQOALmFP0=bdo3w8onWpA+dhzA0C51ahgZjkR3y7olY=k8XW1ugrtAOIxC57jtViIOUdBtt7=D/0NE1oTf0k/d5aircE6f056c4rUy=c4wrh9XnhL1bIL9XZxdAcj8/YK4cRQN7Cc=XH6BTNFiFbTaOLxhrim5Q8p40K5fPAD4Ke+CYKKim2WDyieFrU3AqcDjfiGyUvmkP10CsY=3ZeLCx6YpkcELdkcAwlitYm6sFGq1tRkPRQewwrUYKiBh42Dzlpu=EuoV4aD2w24mPiDu1DZ51o3DA9ZW3h5leHGnB5Crc1KTbP03C2qlGQBezsq0xiHBeHDxncUxHTANQktHZ1KkxS6Hy4qQklKcxsKrxAqr2rmq32tnqViiY1spUPPi2mAw5hDD'}
-cookie = '; '.join([f'{i}={j}' for i,j in cookies.items()])
-
-headers = {
-    # 'authorization':'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjIxZDJiMjhmLWZmMTctNGQ2MS1hYmI0LWM2NzJhZWNjMTQ5ZCIsImV4cCI6MTc0MzkzNTk1OX0.y3oSO7aOwmzuE3GI3_aSxd9c5iXz9Krw0zJDG1FCLBQ',
-    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
-    'cookie':cookie
-}
-client=OpenAI(base_url='https://chat.qwen.ai/api',
-              api_key='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjIxZDJiMjhmLWZmMTctNGQ2MS1hYmI0LWM2NzJhZWNjMTQ5ZCIsImV4cCI6MTc0MzkzNTk1OX0.y3oSO7aOwmzuE3GI3_aSxd9c5iXz9Krw0zJDG1FCLBQ',
-              # default_headers=headers
-              )
-
-comp=client.chat.completions.create(
-    model="qwen-max-latest",
-    messages=[{"role": "user", "content": "你好"}],
-    max_tokens=2,
-    # extra_headers=headers,  # passing them here also works
-    extra_body={'chat_type':'t2t'},
-    stream=False
-)
-print(comp.choices[0].message.content)
-# comp
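For reference, the deleted demo reduces to the call pattern below (its role is presumably taken over by the updated meutils/llm/completions/qwenllm.py). This is a hedged sketch, not code from the package: YOUR_QWEN_WEB_JWT is a placeholder, and the base_url, model, and extra_body fields are copied from the deleted file without verification that the endpoint still accepts them.

from openai import OpenAI

# Placeholder credential: the deleted demo used a web-client JWT bearer token.
client = OpenAI(
    base_url="https://chat.qwen.ai/api",
    api_key="YOUR_QWEN_WEB_JWT",
)

completion = client.chat.completions.create(
    model="qwen-max-latest",
    messages=[{"role": "user", "content": "你好"}],
    max_tokens=2,
    extra_body={"chat_type": "t2t"},  # Qwen-specific flag, as in the deleted demo
    stream=False,
)
print(completion.choices[0].message.content)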
meutils/other/aiomultiprocess/__init__.py (deleted):
@@ -1,14 +0,0 @@
-# Copyright 2018 John Reese
-# Licensed under the MIT license
-
-"""
-AsyncIO version of the standard multiprocessing module
-"""
-
-__author__ = "John Reese"  # https://github.com/omnilib/aiomultiprocess
-
-from .__version__ import __version__
-from .core import Process, Worker, set_context, set_start_method
-from .pool import Pool, PoolResult
-from .scheduler import RoundRobin, Scheduler
-from .types import QueueID, TaskID
meutils/other/aiomultiprocess/__version__.py (deleted):
@@ -1 +0,0 @@
-__version__ = "0.9.0"
meutils/other/aiomultiprocess/core.py (deleted):
@@ -1,241 +0,0 @@
-# Copyright 2018 John Reese
-# Licensed under the MIT license
-
-import asyncio
-import logging
-import multiprocessing
-import multiprocessing.managers
-import os
-import sys
-from typing import Any, Callable, Dict, Optional, Sequence
-
-from .types import Context, R, Unit
-
-DEFAULT_START_METHOD = "spawn"
-
-# shared context for all multiprocessing primitives, for platform compatibility
-# "spawn" is default/required on windows and mac, but can't execute non-global functions
-# see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
-context = multiprocessing.get_context(DEFAULT_START_METHOD)
-_manager = None
-
-log = logging.getLogger(__name__)
-
-
-def get_manager() -> multiprocessing.managers.SyncManager:
-    """Return a singleton shared manager."""
-    global _manager
-    if _manager is None:
-        _manager = context.Manager()
-
-    return _manager
-
-
-def set_start_method(method: Optional[str] = DEFAULT_START_METHOD) -> None:
-    """
-    Set the start method and context used for future processes/pools.
-
-    When given no parameters (`set_context()`), will default to using the "spawn" method
-    as this provides a predictable set of features and compatibility across all major
-    platforms, and trades a small cost on process startup for potentially large savings
-    on memory usage of child processes.
-
-    Passing an explicit string (eg, "fork") will force aiomultiprocess to use the given
-    start method instead of "spawn".
-
-    Passing an explicit `None` value will force aiomultiprocess to use CPython's default
-    start method for the current platform rather than defaulting to "spawn".
-
-    See the official multiprocessing documentation for details on start methods:
-    https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
-    """
-    global context
-    context = multiprocessing.get_context(method)
-
-
-def get_context() -> Context:
-    """Get the current active global context."""
-    global context
-    return context
-
-
-def set_context(method: Optional[str] = None) -> None:
-    """
-    Set the start method and context used for future processes/pools. [DEPRECATED]
-
-    Retained for backwards compatibility, and to retain prior behavior of "no parameter"
-    resulting in selection of the platform's default start method.
-    """
-    return set_start_method(method)
-
-
-async def not_implemented(*args: Any, **kwargs: Any) -> None:
-    """Default function to call when none given."""
-    raise NotImplementedError()
-
-
-class Process:
-    """Execute a coroutine on a separate process."""
-
-    def __init__(
-        self,
-        group: None = None,
-        target: Callable = None,
-        name: str = None,
-        args: Sequence[Any] = None,
-        kwargs: Dict[str, Any] = None,
-        *,
-        daemon: bool = None,
-        initializer: Optional[Callable] = None,
-        initargs: Sequence[Any] = (),
-        loop_initializer: Optional[Callable] = None,
-        process_target: Optional[Callable] = None,
-    ) -> None:
-        if target is not None and not asyncio.iscoroutinefunction(target):
-            raise ValueError("target must be coroutine function")
-
-        if initializer is not None and asyncio.iscoroutinefunction(initializer):
-            raise ValueError("initializer must be synchronous function")
-
-        if loop_initializer is not None and asyncio.iscoroutinefunction(
-            loop_initializer
-        ):
-            raise ValueError("loop_initializer must be synchronous function")
-
-        self.unit = Unit(
-            target=target or not_implemented,
-            args=args or (),
-            kwargs=kwargs or {},
-            namespace=get_manager().Namespace(),
-            initializer=initializer,
-            initargs=initargs,
-            loop_initializer=loop_initializer,
-        )
-        self.aio_process = context.Process(
-            group=group,
-            target=process_target or Process.run_async,
-            args=(self.unit,),
-            name=name,
-            daemon=daemon,
-        )
-
-    def __await__(self) -> Any:
-        """Enable awaiting of the process result by chaining to `start()` & `join()`."""
-        if not self.is_alive() and self.exitcode is None:
-            self.start()
-
-        return self.join().__await__()
-
-    @staticmethod
-    def run_async(unit: Unit) -> R:
-        """Initialize the child process and event loop, then execute the coroutine."""
-        try:
-            if unit.loop_initializer is None:
-                loop = asyncio.new_event_loop()
-            else:
-                loop = unit.loop_initializer()
-
-            asyncio.set_event_loop(loop)
-
-            if unit.initializer:
-                unit.initializer(*unit.initargs)
-
-            result: R = loop.run_until_complete(unit.target(*unit.args, **unit.kwargs))
-
-            return result
-
-        except BaseException:
-            log.exception(f"aio process {os.getpid()} failed")
-            raise
-
-    def start(self) -> None:
-        """Start the child process."""
-        return self.aio_process.start()
-
-    async def join(self, timeout: int = None) -> None:
-        """Wait for the process to finish execution without blocking the main thread."""
-        if not self.is_alive() and self.exitcode is None:
-            raise ValueError("must start process before joining it")
-
-        if timeout is not None:
-            return await asyncio.wait_for(self.join(), timeout)
-
-        while self.exitcode is None:
-            await asyncio.sleep(0.005)
-
-    @property
-    def name(self) -> str:
-        """Child process name."""
-        return self.aio_process.name
-
-    def is_alive(self) -> bool:
-        """Is child process running."""
-        return self.aio_process.is_alive()
-
-    @property
-    def daemon(self) -> bool:
-        """Should child process be daemon."""
-        return self.aio_process.daemon
-
-    @daemon.setter
-    def daemon(self, value: bool) -> None:
-        """Should child process be daemon."""
-        self.aio_process.daemon = value
-
-    @property
-    def pid(self) -> Optional[int]:
-        """Process ID of child, or None if not started."""
-        return self.aio_process.pid
-
-    @property
-    def exitcode(self) -> Optional[int]:
-        """Exit code from child process, or None if still running."""
-        return self.aio_process.exitcode
-
-    def terminate(self) -> None:
-        """Send SIGTERM to child process."""
-        return self.aio_process.terminate()
-
-    # multiprocessing.Process methods added in 3.7
-    if sys.version_info >= (3, 7):
-
-        def kill(self) -> None:
-            """Send SIGKILL to child process."""
-            return self.aio_process.kill()
-
-        def close(self) -> None:
-            """Clean up child process once finished."""
-            return self.aio_process.close()
-
-
-class Worker(Process):
-    """Execute a coroutine on a separate process and return the result."""
-
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, process_target=Worker.run_async, **kwargs)
-        self.unit.namespace.result = None
-
-    @staticmethod
-    def run_async(unit: Unit) -> R:
-        """Initialize the child process and event loop, then execute the coroutine."""
-        try:
-            result: R = Process.run_async(unit)
-            unit.namespace.result = result
-            return result
-
-        except BaseException as e:
-            unit.namespace.result = e
-            raise
-
-    async def join(self, timeout: int = None) -> Any:
-        """Wait for the worker to finish, and return the final result."""
-        await super().join(timeout)
-        return self.result
-
-    @property
-    def result(self) -> R:
-        """Easy access to the resulting value from the coroutine."""
-        if self.exitcode is None:
-            raise ValueError("coroutine not completed")
-
-        return self.unit.namespace.result
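Taken together, the deleted core.py implements the API its docstrings describe: a Process is awaitable (its __await__ chains start() and join()), and a Worker additionally returns the coroutine's result from join(). The sketch below is a hedged reconstruction of that usage, assuming the upstream aiomultiprocess distribution (the library these files were vendored from, per the __author__ link) is installed in its place; the greet coroutine is illustrative only.

import asyncio
from aiomultiprocess import Process, Worker, set_start_method

async def greet(name: str) -> str:
    return f"hello, {name}"

async def main() -> None:
    set_start_method("spawn")  # the module default; "fork" also accepted where supported

    process = Process(target=greet, args=("process",))
    await process              # __await__ chains start() and join()

    worker = Worker(target=greet, args=("worker",))
    result = await worker      # Worker.join() returns the coroutine's result
    print(result)              # hello, worker

if __name__ == "__main__":     # guard required under the "spawn" start method
    asyncio.run(main())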
meutils/other/aiomultiprocess/pool.py (deleted):
@@ -1,379 +0,0 @@
-# Copyright 2019 John Reese
-# Licensed under the MIT license
-
-import asyncio
-import logging
-import os
-import queue
-import traceback
-from typing import (
-    Any,
-    AsyncIterable,
-    AsyncIterator,
-    Awaitable,
-    Callable,
-    Dict,
-    Generator,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-)
-
-from .core import Process, get_context
-from .scheduler import RoundRobin, Scheduler
-from .types import (
-    LoopInitializer,
-    PoolTask,
-    ProxyException,
-    Queue,
-    QueueID,
-    R,
-    T,
-    TaskID,
-    TracebackStr,
-)
-
-MAX_TASKS_PER_CHILD = 0  # number of tasks to execute before recycling a child process
-CHILD_CONCURRENCY = 16  # number of tasks to execute simultaneously per child process
-_T = TypeVar("_T")
-
-log = logging.getLogger(__name__)
-
-
-class PoolWorker(Process):
-    """Individual worker process for the async pool."""
-
-    def __init__(
-        self,
-        tx: Queue,
-        rx: Queue,
-        ttl: int = MAX_TASKS_PER_CHILD,
-        concurrency: int = CHILD_CONCURRENCY,
-        *,
-        initializer: Optional[Callable] = None,
-        initargs: Sequence[Any] = (),
-        loop_initializer: Optional[LoopInitializer] = None,
-        exception_handler: Optional[Callable[[BaseException], None]] = None,
-    ) -> None:
-        super().__init__(
-            target=self.run,
-            initializer=initializer,
-            initargs=initargs,
-            loop_initializer=loop_initializer,
-        )
-        self.concurrency = max(1, concurrency)
-        self.exception_handler = exception_handler
-        self.ttl = max(0, ttl)
-        self.tx = tx
-        self.rx = rx
-
-    async def run(self) -> None:
-        """Pick up work, execute work, return results, rinse, repeat."""
-        pending: Dict[asyncio.Future, TaskID] = {}
-        completed = 0
-        running = True
-        while running or pending:
-            # TTL, Tasks To Live, determines how many tasks to execute before dying
-            if self.ttl and completed >= self.ttl:
-                running = False
-
-            # pick up new work as long as we're "running" and we have open slots
-            while running and len(pending) < self.concurrency:
-                try:
-                    task: PoolTask = self.tx.get_nowait()
-                except queue.Empty:
-                    break
-
-                if task is None:
-                    running = False
-                    break
-
-                tid, func, args, kwargs = task
-                future = asyncio.ensure_future(func(*args, **kwargs))
-                pending[future] = tid
-
-            if not pending:
-                await asyncio.sleep(0.005)
-                continue
-
-            # return results and/or exceptions when completed
-            done, _ = await asyncio.wait(
-                pending.keys(), timeout=0.05, return_when=asyncio.FIRST_COMPLETED
-            )
-            for future in done:
-                tid = pending.pop(future)
-
-                result = None
-                tb = None
-                try:
-                    result = future.result()
-                except BaseException as e:
-                    if self.exception_handler is not None:
-                        self.exception_handler(e)
-
-                    tb = traceback.format_exc()
-
-                self.rx.put_nowait((tid, result, tb))
-                completed += 1
-
-
-class PoolResult(Awaitable[Sequence[_T]], AsyncIterable[_T]):
-    """
-    Asynchronous proxy for map/starmap results. Can be awaited or used with `async for`.
-    """
-
-    def __init__(self, pool: "Pool", task_ids: Sequence[TaskID]):
-        self.pool = pool
-        self.task_ids = task_ids
-
-    def __await__(self) -> Generator[Any, None, Sequence[_T]]:
-        """Wait for all results and return them as a sequence"""
-        return self.results().__await__()
-
-    async def results(self) -> Sequence[_T]:
-        """Wait for all results and return them as a sequence"""
-        return await self.pool.results(self.task_ids)
-
-    def __aiter__(self) -> AsyncIterator[_T]:
-        """Return results one-by-one as they are ready"""
-        return self.results_generator()
-
-    async def results_generator(self) -> AsyncIterator[_T]:
-        """Return results one-by-one as they are ready"""
-        for task_id in self.task_ids:
-            yield (await self.pool.results([task_id]))[0]
-
-
-class Pool:
-    """Execute coroutines on a pool of child processes."""
-
-    def __init__(
-        self,
-        processes: int = None,
-        initializer: Callable[..., None] = None,
-        initargs: Sequence[Any] = (),
-        maxtasksperchild: int = MAX_TASKS_PER_CHILD,
-        childconcurrency: int = CHILD_CONCURRENCY,
-        queuecount: Optional[int] = None,
-        scheduler: Scheduler = None,
-        loop_initializer: Optional[LoopInitializer] = None,
-        exception_handler: Optional[Callable[[BaseException], None]] = None,
-    ) -> None:
-        self.context = get_context()
-
-        self.scheduler = scheduler or RoundRobin()
-        self.process_count = max(1, processes or os.cpu_count() or 2)
-        self.queue_count = max(1, queuecount or 1)
-
-        if self.queue_count > self.process_count:
-            raise ValueError("queue count must be <= process count")
-
-        self.initializer = initializer
-        self.initargs = initargs
-        self.loop_initializer = loop_initializer
-        self.maxtasksperchild = max(0, maxtasksperchild)
-        self.childconcurrency = max(1, childconcurrency)
-        self.exception_handler = exception_handler
-
-        self.processes: Dict[Process, QueueID] = {}
-        self.queues: Dict[QueueID, Tuple[Queue, Queue]] = {}
-
-        self.running = True
-        self.last_id = 0
-        self._results: Dict[TaskID, Tuple[Any, Optional[TracebackStr]]] = {}
-
-        self.init()
-        self._loop = asyncio.ensure_future(self.loop())
-
-    async def __aenter__(self) -> "Pool":
-        """Enable `async with Pool() as pool` usage."""
-        return self
-
-    async def __aexit__(self, *args) -> None:
-        """Automatically terminate the pool when falling out of scope."""
-        self.terminate()
-        await self.join()
-
-    def init(self) -> None:
-        """
-        Create the initial mapping of processes and queues.
-
-        :meta private:
-        """
-        for _ in range(self.queue_count):
-            tx = self.context.Queue()
-            rx = self.context.Queue()
-            qid = self.scheduler.register_queue(tx)
-
-            self.queues[qid] = (tx, rx)
-
-        qids = list(self.queues.keys())
-        for i in range(self.process_count):
-            qid = qids[i % self.queue_count]
-            self.processes[self.create_worker(qid)] = qid
-            self.scheduler.register_process(qid)
-
-    async def loop(self) -> None:
-        """
-        Maintain the pool of workers while open.
-
-        :meta private:
-        """
-        while self.processes or self.running:
-            # clean up workers that reached TTL
-            for process in list(self.processes):
-                if not process.is_alive():
-                    qid = self.processes.pop(process)
-                    if self.running:
-                        self.processes[self.create_worker(qid)] = qid
-
-            # pull results into a shared dictionary for later retrieval
-            for _, rx in self.queues.values():
-                while True:
-                    try:
-                        task_id, value, tb = rx.get_nowait()
-                        self.finish_work(task_id, value, tb)
-
-                    except queue.Empty:
-                        break
-
-            # let someone else do some work for once
-            await asyncio.sleep(0.005)
-
-    def create_worker(self, qid: QueueID) -> Process:
-        """
-        Create a worker process attached to the given transmit and receive queues.
-
-        :meta private:
-        """
-        tx, rx = self.queues[qid]
-        process = PoolWorker(
-            tx,
-            rx,
-            self.maxtasksperchild,
-            self.childconcurrency,
-            initializer=self.initializer,
-            initargs=self.initargs,
-            loop_initializer=self.loop_initializer,
-            exception_handler=self.exception_handler,
-        )
-        process.start()
-        return process
-
-    def queue_work(
-        self,
-        func: Callable[..., Awaitable[R]],
-        args: Sequence[Any],
-        kwargs: Dict[str, Any],
-    ) -> TaskID:
-        """
-        Add a new work item to the outgoing queue.
-
-        :meta private:
-        """
-        self.last_id += 1
-        task_id = TaskID(self.last_id)
-
-        qid = self.scheduler.schedule_task(task_id, func, args, kwargs)
-        tx, _ = self.queues[qid]
-        tx.put_nowait((task_id, func, args, kwargs))
-        return task_id
-
-    def finish_work(
-        self, task_id: TaskID, value: Any, tb: Optional[TracebackStr]
-    ) -> None:
-        """
-        Mark work items as completed.
-
-        :meta private:
-        """
-        self._results[task_id] = value, tb
-        self.scheduler.complete_task(task_id)
-
-    async def results(self, tids: Sequence[TaskID]) -> Sequence[R]:
-        """
-        Wait for all tasks to complete, and return results, preserving order.
-
-        :meta private:
-        """
-        pending = set(tids)
-        ready: Dict[TaskID, R] = {}
-
-        while pending:
-            for tid in pending.copy():
-                if tid in self._results:
-                    result, tb = self._results.pop(tid)
-                    if tb is not None:
-                        raise ProxyException(tb)
-                    ready[tid] = result
-                    pending.remove(tid)
-
-            await asyncio.sleep(0.005)
-
-        return [ready[tid] for tid in tids]
-
-    async def apply(
-        self,
-        func: Callable[..., Awaitable[R]],
-        args: Sequence[Any] = None,
-        kwds: Dict[str, Any] = None,
-    ) -> R:
-        """Run a single coroutine on the pool."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        args = args or ()
-        kwds = kwds or {}
-
-        tid = self.queue_work(func, args, kwds)
-        results: Sequence[R] = await self.results([tid])
-        return results[0]
-
-    def map(
-        self,
-        func: Callable[[T], Awaitable[R]],
-        iterable: Sequence[T],
-        # chunksize: int = None,  # todo: implement chunking maybe
-    ) -> PoolResult[R]:
-        """Run a coroutine once for each item in the iterable."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        tids = [self.queue_work(func, (item,), {}) for item in iterable]
-        return PoolResult(self, tids)
-
-    def starmap(
-        self,
-        func: Callable[..., Awaitable[R]],
-        iterable: Sequence[Sequence[T]],
-        # chunksize: int = None,  # todo: implement chunking maybe
-    ) -> PoolResult[R]:
-        """Run a coroutine once for each sequence of items in the iterable."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        tids = [self.queue_work(func, args, {}) for args in iterable]
-        return PoolResult(self, tids)
-
-    def close(self) -> None:
-        """Close the pool to new visitors."""
-        self.running = False
-        for qid in self.processes.values():
-            tx, _ = self.queues[qid]
-            tx.put_nowait(None)
-
-    def terminate(self) -> None:
-        """No running by the pool!"""
-        if self.running:
-            self.close()
-
-        for process in self.processes:
-            process.terminate()
-
-    async def join(self) -> None:
-        """Wait for the pool to finish gracefully."""
-        if self.running:
-            raise RuntimeError("pool is still open")
-
-        await self._loop
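For code that depended on this vendored copy, pool.py's own docstrings describe the replacement pattern: Pool is an async context manager, and map()/starmap() return a PoolResult that can either be awaited for an ordered sequence of results or consumed with async for. A short hedged sketch follows, again assuming the upstream aiomultiprocess distribution; the square coroutine is illustrative only.

import asyncio
from aiomultiprocess import Pool

async def square(x: int) -> int:
    return x * x

async def main() -> None:
    # __aexit__ calls terminate() and join(), so the pool is cleaned up on exit
    async with Pool(processes=2, childconcurrency=4) as pool:
        results = await pool.map(square, range(5))  # PoolResult is awaitable
        print(results)                              # [0, 1, 4, 9, 16]

        # PoolResult is also an async iterator, yielding results one by one
        async for value in pool.map(square, [10, 20]):
            print(value)

if __name__ == "__main__":
    asyncio.run(main())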