MeUtils-2025.3.14.8.41.43-py3-none-any.whl → MeUtils-2025.3.19.18.18.56-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/METADATA +263 -263
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/RECORD +43 -45
- apps/xfPPT_demo.py +251 -0
- examples/_openaisdk/4v.py +11 -6
- examples/_openaisdk/openai_chatfire.py +4 -3
- examples/_openaisdk/openai_embeddings.py +25 -7
- examples/_openaisdk/openai_siliconflow.py +1 -1
- examples/_openaisdk/zhipu_智能体.py +76 -13
- meutils/apis/jimeng/common.py +2 -0
- meutils/apis/jimeng/images.py +7 -7
- meutils/apis/jina/__init__.py +11 -0
- meutils/apis/jina/common.py +43 -0
- meutils/apis/oneapi/channel.py +3 -2
- meutils/apis/oneapi/user.py +1 -1
- meutils/apis/search/_web_search.py +87 -0
- meutils/apis/search/metaso.py +9 -2
- meutils/apis/search/web_search.py +132 -0
- meutils/apis/siliconflow/image_to_image.py +3 -3
- meutils/apis/siliconflow/images.py +4 -2
- meutils/apis/siliconflow/text_to_image.py +1 -1
- meutils/apis/siliconflow/utils.py +1 -1
- meutils/config_utils/lark_utils/common.py +6 -2
- meutils/data/VERSION +1 -1
- meutils/data/oneapi/index.html +9 -0
- meutils/io/files_utils.py +12 -1
- meutils/io/openai_files.py +26 -1
- meutils/llm/check_api.py +1 -1
- meutils/llm/check_utils.py +13 -4
- meutils/llm/clients.py +23 -0
- meutils/llm/completions/{oi.py → assistants/__init__.py} +2 -7
- meutils/llm/completions/assistants/ppt.py +11 -0
- meutils/llm/completions/chat_gemini.py +1 -0
- meutils/llm/completions/chat_plus.py +159 -49
- meutils/llm/completions/chat_spark.py +3 -10
- meutils/llm/completions/qwenllm.py +11 -6
- meutils/request_utils/crawler.py +11 -11
- meutils/schemas/oneapi/common.py +9 -1
- meutils/schemas/openai_types.py +26 -4
- meutils/schemas/siliconflow_types.py +1 -1
- meutils/apis/search/zhipu.py +0 -80
- meutils/llm/completions/qwen_demo.py +0 -26
- meutils/other/aiomultiprocess/__init__.py +0 -14
- meutils/other/aiomultiprocess/__version__.py +0 -1
- meutils/other/aiomultiprocess/core.py +0 -241
- meutils/other/aiomultiprocess/pool.py +0 -379
- meutils/other/aiomultiprocess/scheduler.py +0 -83
- meutils/other/aiomultiprocess/types.py +0 -48
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/LICENSE +0 -0
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/WHEEL +0 -0
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/entry_points.txt +0 -0
- {MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/top_level.txt +0 -0
meutils/other/aiomultiprocess/pool.py
DELETED
@@ -1,379 +0,0 @@
-# Copyright 2019 John Reese
-# Licensed under the MIT license
-
-import asyncio
-import logging
-import os
-import queue
-import traceback
-from typing import (
-    Any,
-    AsyncIterable,
-    AsyncIterator,
-    Awaitable,
-    Callable,
-    Dict,
-    Generator,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-)
-
-from .core import Process, get_context
-from .scheduler import RoundRobin, Scheduler
-from .types import (
-    LoopInitializer,
-    PoolTask,
-    ProxyException,
-    Queue,
-    QueueID,
-    R,
-    T,
-    TaskID,
-    TracebackStr,
-)
-
-MAX_TASKS_PER_CHILD = 0  # number of tasks to execute before recycling a child process
-CHILD_CONCURRENCY = 16  # number of tasks to execute simultaneously per child process
-_T = TypeVar("_T")
-
-log = logging.getLogger(__name__)
-
-
-class PoolWorker(Process):
-    """Individual worker process for the async pool."""
-
-    def __init__(
-        self,
-        tx: Queue,
-        rx: Queue,
-        ttl: int = MAX_TASKS_PER_CHILD,
-        concurrency: int = CHILD_CONCURRENCY,
-        *,
-        initializer: Optional[Callable] = None,
-        initargs: Sequence[Any] = (),
-        loop_initializer: Optional[LoopInitializer] = None,
-        exception_handler: Optional[Callable[[BaseException], None]] = None,
-    ) -> None:
-        super().__init__(
-            target=self.run,
-            initializer=initializer,
-            initargs=initargs,
-            loop_initializer=loop_initializer,
-        )
-        self.concurrency = max(1, concurrency)
-        self.exception_handler = exception_handler
-        self.ttl = max(0, ttl)
-        self.tx = tx
-        self.rx = rx
-
-    async def run(self) -> None:
-        """Pick up work, execute work, return results, rinse, repeat."""
-        pending: Dict[asyncio.Future, TaskID] = {}
-        completed = 0
-        running = True
-        while running or pending:
-            # TTL, Tasks To Live, determines how many tasks to execute before dying
-            if self.ttl and completed >= self.ttl:
-                running = False
-
-            # pick up new work as long as we're "running" and we have open slots
-            while running and len(pending) < self.concurrency:
-                try:
-                    task: PoolTask = self.tx.get_nowait()
-                except queue.Empty:
-                    break
-
-                if task is None:
-                    running = False
-                    break
-
-                tid, func, args, kwargs = task
-                future = asyncio.ensure_future(func(*args, **kwargs))
-                pending[future] = tid
-
-            if not pending:
-                await asyncio.sleep(0.005)
-                continue
-
-            # return results and/or exceptions when completed
-            done, _ = await asyncio.wait(
-                pending.keys(), timeout=0.05, return_when=asyncio.FIRST_COMPLETED
-            )
-            for future in done:
-                tid = pending.pop(future)
-
-                result = None
-                tb = None
-                try:
-                    result = future.result()
-                except BaseException as e:
-                    if self.exception_handler is not None:
-                        self.exception_handler(e)
-
-                    tb = traceback.format_exc()
-
-                self.rx.put_nowait((tid, result, tb))
-                completed += 1
-
-
-class PoolResult(Awaitable[Sequence[_T]], AsyncIterable[_T]):
-    """
-    Asynchronous proxy for map/starmap results. Can be awaited or used with `async for`.
-    """
-
-    def __init__(self, pool: "Pool", task_ids: Sequence[TaskID]):
-        self.pool = pool
-        self.task_ids = task_ids
-
-    def __await__(self) -> Generator[Any, None, Sequence[_T]]:
-        """Wait for all results and return them as a sequence"""
-        return self.results().__await__()
-
-    async def results(self) -> Sequence[_T]:
-        """Wait for all results and return them as a sequence"""
-        return await self.pool.results(self.task_ids)
-
-    def __aiter__(self) -> AsyncIterator[_T]:
-        """Return results one-by-one as they are ready"""
-        return self.results_generator()
-
-    async def results_generator(self) -> AsyncIterator[_T]:
-        """Return results one-by-one as they are ready"""
-        for task_id in self.task_ids:
-            yield (await self.pool.results([task_id]))[0]
-
-
-class Pool:
-    """Execute coroutines on a pool of child processes."""
-
-    def __init__(
-        self,
-        processes: int = None,
-        initializer: Callable[..., None] = None,
-        initargs: Sequence[Any] = (),
-        maxtasksperchild: int = MAX_TASKS_PER_CHILD,
-        childconcurrency: int = CHILD_CONCURRENCY,
-        queuecount: Optional[int] = None,
-        scheduler: Scheduler = None,
-        loop_initializer: Optional[LoopInitializer] = None,
-        exception_handler: Optional[Callable[[BaseException], None]] = None,
-    ) -> None:
-        self.context = get_context()
-
-        self.scheduler = scheduler or RoundRobin()
-        self.process_count = max(1, processes or os.cpu_count() or 2)
-        self.queue_count = max(1, queuecount or 1)
-
-        if self.queue_count > self.process_count:
-            raise ValueError("queue count must be <= process count")
-
-        self.initializer = initializer
-        self.initargs = initargs
-        self.loop_initializer = loop_initializer
-        self.maxtasksperchild = max(0, maxtasksperchild)
-        self.childconcurrency = max(1, childconcurrency)
-        self.exception_handler = exception_handler
-
-        self.processes: Dict[Process, QueueID] = {}
-        self.queues: Dict[QueueID, Tuple[Queue, Queue]] = {}
-
-        self.running = True
-        self.last_id = 0
-        self._results: Dict[TaskID, Tuple[Any, Optional[TracebackStr]]] = {}
-
-        self.init()
-        self._loop = asyncio.ensure_future(self.loop())
-
-    async def __aenter__(self) -> "Pool":
-        """Enable `async with Pool() as pool` usage."""
-        return self
-
-    async def __aexit__(self, *args) -> None:
-        """Automatically terminate the pool when falling out of scope."""
-        self.terminate()
-        await self.join()
-
-    def init(self) -> None:
-        """
-        Create the initial mapping of processes and queues.
-
-        :meta private:
-        """
-        for _ in range(self.queue_count):
-            tx = self.context.Queue()
-            rx = self.context.Queue()
-            qid = self.scheduler.register_queue(tx)
-
-            self.queues[qid] = (tx, rx)
-
-        qids = list(self.queues.keys())
-        for i in range(self.process_count):
-            qid = qids[i % self.queue_count]
-            self.processes[self.create_worker(qid)] = qid
-            self.scheduler.register_process(qid)
-
-    async def loop(self) -> None:
-        """
-        Maintain the pool of workers while open.
-
-        :meta private:
-        """
-        while self.processes or self.running:
-            # clean up workers that reached TTL
-            for process in list(self.processes):
-                if not process.is_alive():
-                    qid = self.processes.pop(process)
-                    if self.running:
-                        self.processes[self.create_worker(qid)] = qid
-
-            # pull results into a shared dictionary for later retrieval
-            for _, rx in self.queues.values():
-                while True:
-                    try:
-                        task_id, value, tb = rx.get_nowait()
-                        self.finish_work(task_id, value, tb)
-
-                    except queue.Empty:
-                        break
-
-            # let someone else do some work for once
-            await asyncio.sleep(0.005)
-
-    def create_worker(self, qid: QueueID) -> Process:
-        """
-        Create a worker process attached to the given transmit and receive queues.
-
-        :meta private:
-        """
-        tx, rx = self.queues[qid]
-        process = PoolWorker(
-            tx,
-            rx,
-            self.maxtasksperchild,
-            self.childconcurrency,
-            initializer=self.initializer,
-            initargs=self.initargs,
-            loop_initializer=self.loop_initializer,
-            exception_handler=self.exception_handler,
-        )
-        process.start()
-        return process
-
-    def queue_work(
-        self,
-        func: Callable[..., Awaitable[R]],
-        args: Sequence[Any],
-        kwargs: Dict[str, Any],
-    ) -> TaskID:
-        """
-        Add a new work item to the outgoing queue.
-
-        :meta private:
-        """
-        self.last_id += 1
-        task_id = TaskID(self.last_id)
-
-        qid = self.scheduler.schedule_task(task_id, func, args, kwargs)
-        tx, _ = self.queues[qid]
-        tx.put_nowait((task_id, func, args, kwargs))
-        return task_id
-
-    def finish_work(
-        self, task_id: TaskID, value: Any, tb: Optional[TracebackStr]
-    ) -> None:
-        """
-        Mark work items as completed.
-
-        :meta private:
-        """
-        self._results[task_id] = value, tb
-        self.scheduler.complete_task(task_id)
-
-    async def results(self, tids: Sequence[TaskID]) -> Sequence[R]:
-        """
-        Wait for all tasks to complete, and return results, preserving order.
-
-        :meta private:
-        """
-        pending = set(tids)
-        ready: Dict[TaskID, R] = {}
-
-        while pending:
-            for tid in pending.copy():
-                if tid in self._results:
-                    result, tb = self._results.pop(tid)
-                    if tb is not None:
-                        raise ProxyException(tb)
-                    ready[tid] = result
-                    pending.remove(tid)
-
-            await asyncio.sleep(0.005)
-
-        return [ready[tid] for tid in tids]
-
-    async def apply(
-        self,
-        func: Callable[..., Awaitable[R]],
-        args: Sequence[Any] = None,
-        kwds: Dict[str, Any] = None,
-    ) -> R:
-        """Run a single coroutine on the pool."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        args = args or ()
-        kwds = kwds or {}
-
-        tid = self.queue_work(func, args, kwds)
-        results: Sequence[R] = await self.results([tid])
-        return results[0]
-
-    def map(
-        self,
-        func: Callable[[T], Awaitable[R]],
-        iterable: Sequence[T],
-        # chunksize: int = None,  # todo: implement chunking maybe
-    ) -> PoolResult[R]:
-        """Run a coroutine once for each item in the iterable."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        tids = [self.queue_work(func, (item,), {}) for item in iterable]
-        return PoolResult(self, tids)
-
-    def starmap(
-        self,
-        func: Callable[..., Awaitable[R]],
-        iterable: Sequence[Sequence[T]],
-        # chunksize: int = None,  # todo: implement chunking maybe
-    ) -> PoolResult[R]:
-        """Run a coroutine once for each sequence of items in the iterable."""
-        if not self.running:
-            raise RuntimeError("pool is closed")
-
-        tids = [self.queue_work(func, args, {}) for args in iterable]
-        return PoolResult(self, tids)
-
-    def close(self) -> None:
-        """Close the pool to new visitors."""
-        self.running = False
-        for qid in self.processes.values():
-            tx, _ = self.queues[qid]
-            tx.put_nowait(None)
-
-    def terminate(self) -> None:
-        """No running by the pool!"""
-        if self.running:
-            self.close()
-
-        for process in self.processes:
-            process.terminate()
-
-    async def join(self) -> None:
-        """Wait for the pool to finish gracefully."""
-        if self.running:
-            raise RuntimeError("pool is still open")

-        await self._loop
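For context, the file deleted above is a vendored copy of John Reese's aiomultiprocess pool, which runs coroutines across a pool of child processes. A minimal usage sketch against the Pool API shown above; it assumes the upstream aiomultiprocess package (which this vendored copy mirrored) is installed, and the `double` coroutine is hypothetical:

import asyncio

from aiomultiprocess import Pool  # upstream package; the vendored copy above was removed


async def double(x: int) -> int:
    # hypothetical worker coroutine; runs inside a child process
    return x * 2


async def main() -> None:
    # `async with` calls terminate() and join() on exit, per __aexit__ above
    async with Pool(processes=2, childconcurrency=4) as pool:
        print(await pool.apply(double, (21,)))    # 42
        print(await pool.map(double, [1, 2, 3]))  # [2, 4, 6]
        # PoolResult is also async-iterable: results arrive one by one, in task order
        async for result in pool.map(double, [4, 5]):
            print(result)


if __name__ == "__main__":
    asyncio.run(main())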
meutils/other/aiomultiprocess/scheduler.py
DELETED
@@ -1,83 +0,0 @@
-# Copyright 2019 John Reese
-# Licensed under the MIT license
-
-import itertools
-from abc import ABC, abstractmethod
-from typing import Any, Awaitable, Callable, Dict, Iterator, List, Sequence
-
-from .types import Queue, QueueID, R, TaskID
-
-
-class Scheduler(ABC):
-    @abstractmethod
-    def register_queue(self, tx: Queue) -> QueueID:
-        """
-        Notify the scheduler when the pool creates a new transmit queue.
-        """
-
-    @abstractmethod
-    def register_process(self, qid: QueueID) -> None:
-        """
-        Notify the scheduler when a process is assigned to a queue.
-
-        This should be used for determining weights for the scheduler.
-        It will only be called during initial process mapping.
-        """
-
-    @abstractmethod
-    def schedule_task(
-        self,
-        task_id: TaskID,
-        func: Callable[..., Awaitable[R]],
-        args: Sequence[Any],
-        kwargs: Dict[str, Any],
-    ) -> QueueID:
-        """
-        Given a task, return a queue ID that it should be sent to.
-
-        `func`, `args` and `kwargs` are just the exact same arguments
-        that `queue_work` takes, not every scheduler would be benefit from this.
-        Example that they would be useful, highly customized schedule may want
-        to schedule according to function/arguments weights.
-        """
-
-    @abstractmethod
-    def complete_task(self, task_id: TaskID) -> None:
-        """
-        Notify the scheduler that a task has been completed.
-        """
-
-
-class RoundRobin(Scheduler):
-    """
-    The default scheduling algorithm that assigns tasks to queues in round robin order.
-
-    When multiple processes are assigned to the same queue, this will weight tasks
-    accordingly. For example, 12 processes over 8 queues should result in four queues
-    receiving double the number tasks compared to the other eight.
-    """
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.qids: List[QueueID] = []
-        self.next_id = itertools.count()
-        self.cycler: Iterator[QueueID] = itertools.cycle([])
-
-    def register_queue(self, tx: Queue) -> QueueID:
-        return QueueID(next(self.next_id))
-
-    def register_process(self, qid: QueueID) -> None:
-        self.qids.append(qid)
-        self.cycler = itertools.cycle(self.qids)
-
-    def schedule_task(
-        self,
-        _task_id: TaskID,
-        _func: Callable[..., Awaitable[R]],
-        _args: Sequence[Any],
-        _kwargs: Dict[str, Any],
-    ) -> QueueID:
-        return next(self.cycler)
-
-    def complete_task(self, _task_id: TaskID) -> None:
-        pass
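The Scheduler ABC above is the pool's extension point for task placement. A sketch of a custom scheduler under that same contract; LeastLoaded is a hypothetical name, and the import paths assume the upstream aiomultiprocess package rather than the removed vendored copy:

import itertools
from typing import Any, Awaitable, Callable, Dict, Sequence

from aiomultiprocess.scheduler import Scheduler
from aiomultiprocess.types import Queue, QueueID, R, TaskID


class LeastLoaded(Scheduler):
    """Send each task to the queue with the fewest outstanding tasks."""

    def __init__(self) -> None:
        super().__init__()
        self.next_id = itertools.count()
        self.load: Dict[QueueID, int] = {}  # outstanding tasks per queue
        self.qid_for_task: Dict[TaskID, QueueID] = {}

    def register_queue(self, tx: Queue) -> QueueID:
        qid = QueueID(next(self.next_id))
        self.load[qid] = 0
        return qid

    def register_process(self, qid: QueueID) -> None:
        pass  # per-queue weights are not needed for this strategy

    def schedule_task(
        self,
        task_id: TaskID,
        func: Callable[..., Awaitable[R]],
        args: Sequence[Any],
        kwargs: Dict[str, Any],
    ) -> QueueID:
        # pick the least-loaded queue and remember where this task went
        qid = min(self.load, key=self.load.__getitem__)
        self.load[qid] += 1
        self.qid_for_task[task_id] = qid
        return qid

    def complete_task(self, task_id: TaskID) -> None:
        self.load[self.qid_for_task.pop(task_id)] -= 1

This would be passed to the pool as Pool(scheduler=LeastLoaded(), queuecount=4), with queuecount no larger than the process count, per the check in Pool.__init__ above.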
meutils/other/aiomultiprocess/types.py
DELETED
@@ -1,48 +0,0 @@
-# Copyright 2018 John Reese
-# Licensed under the MIT license
-
-import multiprocessing
-from asyncio import BaseEventLoop
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    NamedTuple,
-    NewType,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-)
-
-T = TypeVar("T")
-R = TypeVar("R")
-
-Context = multiprocessing.context.BaseContext
-Queue = multiprocessing.Queue
-
-TaskID = NewType("TaskID", int)
-QueueID = NewType("QueueID", int)
-
-TracebackStr = str
-
-LoopInitializer = Callable[..., BaseEventLoop]
-PoolTask = Optional[Tuple[TaskID, Callable[..., R], Sequence[T], Dict[str, T]]]
-PoolResult = Tuple[TaskID, Optional[R], Optional[TracebackStr]]
-
-
-class Unit(NamedTuple):
-    """Container for what to call on the child process."""
-
-    target: Callable
-    args: Sequence[Any]
-    kwargs: Dict[str, Any]
-    namespace: Any
-    initializer: Optional[Callable] = None
-    initargs: Sequence[Any] = ()
-    loop_initializer: Optional[LoopInitializer] = None
-    runner: Optional[Callable] = None
-
-
-class ProxyException(Exception):
-    pass
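Per pool.py above, a failure in a child task travels back as a TracebackStr and is re-raised in the parent as ProxyException. A minimal sketch of that behavior, again assuming the upstream aiomultiprocess package; `boom` is a hypothetical failing task:

import asyncio

from aiomultiprocess import Pool
from aiomultiprocess.types import ProxyException


async def boom(_: int) -> None:
    raise ValueError("failed in child")  # hypothetical failure


async def main() -> None:
    async with Pool(processes=1) as pool:
        try:
            await pool.apply(boom, (0,))
        except ProxyException as exc:
            # the exception message is the child's formatted traceback,
            # produced by traceback.format_exc() in PoolWorker.run above
            print(str(exc).splitlines()[-1])  # ValueError: failed in child


if __name__ == "__main__":
    asyncio.run(main())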
{MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/LICENSE
RENAMED
File without changes
{MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/WHEEL
RENAMED
File without changes
{MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/entry_points.txt
RENAMED
File without changes
{MeUtils-2025.3.14.8.41.43.dist-info → MeUtils-2025.3.19.18.18.56.dist-info}/top_level.txt
RENAMED
File without changes