wool-0.1rc8-py3-none-any.whl → wool-0.1rc10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of wool has been flagged as potentially problematic.
- wool/__init__.py +71 -50
- wool/_protobuf/__init__.py +14 -0
- wool/_protobuf/exception.py +3 -0
- wool/_protobuf/task.py +11 -0
- wool/_protobuf/task_pb2.py +42 -0
- wool/_protobuf/task_pb2.pyi +43 -0
- wool/_protobuf/{mempool/metadata/metadata_pb2_grpc.py → task_pb2_grpc.py} +2 -2
- wool/_protobuf/worker.py +24 -0
- wool/_protobuf/worker_pb2.py +47 -0
- wool/_protobuf/worker_pb2.pyi +39 -0
- wool/_protobuf/worker_pb2_grpc.py +141 -0
- wool/_resource_pool.py +376 -0
- wool/_typing.py +0 -10
- wool/_work.py +553 -0
- wool/_worker.py +843 -169
- wool/_worker_discovery.py +1223 -0
- wool/_worker_pool.py +331 -0
- wool/_worker_proxy.py +515 -0
- {wool-0.1rc8.dist-info → wool-0.1rc10.dist-info}/METADATA +8 -7
- wool-0.1rc10.dist-info/RECORD +22 -0
- wool-0.1rc10.dist-info/entry_points.txt +2 -0
- wool/_cli.py +0 -262
- wool/_event.py +0 -109
- wool/_future.py +0 -171
- wool/_logging.py +0 -44
- wool/_manager.py +0 -181
- wool/_mempool/__init__.py +0 -4
- wool/_mempool/_mempool.py +0 -311
- wool/_mempool/_metadata.py +0 -39
- wool/_mempool/_service.py +0 -225
- wool/_pool.py +0 -524
- wool/_protobuf/mempool/mempool_pb2.py +0 -66
- wool/_protobuf/mempool/mempool_pb2.pyi +0 -108
- wool/_protobuf/mempool/mempool_pb2_grpc.py +0 -312
- wool/_protobuf/mempool/metadata/metadata_pb2.py +0 -36
- wool/_protobuf/mempool/metadata/metadata_pb2.pyi +0 -17
- wool/_queue.py +0 -32
- wool/_session.py +0 -429
- wool/_task.py +0 -366
- wool/_utils.py +0 -63
- wool-0.1rc8.dist-info/RECORD +0 -28
- wool-0.1rc8.dist-info/entry_points.txt +0 -2
- {wool-0.1rc8.dist-info → wool-0.1rc10.dist-info}/WHEEL +0 -0
wool/_worker_proxy.py
ADDED
@@ -0,0 +1,515 @@
from __future__ import annotations

import asyncio
import itertools
import uuid
from typing import TYPE_CHECKING
from typing import AsyncContextManager
from typing import AsyncIterator
from typing import Awaitable
from typing import Callable
from typing import ContextManager
from typing import Final
from typing import Generic
from typing import Protocol
from typing import Sequence
from typing import TypeAlias
from typing import TypeVar
from typing import overload
from typing import runtime_checkable

import grpc
import grpc.aio

import wool
from wool import _protobuf as pb
from wool._resource_pool import Resource
from wool._resource_pool import ResourcePool
from wool._worker import WorkerClient
from wool._worker_discovery import DiscoveryEvent
from wool._worker_discovery import Factory
from wool._worker_discovery import LocalDiscoveryService
from wool._worker_discovery import ReducibleAsyncIteratorLike
from wool._worker_discovery import WorkerInfo

if TYPE_CHECKING:
    from wool._work import WoolTask

T = TypeVar("T")


class ReducibleAsyncIterator(Generic[T]):
    """An async iterator that can be pickled via __reduce__.

    Converts a sequence into an async iterator while maintaining
    picklability for distributed task execution contexts.

    :param items:
        Sequence of items to convert to async iterator.
    """

    def __init__(self, items: Sequence[T]):
        self._items = items
        self._index = 0

    def __aiter__(self) -> AsyncIterator[T]:
        return self

    async def __anext__(self) -> T:
        if self._index >= len(self._items):
            raise StopAsyncIteration
        item = self._items[self._index]
        self._index += 1
        return item

    def __reduce__(self) -> tuple:
        """Return constructor args for unpickling."""
        return (self.__class__, (self._items,))


async def client_factory(address: str) -> WorkerClient:
    """Factory function for creating gRPC channels.

    Creates an insecure gRPC channel for the given address.
    The address is passed as the key from ResourcePool.

    :param address:
        The network address (host:port) to create a channel for.
    :returns:
        A new gRPC channel for the address.
    """
    return WorkerClient(address)


async def client_finalizer(client: WorkerClient) -> None:
    """Finalizer function for gRPC channels.

    Closes the gRPC client when it's being cleaned up from the resource pool.

    :param client:
        The gRPC client to close.
    """
    try:
        await client.stop()
    except Exception:
        pass


WorkerUri: TypeAlias = str


class NoWorkersAvailable(Exception):
    """Raised when no workers are available for task dispatch.

    This exception indicates that either no workers exist in the worker pool
    or all available workers have been tried and failed with transient errors.
    """


@runtime_checkable
class LoadBalancerLike(Protocol):
    """Protocol for load balancer v2 that directly dispatches tasks.

    This simplified protocol does not manage discovery services and instead
    operates on a dynamic list of (worker_uri, WorkerInfo) tuples sorted by
    worker_uri. It only defines a dispatch method that accepts a WoolTask and
    returns a task result.

    Expected constructor signature (see LoadBalancerV2Factory):
        __init__(self, workers: list[tuple[str, WorkerInfo]])
    """

    def dispatch(self, task: WoolTask) -> AsyncIterator: ...

    def worker_added_callback(
        self, client: Resource[WorkerClient], info: WorkerInfo
    ): ...

    def worker_updated_callback(
        self, client: Resource[WorkerClient], info: WorkerInfo
    ): ...

    def worker_removed_callback(self, info: WorkerInfo): ...


LoadBalancerFactory: TypeAlias = Factory[LoadBalancerLike]


DispatchCall: TypeAlias = grpc.aio.UnaryStreamCall[pb.task.Task, pb.worker.Response]


class RoundRobinLoadBalancer:
    """Round-robin load balancer for distributing tasks across workers.

    Distributes tasks evenly across available workers using a simple round-robin
    algorithm. Automatically handles worker failures by trying the next worker
    when transient errors occur. Workers are dynamically managed through
    callback methods for addition, updates, and removal.
    """

    TRANSIENT_ERRORS: Final = {
        grpc.StatusCode.UNAVAILABLE,
        grpc.StatusCode.DEADLINE_EXCEEDED,
        grpc.StatusCode.RESOURCE_EXHAUSTED,
    }

    _current_index: int
    _workers: dict[WorkerInfo, Callable[[], Resource[WorkerClient]]]

    def __init__(self):
        """Initialize the round-robin load balancer.

        Sets up internal state for tracking workers and round-robin index.
        Workers are managed dynamically through callback methods.
        """
        self._current_index = 0
        self._workers = {}

    async def dispatch(self, task: WoolTask) -> AsyncIterator:
        """Dispatch a task to the next available worker using round-robin.

        Tries all workers in one round-robin cycle. If a worker fails with a
        transient error, continues to the next worker. Returns a streaming
        result that automatically manages channel cleanup.

        :param task:
            The WoolTask to dispatch.
        :returns:
            A streaming dispatch result that yields worker responses.
        :raises NoWorkersAvailable:
            If no workers are available or all workers fail with transient errors.
        """
        # Track the first worker URI we try to detect when we've looped back
        checkpoint = None

        while self._workers:
            self._current_index = self._current_index + 1
            if self._current_index >= len(self._workers):
                # Reset index if it's out of bounds
                self._current_index = 0

            worker_info, worker_resource = next(
                itertools.islice(
                    self._workers.items(), self._current_index, self._current_index + 1
                )
            )

            # Check if we've looped back to the first worker we tried
            if checkpoint is None:
                checkpoint = worker_info.uid
            elif worker_info.uid == checkpoint:
                # We've tried all workers and looped back around
                break

            async with worker_resource() as worker:
                async for result in worker.dispatch(task):
                    yield result
                return
        else:
            raise NoWorkersAvailable("No workers available for dispatch")

        # If we get here, all workers failed with transient errors
        raise NoWorkersAvailable(
            f"All {len(self._workers)} workers failed with transient errors"
        )

    def worker_added_callback(self, client: Resource[WorkerClient], info: WorkerInfo):
        self._workers[info] = client

    def worker_updated_callback(self, client: Resource[WorkerClient], info: WorkerInfo):
        self._workers[info] = client

    def worker_removed_callback(self, info: WorkerInfo):
        if info in self._workers:
            del self._workers[info]


# public
class WorkerProxy:
    """Client-side interface for task dispatch to distributed workers.

    The WorkerProxy manages worker discovery, load balancing, and task routing
    within the wool framework. It serves as the bridge between task decorators
    and the underlying worker pool, handling connection management and fault
    tolerance transparently.

    Supports multiple configuration modes:
    - Pool URI-based discovery for connecting to specific worker pools
    - Custom discovery services for advanced deployment scenarios
    - Static worker lists for testing and development
    - Configurable load balancing strategies

    :param pool_uri:
        Unique identifier for connecting to a specific worker pool.
    :param tags:
        Additional capability tags for filtering discovered workers.
    :param discovery:
        Custom discovery service or event stream for finding workers.
    :param workers:
        Static list of workers for direct connection (testing/development).
    :param loadbalancer:
        Load balancer implementation or factory for task distribution.
    """

    _discovery: (
        ReducibleAsyncIteratorLike[DiscoveryEvent]
        | Factory[AsyncIterator[DiscoveryEvent]]
    )
    _discovery_manager: (
        AsyncContextManager[AsyncIterator[DiscoveryEvent]]
        | ContextManager[AsyncIterator[DiscoveryEvent]]
    )
    _loadbalancer = LoadBalancerLike | LoadBalancerFactory
    _loadbalancer_manager: (
        AsyncContextManager[LoadBalancerLike] | ContextManager[LoadBalancerLike]
    )

    @overload
    def __init__(
        self,
        *,
        discovery: (
            ReducibleAsyncIteratorLike[DiscoveryEvent]
            | Factory[AsyncIterator[DiscoveryEvent]]
        ),
        loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
    ): ...

    @overload
    def __init__(
        self,
        *,
        workers: Sequence[WorkerInfo],
        loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
    ): ...

    @overload
    def __init__(
        self,
        pool_uri: str,
        *tags: str,
        loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
    ): ...

    def __init__(
        self,
        pool_uri: str | None = None,
        *tags: str,
        discovery: (
            ReducibleAsyncIteratorLike[DiscoveryEvent]
            | Factory[AsyncIterator[DiscoveryEvent]]
            | None
        ) = None,
        workers: Sequence[WorkerInfo] | None = None,
        loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
    ):
        if not (pool_uri or discovery or workers):
            raise ValueError(
                "Must specify either a workerpool URI, discovery event stream, or a "
                "sequence of workers"
            )

        self._id: Final = uuid.uuid4()
        self._started = False
        self._workers: dict[WorkerInfo, Resource[WorkerClient]] = {}
        self._loadbalancer = loadbalancer

        match (pool_uri, discovery, workers):
            case (pool_uri, None, None) if pool_uri is not None:
                self._discovery = LocalDiscoveryService(
                    pool_uri, filter=lambda w: bool({pool_uri, *tags} & w.tags)
                )
            case (None, discovery, None) if discovery is not None:
                self._discovery = discovery
            case (None, None, workers) if workers is not None:
                self._discovery = ReducibleAsyncIterator(
                    [DiscoveryEvent(type="worker_added", worker_info=w) for w in workers]
                )
            case _:
                raise ValueError(
                    "Must specify exactly one of: "
                    "pool_uri, discovery_event_stream, or workers"
                )
        self._sentinel_task: asyncio.Task[None] | None = None

    async def __aenter__(self):
        """Starts the proxy and sets it as the active context."""
        await self.start()
        return self

    async def __aexit__(self, *args):
        """Stops the proxy and resets the active context."""
        await self.stop(*args)

    def __hash__(self) -> int:
        return hash(str(self.id))

    def __eq__(self, value: object) -> bool:
        return isinstance(value, WorkerProxy) and hash(self) == hash(value)

    def __reduce__(self) -> tuple:
        """Return constructor args for unpickling with proxy ID preserved.

        Creates a new WorkerProxy instance with the same discovery stream and
        load balancer type, then sets the preserved proxy ID on the new object.
        Workers will be re-discovered on the new instance.

        :returns:
            Tuple of (callable, args, state) for unpickling.
        """

        def _restore_proxy(discovery, loadbalancer, proxy_id):
            proxy = WorkerProxy(discovery=discovery, loadbalancer=loadbalancer)
            proxy._id = proxy_id
            return proxy

        return (
            _restore_proxy,
            (self._discovery, self._loadbalancer, self._id),
        )

    @property
    def id(self) -> uuid.UUID:
        return self._id

    @property
    def started(self) -> bool:
        return self._started

    @property
    def workers(self) -> dict[WorkerInfo, Resource[WorkerClient]]:
        """A list of the currently discovered worker gRPC stubs."""
        return self._workers

    async def start(self) -> None:
        """Starts the proxy by initiating the worker discovery process.

        :raises RuntimeError:
            If the proxy has already been started.
        """
        if self._started:
            raise RuntimeError("Proxy already started")

        (self._loadbalancer_service, self._loadbalancer_ctx) = await self._enter_context(
            self._loadbalancer
        )
        if not isinstance(self._loadbalancer_service, LoadBalancerLike):
            raise ValueError

        self._discovery_service, self._discovery_ctx = await self._enter_context(
            self._discovery
        )
        if not isinstance(self._discovery_service, AsyncIterator):
            raise ValueError

        self._proxy_token = wool.__proxy__.set(self)
        self._client_pool = ResourcePool(
            factory=client_factory, finalizer=client_finalizer, ttl=60
        )
        self._sentinel_task = asyncio.create_task(self._worker_sentinel())
        self._started = True

    async def stop(self, *args) -> None:
        """Stops the proxy, terminating discovery and clearing connections.

        :raises RuntimeError:
            If the proxy was not started first.
        """
        if not self._started:
            raise RuntimeError("Proxy not started - call start() first")

        await self._exit_context(self._discovery_ctx, *args)
        await self._exit_context(self._loadbalancer_ctx, *args)

        wool.__proxy__.reset(self._proxy_token)
        if self._sentinel_task:
            self._sentinel_task.cancel()
            try:
                await self._sentinel_task
            except asyncio.CancelledError:
                pass
            self._sentinel_task = None
        await self._client_pool.clear()

        self._workers.clear()
        self._started = False

    async def dispatch(self, task: WoolTask):
        """Dispatches a task to an available worker in the pool.

        This method selects a worker using a round-robin strategy. If no
        workers are available within the timeout period, it raises an
        exception.

        :param task:
            The :py:class:`WoolTask` object to be dispatched.
        :param timeout:
            Timeout in seconds for getting a worker.
        :returns:
            A protobuf result object from the worker.
        :raises RuntimeError:
            If the proxy is not started.
        :raises asyncio.TimeoutError:
            If no worker is available within the timeout period.
        """
        if not self._started:
            raise RuntimeError("Proxy not started - call start() first")

        await asyncio.wait_for(self._await_workers(), 60)

        assert isinstance(self._loadbalancer_service, LoadBalancerLike)
        async for result in self._loadbalancer_service.dispatch(task):
            yield result

    async def _enter_context(self, factory):
        ctx = None
        if callable(factory):
            obj = factory()
            if isinstance(obj, ContextManager):
                ctx = obj
                obj = obj.__enter__()
            elif isinstance(obj, AsyncContextManager):
                ctx = obj
                obj = await obj.__aenter__()
            elif isinstance(obj, Awaitable):
                obj = await obj
        else:
            obj = factory
        return obj, ctx

    async def _exit_context(
        self, ctx: AsyncContextManager | ContextManager | None, *args
    ):
        if isinstance(ctx, AsyncContextManager):
            await ctx.__aexit__(*args)
        elif isinstance(ctx, ContextManager):
            ctx.__exit__(*args)

    async def _await_workers(self):
        while not self._loadbalancer_service._workers:
            await asyncio.sleep(0)

    async def _worker_sentinel(self):
        assert isinstance(self._discovery_service, AsyncIterator)
        assert isinstance(self._loadbalancer_service, LoadBalancerLike)
        async for event in self._discovery_service:
            match event.type:
                case "worker_added":
                    self._loadbalancer_service.worker_added_callback(
                        lambda: self._client_pool.get(
                            f"{event.worker_info.host}:{event.worker_info.port}",
                        ),
                        event.worker_info,
                    )
                case "worker_updated":
                    self._loadbalancer_service.worker_updated_callback(
                        lambda: self._client_pool.get(
                            f"{event.worker_info.host}:{event.worker_info.port}",
                        ),
                        event.worker_info,
                    )
                case "worker_removed":
                    if event.worker_info.uid in self._workers:
                        self._loadbalancer_service.worker_removed_callback(
                            event.worker_info
                        )
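Editor's note (not part of the released file): WorkerProxy accepts any object or factory satisfying the LoadBalancerLike protocol via its loadbalancer parameter, and its _await_workers helper polls the balancer's _workers attribute. The sketch below is a minimal, hypothetical implementation of that protocol — the class name FirstWorkerLoadBalancer is invented here; the callback shapes mirror RoundRobinLoadBalancer, and the transient-error retry behaviour is deliberately omitted.

from typing import AsyncIterator
from typing import Callable

from wool._resource_pool import Resource
from wool._work import WoolTask
from wool._worker import WorkerClient
from wool._worker_discovery import WorkerInfo


class FirstWorkerLoadBalancer:
    """Toy LoadBalancerLike implementation: send every task to the first
    worker that discovery has reported (illustration only)."""

    def __init__(self) -> None:
        # Same shape as RoundRobinLoadBalancer._workers; WorkerProxy's
        # sentinel passes a zero-argument callable that checks a pooled
        # WorkerClient out of the proxy's ResourcePool.
        self._workers: dict[WorkerInfo, Callable[[], Resource[WorkerClient]]] = {}

    async def dispatch(self, task: WoolTask) -> AsyncIterator:
        # Take whichever worker was registered first and stream its responses.
        _, worker_resource = next(iter(self._workers.items()))
        async with worker_resource() as worker:
            async for result in worker.dispatch(task):
                yield result

    def worker_added_callback(self, client, info: WorkerInfo) -> None:
        self._workers[info] = client

    def worker_updated_callback(self, client, info: WorkerInfo) -> None:
        self._workers[info] = client

    def worker_removed_callback(self, info: WorkerInfo) -> None:
        self._workers.pop(info, None)

An instance (or the class itself, since WorkerProxy also accepts a factory) would be passed as loadbalancer=FirstWorkerLoadBalancer when constructing the proxy.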
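Editor's note (illustration, not shipped in the wheel): ReducibleAsyncIterator exists so the static-workers configuration can survive pickling when a WorkerProxy is re-created in another process via __reduce__. A small self-contained check of that behaviour, assuming the wool 0.1rc10 wheel is installed:

import asyncio
import pickle

from wool._worker_proxy import ReducibleAsyncIterator


async def main() -> None:
    source = ReducibleAsyncIterator(["a", "b", "c"])

    # __reduce__ returns (cls, (items,)), so the pickled copy is rebuilt
    # from the same sequence and starts iterating from index 0 again.
    clone = pickle.loads(pickle.dumps(source))

    assert [item async for item in clone] == ["a", "b", "c"]


asyncio.run(main())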
{wool-0.1rc8.dist-info → wool-0.1rc10.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: wool
-Version: 0.1rc8
+Version: 0.1rc10
 Summary: A Python framework for distributed multiprocessing.
 Author-email: Conrad Bzura <conrad@wool.io>
 Maintainer-email: maintainers@wool.io
@@ -208,22 +208,23 @@ License: Apache License
 Classifier: Intended Audience :: Developers
 Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Operating System :: POSIX :: Linux
-Requires-Python: >=3.
-Requires-Dist:
-Requires-Dist: click
-Requires-Dist: debugpy
+Requires-Python: >=3.11
+Requires-Dist: cloudpickle
 Requires-Dist: grpcio
 Requires-Dist: protobuf
 Requires-Dist: shortuuid
 Requires-Dist: tblib
 Requires-Dist: typing-extensions
+Requires-Dist: zeroconf
 Provides-Extra: dev
+Requires-Dist: debugpy; extra == 'dev'
+Requires-Dist: hypothesis; extra == 'dev'
 Requires-Dist: pytest; extra == 'dev'
 Requires-Dist: pytest-asyncio; extra == 'dev'
+Requires-Dist: pytest-cov; extra == 'dev'
 Requires-Dist: pytest-grpc-aio~=0.2.0; extra == 'dev'
+Requires-Dist: pytest-mock; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'
-Provides-Extra: locking
-Requires-Dist: wool-locking==0.1rc8; extra == 'locking'
 Description-Content-Type: text/markdown

 # Wool
wool-0.1rc10.dist-info/RECORD
ADDED
@@ -0,0 +1,22 @@
wool/__init__.py,sha256=H7BGRkdOp93uhm4c2dcnFBKhjTZGTRfeFYsFhrd9uaA,2644
wool/_resource_pool.py,sha256=B4Zk9kbspMY6V-H6unzup666alhI81YMBCyGvW-R5Ho,11559
wool/_typing.py,sha256=tZDbQN8DZqGf34fRfgnqITsCgAvXA02bgB_9uTaNACQ,191
wool/_work.py,sha256=I4eyCEWDFkfoRXhBvNcKuA1fGS55JMw4WEZhwU0DExM,16690
wool/_worker.py,sha256=dl6PeFkTAPCxTKuCiKOTMGeCP7YTVMrV3ipcv9kUl7g,29625
wool/_worker_discovery.py,sha256=t_w63snttrcCvtPu5F0xc0phY0oKnd8yVeeG63PJ7Ks,43357
wool/_worker_pool.py,sha256=b3NtDulvJSdtG1NpOOkPaHIiZsgExkRRB5OuVU-SPpE,11026
wool/_worker_proxy.py,sha256=cwsIFvGiRmXtGFHommEK94yeltLOnaAehzMN4QUod98,17500
wool/_protobuf/__init__.py,sha256=61kDOGjPz9FwZpEygbItQdJY1y2QK-ogEtUtwvsOEZQ,435
wool/_protobuf/exception.py,sha256=rBm4bpWBt9Pq6Ry-WyxSG9RSE3FBuUt1JF-HBR0TK7w,174
wool/_protobuf/task.py,sha256=YTBKhUgUAXF-wEecEYmieN87l9f4aYPhkqrm7hyVNDU,384
wool/_protobuf/task_pb2.py,sha256=Fb7z2Ox-9ImXHwNKFYvxuBbMsrWFo9lsitEctzk4ARE,1945
wool/_protobuf/task_pb2.pyi,sha256=mssrgYgww2n_3J05OtwIAgYHzLSu__3ngXV8CjIKoiU,1593
wool/_protobuf/task_pb2_grpc.py,sha256=L9KwCLmvA-Jfafabm9JYujLgThaUtM3MP9zhDfeSUfw,885
wool/_protobuf/worker.py,sha256=gmB-wowcCig9a_EIZB8eLYmfCvF7aAu4Zm2sZLTVYi4,734
wool/_protobuf/worker_pb2.py,sha256=LiotSqTMtFdVmjG3svVsSYfDyuJdsf-KwHPtRDNeluo,2393
wool/_protobuf/worker_pb2.pyi,sha256=TjLjxwAsr_NpWw1JLGUz77ANCPIwWoJJ5WmkX8CfnfU,1428
wool/_protobuf/worker_pb2_grpc.py,sha256=achgrR25vGveWJkYwWLw9YOuGvCUs9sIYEyv7ZqzO28,4992
wool-0.1rc10.dist-info/METADATA,sha256=XVesEYqGiOGUA6APFWgCH0IK2cbGHMVJsZM0VfW5xmo,17093
wool-0.1rc10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
wool-0.1rc10.dist-info/entry_points.txt,sha256=U3V7wWNc3KLw4AOjJRpbcZcLJIdiA-7y1eqxn-Xa5rY,38
wool-0.1rc10.dist-info/RECORD,,