wool 0.1rc9-py3-none-any.whl → 0.1rc10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of wool has been flagged as a potentially problematic release.

Files changed (44)
  1. wool/__init__.py +71 -50
  2. wool/_protobuf/__init__.py +12 -5
  3. wool/_protobuf/exception.py +3 -0
  4. wool/_protobuf/task.py +11 -0
  5. wool/_protobuf/task_pb2.py +42 -0
  6. wool/_protobuf/task_pb2.pyi +43 -0
  7. wool/_protobuf/{mempool/metadata_pb2_grpc.py → task_pb2_grpc.py} +2 -2
  8. wool/_protobuf/worker.py +24 -0
  9. wool/_protobuf/worker_pb2.py +47 -0
  10. wool/_protobuf/worker_pb2.pyi +39 -0
  11. wool/_protobuf/worker_pb2_grpc.py +141 -0
  12. wool/_resource_pool.py +376 -0
  13. wool/_typing.py +0 -10
  14. wool/_work.py +553 -0
  15. wool/_worker.py +843 -169
  16. wool/_worker_discovery.py +1223 -0
  17. wool/_worker_pool.py +331 -0
  18. wool/_worker_proxy.py +515 -0
  19. {wool-0.1rc9.dist-info → wool-0.1rc10.dist-info}/METADATA +7 -7
  20. wool-0.1rc10.dist-info/RECORD +22 -0
  21. wool-0.1rc10.dist-info/entry_points.txt +2 -0
  22. wool/_cli.py +0 -262
  23. wool/_event.py +0 -109
  24. wool/_future.py +0 -171
  25. wool/_logging.py +0 -44
  26. wool/_manager.py +0 -181
  27. wool/_mempool/__init__.py +0 -4
  28. wool/_mempool/_client.py +0 -167
  29. wool/_mempool/_mempool.py +0 -311
  30. wool/_mempool/_metadata.py +0 -35
  31. wool/_mempool/_service.py +0 -227
  32. wool/_pool.py +0 -524
  33. wool/_protobuf/mempool/metadata_pb2.py +0 -36
  34. wool/_protobuf/mempool/metadata_pb2.pyi +0 -17
  35. wool/_protobuf/mempool/service_pb2.py +0 -66
  36. wool/_protobuf/mempool/service_pb2.pyi +0 -108
  37. wool/_protobuf/mempool/service_pb2_grpc.py +0 -355
  38. wool/_queue.py +0 -32
  39. wool/_session.py +0 -429
  40. wool/_task.py +0 -366
  41. wool/_utils.py +0 -63
  42. wool-0.1rc9.dist-info/RECORD +0 -29
  43. wool-0.1rc9.dist-info/entry_points.txt +0 -2
  44. {wool-0.1rc9.dist-info → wool-0.1rc10.dist-info}/WHEEL +0 -0
wool/_worker_pool.py ADDED
@@ -0,0 +1,331 @@
+ from __future__ import annotations
+
+ import asyncio
+ import hashlib
+ import os
+ import uuid
+ from functools import partial
+ from multiprocessing.shared_memory import SharedMemory
+ from typing import AsyncIterator
+ from typing import Final
+ from typing import overload
+
+ from wool._worker import LocalWorker
+ from wool._worker import Worker
+ from wool._worker import WorkerFactory
+ from wool._worker_discovery import DiscoveryEvent
+ from wool._worker_discovery import Factory
+ from wool._worker_discovery import LocalDiscoveryService
+ from wool._worker_discovery import LocalRegistryService
+ from wool._worker_discovery import ReducibleAsyncIteratorLike
+ from wool._worker_discovery import RegistryServiceLike
+ from wool._worker_proxy import LoadBalancerFactory
+ from wool._worker_proxy import LoadBalancerLike
+ from wool._worker_proxy import RoundRobinLoadBalancer
+ from wool._worker_proxy import WorkerProxy
+
+
+ # public
+ class WorkerPool:
+     """Manages a pool of distributed worker processes for task execution.
+
+     The WorkerPool is the core orchestrator in the wool framework, providing
+     both ephemeral and durable pool configurations. It handles worker lifecycle,
+     service discovery, and load balancing through configurable components.
+
+     **Ephemeral Pools** spawn local worker processes with automatic cleanup,
+     ideal for development and single-machine deployments.
+
+     **Durable Pools** connect to existing distributed workers via discovery
+     services, supporting production deployments across multiple machines.
+
+     Example usage:
+
+     **Basic ephemeral pool (default configuration):**
+
+     .. code-block:: python
+
+         import wool
+
+
+         @wool.work
+         async def fibonacci(n: int) -> int:
+             if n <= 1:
+                 return n
+             return await fibonacci(n - 1) + await fibonacci(n - 2)
+
+
+         async def main():
+             async with wool.WorkerPool() as pool:
+                 result = await fibonacci(10)
+                 print(f"Result: {result}")
+
+     **Ephemeral pool with custom configuration:**
+
+     .. code-block:: python
+
+         from wool import WorkerPool, LocalWorker
+         from wool._worker_discovery import LocalRegistryService
+         from functools import partial
+
+         # Custom worker factory with specific tags
+         worker_factory = partial(
+             LocalWorker, registry_service=LocalRegistryService("my-pool")
+         )
+
+         async with WorkerPool(
+             "gpu-capable",
+             "ml-model",  # Worker tags
+             size=4,  # Number of workers
+             worker=worker_factory,  # Custom factory
+         ) as pool:
+             result = await process_data()
+
+     **Durable pool with LAN discovery:**
+
+     .. code-block:: python
+
+         from wool import WorkerPool
+         from wool._worker_discovery import LanDiscoveryService
+
+         # Connect to existing workers on the network
+         discovery = LanDiscoveryService(filter=lambda w: "production" in w.tags)
+
+         async with WorkerPool(discovery=discovery) as pool:
+             results = await gather_metrics()
+
+     **Durable pool with custom load balancer:**
+
+     .. code-block:: python
+
+         from wool import WorkerPool
+         from wool._worker_proxy import RoundRobinLoadBalancer
+
+
+         class WeightedLoadBalancer(RoundRobinLoadBalancer):
+             # Custom load balancing logic
+             pass
+
+
+         async with WorkerPool(
+             discovery=discovery_service, loadbalancer=WeightedLoadBalancer
+         ) as pool:
+             result = await distributed_computation()
+
+     :param tags:
+         Capability tags to associate with spawned workers (ephemeral pools only).
+     :param size:
+         Number of worker processes to spawn (ephemeral pools only; 0 = CPU count).
+     :param worker:
+         Factory function for creating worker instances (ephemeral pools only).
+     :param loadbalancer:
+         Load balancer for task distribution. Can be provided as:
+
+         - **Instance**: Direct loadbalancer object
+         - **Factory function**: Function returning a loadbalancer instance
+         - **Context manager factory**: Function returning a context manager
+           that yields a loadbalancer instance
+
+         Examples::
+
+             # Direct instance
+             loadbalancer = RoundRobinLoadBalancer()
+
+             # Instance factory
+             loadbalancer = lambda: CustomLoadBalancer(...)
+
+
+             # Context manager factory
+             @asynccontextmanager
+             async def loadbalancer():
+                 async with CustomLoadBalancer() as lb:
+                     ...
+                     yield lb
+                     ...
+
+
+             loadbalancer = loadbalancer
+
+     :param discovery:
+         Discovery service for finding existing workers (durable pools only).
+         Can be provided as:
+
+         - **Instance**: Direct discovery service object
+         - **Factory function**: Function returning a discovery service instance
+         - **Context manager factory**: Function returning a context manager that
+           yields a discovery service
+
+         Examples::
+
+             # Direct instance
+             discovery=LanDiscoveryService(filter=lambda w: "prod" in w.tags)
+
+             # Instance factory
+             discovery=lambda: LocalDiscoveryService("pool-123")
+
+             # Context manager factory
+             @asynccontextmanager
+             async def discovery():
+                 service = await DatabaseDiscoveryService.create(connection_string)
+                 try:
+                     ...
+                     yield service
+                 finally:
+                     ...
+                     await service.close()
+             discovery=discovery
+     :raises ValueError:
+         If invalid configuration is provided or the CPU count cannot be determined.
+     """
+
+     _workers: Final[list[Worker]]
+     _shared_memory = None
+
+     @overload
+     def __init__(
+         self,
+         *tags: str,
+         size: int = 0,
+         worker: WorkerFactory[RegistryServiceLike] = LocalWorker[LocalRegistryService],
+         loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
+     ):
+         """
+         Create an ephemeral pool of workers, spawning the specified quantity of workers
+         using the specified worker factory.
+         """
+         ...
+
+     @overload
+     def __init__(
+         self,
+         *,
+         discovery: (
+             ReducibleAsyncIteratorLike[DiscoveryEvent]
+             | Factory[AsyncIterator[DiscoveryEvent]]
+         ),
+         loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
+     ):
+         """
+         Connect to an existing pool of workers discovered by the specified discovery
+         protocol.
+         """
+         ...
+
+     def __init__(
+         self,
+         *tags: str,
+         size: int | None = None,
+         worker: WorkerFactory | None = None,
+         loadbalancer: LoadBalancerLike | LoadBalancerFactory = RoundRobinLoadBalancer,
+         discovery: (
+             ReducibleAsyncIteratorLike[DiscoveryEvent]
+             | Factory[AsyncIterator[DiscoveryEvent]]
+             | None
+         ) = None,
+     ):
+         self._workers = []
+
+         match (size, discovery):
+             case (None, None):
+                 cpu_count = os.cpu_count()
+                 if cpu_count is None:
+                     raise ValueError("Unable to determine CPU count")
+                 size = cpu_count
+
+                 uri = f"pool-{uuid.uuid4().hex}"
+
+                 async def create_proxy():
+                     self._shared_memory = SharedMemory(
+                         name=hashlib.sha256(uri.encode()).hexdigest()[:12],
+                         create=True,
+                         size=1024,
+                     )
+                     for i in range(1024):
+                         self._shared_memory.buf[i] = 0
+                     await self._spawn_workers(uri, *tags, size=size, factory=worker)
+                     return WorkerProxy(
+                         discovery=LocalDiscoveryService(uri),
+                         loadbalancer=loadbalancer,
+                     )
+
+             case (size, None) if size is not None:
+                 if size == 0:
+                     cpu_count = os.cpu_count()
+                     if cpu_count is None:
+                         raise ValueError("Unable to determine CPU count")
+                     size = cpu_count
+                 elif size < 0:
+                     raise ValueError("Size must be non-negative")
+
+                 uri = f"pool-{uuid.uuid4().hex}"
+
+                 async def create_proxy():
+                     self._shared_memory = SharedMemory(
+                         name=hashlib.sha256(uri.encode()).hexdigest()[:12],
+                         create=True,
+                         size=1024,
+                     )
+                     for i in range(1024):
+                         self._shared_memory.buf[i] = 0
+                     await self._spawn_workers(uri, *tags, size=size, factory=worker)
+                     return WorkerProxy(
+                         discovery=LocalDiscoveryService(uri),
+                         loadbalancer=loadbalancer,
+                     )
+
+             case (None, discovery) if discovery is not None:
+
+                 async def create_proxy():
+                     return WorkerProxy(
+                         discovery=discovery,
+                         loadbalancer=loadbalancer,
+                     )
+
+             case _:
+                 raise RuntimeError
+
+         self._proxy_factory = create_proxy
+
+     async def __aenter__(self) -> WorkerPool:
+         """Starts the worker pool and its services.
+
+         This method starts the worker registry, creates a client session,
+         launches all worker processes, and registers them.
+
+         :returns:
+             The :py:class:`WorkerPool` instance itself for method chaining.
+         """
+         self._proxy = await self._proxy_factory()
+         await self._proxy.__aenter__()
+         return self
+
+     async def __aexit__(self, *args):
+         """Stops all workers and tears down the pool and its services."""
+         try:
+             await self._stop_workers()
+             await self._proxy.__aexit__(*args)
+         finally:
+             if self._shared_memory is not None:
+                 self._shared_memory.unlink()
+
+     async def _spawn_workers(
+         self, uri, *tags: str, size: int, factory: WorkerFactory | None
+     ):
+         if factory is None:
+             factory = partial(LocalWorker, registry_service=LocalRegistryService(uri))
+
+         tasks = []
+         for _ in range(size):
+             worker = factory(*tags)
+             task = asyncio.create_task(worker.start())
+             tasks.append(task)
+             self._workers.append(worker)
+
+         await asyncio.gather(*tasks, return_exceptions=True)
+
+         return [w.info for w in self._workers if w.info]
+
+     async def _stop_workers(self):
+         """Sends a stop command to all workers and unregisters them."""
+         tasks = [asyncio.create_task(worker.stop()) for worker in self._workers]
+         await asyncio.gather(*tasks, return_exceptions=True)
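
For orientation, the constructor above dispatches on the `(size, discovery)` pair: tags and `size` select the ephemeral mode (local workers are spawned against a fresh local registry), `discovery` alone selects the durable mode (the pool attaches to workers it discovers), and supplying both falls through to `case _` and raises `RuntimeError`. The sketch below illustrates the two modes using only names that appear in this release (`wool.work`, `WorkerPool`, `LanDiscoveryService`); `process_item`, `ephemeral_example`, and `durable_example` are hypothetical names invented for the example, not part of the package.

import wool
from wool._worker_discovery import LanDiscoveryService


@wool.work
async def process_item(x: int) -> int:
    # Hypothetical task; any coroutine decorated with @wool.work would do.
    return x * 2


async def ephemeral_example() -> int:
    # Tags and size only -> case (size, None): spawn two tagged local workers
    # and tear them down when the context exits.
    async with wool.WorkerPool("cpu", size=2):
        return await process_item(21)


async def durable_example() -> int:
    # Discovery only -> case (None, discovery): attach to already-running workers.
    # A negative size raises ValueError; passing both size and discovery falls
    # through to `case _` and raises RuntimeError.
    async with wool.WorkerPool(
        discovery=LanDiscoveryService(filter=lambda w: "prod" in w.tags)
    ):
        return await process_item(21)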