wool 0.1rc9-py3-none-any.whl → 0.1rc10-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.

Potentially problematic release: this version of wool may be problematic.
Files changed (44)
  1. wool/__init__.py +71 -50
  2. wool/_protobuf/__init__.py +12 -5
  3. wool/_protobuf/exception.py +3 -0
  4. wool/_protobuf/task.py +11 -0
  5. wool/_protobuf/task_pb2.py +42 -0
  6. wool/_protobuf/task_pb2.pyi +43 -0
  7. wool/_protobuf/{mempool/metadata_pb2_grpc.py → task_pb2_grpc.py} +2 -2
  8. wool/_protobuf/worker.py +24 -0
  9. wool/_protobuf/worker_pb2.py +47 -0
  10. wool/_protobuf/worker_pb2.pyi +39 -0
  11. wool/_protobuf/worker_pb2_grpc.py +141 -0
  12. wool/_resource_pool.py +376 -0
  13. wool/_typing.py +0 -10
  14. wool/_work.py +553 -0
  15. wool/_worker.py +843 -169
  16. wool/_worker_discovery.py +1223 -0
  17. wool/_worker_pool.py +331 -0
  18. wool/_worker_proxy.py +515 -0
  19. {wool-0.1rc9.dist-info → wool-0.1rc10.dist-info}/METADATA +7 -7
  20. wool-0.1rc10.dist-info/RECORD +22 -0
  21. wool-0.1rc10.dist-info/entry_points.txt +2 -0
  22. wool/_cli.py +0 -262
  23. wool/_event.py +0 -109
  24. wool/_future.py +0 -171
  25. wool/_logging.py +0 -44
  26. wool/_manager.py +0 -181
  27. wool/_mempool/__init__.py +0 -4
  28. wool/_mempool/_client.py +0 -167
  29. wool/_mempool/_mempool.py +0 -311
  30. wool/_mempool/_metadata.py +0 -35
  31. wool/_mempool/_service.py +0 -227
  32. wool/_pool.py +0 -524
  33. wool/_protobuf/mempool/metadata_pb2.py +0 -36
  34. wool/_protobuf/mempool/metadata_pb2.pyi +0 -17
  35. wool/_protobuf/mempool/service_pb2.py +0 -66
  36. wool/_protobuf/mempool/service_pb2.pyi +0 -108
  37. wool/_protobuf/mempool/service_pb2_grpc.py +0 -355
  38. wool/_queue.py +0 -32
  39. wool/_session.py +0 -429
  40. wool/_task.py +0 -366
  41. wool/_utils.py +0 -63
  42. wool-0.1rc9.dist-info/RECORD +0 -29
  43. wool-0.1rc9.dist-info/entry_points.txt +0 -2
  44. {wool-0.1rc9.dist-info → wool-0.1rc10.dist-info}/WHEEL +0 -0
wool/_pool.py DELETED
@@ -1,524 +0,0 @@
- from __future__ import annotations
-
- import logging
- import os
- from contextvars import ContextVar
- from contextvars import Token
- from functools import partial
- from functools import wraps
- from multiprocessing import Pipe
- from multiprocessing import Process
- from multiprocessing import current_process
- from multiprocessing.managers import Server
- from signal import Signals
- from signal import signal
- from threading import Event
- from threading import Semaphore
- from threading import Thread
- from typing import TYPE_CHECKING
- from typing import Coroutine
-
- import wool
- from wool._manager import Manager
- from wool._session import WorkerPoolSession
- from wool._worker import Scheduler
- from wool._worker import Worker
-
- if TYPE_CHECKING:
-     from wool._queue import TaskQueue
-     from wool._task import AsyncCallable
-
-
- def _stop(pool: WorkerPool, wait: bool, *_):
-     pool.stop(wait=wait)
-
-
- # PUBLIC
- def pool(
-     host: str = "localhost",
-     port: int = 48800,
-     *,
-     authkey: bytes | None = None,
-     breadth: int = 0,
-     log_level: int = logging.INFO,
- ) -> WorkerPool:
-     """
-     Convenience function to declare a worker pool context.
-
-     :param host: The hostname of the worker pool. Defaults to "localhost".
-     :param port: The port of the worker pool. Defaults to 48800.
-     :param authkey: Optional authentication key for the worker pool.
-     :param breadth: Number of worker processes in the pool. Defaults to 0
-         (CPU count).
-     :param log_level: Logging level for the worker pool. Defaults to
-         logging.INFO.
-     :return: A decorator that wraps the function to execute within the session.
-
-     Usage:
-
-     .. code-block:: python
-
-         import wool
-
-
-         @wool.pool(
-             host="localhost",
-             port=48800,
-             authkey=b"deadbeef",
-             breadth=4,
-         )
-         async def foo(): ...
-
-     This is equivalent to:
-
-     .. code-block:: python
-
-         import wool
-
-
-         async def foo():
-             with wool.pool(
-                 host="localhost", port=48800, authkey=b"deadbeef", breadth=4
-             ):
-                 ...
-
-     This decorator can also be combined with the ``@wool.task`` decorator to
-     declare a task that is tightly coupled with the specified pool:
-
-     .. code-block:: python
-
-         import wool
-
-
-         @wool.pool(
-             host="localhost",
-             port=48800,
-             authkey=b"deadbeef",
-             breadth=4,
-         )
-         @wool.task
-         async def foo(): ...
-
-     .. note::
-
-         The order of decorators matters. To ensure that invocations of the
-         declared task are dispatched to the pool specified by ``@wool.pool``,
-         the ``@wool.task`` decorator must be applied after ``@wool.pool``.
-     """
-     return WorkerPool(
-         address=(host, port),
-         authkey=authkey,
-         breadth=breadth,
-         log_level=log_level,
-     )
-
-
- # PUBLIC
- class WorkerPool(Process):
-     """
-     A multiprocessing-based worker pool for executing asynchronous tasks. A
-     pool consists of a single manager process and at least a single worker
-     process. The manager process orchestrates its workers and serves client
-     dispatch requests. The worker process(es) execute(s) dispatched tasks on a
-     first-come, first-served basis.
-
-     The worker pool class is implemented as a context manager and decorator,
-     allowing users to easily spawn ephemeral pools that live for the duration
-     of a client application's execution and tightly couple functions to a pool.
-
-     :param address: The address of the worker pool (host, port).
-     :param authkey: Optional authentication key for the pool. If not specified,
-         the manager will inherit the authkey from the current process.
-     :param breadth: Number of worker processes in the pool. Defaults to CPU
-         count.
-     :param log_level: Logging level for the pool.
-     """
-
-     _wait_event: Event | None = None
-     _stop_event: Event | None = None
-     _stopped: bool = False
-
-     def __init__(
-         self,
-         address: tuple[str, int] = ("localhost", 5050),
-         *args,
-         authkey: bytes | None = None,
-         breadth: int = 0,
-         log_level: int = logging.INFO,
-         **kwargs,
-     ) -> None:
-         super().__init__(*args, name=self.__class__.__name__, **kwargs)
-         if authkey is not None:
-             self.authkey: bytes = authkey
-         if not breadth:
-             if not (breadth := (os.cpu_count() or 0)):
-                 raise ValueError("Unable to determine CPU count")
-         if not breadth > 0:
-             raise ValueError("Breadth must be a positive integer")
-         self._breadth: int = breadth
-         self._address: tuple[str, int] = address
-         self._log_level: int = log_level
-         self._token: Token | None = None
-         self._session = self.session_type(
-             address=self._address, authkey=self.authkey
-         )
-         self._get_ready, self._set_ready = Pipe(duplex=False)
-
-     def __call__(self, fn: AsyncCallable) -> AsyncCallable:
-         """
-         Decorate a function to be executed within the pool.
-
-         :param fn: The function to be executed.
-         :return: The wrapped function.
-         """
-
-         @wraps(fn)
-         async def wrapper(*args, **kwargs) -> Coroutine:
-             with self:
-                 return await fn(*args, **kwargs)
-
-         return wrapper
-
-     def __enter__(self):
-         """
-         Enter the context of the pool, starting the pool and connecting the
-         session.
-         """
-         self.start()
-         self._session.connect()
-         self._token = self.session_context.set(self._session)
-
-     def __exit__(self, *_) -> None:
-         """
-         Exit the context of the pool, stopping the pool and disconnecting the
-         session.
-         """
-         assert self._token
-         self.session_context.reset(self._token)
-         assert self.pid
-         try:
-             self.stop(wait=True)
-         except ConnectionRefusedError:
-             logging.warning(
-                 f"Connection to manager at {self._address} refused."
-             )
-         finally:
-             self.join()
-
-     @property
-     def session_type(self) -> type[WorkerPoolSession]:
-         """
-         Get the session type for the pool.
-
-         :return: The session type.
-         """
-         return WorkerPoolSession
-
-     @property
-     def session_context(self) -> ContextVar[WorkerPoolSession]:
-         """
-         Get the session context variable for the pool.
-
-         :return: The session context variable.
-         """
-         return wool.__wool_session__
-
-     @property
-     def scheduler_type(self) -> type[Scheduler]:
-         """
-         Get the scheduler type for the pool.
-
-         :return: The scheduler type.
-         """
-         return Scheduler
-
-     @property
-     def log_level(self) -> int:
-         """
-         Get the logging level for the pool.
-
-         :return: The logging level.
-         """
-         return self._log_level
-
-     @log_level.setter
-     def log_level(self, value: int) -> None:
-         """
-         Set the logging level for the pool.
-
-         :param value: The new logging level.
-         """
-         if value < 0:
-             raise ValueError("Log level must be non-negative")
-         self._log_level = value
-
-     @property
-     def breadth(self) -> int:
-         """
-         Get the number of worker processes in the pool.
-
-         :return: The number of worker processes.
-         """
-         return self._breadth
-
-     @property
-     def waiting(self) -> bool | None:
-         """
-         Check if the pool is in a waiting state.
-
-         :return: True if waiting, False otherwise, or None if undefined.
-         """
-         return self._wait_event and self._wait_event.is_set()
-
-     @property
-     def stopping(self) -> bool | None:
-         """
-         Check if the pool is in a stopping state.
-
-         :return: True if stopping, False otherwise, or None if undefined.
-         """
-         return self._stop_event and self._stop_event.is_set()
-
-     def start(self) -> None:
-         """
-         Start the pool process and wait for it to be ready.
-         """
-         super().start()
-         self._get_ready.recv()
-         self._get_ready.close()
-
-     def run(self) -> None:
-         """
-         Run the pool process, managing workers and the manager process.
-         """
-         if self.log_level:
-             wool.__log_level__ = self.log_level
-             logging.basicConfig(format=wool.__log_format__)
-             logging.getLogger().setLevel(self.log_level)
-             logging.info(f"Set log level to {self.log_level}")
-
-         logging.debug("Thread started")
-
-         signal(Signals.SIGINT, partial(_stop, self, False))
-         signal(Signals.SIGTERM, partial(_stop, self, True))
-
-         self.manager_sentinel = ManagerSentinel(
-             address=self._address, authkey=self.authkey
-         )
-         self.manager_sentinel.start()
-
-         self._wait_event = self.manager_sentinel.waiting
-         self._stop_event = self.manager_sentinel.stopping
-
-         worker_sentinels = []
-         logging.info("Spawning workers...")
-         try:
-             for i in range(1, self.breadth + 1):
-                 if not self._stop_event.is_set():
-                     worker_sentinel = WorkerSentinel(
-                         address=self._address,
-                         log_level=self.log_level,
-                         id=i,
-                         scheduler=self.scheduler_type,
-                     )
-                     worker_sentinel.start()
-                     worker_sentinels.append(worker_sentinel)
-             for worker_sentinel in worker_sentinels:
-                 worker_sentinel.ready.wait()
-             self._set_ready.send(True)
-             self._set_ready.close()
-         except Exception:
-             logging.exception("Error in worker pool")
-             raise
-         finally:
-             while not self.idle and not self.stopping:
-                 self._stop_event.wait(1)
-             else:
-                 self.stop(wait=bool(self.idle or self.waiting))
-
-             logging.info("Stopping workers...")
-             for worker_sentinel in worker_sentinels:
-                 if worker_sentinel.is_alive():
-                     worker_sentinel.stop(wait=self.waiting)
-             for worker_sentinel in worker_sentinels:
-                 worker_sentinel.join()
-
-             logging.info("Stopping manager...")
-             if self.manager_sentinel.is_alive():
-                 self.manager_sentinel.stop()
-             self.manager_sentinel.join()
-
-     @property
-     def idle(self):
-         """
-         Check if the pool is idle.
-
-         :return: True if idle, False otherwise.
-         """
-         assert self.manager_sentinel
-         try:
-             return self.manager_sentinel.idle
-         except (ConnectionRefusedError, ConnectionResetError):
-             return True
-
-     def stop(self, *, wait: bool = True) -> None:
-         """
-         Stop the pool process.
-
-         :param wait: Whether to wait for the pool to stop gracefully.
-         """
-         if self.pid == current_process().pid:
-             if wait and self.waiting is False and self.stopping is False:
-                 assert self._wait_event
-                 self._wait_event.set()
-             if self.stopping is False:
-                 assert self._stop_event
-                 self._stop_event.set()
-         elif self.pid:
-             self._session.stop(wait=wait)
-
-
- class ManagerSentinel(Thread):
-     _wait_event: Event | None = None
-     _stop_event: Event | None = None
-     _queue: TaskQueue | None = None
-
-     def __init__(
-         self, address: tuple[str, int], authkey: bytes, *args, **kwargs
-     ) -> None:
-         self._manager: Manager = Manager(address=address, authkey=authkey)
-         self._server: Server = self._manager.get_server()
-         super().__init__(*args, name=self.__class__.__name__, **kwargs)
-
-     @property
-     def waiting(self) -> Event:
-         if not self._wait_event:
-             self._manager.connect()
-             self._wait_event = self._manager.waiting()
-         return self._wait_event
-
-     @property
-     def stopping(self) -> Event:
-         if not self._stop_event:
-             self._manager.connect()
-             self._stop_event = self._manager.stopping()
-         return self._stop_event
-
-     @property
-     def idle(self) -> bool | None:
-         if not self._queue:
-             self._manager.connect()
-             self._queue = self._manager.queue()
-         return self._queue.idle()
-
-     def run(self) -> None:
-         self._server.serve_forever()
-
-     def stop(self) -> None:
-         stop_event = getattr(self._server, "stop_event")
-         assert isinstance(stop_event, Event)
-         logging.debug("Stopping manager...")
-         stop_event.set()
-
-
- class WorkerSentinel(Thread):
-     _worker: Worker | None = None
-     _semaphore: Semaphore = Semaphore(8)
-
-     def __init__(
-         self,
-         address: tuple[str, int],
-         *args,
-         id: int,
-         cooldown: float = 1,
-         log_level: int = logging.INFO,
-         scheduler: type[Scheduler] = Scheduler,
-         **kwargs,
-     ) -> None:
-         self._address: tuple[str, int] = address
-         self._id: int = id
-         self._cooldown: float = cooldown
-         self._log_level: int = log_level
-         self._scheduler_type = scheduler
-         self._stop_event: Event = Event()
-         self._wait_event: Event = Event()
-         self._ready: Event = Event()
-         super().__init__(
-             *args, name=f"{self.__class__.__name__}-{self.id}", **kwargs
-         )
-
-     @property
-     def worker(self) -> Worker | None:
-         return self._worker
-
-     @property
-     def id(self) -> int:
-         return self._id
-
-     @property
-     def ready(self) -> Event:
-         return self._ready
-
-     @property
-     def cooldown(self) -> float:
-         return self._cooldown
-
-     @cooldown.setter
-     def cooldown(self, value: float) -> None:
-         if value < 0:
-             raise ValueError("Cooldown must be non-negative")
-         self._cooldown = value
-
-     @property
-     def log_level(self) -> int:
-         return self._log_level
-
-     @property
-     def waiting(self) -> bool:
-         return self._wait_event.is_set()
-
-     @property
-     def stopping(self) -> bool:
-         return self._stop_event.is_set()
-
-     @log_level.setter
-     def log_level(self, value: int) -> None:
-         if value < 0:
-             raise ValueError("Log level must be non-negative")
-         self._log_level = value
-
-     def start(self) -> None:
-         super().start()
-
-     def run(self) -> None:
-         logging.debug("Thread started")
-         while not self._stop_event.is_set():
-             worker = Worker(
-                 address=self._address,
-                 name=f"Worker-{self.id}",
-                 log_level=self.log_level,
-                 scheduler=self._scheduler_type,
-             )
-             with self._semaphore:
-                 worker.start()
-                 self._worker = worker
-                 logging.info(f"Spawned worker process {worker.pid}")
-                 self._ready.set()
-                 try:
-                     worker.join()
-                 except Exception as e:
-                     logging.error(e)
-                 finally:
-                     logging.info(f"Terminated worker process {worker.pid}")
-                     self._worker = None
-             self._stop_event.wait(self.cooldown)
-         logging.debug("Thread stopped")
-
-     def stop(self, *, wait: bool = True) -> None:
-         logging.info(f"Stopping thread {self.name}...")
-         if wait and not self.waiting:
-             self._wait_event.set()
-         if not self.stopping:
-             self._stop_event.set()
-         if self._worker:
-             self._worker.stop(wait=self._wait_event.is_set())
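
For orientation, the docstrings in the deleted module above describe the public API that this release removes. The sketch below restates that pre-0.1rc10 usage in one place; it is assembled only from those docstrings (wool.pool and wool.task as documented in 0.1rc9), with an asyncio entry point added for completeness. None of it applies to the 0.1rc10 API.

    import asyncio

    import wool


    # Per the deleted docstring, @wool.pool spawns an ephemeral pool for the
    # duration of the call, and @wool.task (applied below it, as in the
    # docstring's example) couples the coroutine to that pool.
    @wool.pool(host="localhost", port=48800, authkey=b"deadbeef", breadth=4)
    @wool.task
    async def foo():
        ...


    # Equivalent context-manager form from the same docstring: the pool lives
    # only for the body of the `with` block.
    async def bar():
        with wool.pool(host="localhost", port=48800, authkey=b"deadbeef", breadth=4):
            ...


    if __name__ == "__main__":
        asyncio.run(foo())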
wool/_protobuf/mempool/metadata_pb2.py DELETED
@@ -1,36 +0,0 @@
- # -*- coding: utf-8 -*-
- # Generated by the protocol buffer compiler. DO NOT EDIT!
- # NO CHECKED-IN PROTOBUF GENCODE
- # source: mempool/metadata.proto
- # Protobuf Python Version: 6.31.0
- """Generated protocol buffer code."""
- from google.protobuf import descriptor as _descriptor
- from google.protobuf import descriptor_pool as _descriptor_pool
- from google.protobuf import runtime_version as _runtime_version
- from google.protobuf import symbol_database as _symbol_database
- from google.protobuf.internal import builder as _builder
- _runtime_version.ValidateProtobufRuntimeVersion(
-     _runtime_version.Domain.PUBLIC,
-     6,
-     31,
-     0,
-     '',
-     'mempool/metadata.proto'
- )
- # @@protoc_insertion_point(imports)
-
- _sym_db = _symbol_database.Default()
-
-
-
-
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16mempool/metadata.proto\x12\x16wool._protobuf.mempool\"J\n\x0fMetadataMessage\x12\x0b\n\x03ref\x18\x01 \x01(\t\x12\x0f\n\x07mutable\x18\x02 \x01(\x08\x12\x0c\n\x04size\x18\x03 \x01(\x03\x12\x0b\n\x03md5\x18\x04 \x01(\x0c\x62\x06proto3')
-
- _globals = globals()
- _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
- _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mempool.metadata_pb2', _globals)
- if not _descriptor._USE_C_DESCRIPTORS:
-   DESCRIPTOR._loaded_options = None
-   _globals['_METADATAMESSAGE']._serialized_start=50
-   _globals['_METADATAMESSAGE']._serialized_end=124
- # @@protoc_insertion_point(module_scope)
wool/_protobuf/mempool/metadata_pb2.pyi DELETED
@@ -1,17 +0,0 @@
- from google.protobuf import descriptor as _descriptor
- from google.protobuf import message as _message
- from typing import ClassVar as _ClassVar, Optional as _Optional
-
- DESCRIPTOR: _descriptor.FileDescriptor
-
- class MetadataMessage(_message.Message):
-     __slots__ = ("ref", "mutable", "size", "md5")
-     REF_FIELD_NUMBER: _ClassVar[int]
-     MUTABLE_FIELD_NUMBER: _ClassVar[int]
-     SIZE_FIELD_NUMBER: _ClassVar[int]
-     MD5_FIELD_NUMBER: _ClassVar[int]
-     ref: str
-     mutable: bool
-     size: int
-     md5: bytes
-     def __init__(self, ref: _Optional[str] = ..., mutable: bool = ..., size: _Optional[int] = ..., md5: _Optional[bytes] = ...) -> None: ...
wool/_protobuf/mempool/service_pb2.py DELETED
@@ -1,66 +0,0 @@
- # -*- coding: utf-8 -*-
- # Generated by the protocol buffer compiler. DO NOT EDIT!
- # NO CHECKED-IN PROTOBUF GENCODE
- # source: mempool/service.proto
- # Protobuf Python Version: 6.31.0
- """Generated protocol buffer code."""
- from google.protobuf import descriptor as _descriptor
- from google.protobuf import descriptor_pool as _descriptor_pool
- from google.protobuf import runtime_version as _runtime_version
- from google.protobuf import symbol_database as _symbol_database
- from google.protobuf.internal import builder as _builder
- _runtime_version.ValidateProtobufRuntimeVersion(
-     _runtime_version.Domain.PUBLIC,
-     6,
-     31,
-     0,
-     '',
-     'mempool/service.proto'
- )
- # @@protoc_insertion_point(imports)
-
- _sym_db = _symbol_database.Default()
-
-
-
-
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15mempool/service.proto\x12\x16wool._protobuf.mempool\"\x10\n\x0eSessionRequest\"\x81\x01\n\x0fSessionResponse\x12\x32\n\x07session\x18\x01 \x01(\x0b\x32\x1f.wool._protobuf.mempool.SessionH\x00\x12.\n\x05\x65vent\x18\x02 \x01(\x0b\x32\x1d.wool._protobuf.mempool.EventH\x00\x42\n\n\x08response\"x\n\x0e\x41\x63quireRequest\x12\x34\n\treference\x18\x01 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\x12\x30\n\x07session\x18\x02 \x01(\x0b\x32\x1f.wool._protobuf.mempool.Session\"\x11\n\x0f\x41\x63quireResponse\"]\n\nPutRequest\x12\x30\n\x07session\x18\x01 \x01(\x0b\x32\x1f.wool._protobuf.mempool.Session\x12\x0f\n\x07mutable\x18\x02 \x01(\x08\x12\x0c\n\x04\x64ump\x18\x03 \x01(\x0c\"C\n\x0bPutResponse\x12\x34\n\treference\x18\x01 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\"\x83\x01\n\x0bPostRequest\x12\x30\n\x07session\x18\x01 \x01(\x0b\x32\x1f.wool._protobuf.mempool.Session\x12\x34\n\treference\x18\x02 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\x12\x0c\n\x04\x64ump\x18\x03 \x01(\x0c\"\x1f\n\x0cPostResponse\x12\x0f\n\x07updated\x18\x01 \x01(\x08\"t\n\nGetRequest\x12\x34\n\treference\x18\x01 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\x12\x30\n\x07session\x18\x02 \x01(\x0b\x32\x1f.wool._protobuf.mempool.Session\"\x1b\n\x0bGetResponse\x12\x0c\n\x04\x64ump\x18\x01 \x01(\x0c\"x\n\x0eReleaseRequest\x12\x34\n\treference\x18\x01 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\x12\x30\n\x07session\x18\x02 \x01(\x0b\x32\x1f.wool._protobuf.mempool.Session\"\x11\n\x0fReleaseResponse\"\x17\n\tReference\x12\n\n\x02id\x18\x01 \x01(\t\"\x15\n\x07Session\x12\n\n\x02id\x18\x01 \x01(\t\"Q\n\x05\x45vent\x12\x34\n\treference\x18\x01 \x01(\x0b\x32!.wool._protobuf.mempool.Reference\x12\x12\n\nevent_type\x18\x02 \x01(\t2\xed\x04\n\nMemoryPool\x12\\\n\x07session\x12&.wool._protobuf.mempool.SessionRequest\x1a\'.wool._protobuf.mempool.SessionResponse0\x01\x12Z\n\x07\x61\x63quire\x12&.wool._protobuf.mempool.AcquireRequest\x1a\'.wool._protobuf.mempool.AcquireResponse\x12V\n\x03map\x12&.wool._protobuf.mempool.AcquireRequest\x1a\'.wool._protobuf.mempool.AcquireResponse\x12N\n\x03put\x12\".wool._protobuf.mempool.PutRequest\x1a#.wool._protobuf.mempool.PutResponse\x12Q\n\x04post\x12#.wool._protobuf.mempool.PostRequest\x1a$.wool._protobuf.mempool.PostResponse\x12N\n\x03get\x12\".wool._protobuf.mempool.GetRequest\x1a#.wool._protobuf.mempool.GetResponse\x12Z\n\x07release\x12&.wool._protobuf.mempool.ReleaseRequest\x1a\'.wool._protobuf.mempool.ReleaseResponseb\x06proto3')
-
- _globals = globals()
- _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
- _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mempool.service_pb2', _globals)
- if not _descriptor._USE_C_DESCRIPTORS:
-   DESCRIPTOR._loaded_options = None
-   _globals['_SESSIONREQUEST']._serialized_start=49
-   _globals['_SESSIONREQUEST']._serialized_end=65
-   _globals['_SESSIONRESPONSE']._serialized_start=68
-   _globals['_SESSIONRESPONSE']._serialized_end=197
-   _globals['_ACQUIREREQUEST']._serialized_start=199
-   _globals['_ACQUIREREQUEST']._serialized_end=319
-   _globals['_ACQUIRERESPONSE']._serialized_start=321
-   _globals['_ACQUIRERESPONSE']._serialized_end=338
-   _globals['_PUTREQUEST']._serialized_start=340
-   _globals['_PUTREQUEST']._serialized_end=433
-   _globals['_PUTRESPONSE']._serialized_start=435
-   _globals['_PUTRESPONSE']._serialized_end=502
-   _globals['_POSTREQUEST']._serialized_start=505
-   _globals['_POSTREQUEST']._serialized_end=636
-   _globals['_POSTRESPONSE']._serialized_start=638
-   _globals['_POSTRESPONSE']._serialized_end=669
-   _globals['_GETREQUEST']._serialized_start=671
-   _globals['_GETREQUEST']._serialized_end=787
-   _globals['_GETRESPONSE']._serialized_start=789
-   _globals['_GETRESPONSE']._serialized_end=816
-   _globals['_RELEASEREQUEST']._serialized_start=818
-   _globals['_RELEASEREQUEST']._serialized_end=938
-   _globals['_RELEASERESPONSE']._serialized_start=940
-   _globals['_RELEASERESPONSE']._serialized_end=957
-   _globals['_REFERENCE']._serialized_start=959
-   _globals['_REFERENCE']._serialized_end=982
-   _globals['_SESSION']._serialized_start=984
-   _globals['_SESSION']._serialized_end=1005
-   _globals['_EVENT']._serialized_start=1007
-   _globals['_EVENT']._serialized_end=1088
-   _globals['_MEMORYPOOL']._serialized_start=1091
-   _globals['_MEMORYPOOL']._serialized_end=1712
- # @@protoc_insertion_point(module_scope)