shared_multiprocess_queue-0.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

shared_multiprocess_queue-0.1.1.dist-info/METADATA ADDED
@@ -0,0 +1,327 @@
Metadata-Version: 2.4
Name: shared-multiprocess-queue
Version: 0.1.1
Summary: A shared-memory based multiprocessing queue for cross-process communication
Author-email: Raymond Chastain <RaymondLC92@protonmail.com>
License: MIT
Keywords: python,package
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3.13
Requires-Python: >=3.13.7
Description-Content-Type: text/markdown
Requires-Dist: cloudpickle>=3.0.0
Requires-Dist: shared_memory_lock>=0.1.1

# Shared Queue

A high-performance shared-memory based multiprocessing queue for cross-process communication. Unlike `multiprocessing.Queue`, this queue is fully picklable, can be accessed by name from any process, and provides advanced features like batch operations and built-in metrics.

## Features

- **Picklable**: Can be safely passed between processes via pickle (see the example under API Reference)
- **Named queues**: Multiple processes can connect to the same queue by name
- **Ring buffer design**: Efficient circular buffer with lock-protected head/tail pointers
- **Batch operations**: `put_batch()` for high-throughput scenarios
- **Shared memory backed**: Uses `multiprocessing.shared_memory`, so items move between processes without pipe or socket overhead
- **Thread and process safe**: Synchronized access with `shared_memory_lock`
- **Observable**: Built-in metrics for monitoring queue utilization
- **Run namespacing**: Isolate queues by `run_id` to prevent collisions

## Installation

```bash
pip install shared-multiprocess-queue
```

Or for development:

```bash
git clone <repo-url>
cd shared_queue
uv sync
uv pip install -e .
```

## Quick Start

```python
from shared_queue import SharedMemoryQueue, Empty
from multiprocessing import Process
import time

def worker(queue_name: str, run_id: str, worker_id: int):
    # Connect to an existing queue by name
    queue = SharedMemoryQueue(name=queue_name, create=False, run_id=run_id)

    while True:
        try:
            task = queue.get(timeout=1.0)
            if task == "STOP":
                break
            print(f"Worker {worker_id} processing: {task}")
            time.sleep(0.1)  # Simulate work
        except Empty:
            continue

    queue.close()

def main():
    run_id = "task_processor"

    # Create the queue in the main process
    work_queue = SharedMemoryQueue(
        name="tasks",
        create=True,
        capacity=1000,
        item_size=4096,
        run_id=run_id,
    )

    # Start worker processes
    workers = []
    for i in range(3):
        p = Process(target=worker, args=("tasks", run_id, i))
        p.start()
        workers.append(p)

    # Add tasks to the queue
    for i in range(20):
        work_queue.put(f"task_{i}")

    # Signal workers to stop
    for _ in workers:
        work_queue.put("STOP")

    # Wait for workers to finish
    for p in workers:
        p.join()

    print(f"Final queue size: {work_queue.qsize()}")

    # Cleanup
    work_queue.unlink()

if __name__ == "__main__":
    main()
```

## API Reference

### `SharedMemoryQueue(name, create=False, capacity=1000, item_size=4096, run_id="", lock=None)`

Creates or connects to a shared memory queue.

**Parameters:**
- `name` (str): Queue identifier
- `create` (bool): Whether to create a new queue (`True`) or connect to an existing one (`False`)
- `capacity` (int): Maximum number of items the queue can hold
- `item_size` (int): Maximum size of each serialized item in bytes
- `run_id` (str): Optional run identifier for namespacing queues (see the naming sketch below)
- `lock` (Any): Ignored - a `SharedMemoryLock` is always used internally
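
Internally, `run_id` is folded into the shared-memory segment name (this version uses `{run_id}-{name}-queue`, or `{name}-queue` when `run_id` is empty), so identically named queues from different runs never collide. A minimal sketch of the effect, assuming `shared_memory_lock` namespaces its lock by `run_id` the same way the constructor suggests:

```python
from shared_queue import SharedMemoryQueue

# Same queue name, different run_ids: two independent segments
q_a = SharedMemoryQueue("jobs", create=True, run_id="run_a")
q_b = SharedMemoryQueue("jobs", create=True, run_id="run_b")

q_a.put("only visible in run_a")
print(q_b.qsize())  # 0 - the two queues share nothing

q_a.unlink()
q_b.unlink()
```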

**Methods:**
- `put(item, block=True, timeout=None)`: Add item to queue
- `get(block=True, timeout=None)`: Remove and return item from queue
- `put_nowait(item)`: Add item without blocking (raises `Full` if queue is full)
- `get_nowait()`: Get item without blocking (raises `Empty` if queue is empty)
- `put_async(item, timeout=None)`: Coroutine variant of `put` (see the async example under Advanced Usage)
- `get_async(timeout=None)`: Coroutine variant of `get`
- `put_batch(items)`: Add multiple items atomically
- `empty()`: Check if queue is empty
- `full()`: Check if queue is full
- `qsize()`: Get approximate queue size
- `get_metrics()`: Return queue metrics dictionary
- `close()`: Close connection to shared memory
- `unlink()`: Delete the shared memory segment (call from the creator process)

**Exceptions:**
- `Empty`: Raised when the queue is empty and a non-blocking operation is requested
- `Full`: Raised when the queue is full and a non-blocking operation is requested
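
Because the queue implements `__getstate__`/`__setstate__` (it pickles only its connection info and reattaches to the named segment on unpickling), an instance can also be handed directly to a child process instead of reconnecting by name. A minimal sketch:

```python
from multiprocessing import Process
from shared_queue import SharedMemoryQueue

def child(q: SharedMemoryQueue) -> None:
    # q was pickled by multiprocessing and reattached here by name
    q.put("hello from child")

if __name__ == "__main__":
    q = SharedMemoryQueue("pickled", create=True, run_id="demo")
    p = Process(target=child, args=(q,))
    p.start()
    p.join()
    print(q.get())  # "hello from child"
    q.unlink()
```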

## Advanced Usage

### Batch Operations

For high-throughput scenarios, use `put_batch()` to add multiple items atomically:

```python
queue = SharedMemoryQueue("batch_queue", create=True, capacity=10000)

# Process items in batches for better performance
batch = []
for i in range(100):
    batch.append(f"item_{i}")
    if len(batch) >= 50:
        queue.put_batch(batch)
        batch = []

# Don't forget the remaining items
if batch:
    queue.put_batch(batch)
```

### Queue Metrics

Monitor queue performance with the built-in metrics:

```python
queue = SharedMemoryQueue("monitored_queue", create=True, capacity=1000)

# Add some items
for i in range(100):
    queue.put(f"item_{i}")

metrics = queue.get_metrics()
print(f"Queue size: {metrics['queue_size']}")
print(f"Capacity: {metrics['queue_capacity']}")
print(f"Utilization: {metrics['queue_utilization_percent']:.1f}%")
print(f"Item size limit: {metrics['queue_item_size_bytes']} bytes")
```

### Cross-Process Communication

```python
import time
from multiprocessing import Process
from shared_queue import SharedMemoryQueue

# Producer process
def producer(queue_name, run_id):
    queue = SharedMemoryQueue(queue_name, create=False, run_id=run_id)
    for i in range(1000):
        queue.put({"data": f"item_{i}", "timestamp": time.time()})
    queue.put(None)  # Sentinel value

# Consumer process
def consumer(queue_name, run_id):
    queue = SharedMemoryQueue(queue_name, create=False, run_id=run_id)
    results = []
    while True:
        item = queue.get()
        if item is None:  # Sentinel value
            break
        results.append(item)
    return results  # Note: local to the consumer process

# Main process
run_id = "data_pipeline"
queue = SharedMemoryQueue("data_queue", create=True, capacity=2000, run_id=run_id)

# Start processes
p1 = Process(target=producer, args=("data_queue", run_id))
p2 = Process(target=consumer, args=("data_queue", run_id))

p1.start()
p2.start()

p1.join()
p2.join()

queue.unlink()
```

### Timeout Handling

```python
from shared_queue import SharedMemoryQueue, Empty, Full

queue = SharedMemoryQueue("timeout_queue", create=True, capacity=10)

try:
    # Waits up to 5 seconds for space
    queue.put("item", block=True, timeout=5.0)
except Full:
    print("Queue was full for 5 seconds")

try:
    # Waits up to 2 seconds for an item
    item = queue.get(block=True, timeout=2.0)
except Empty:
    print("No items available for 2 seconds")
```
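
### Async Access

`shared_queue/queue.py` also ships `put_async()`/`get_async()` coroutines that poll with `asyncio.sleep` instead of blocking the event loop. A minimal sketch (the async paths rely on `shared_memory_lock` supporting `async with`):

```python
import asyncio
from shared_queue import SharedMemoryQueue, Empty

async def main():
    queue = SharedMemoryQueue("async_queue", create=True, capacity=100)
    await queue.put_async({"job": 1}, timeout=1.0)
    try:
        item = await queue.get_async(timeout=1.0)
        print(item)
    except Empty:
        print("timed out")
    queue.unlink()

asyncio.run(main())
```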

## Performance Characteristics

- **Ring buffer**: O(1) put/get operations
- **Direct writes**: serialized items go straight into shared memory, with no pipe or socket copies
- **Lock-guarded pointers**: head/tail updates happen under a cross-process lock, so safety does not depend on the GIL
- **Batch operations**: `put_batch()` amortizes lock acquisition across many items (see the timing sketch below)
- **Configurable capacity**: balance memory usage against queue depth
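
A quick way to see the batch advantage is to time per-item puts against one `put_batch` call. This is only a rough sketch (absolute numbers will vary by machine), sized so the queue has room for every item:

```python
import time
from shared_queue import SharedMemoryQueue

q = SharedMemoryQueue("bench", create=True, capacity=20000, item_size=256)
items = [f"item_{i}" for i in range(10000)]

t0 = time.perf_counter()
for it in items:
    q.put(it)           # one lock acquisition per item
t1 = time.perf_counter()

q.put_batch(items)      # one lock acquisition for all items
t2 = time.perf_counter()

print(f"put x{len(items)}: {t1 - t0:.3f}s, put_batch: {t2 - t1:.3f}s")
q.unlink()
```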

## Use Cases

### 1. High-Throughput Task Distribution

```python
# Distribute work across multiple worker processes
# (assumes: from shared_queue import SharedMemoryQueue, Empty; import time)
task_queue = SharedMemoryQueue("tasks", create=True, capacity=50000, item_size=8192)

# Producer adds tasks
tasks = generate_large_task_list()
task_queue.put_batch(tasks)  # Efficient batch insertion

# Multiple workers consume tasks
def worker():
    while True:
        try:
            task = task_queue.get_nowait()
            process_task(task)
        except Empty:
            time.sleep(0.01)
```

### 2. Inter-Service Communication

```python
# Service A publishes events
event_queue = SharedMemoryQueue("events", create=True, capacity=10000, run_id="system")

# Service B subscribes to events
event_queue = SharedMemoryQueue("events", create=False, run_id="system")
while True:
    event = event_queue.get()
    handle_event(event)
```

### 3. Real-Time Data Processing

```python
# High-frequency data ingestion
data_queue = SharedMemoryQueue("sensor_data", create=True, capacity=100000, item_size=1024)

# Sensor data producer
def collect_sensor_data():
    batch = []
    while True:
        reading = read_sensor()
        batch.append(reading)
        if len(batch) >= 100:  # Batch for efficiency
            data_queue.put_batch(batch)
            batch = []
```

## Implementation Details

- Uses `multiprocessing.shared_memory` for the underlying storage
- Ring buffer layout: `[head][tail][capacity][item_size][data_slots...]`
- Items serialized with `cloudpickle` for maximum compatibility
- Synchronized with `shared_memory_lock` for cross-process safety
- Header stores 64-bit integers for head/tail pointers
- Each slot prefixed with a 32-bit length for variable-size items; the layout sketch below illustrates this
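
The sizes above can be checked directly with `struct`; this just restates the layout constants from `shared_queue/queue.py`, it is not extra API:

```python
import struct

HEADER_FMT = "!QQQQ"                       # head, tail, capacity, item_size (u64 each)
HEADER_SIZE = struct.calcsize(HEADER_FMT)  # 32 bytes
LEN_PREFIX_FMT = "!I"                      # 32-bit length prefix inside each slot

def slot_offset(index: int, capacity: int, item_size: int) -> int:
    # Slots live right after the header; the ring wraps via modulo
    return HEADER_SIZE + (index % capacity) * item_size

# A queue with capacity=1000 and item_size=4096 occupies:
print(HEADER_SIZE + 1000 * 4096, "bytes of shared memory")
```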

## Development

```bash
# Install dependencies
uv sync

# Run tests
uv run pytest

# Run tests with coverage
uv run pytest --cov=shared_queue

# Type checking
uv run mypy .

# Linting
uv run ruff check .
```

## License

MIT

shared_multiprocess_queue-0.1.1.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
shared_queue/__init__.py,sha256=n7Ol-4ftiIarTO4vZpBMV-V1aAkxcjpHtpqzVFDMJPY,204
shared_queue/queue.py,sha256=tt2IA5jckivnOMY06Vrs4fEUDyCBWbJV-H4dnIi4470,24354
shared_multiprocess_queue-0.1.1.dist-info/METADATA,sha256=lQdVctNFcI1DgCL1Yo1u5qWCZ58jA97FwlCgtX1NxeU,8969
shared_multiprocess_queue-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
shared_multiprocess_queue-0.1.1.dist-info/top_level.txt,sha256=jyPOO-FyMPquhn_5tpGt4WS9V0Aa0JjoYgt-aAH8eyw,13
shared_multiprocess_queue-0.1.1.dist-info/RECORD,,

shared_multiprocess_queue-0.1.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
shared_queue

shared_queue/__init__.py ADDED
shared_queue/queue.py ADDED
@@ -0,0 +1,628 @@
"""
Shared memory queue implementation for cross-process communication.

This queue can be created dynamically at runtime and accessed by name
from any process without requiring pickling or process restarts.
"""

from __future__ import annotations

import asyncio
import struct
import time
from multiprocessing import shared_memory
from typing import Any, Protocol

import cloudpickle

from shared_memory_lock import SharedMemoryLock

# Queue layout in shared memory:
# [head: u64][tail: u64][capacity: u64][item_size: u64][data...]
_HEADER_FMT = "!QQQQ"  # head, tail, capacity, item_size
_HEADER_SIZE = struct.calcsize(_HEADER_FMT)


class Observable(Protocol):
    """Protocol for objects that can provide metrics."""

    def get_metrics(self) -> dict[str, Any]:
        """Return metrics dictionary."""
        ...


class SharedMemoryQueue:
    """
    A FIFO queue backed by shared memory that can be created and accessed by name.

    This queue uses a ring buffer design whose head/tail pointers are updated
    under a cross-process lock. Items are serialized using cloudpickle and
    stored in fixed-size slots.

    Usage:
        # Process A creates a queue
        queue = SharedMemoryQueue(name="work_queue", create=True, capacity=1000, run_id="app1")
        queue.put({"task": "data"})

        # Process B connects to it by name
        queue = SharedMemoryQueue(name="work_queue", create=False, run_id="app1")
        item = queue.get()
    """

    def __init__(
        self,
        name: str,
        create: bool = False,
        capacity: int = 1000,
        item_size: int = 4096,
        run_id: str = "",
        lock: Any = None,
    ):
        """
        Initialize a shared memory queue.

        Args:
            name: Queue name (used to identify the shared memory segment)
            create: Whether to create a new queue or connect to an existing one
            capacity: Maximum number of items the queue can hold
            item_size: Maximum size of each serialized item in bytes
            run_id: Run identifier for namespacing
            lock: Ignored - SharedMemoryLock is always used
        """
        self.name = name
        self.run_id = run_id
        self.capacity = capacity
        self.item_size = item_size

        # Create or connect to the shared memory lock
        self._lock = SharedMemoryLock(
            name=f"{name}_queue_lock",
            create=create,
            run_id=run_id,
        )

        # Calculate the total size needed
        data_size = capacity * item_size
        total_size = _HEADER_SIZE + data_size

        # Create or connect to shared memory
        shm_name = f"{run_id}-{name}-queue" if run_id else f"{name}-queue"
        self._shm = shared_memory.SharedMemory(name=shm_name, create=create, size=total_size)

        if create:
            # Initialize header: head=0, tail=0, capacity, item_size
            self._write_header(0, 0, capacity, item_size)
        else:
            # Read the existing header to get the queue parameters
            _, _, cap, isize = self._read_header()
            self.capacity = cap
            self.item_size = isize

    def _read_header(self) -> tuple[int, int, int, int]:
        """Read header: (head, tail, capacity, item_size)"""
        header_bytes = bytes(self._shm.buf[:_HEADER_SIZE])
        head, tail, capacity, item_size = struct.unpack(_HEADER_FMT, header_bytes)
        return int(head), int(tail), int(capacity), int(item_size)

    def _write_header(self, head: int, tail: int, capacity: int, item_size: int) -> None:
        """Write header: (head, tail, capacity, item_size)"""
        header_bytes = struct.pack(_HEADER_FMT, head, tail, capacity, item_size)
        self._shm.buf[:_HEADER_SIZE] = header_bytes

    def _get_slot_offset(self, index: int) -> int:
        """Calculate the offset for a given slot index"""
        slot_index = index % self.capacity
        return _HEADER_SIZE + (slot_index * self.item_size)

    def put(self, item: Any, block: bool = True, timeout: float | None = None) -> None:
        """
        Put an item into the queue.

        Args:
            item: The item to enqueue
            block: Whether to block if the queue is full
            timeout: Maximum time to wait if blocking

        Raises:
            Full: If the queue is full and block=False or the timeout expires
        """
        start_time = time.time() if timeout is not None else None

        while True:
            if self._lock:
                with self._lock:
                    head, tail, capacity, item_size = self._read_header()

                    # Check if the queue is full
                    if (tail - head) >= capacity:
                        if not block:
                            raise Full("Queue is full")
                        if timeout is not None and (time.time() - start_time) >= timeout:
                            raise Full("Queue is full (timeout)")
                        # Fall through to release the lock before sleeping
                    else:
                        # Serialize the item
                        try:
                            item_bytes = cloudpickle.dumps(item)
                        except Exception as e:
                            raise ValueError(f"Failed to serialize item: {e}") from e

                        if len(item_bytes) > item_size:
                            raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                        # Write the item to the tail slot:
                        # item size first (4 bytes), then the item data
                        slot_offset = self._get_slot_offset(tail)
                        size_bytes = struct.pack("!I", len(item_bytes))
                        self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                        self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes

                        # Update the tail
                        self._write_header(head, tail + 1, capacity, item_size)
                        return
            else:
                # Lock-free fallback (unused: self._lock is always created in __init__)
                head, tail, capacity, item_size = self._read_header()

                # Check if the queue is full
                if (tail - head) >= capacity:
                    if not block:
                        raise Full("Queue is full")
                else:
                    # Serialize the item
                    try:
                        item_bytes = cloudpickle.dumps(item)
                    except Exception as e:
                        raise ValueError(f"Failed to serialize item: {e}") from e

                    if len(item_bytes) > item_size:
                        raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                    # Write the item to the tail slot:
                    # item size first (4 bytes), then the item data
                    slot_offset = self._get_slot_offset(tail)
                    size_bytes = struct.pack("!I", len(item_bytes))
                    self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                    self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes

                    # Update the tail
                    self._write_header(head, tail + 1, capacity, item_size)
                    return

            # Queue was full - check the timeout and sleep
            if timeout is not None and (time.time() - start_time) > timeout:
                raise Full("Queue is full (timeout)")
            time.sleep(0.001)  # Brief sleep before retry

    def get(self, block: bool = True, timeout: float | None = None) -> Any:
        """
        Get an item from the queue.

        Args:
            block: Whether to block if the queue is empty
            timeout: Maximum time to wait if blocking

        Returns:
            The dequeued item

        Raises:
            Empty: If the queue is empty and block=False or the timeout expires
        """
        start_time = time.time() if timeout is not None else None

        while True:
            if self._lock:
                with self._lock:
                    head, tail, capacity, item_size = self._read_header()

                    # Check if the queue is empty
                    if head >= tail:
                        if not block:
                            raise Empty("Queue is empty")
                        if timeout is not None and (time.time() - start_time) >= timeout:
                            raise Empty("Queue is empty (timeout)")
                        # Fall through to release the lock before sleeping
                    else:
                        # Read the item from the head slot: item size first
                        slot_offset = self._get_slot_offset(head)
                        size_bytes = bytes(self._shm.buf[slot_offset : slot_offset + 4])
                        item_len = struct.unpack("!I", size_bytes)[0]

                        # Read the item data
                        item_bytes = bytes(self._shm.buf[slot_offset + 4 : slot_offset + 4 + item_len])

                        # Update the head
                        self._write_header(head + 1, tail, capacity, item_size)

                        # Deserialize and return
                        try:
                            return cloudpickle.loads(item_bytes)
                        except Exception as e:
                            raise ValueError(f"Failed to deserialize item: {e}") from e
            else:
                # Lock-free fallback (unused: self._lock is always created in __init__)
                head, tail, capacity, item_size = self._read_header()

                # Check if the queue is empty
                if head >= tail:
                    if not block:
                        raise Empty("Queue is empty")
                else:
                    # Read the item from the head slot: item size first
                    slot_offset = self._get_slot_offset(head)
                    size_bytes = bytes(self._shm.buf[slot_offset : slot_offset + 4])
                    item_len = struct.unpack("!I", size_bytes)[0]

                    # Read the item data
                    item_bytes = bytes(self._shm.buf[slot_offset + 4 : slot_offset + 4 + item_len])

                    # Update the head
                    self._write_header(head + 1, tail, capacity, item_size)

                    # Deserialize and return
                    try:
                        return cloudpickle.loads(item_bytes)
                    except Exception as e:
                        raise ValueError(f"Failed to deserialize item: {e}") from e

            # Queue was empty - check the timeout and sleep
            if timeout is not None and (time.time() - start_time) > timeout:
                raise Empty("Queue is empty (timeout)")
            time.sleep(0.001)  # Brief sleep before retry

    def put_nowait(self, item: Any) -> None:
        """Put an item without blocking"""
        self.put(item, block=False)

    async def put_async(self, item: Any, timeout: float | None = None) -> None:
        """
        Async put an item into the queue.

        Args:
            item: The item to enqueue
            timeout: Maximum time to wait

        Raises:
            Full: If the queue is full and the timeout expires
        """
        start_time = time.time() if timeout is not None else None

        while True:
            if self._lock:
                async with self._lock:
                    head, tail, capacity, item_size = self._read_header()

                    # Check if the queue is full
                    if (tail - head) >= capacity:
                        if timeout is not None and (time.time() - start_time) >= timeout:
                            raise Full("Queue is full (timeout)")
                        # Fall through to release the lock before sleeping
                    else:
                        # Serialize the item
                        try:
                            item_bytes = cloudpickle.dumps(item)
                        except Exception as e:
                            raise ValueError(f"Failed to serialize item: {e}") from e

                        if len(item_bytes) > item_size:
                            raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                        # Write the item to the tail slot:
                        # item size first (4 bytes), then the item data
                        slot_offset = self._get_slot_offset(tail)
                        size_bytes = struct.pack("!I", len(item_bytes))
                        self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                        self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes

                        # Update the tail
                        self._write_header(head, tail + 1, capacity, item_size)
                        return
            else:
                # Lock-free fallback (unused: self._lock is always created in __init__)
                head, tail, capacity, item_size = self._read_header()

                # Check if the queue is full
                if (tail - head) >= capacity:
                    if timeout is not None and (time.time() - start_time) >= timeout:
                        raise Full("Queue is full (timeout)")
                else:
                    # Serialize the item
                    try:
                        item_bytes = cloudpickle.dumps(item)
                    except Exception as e:
                        raise ValueError(f"Failed to serialize item: {e}") from e

                    if len(item_bytes) > item_size:
                        raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                    # Write the item to the tail slot:
                    # item size first (4 bytes), then the item data
                    slot_offset = self._get_slot_offset(tail)
                    size_bytes = struct.pack("!I", len(item_bytes))
                    self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                    self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes

                    # Update the tail
                    self._write_header(head, tail + 1, capacity, item_size)
                    return

            # Queue was full - check the timeout and sleep
            if timeout is not None and (time.time() - start_time) > timeout:
                raise Full("Queue is full (timeout)")
            await asyncio.sleep(0.001)  # Brief async sleep before retry

    async def get_async(self, timeout: float | None = None) -> Any:
        """
        Async get an item from the queue.

        Args:
            timeout: Maximum time to wait

        Returns:
            The dequeued item

        Raises:
            Empty: If the queue is empty and the timeout expires
        """
        start_time = time.time() if timeout is not None else None

        while True:
            if self._lock:
                async with self._lock:
                    head, tail, capacity, item_size = self._read_header()

                    # Check if the queue is empty
                    if head >= tail:
                        if timeout is not None and (time.time() - start_time) >= timeout:
                            raise Empty("Queue is empty (timeout)")
                        # Fall through to release the lock before sleeping
                    else:
                        # Read the item from the head slot: item size first
                        slot_offset = self._get_slot_offset(head)
                        size_bytes = bytes(self._shm.buf[slot_offset : slot_offset + 4])
                        item_len = struct.unpack("!I", size_bytes)[0]

                        # Read the item data
                        item_bytes = bytes(self._shm.buf[slot_offset + 4 : slot_offset + 4 + item_len])

                        # Update the head
                        self._write_header(head + 1, tail, capacity, item_size)

                        # Deserialize and return
                        try:
                            return cloudpickle.loads(item_bytes)
                        except Exception as e:
                            raise ValueError(f"Failed to deserialize item: {e}") from e
            else:
                # Lock-free fallback (unused: self._lock is always created in __init__)
                head, tail, capacity, item_size = self._read_header()

                # Check if the queue is empty
                if head >= tail:
                    if timeout is not None and (time.time() - start_time) >= timeout:
                        raise Empty("Queue is empty (timeout)")
                else:
                    # Read the item from the head slot: item size first
                    slot_offset = self._get_slot_offset(head)
                    size_bytes = bytes(self._shm.buf[slot_offset : slot_offset + 4])
                    item_len = struct.unpack("!I", size_bytes)[0]

                    # Read the item data
                    item_bytes = bytes(self._shm.buf[slot_offset + 4 : slot_offset + 4 + item_len])

                    # Update the head
                    self._write_header(head + 1, tail, capacity, item_size)

                    # Deserialize and return
                    try:
                        return cloudpickle.loads(item_bytes)
                    except Exception as e:
                        raise ValueError(f"Failed to deserialize item: {e}") from e

            # Queue was empty - check the timeout and sleep
            if timeout is not None and (time.time() - start_time) > timeout:
                raise Empty("Queue is empty (timeout)")
            await asyncio.sleep(0.001)  # Brief async sleep before retry

    def put_batch(self, items: list[Any]) -> None:
        """
        Put multiple items atomically under a single lock acquisition.

        Writes all items directly to the ring buffer in one operation.

        Args:
            items: List of items to enqueue

        Raises:
            Full: If the queue doesn't have space for all items
        """
        if not items:
            return

        if self._lock:
            with self._lock:
                # Read the current state
                head, tail, capacity, item_size = self._read_header()

                # Check that there is space for all items
                current_size = tail - head
                if current_size + len(items) > capacity:
                    raise Full(f"Queue full: {current_size}/{capacity}, cannot add {len(items)} items")

                # Serialize all items first
                serialized_items = []
                for item in items:
                    try:
                        item_bytes = cloudpickle.dumps(item)
                    except Exception as e:
                        raise ValueError(f"Failed to serialize item: {e}") from e

                    if len(item_bytes) > item_size:
                        raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                    serialized_items.append(item_bytes)

                # Write all items to consecutive slots
                for item_bytes in serialized_items:
                    slot_offset = self._get_slot_offset(tail)
                    # Write the item size first (4 bytes), then the item data
                    size_bytes = struct.pack("!I", len(item_bytes))
                    self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                    self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes
                    tail += 1

                # Update the header once with the new tail
                self._write_header(head, tail, capacity, item_size)
        else:
            # Lock-free fallback (unused: self._lock is always created in __init__)
            head, tail, capacity, item_size = self._read_header()

            # Check that there is space for all items
            current_size = tail - head
            if current_size + len(items) > capacity:
                raise Full(f"Queue full: {current_size}/{capacity}, cannot add {len(items)} items")

            # Serialize all items first
            serialized_items = []
            for item in items:
                try:
                    item_bytes = cloudpickle.dumps(item)
                except Exception as e:
                    raise ValueError(f"Failed to serialize item: {e}") from e

                if len(item_bytes) > item_size:
                    raise ValueError(f"Item too large: {len(item_bytes)} bytes > {item_size} bytes")

                serialized_items.append(item_bytes)

            # Write all items to consecutive slots
            for item_bytes in serialized_items:
                slot_offset = self._get_slot_offset(tail)
                # Write the item size first (4 bytes), then the item data
                size_bytes = struct.pack("!I", len(item_bytes))
                self._shm.buf[slot_offset : slot_offset + 4] = size_bytes
                self._shm.buf[slot_offset + 4 : slot_offset + 4 + len(item_bytes)] = item_bytes
                tail += 1

            # Update the header once with the new tail
            self._write_header(head, tail, capacity, item_size)

    def get_nowait(self) -> Any:
        """Get an item without blocking"""
        return self.get(block=False)

    def empty(self) -> bool:
        """Check if queue is empty"""
        head, tail, _, _ = self._read_header()
        return head >= tail

    def full(self) -> bool:
        """Check if queue is full"""
        head, tail, capacity, _ = self._read_header()
        return (tail - head) >= capacity

    def qsize(self) -> int:
        """Get approximate queue size"""
        head, tail, _, _ = self._read_header()
        return max(0, tail - head)

    def close(self) -> None:
        """Close the shared memory connection"""
        if hasattr(self, "_shm") and self._shm:
            try:
                self._shm.close()
            except Exception:
                pass
        if hasattr(self, "_lock") and self._lock:
            try:
                self._lock.close()
            except Exception:
                pass

    def unlink(self) -> None:
        """Unlink (delete) the shared memory segment"""
        self.close()
        try:
            shm_name = f"{self.run_id}-{self.name}-queue" if self.run_id else f"{self.name}-queue"
            temp_shm = shared_memory.SharedMemory(name=shm_name)
            temp_shm.unlink()
            temp_shm.close()
        except FileNotFoundError:
            pass
        except Exception:
            pass

        # Unlink the lock
        if hasattr(self, "_lock") and self._lock:
            try:
                self._lock.unlink()
            except Exception:
                pass

    def get_metrics(self) -> dict[str, Any]:
        """
        Returns metrics for this queue.

        Implements the Observable protocol.
        """
        try:
            head, tail, capacity, item_size = self._read_header()
            size = max(0, tail - head)
            return {
                "queue_size": size,
                "queue_capacity": capacity,
                "queue_utilization_percent": (size / capacity * 100) if capacity > 0 else 0.0,
                "queue_item_size_bytes": item_size,
            }
        except Exception:
            return {}

    def __getstate__(self) -> dict[str, Any]:
        """
        Prepare for pickling - return only connection info, not the lock.

        The lock cannot be pickled and must be created fresh in each process.
        """
        return {
            "name": self.name,
            "run_id": self.run_id,
            "capacity": self.capacity,
            "item_size": self.item_size,
        }

    def __setstate__(self, state: dict[str, Any]) -> None:
        """
        Reconnect after unpickling in a new process.

        Reconnects to the same shared memory lock and data segment.
        """
        self.name = state["name"]
        self.run_id = state["run_id"]
        self.capacity = state["capacity"]
        self.item_size = state["item_size"]

        # Reconnect to the same shared memory lock
        self._lock = SharedMemoryLock(
            name=f"{self.name}_queue_lock",
            create=False,
            run_id=self.run_id,
        )

        # Reconnect to the existing shared memory
        shm_name = f"{self.run_id}-{self.name}-queue" if self.run_id else f"{self.name}-queue"
        total_size = _HEADER_SIZE + (self.capacity * self.item_size)
        self._shm = shared_memory.SharedMemory(name=shm_name, create=False, size=total_size)


class Empty(Exception):
    """Raised when queue is empty"""

    pass


class Full(Exception):
    """Raised when queue is full"""

    pass