slick-queue-py 1.0.0 (slick_queue_py-1.0.0-cp312-cp312-win_amd64.whl)

slick_queue_py-1.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ atomic_ops.py,sha256=44j2Qm48owvs18fXbPmir4LZucoyYhXeJkEJzU6SLUs,20924
+ atomic_ops_ext.cp312-win_amd64.pyd,sha256=0UlQcv4FJIBXg5Al1FIiE6zbGFeeLY6ldZNH8YH-yno,12288
+ slick_queue_py.py,sha256=dWvLLHqwRg_Qslbs2RNT7YawDQkp2HUNDeBs-glePp4,21829
+ slick_queue_py-1.0.0.dist-info/licenses/LICENSE,sha256=GsgPE6yHSgjPd--0cXVu4tmzSrApWVih5TeqOHpVeo0,1089
+ slick_queue_py-1.0.0.dist-info/METADATA,sha256=kdu4QelkyRApAS-izosRWnwwwze05gLWSsCPcLScjNk,25103
+ slick_queue_py-1.0.0.dist-info/WHEEL,sha256=8UP9x9puWI0P1V_d7K2oMTBqfeLNm21CTzZ_Ptr0NXU,101
+ slick_queue_py-1.0.0.dist-info/top_level.txt,sha256=wSnL-GmRhMTtudT__dLgRw5eZUU_XHo2OZTOP3M1MwA,41
+ slick_queue_py-1.0.0.dist-info/RECORD,,
slick_queue_py-1.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: false
+ Tag: cp312-cp312-win_amd64
+
slick_queue_py-1.0.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Slick Quant
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
slick_queue_py-1.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
+ atomic_ops
+ atomic_ops_ext
+ slick_queue_py
slick_queue_py.py ADDED
@@ -0,0 +1,520 @@
+ """
+ Python implementation of SlickQueue-compatible shared memory queue.
+
+ This implements the same memory layout as the C++ `slick::SlickQueue<T>`:
+ a 64-byte header, an array of `slot` structures starting at offset 64, and
+ the data array immediately after the slot array.
+
+ Multi-Producer Multi-Consumer Support:
+ - This implementation uses atomic operations via the atomic_ops module
+ - On platforms with hardware 128-bit CAS support (x86-64 with CMPXCHG16B),
+   it provides true lock-free multi-producer and multi-consumer semantics
+ - On other platforms, it falls back to lock-based synchronization
+
+ C++/Python Interoperability:
+ - Python processes can produce to and consume from queues created by C++
+ - C++ processes can produce to and consume from queues created by Python
+ - Memory layout and atomic operations match exactly
+
+ Supported on Python 3.8+ (uses multiprocessing.shared_memory).
+ """
+ from __future__ import annotations
+
+ __version__ = '1.0.0'
+
+ import struct
+ import sys
+ from typing import Optional, Tuple, Union
+ from atomic_ops import AtomicReservedInfo, AtomicUInt64, AtomicCursor, check_platform_support, make_reserved_info, get_index, get_size
+
+ # Use Python's built-in shared memory (available in Python 3.8+)
+ from multiprocessing.shared_memory import SharedMemory
+
+ # Layout constants
+ # Note: We add 8 bytes of padding at the start to ensure the atomic data (at offset 16)
+ # is 16-byte aligned for CMPXCHG16B instruction compatibility
+ HEADER_SIZE = 64
+ # reserved_info with alignment padding: 32 bytes (8+8+8+4+4)
+ RESERVED_INFO_SIZE = struct.calcsize(AtomicReservedInfo.RESERVED_INFO_FMT)
+
+ # slot: atomic_uint64 data_index; uint32 size; 4 bytes padding => 16 bytes
+ SLOT_FMT = "<Q I 4x"
+ SLOT_SIZE = struct.calcsize(SLOT_FMT)
+
+
+ class SlickQueue:
+     """A fixed-size ring queue compatible with C++ SlickQueue.
+
+     Supports two modes:
+     - **Shared memory mode** (when name is provided): Uses shared memory for inter-process communication
+     - **Local memory mode** (when name is None): Uses local memory (single process)
+
+     Elements are fixed-length byte blobs of `element_size`.
+
+     Args:
+         name: Shared memory segment name. If None, uses local memory mode.
+         size: Queue capacity (must be a power of 2). Required when creating the shared
+             memory segment or when using local memory mode; omit it to open an existing segment.
+         element_size: Size of each element in bytes. Required.
+     """
+
+     def __init__(self, *, name: Optional[str] = None, size: Optional[int] = None, element_size: Optional[int] = None):
+         # On Linux, POSIX shared memory names must start with /
+         # The C++ slick_queue library passes the name directly to shm_open(),
+         # which requires the / prefix. Python's SharedMemory strips it from .name,
+         # but we need to add it for C++ interop.
+         self.name = name
+         if self.name is not None and sys.platform != 'win32' and not self.name.startswith('/'):
+             self.name = '/' + self.name
+         self.use_shm = name is not None
+         self._shm: Optional[SharedMemory] = None
+         self._local_buf: Optional[bytearray] = None
+         self.size = None
+         self._own = False
+
+         # Validate parameters
+         if size is not None:
+             self.size = int(size)
+             if self.size & (self.size - 1):
+                 raise ValueError("size must be a power of two")
+             self.mask = self.size - 1
+
+         if element_size is not None:
+             self.element_size = int(element_size)
+
+         if self.use_shm:
+             # Shared memory mode (C++ with shm_name != nullptr)
+             if self.size:
+                 # create shared memory
+                 if element_size is None:
+                     raise ValueError("size and element_size required when creating")
+                 total = HEADER_SIZE + SLOT_SIZE * self.size + self.element_size * self.size
+                 try:
+                     self._shm = SharedMemory(name=self.name, create=True, size=total)
+                     # print(f"**** create new shm {self.name}")
+                     # initialize header: reserved_info zeros, size
+                     buf = self._shm.buf
+                     buf[:HEADER_SIZE] = bytes(HEADER_SIZE)
+                     struct.pack_into("<I I", buf, RESERVED_INFO_SIZE, self.size, element_size)
+                     # initialize slots data_index to max (uint64 max)
+                     for i in range(self.size):
+                         off = HEADER_SIZE + i * SLOT_SIZE
+                         struct.pack_into(SLOT_FMT, buf, off, (2**64 - 1), 1)
+                     self._own = True
+                 except FileExistsError:
+                     # print(f"**** open existing shm {self.name}")
+                     # Queue already exists, open it (size is ignored for existing shm on Linux/Mac)
+                     self._shm = SharedMemory(name=self.name, create=False)
+
+                     # Validate the size in the header matches what we expect
+                     ss = struct.unpack_from("<I I", self._shm.buf, RESERVED_INFO_SIZE)
+                     if ss[0] != self.size:
+                         self._shm.close()
+                         raise ValueError(f"size mismatch. Expected {self.size} but got {ss[0]}")
+                     if ss[1] != element_size:
+                         self._shm.close()
+                         raise ValueError(f"element size mismatch. Expected {element_size} but got {ss[1]}")
+             else:
+                 # print(f"**** open existing shm {self.name}")
+                 # open existing and read size from header
+                 if element_size is None:
+                     raise ValueError("element_size must be provided when opening existing shared memory")
+
+                 # Open existing shared memory (size parameter not needed/ignored)
+                 self._shm = SharedMemory(name=self.name, create=False)
+
+                 # Read actual queue size from header
+                 ss = struct.unpack_from("<I I", self._shm.buf, RESERVED_INFO_SIZE)
+                 self.size = ss[0]
+                 elem_sz = ss[1]
+
+                 if element_size != elem_sz:
+                     self._shm.close()
+                     raise ValueError(f"SharedMemory element_size mismatch. Expecting {element_size} but got {elem_sz}")
+
+                 self.mask = self.size - 1
+                 self.element_size = int(element_size)
+
+             self._buf = self._shm.buf
+             self._control_offset = HEADER_SIZE
+             self._data_offset = HEADER_SIZE + SLOT_SIZE * self.size
+
+             # Initialize atomic wrappers for lock-free operations
+             self._atomic_reserved = AtomicReservedInfo(self._buf, 0)
+             self._atomic_slots = []
+             for i in range(self.size):
+                 slot_offset = HEADER_SIZE + i * SLOT_SIZE
+                 self._atomic_slots.append(AtomicUInt64(self._buf, slot_offset))
+         else:
+             # Local memory mode (C++ with shm_name == nullptr)
+             if size is None or element_size is None:
+                 raise ValueError("size and element_size required for local memory mode")
+
+             # Create local buffers (equivalent to C++ new T[size_] and new slot[size_])
+             # We use a bytearray to simulate the memory layout
+             total = HEADER_SIZE + SLOT_SIZE * self.size + self.element_size * self.size
+             self._local_buf = bytearray(total)
+
+             # Initialize header
+             self._local_buf[:HEADER_SIZE] = bytes(HEADER_SIZE)
+             struct.pack_into("<I", self._local_buf, RESERVED_INFO_SIZE, self.size)
+
+             # Initialize slots data_index to max
+             for i in range(self.size):
+                 off = HEADER_SIZE + i * SLOT_SIZE
+                 struct.pack_into(SLOT_FMT, self._local_buf, off, (2**64 - 1), 1)
+
+             # Create a memoryview for consistency with shared memory path
+             self._buf = memoryview(self._local_buf)
+             self._control_offset = HEADER_SIZE
+             self._data_offset = HEADER_SIZE + SLOT_SIZE * self.size
+
+             # Initialize atomic wrappers (these work on local memory too)
+             # Local mode is always Python creator, but we still pass offset for consistency
+             self._atomic_reserved = AtomicReservedInfo(self._buf, 0)
+             self._atomic_slots = []
+             for i in range(self.size):
+                 slot_offset = HEADER_SIZE + i * SLOT_SIZE
+                 self._atomic_slots.append(AtomicUInt64(self._buf, slot_offset))
+
+     # low-level helpers
+     def _read_reserved(self) -> Tuple[int, int]:
+         buf = self._buf
+         packed = struct.unpack_from(AtomicReservedInfo.RESERVED_INFO_FMT, buf, 0)[0]
+         return get_index(packed), get_size(packed)
+
+     def _write_reserved(self, index: int, sz: int) -> None:
+         packed = make_reserved_info(int(index), int(sz))
+         struct.pack_into(AtomicReservedInfo.RESERVED_INFO_FMT, self._buf, 0, packed)
+
+     def _read_slot(self, idx: int) -> Tuple[int, int]:
+         off = self._control_offset + idx * SLOT_SIZE
+         data_index, size = struct.unpack_from(SLOT_FMT, self._buf, off)
+         return int(data_index), int(size)
+
+     def _write_slot(self, idx: int, data_index: int, size: int) -> None:
+         off = self._control_offset + idx * SLOT_SIZE
+         struct.pack_into(SLOT_FMT, self._buf, off, int(data_index), int(size))
+
+     def get_shm_name(self) -> Optional[str]:
+         """
+         Get the actual shared memory name for C++ interop.
+
+         Returns the name with POSIX / prefix on Linux (required by C++ shm_open).
+         Python's SharedMemory.name property strips the / prefix, but this method
+         returns self.name which preserves it for C++ interop.
+
+         Returns:
+             The shared memory name that C++ code should use to open the queue.
+             On Linux, this will have the / prefix that shm_open() requires.
+         """
+         # Return self.name (which has / prefix on Linux) rather than self._shm.name
+         # (which has / stripped by Python)
+         return self.name
+
+     # Public API mirroring C++ methods
+     def reserve(self, n: int = 1) -> int:
+         """
+         Reserve space in the queue for writing (multi-producer safe).
+
+         Uses atomic CAS to safely reserve slots from multiple producers.
+         Matches C++ queue.h:181-213.
+
+         Args:
+             n: Number of slots to reserve (default 1)
+
+         Returns:
+             Starting index of reserved space
+
+         Raises:
+             RuntimeError: If n > queue size
+         """
+         if n > self.size:
+             raise RuntimeError(f"required size {n} > queue size {self.size}")
+
+         # CAS loop for multi-producer safety (matching C++ line 189-205)
+         while True:
+             # Load current reserved_info with memory_order_relaxed (C++ line 185)
+             reserved_index, reserved_size = self._atomic_reserved.load()
+
+             index = reserved_index
+             idx = index & self.mask
+             buffer_wrapped = False
+
+             # Check if we need to wrap (C++ lines 194-204)
+             if (idx + n) > self.size:
+                 # Wrap to beginning
+                 index += self.size - idx
+                 next_index = index + n
+                 next_size = n
+                 buffer_wrapped = True
+             else:
+                 # Normal increment
+                 next_index = reserved_index + n
+                 next_size = n
+
+             # Atomic CAS with memory_order_release on success (C++ line 205)
+             success, actual = self._atomic_reserved.compare_exchange_weak(
+                 expected=(reserved_index, reserved_size),
+                 desired=(next_index, next_size)
+             )
+
+             if success:
+                 # CAS succeeded, we own this reservation
+                 if buffer_wrapped:
+                     # Publish wrap marker (C++ lines 206-211)
+                     slot_idx = reserved_index & self.mask
+                     self._write_slot(slot_idx, index, n)
+                 return index
+
+             # CAS failed, retry with updated value
+
+     def publish(self, index: int, n: int = 1) -> None:
+         """
+         Publish data written to reserved space (atomic with release semantics).
+
+         Makes the data visible to consumers. Matches C++ queue.h:239-242.
+
+         Args:
+             index: Index returned by reserve()
+             n: Number of slots to publish (default 1)
+         """
+         slot_idx = index & self.mask
+
+         # Write slot size (non-atomic part)
+         size_offset = self._control_offset + slot_idx * SLOT_SIZE + 8
+         struct.pack_into("<I 4x", self._buf, size_offset, n)
+
+         # Atomic store of data_index with memory_order_release (C++ line 242)
+         # This ensures all data writes are visible before the index is published
+         self._atomic_slots[slot_idx].store_release(index)
+
+     def __getitem__(self, index: int) -> memoryview:
+         off = self._data_offset + (index & self.mask) * self.element_size
+         return self._buf[off: off + self.element_size]
+
+     def read(self, read_index: Union[int, AtomicCursor]) -> Union[Tuple[Optional[bytes], int, int], Tuple[Optional[bytes], int]]:
+         """
+         Read data from the queue.
+
+         This method has two modes:
+         1. Single-consumer mode: read(int) -> (data, size, new_index)
+         2. Multi-consumer mode: read(AtomicCursor) -> (data, size)
+
+         Single-consumer mode (matches C++ queue.h:246-273):
+             Uses a plain int cursor for single-consumer scenarios.
+             Returns the new read_index.
+
+         Multi-consumer mode (matches C++ queue.h:283-314):
+             Uses an AtomicCursor for work-stealing/load-balancing across multiple consumers.
+             Each consumer atomically claims items, ensuring each item is consumed exactly once.
+
+         Note: Unlike C++, the single-consumer version returns the new read_index rather
+         than updating by reference, as Python doesn't have true pass-by-reference.
+
+         Args:
+             read_index: Either an int (single-consumer) or AtomicCursor (multi-consumer)
+
+         Returns:
+             Single-consumer: Tuple of (data_bytes or None, item_size, new_read_index)
+             Multi-consumer: Tuple of (data_bytes or None, item_size)
+             If no data available returns (None, 0) or (None, 0, read_index)
+
+         Examples:
+             # Single consumer
+             read_index = 0
+             data, size, read_index = q.read(read_index)
+
+             # Multi-consumer work-stealing
+             cursor = AtomicCursor(cursor_shm.buf, 0)
+             data, size = q.read(cursor)  # Atomically claim next item
+         """
+         if isinstance(read_index, AtomicCursor):
+             return self._read_atomic_cursor(read_index)
+         else:
+             return self._read_single_consumer(read_index)
+
+     def _read_single_consumer(self, read_index: int) -> Tuple[Optional[bytes], int, int]:
+         """
+         Single-consumer read with atomic acquire semantics.
+
+         Matches C++ queue.h:246-273. For single-consumer use only.
+
+         Args:
+             read_index: Current read position
+
+         Returns:
+             Tuple of (data_bytes or None, item_size, new_read_index).
+             If no data available returns (None, 0, read_index).
+         """
+         while True:
+             idx = read_index & self.mask
+
+             # Atomic load with memory_order_acquire (C++ line 252)
+             data_index = self._atomic_slots[idx].load_acquire()
+
+             # Read slot size (non-atomic part)
+             size_offset = self._control_offset + idx * SLOT_SIZE + 8
+             slot_size = struct.unpack_from("<I", self._buf, size_offset)[0]
+
+             # Check for queue reset (C++ lines 253-256)
+             reserved_index, _ = self._atomic_reserved.load()
+             if data_index != (2**64 - 1) and reserved_index < data_index:
+                 read_index = 0
+                 continue
+
+             # Check if data is ready (C++ lines 258-261)
+             if data_index == (2**64 - 1) or data_index < read_index:
+                 return None, 0, read_index
+
+             # Check for wrap (C++ lines 262-266)
+             if data_index > read_index and ((data_index & self.mask) != idx):
+                 read_index = data_index
+                 continue
+
+             # Read data (C++ lines 270-272)
+             data_off = self._data_offset + (read_index & self.mask) * self.element_size
+             data = bytes(self._buf[data_off: data_off + slot_size * self.element_size])
+             new_read_index = data_index + slot_size
+             return data, slot_size, new_read_index
+
+     def _read_atomic_cursor(self, read_index: AtomicCursor) -> Tuple[Optional[bytes], int]:
+         """
+         Multi-consumer read using a shared atomic cursor (work-stealing pattern).
+
+         Matches C++ queue.h:283-314. Multiple consumers share a single atomic cursor,
+         atomically claiming items to process. Each item is consumed by exactly one consumer.
+
+         Args:
+             read_index: Shared AtomicCursor for coordinating multiple consumers
+
+         Returns:
+             Tuple of (data_bytes or None, item_size).
+             If no data available returns (None, 0).
+         """
+         if self._buf is None:
+             raise RuntimeError("Queue buffer is not initialized")
+
+         while True:
+             # Load current cursor position (C++ line 285)
+             current_index = read_index.load()
+             idx = current_index & self.mask
+
+             # Load slot data_index (C++ line 288)
+             data_index = self._atomic_slots[idx].load_acquire()
+
+             # Read slot size (non-atomic part)
+             size_offset = self._control_offset + idx * SLOT_SIZE + 8
+             slot_size = struct.unpack_from("<I", self._buf, size_offset)[0]
+
+             # Check for queue reset (C++ lines 290-294)
+             reserved_index, _ = self._atomic_reserved.load()
+             if data_index != (2**64 - 1) and reserved_index < data_index:
+                 read_index.store(0)
+                 continue
+
+             # Check if data is ready (C++ lines 296-299)
+             if data_index == (2**64 - 1) or data_index < current_index:
+                 return None, 0
+
+             # Check for wrap (C++ lines 300-304)
+             if data_index > current_index and ((data_index & self.mask) != idx):
+                 # Try to atomically update cursor to skip wrapped slots
+                 read_index.compare_exchange_weak(current_index, data_index)
+                 continue
+
+             # Try to atomically claim this item (C++ lines 306-313)
+             next_index = data_index + slot_size
+             success, _ = read_index.compare_exchange_weak(current_index, next_index)
+
+             if success:
+                 # Successfully claimed the item, read and return it
+                 data_off = self._data_offset + (current_index & self.mask) * self.element_size
+                 data = bytes(self._buf[data_off: data_off + slot_size * self.element_size])
+                 return data, slot_size
+
+             # CAS failed, another consumer claimed it, retry
+
+     def read_last(self) -> Optional[bytes]:
+         reserved_index, reserved_size = self._read_reserved()
+         if reserved_index == 0:
+             return None
+         index = reserved_index - reserved_size
+         off = self._data_offset + (index & self.mask) * self.element_size
+         return bytes(self._buf[off: off + self.element_size])
+
+     def reset(self) -> None:
+         """Reset the queue to its initial state.
+
+         This is a low-level operation that should be used with caution.
+         It is typically used in testing or when the queue needs to be reinitialized.
+         """
+         # Reset all slots to their initial state
+         for i in range(self.size):
+             self._write_slot(i, 2**64 - 1, 1)
+
+         if self.use_shm:
+             # Reset reserved_info to initial state
+             self._write_reserved(0, 0)
+
+     def close(self) -> None:
+         """Close the queue connection.
+
+         For shared memory mode: releases all references to avoid 'exported pointers exist' errors.
+         For local memory mode: releases local buffer.
+         """
+         try:
+             # Release atomic wrapper references to the buffer
+             if hasattr(self, '_atomic_reserved') and self._atomic_reserved:
+                 self._atomic_reserved.release()
+                 self._atomic_reserved = None
+
+             if hasattr(self, '_atomic_slots') and self._atomic_slots:
+                 for slot in self._atomic_slots:
+                     slot.release()
+                 self._atomic_slots = None
+
+             self._buf = None
+
+             # Close shared memory if using it
+             if self.use_shm and self._shm:
+                 try:
+                     # prevent Exception ignored in: <function SharedMemory.__del__ at 0x00000176D1BFA8E0>
+                     self._shm._mmap = None
+                     self._shm.close()
+                     self._shm = None
+                 except Exception:
+                     pass
+
+             # Clear local buffer if using it
+             if not self.use_shm and self._local_buf:
+                 self._local_buf = None
+         except Exception as e:
+             print(e)
+             pass
+
+     def unlink(self) -> None:
+         """Unlink (delete) the shared memory segment.
+
+         Only applicable for shared memory mode. Does nothing for local memory mode.
+         """
+         if not self.use_shm:
+             return  # Nothing to unlink for local memory
+
+         try:
+             if self._shm:
+                 self._shm.unlink()
+         except Exception:
+             pass
+
+     def __enter__(self):
+         """Context manager entry."""
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):  # noqa: U100
+         """Context manager exit - ensures proper cleanup."""
+         self.close()
+         return False
+
+
+ __all__ = ["SlickQueue", "AtomicCursor", "__version__"]
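
To make the memory layout described in the module docstring concrete, the arithmetic below walks through the offsets for a hypothetical queue of 1024 slots of 64-byte elements. The constants mirror HEADER_SIZE and SLOT_FMT defined in slick_queue_py.py; the chosen sizes are illustrative only.

    # Worked example (illustrative): byte layout for size=1024, element_size=64.
    HEADER_SIZE = 64        # reserved_info (32 bytes) + size/element_size fields, padded to 64
    SLOT_SIZE = 16          # uint64 data_index + uint32 size + 4 bytes padding ("<Q I 4x")
    size, element_size = 1024, 64

    control_offset = HEADER_SIZE                    # slot array starts at byte 64
    data_offset = HEADER_SIZE + SLOT_SIZE * size    # 64 + 16*1024 = 16448
    total = data_offset + element_size * size       # 16448 + 64*1024 = 81984 bytes

    print(control_offset, data_offset, total)       # 64 16448 81984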
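A minimal construction sketch based on the class docstring. The segment name 'demo_queue' and the sizes are invented for illustration; in shared memory mode, passing size creates the segment (or attaches to and validates an existing one), while omitting size attaches to an existing segment and reads its size from the header.

    from slick_queue_py import SlickQueue

    # Shared-memory mode, creator side: size and element_size are both required.
    producer_q = SlickQueue(name='demo_queue', size=1024, element_size=64)

    # Shared-memory mode, attaching side: omit size; element_size is validated
    # against the value stored in the shared header.
    consumer_q = SlickQueue(name='demo_queue', element_size=64)

    # Local-memory mode (single process, no shared memory): omit name.
    local_q = SlickQueue(size=256, element_size=64)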
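A producer-side sketch of the reserve/publish cycle documented in reserve() and publish(). The payload format (an 8-byte little-endian counter inside a 64-byte element) is an assumption made for the example.

    import struct
    from slick_queue_py import SlickQueue

    q = SlickQueue(name='demo_queue', size=1024, element_size=64)   # create or attach
    idx = q.reserve()                    # atomically claim one slot; returns its index
    buf = q[idx]                         # writable memoryview over that element's bytes
    buf[:8] = struct.pack('<Q', 42)      # write the payload directly into the queue memory
    q.publish(idx)                       # release-store makes the item visible to readers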
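Consumer-side sketches for both read() modes described in the docstring. The single-consumer cursor is a plain int owned by one reader; the multi-consumer variant passes a shared AtomicCursor so each item is claimed exactly once. The 8-byte shared buffer used for the cursor here is an assumption; AtomicCursor's actual storage requirements are defined in atomic_ops, which is not shown in this diff.

    from multiprocessing.shared_memory import SharedMemory
    from atomic_ops import AtomicCursor
    from slick_queue_py import SlickQueue

    q = SlickQueue(name='demo_queue', element_size=64)   # attach; size comes from the header

    # Single-consumer mode: read() returns the advanced cursor along with the data.
    read_index = 0
    data, n, read_index = q.read(read_index)
    if data is not None:
        # data holds n contiguous elements (n * element_size bytes)
        print(int.from_bytes(data[:8], 'little'))

    # Multi-consumer (work-stealing) mode: the first reader creates the cursor block,
    # other readers would attach to it with create=False.
    cursor_shm = SharedMemory(name='demo_queue_cursor', create=True, size=8)
    cursor = AtomicCursor(cursor_shm.buf, 0)
    data, n = q.read(cursor)             # returns (None, 0) when nothing is available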
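A cleanup and C++ interop sketch using get_shm_name(), unlink() and close(). Because close() drops the SharedMemory handle, this hypothetical flow calls unlink() first on the creating side; the context manager form only calls close(), never unlink().

    from slick_queue_py import SlickQueue

    q = SlickQueue(name='demo_queue', size=1024, element_size=64)
    print(q.get_shm_name())              # '/demo_queue' on Linux, 'demo_queue' on Windows
    # ... produce/consume ...
    q.unlink()                           # creator removes the segment once all users are done
    q.close()                            # drop buffer references and the SharedMemory handle

    # SlickQueue is also a context manager; __exit__ calls close() automatically.
    with SlickQueue(name='another_queue', size=1024, element_size=64) as q2:
        last = q2.read_last()            # None here, since nothing has been published yet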