slick-queue-py 1.1.0__cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
atomic_ops.py ADDED
@@ -0,0 +1,628 @@
+ """
+ Platform-specific atomic operations for a lock-free multi-producer multi-consumer queue.
+
+ This module provides atomic compare-and-swap (CAS) operations and memory barriers
+ that match the C++ std::atomic semantics used in slick::SlickQueue.
+
+ As of slick-queue v1.2.0+, reserved_info is a packed uint64_t (not a struct):
+ - Bits 0-15: size (16-bit, max 65535)
+ - Bits 16-63: index (48-bit, max ~281 trillion)
+
+ Supported platforms:
+ - Windows x86-64: uses C++ std::atomic<uint64_t> via the atomic_ops_ext extension
+ - Linux x86-64/ARM64: uses the atomic_ops_ext extension when available, falling
+   back to __sync builtins or __atomic_compare_exchange_8 from libatomic
+ - macOS x86-64/ARM64: same fallback chain as Linux
+
+ Memory ordering semantics:
+ - RELAXED: No synchronization or ordering constraints
+ - ACQUIRE: Subsequent loads/stores cannot be reordered before this operation
+ - RELEASE: Prior loads/stores cannot be reordered after this operation
+ - SEQ_CST: Sequential consistency (strongest guarantee)
+ """
+ from __future__ import annotations
+
+ __version__ = '1.1.0'
+
+ import sys
+ import struct
+ import ctypes
+ from typing import Tuple, Optional, Union
+ from enum import IntEnum
+
+
+ # Bit packing/unpacking helpers for reserved_info (matches C++ slick-queue v1.2.0+)
+ # reserved_info is a uint64_t with: [48-bit index | 16-bit size]
+ def make_reserved_info(index: int, size: int) -> int:
+     """Pack index (48-bit) and size (16-bit) into uint64_t.
+
+     Matches C++: ((index & 0xFFFFFFFFFFFFULL) << 16) | (size & 0xFFFF)
+
+     Args:
+         index: Queue index (0 to 2^48-1)
+         size: Reserved size (0 to 65535)
+
+     Returns:
+         Packed uint64_t value
+     """
+     return ((index & 0xFFFFFFFFFFFF) << 16) | (size & 0xFFFF)
+
+
+ def get_index(reserved: int) -> int:
+     """Extract index (upper 48 bits) from reserved_info.
+
+     Matches C++: reserved >> 16
+
+     Args:
+         reserved: Packed uint64_t reserved_info
+
+     Returns:
+         Queue index
+     """
+     return reserved >> 16
+
+
+ def get_size(reserved: int) -> int:
+     """Extract size (lower 16 bits) from reserved_info.
+
+     Matches C++: static_cast<uint32_t>(reserved & 0xFFFF)
+
+     Args:
+         reserved: Packed uint64_t reserved_info
+
+     Returns:
+         Reserved size
+     """
+     return reserved & 0xFFFF
+
+
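+ # Example (illustrative, not part of the shipped module): the helpers above
+ # round-trip losslessly for in-range values.
+ #
+ #     packed = make_reserved_info(1_000_000, 512)
+ #     assert packed == (1_000_000 << 16) | 512
+ #     assert get_index(packed) == 1_000_000
+ #     assert get_size(packed) == 512
+
+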
+ class MemoryOrder(IntEnum):
+     """Memory ordering constants matching C++ std::memory_order."""
+     RELAXED = 0  # memory_order_relaxed
+     ACQUIRE = 2  # memory_order_acquire
+     RELEASE = 3  # memory_order_release
+     SEQ_CST = 5  # memory_order_seq_cst
+
+
+ # Platform detection
+ IS_WINDOWS = sys.platform == 'win32'
+ IS_LINUX = sys.platform.startswith('linux')
+ IS_MACOS = sys.platform == 'darwin'
+ IS_64BIT = sys.maxsize > 2**32
+
+
+ def check_platform_support() -> Tuple[bool, str]:
+     """
+     Check if current platform supports required atomic operations.
+
+     Returns:
+         Tuple of (supported: bool, message: str)
+     """
+     if not IS_64BIT:
+         return False, "64-bit platform required for atomic operations"
+
+     if IS_WINDOWS:
+         if _USE_EXTENSION:
+             return True, "Windows x86-64 with C++ std::atomic extension"
+         else:
+             return False, "Windows requires C++ atomic_ops_ext extension for cross-process synchronization"
+     elif IS_LINUX or IS_MACOS:
+         import platform
+         machine = platform.machine().lower()
+         if machine in ('x86_64', 'amd64'):
+             return True, f"{sys.platform} x86-64 with 64-bit atomic operations"
+         elif machine in ('aarch64', 'arm64'):
+             return True, f"{sys.platform} ARM64 with 64-bit atomic operations"
+         else:
+             return False, f"Unsupported architecture: {machine}"
+     else:
+         return False, f"Unsupported platform: {sys.platform}"
+
+
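+ # Example (illustrative): callers can fail fast at startup rather than at the
+ # first atomic operation.
+ #
+ #     ok, detail = check_platform_support()
+ #     if not ok:
+ #         raise RuntimeError(detail)
+
+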
+ # Initialize platform-specific atomic functions
+ if IS_WINDOWS:
+     # ===== Windows Implementation =====
+     # Try to use our C++ extension for std::atomic operations
+     try:
+         import atomic_ops_ext
+         _atomic_ops_ext = atomic_ops_ext
+         _USE_EXTENSION = True
+     except ImportError:
+         # Try adding the current file's directory to the path and retry
+         # (helps with multiprocessing on Windows, where child processes may not have the correct path)
+         import os
+         current_dir = os.path.dirname(os.path.abspath(__file__))
+         if current_dir not in sys.path:
+             sys.path.insert(0, current_dir)
+             try:
+                 import atomic_ops_ext
+                 _atomic_ops_ext = atomic_ops_ext
+                 _USE_EXTENSION = True
+             except ImportError:
+                 _USE_EXTENSION = False
+         else:
+             # Directory was already on sys.path, so retrying cannot help
+             _USE_EXTENSION = False
+
+     _HAS_64BIT_CAS = _USE_EXTENSION  # Requires C++ extension for proper atomic ops
+
+     def _platform_cas_64(buffer: memoryview, offset: int,
+                          expected: int, desired: int) -> Tuple[bool, int]:
+         """Windows-specific 64-bit CAS using C++ std::atomic<uint64_t> wrapper.
+
+         Requires atomic_ops_ext C++ extension for cross-process synchronization.
+         """
+         if not _USE_EXTENSION:
+             raise RuntimeError(
+                 "atomic_ops_ext C++ extension is required for cross-process atomic operations. "
+                 "Build it with: python setup.py build_ext --inplace"
+             )
+
+         # Use C++ extension that wraps std::atomic<uint64_t>
+         # This ensures Python and C++ use the SAME atomic synchronization
+         buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+         addr = ctypes.addressof(buf_array) + offset
+
+         success, actual = _atomic_ops_ext.atomic_compare_exchange_64(
+             addr, expected, desired
+         )
+         return bool(success), actual
+
+     def _atomic_store_64(buffer: memoryview, offset: int, value: int) -> None:
+         """Atomic 64-bit store with release semantics.
+
+         Requires atomic_ops_ext C++ extension for cross-process synchronization.
+         """
+         if not _USE_EXTENSION:
+             raise RuntimeError(
+                 "atomic_ops_ext C++ extension is required for cross-process atomic operations. "
+                 "Build it with: python setup.py build_ext --inplace"
+             )
+
+         buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+         addr = ctypes.addressof(buf_array) + offset
+
+         # Use our C++ extension for atomic exchange (implements store via exchange)
+         _atomic_ops_ext.atomic_exchange_64(addr, value)
+
+     def _atomic_load_64(buffer: memoryview, offset: int) -> int:
+         """Atomic 64-bit load with acquire semantics.
+
+         Requires atomic_ops_ext C++ extension for cross-process synchronization.
+         """
+         if not _USE_EXTENSION:
+             raise RuntimeError(
+                 "atomic_ops_ext C++ extension is required for cross-process atomic operations. "
+                 "Build it with: python setup.py build_ext --inplace"
+             )
+
+         buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+         addr = ctypes.addressof(buf_array) + offset
+
+         # Use C++ extension for proper atomic load with acquire semantics
+         return _atomic_ops_ext.atomic_load_64(addr)
+
+ elif IS_LINUX or IS_MACOS:
+     # ===== Linux/macOS Implementation =====
+     import ctypes.util
+
+     # Try to use C++ extension first (provides most reliable atomic operations)
+     try:
+         import atomic_ops_ext
+         _atomic_ops_ext = atomic_ops_ext
+         _USE_EXTENSION = True
+     except ImportError:
+         # Fall back to native atomic operations
+         _USE_EXTENSION = False
+
+     # Try to load libatomic
+     libatomic_path = ctypes.util.find_library('atomic')
+     if libatomic_path:
+         try:
+             libatomic = ctypes.CDLL(libatomic_path)
+             # Set up __atomic_compare_exchange_8 function signature
+             try:
+                 libatomic.__atomic_compare_exchange_8.argtypes = [
+                     ctypes.POINTER(ctypes.c_uint64),  # ptr
+                     ctypes.POINTER(ctypes.c_uint64),  # expected
+                     ctypes.c_uint64,                  # desired
+                     ctypes.c_int,                     # success_memorder
+                     ctypes.c_int                      # failure_memorder
+                 ]
+                 libatomic.__atomic_compare_exchange_8.restype = ctypes.c_bool
+                 _HAS_LIBATOMIC = True
+             except AttributeError:
+                 _HAS_LIBATOMIC = False
+         except OSError:
+             _HAS_LIBATOMIC = False
+     else:
+         _HAS_LIBATOMIC = False
+
+     # Try to load libc for basic atomic operations
+     # (find_library wants the bare name 'c'; CDLL needs the real soname as fallback)
+     libc_name = 'libc.so.6' if IS_LINUX else 'libc.dylib'
+     try:
+         libc = ctypes.CDLL(ctypes.util.find_library('c') or libc_name)
+
+         # __sync_val_compare_and_swap for 64-bit CAS
+         try:
+             _sync_val_cas_8 = libc.__sync_val_compare_and_swap_8
+             _sync_val_cas_8.argtypes = [
+                 ctypes.POINTER(ctypes.c_uint64),
+                 ctypes.c_uint64,
+                 ctypes.c_uint64
+             ]
+             _sync_val_cas_8.restype = ctypes.c_uint64
+         except AttributeError:
+             _sync_val_cas_8 = None
+
+         # __sync_synchronize for memory barrier
+         try:
+             _sync_synchronize = libc.__sync_synchronize
+             _sync_synchronize.restype = None
+         except AttributeError:
+             _sync_synchronize = None
+     except OSError:
+         libc = None
+         _sync_val_cas_8 = None
+         _sync_synchronize = None
+
+     def _platform_cas_64(buffer: memoryview, offset: int,
+                          expected: int, desired: int) -> Tuple[bool, int]:
+         """Linux/macOS-specific 64-bit CAS using C++ extension, __sync_val_compare_and_swap, or libatomic."""
+         # Get pointer to buffer location
+         buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+         addr = ctypes.addressof(buf_array) + offset
+
+         # Try C++ extension first (most reliable)
+         if _USE_EXTENSION:
+             return _atomic_ops_ext.atomic_cas_64(addr, expected, desired)
+
+         ptr = ctypes.cast(addr, ctypes.POINTER(ctypes.c_uint64))
+
+         # Try __sync_val_compare_and_swap (available on most systems)
+         if _sync_val_cas_8 is not None:
+             actual = _sync_val_cas_8(ptr, expected, desired)
+             success = (actual == expected)
+             return success, actual
+
+         # Fall back to libatomic if available
+         if _HAS_LIBATOMIC:
+             try:
+                 # __atomic_compare_exchange_8(ptr, &expected, desired, success_order, failure_order)
+                 expected_ref = ctypes.c_uint64(expected)
+                 success = libatomic.__atomic_compare_exchange_8(
+                     ptr,
+                     ctypes.byref(expected_ref),
+                     ctypes.c_uint64(desired),
+                     ctypes.c_int(3),  # __ATOMIC_RELEASE
+                     ctypes.c_int(0)   # __ATOMIC_RELAXED
+                 )
+                 return bool(success), expected_ref.value
+             except AttributeError:
+                 pass
+
+         raise RuntimeError("64-bit atomic CAS not available (neither __sync_val_compare_and_swap_8 nor libatomic found)")
+
+     def _memory_fence_acquire():
+         """Acquire memory fence (a full barrier via __sync_synchronize, which is at least as strong)."""
+         if _sync_synchronize:
+             _sync_synchronize()
+
+     def _memory_fence_release():
+         """Release memory fence (a full barrier via __sync_synchronize, which is at least as strong)."""
+         if _sync_synchronize:
+             _sync_synchronize()
+
+     def _atomic_store_64(buffer: memoryview, offset: int, value: int) -> None:
+         """Atomic 64-bit store with release semantics."""
+         buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+         addr = ctypes.addressof(buf_array) + offset
+
+         # Use C++ extension first
+         if _USE_EXTENSION:
+             _atomic_ops_ext.atomic_store_64(addr, value)
+             return
+
+         ptr = ctypes.cast(addr, ctypes.POINTER(ctypes.c_uint64))
+
+         # For simplicity, use a CAS loop as the atomic store
+         if _sync_val_cas_8:
+             # Read current value and swap with new value; retry until the CAS wins
+             while True:
+                 current = ptr.contents.value
+                 if _sync_val_cas_8(ptr, current, value) == current:
+                     break
+         else:
+             _memory_fence_release()
+             struct.pack_into("<Q", buffer, offset, value)
+
+     def _atomic_load_64(buffer: memoryview, offset: int) -> int:
+         """Atomic 64-bit load with acquire semantics."""
+         # Use C++ extension first
+         if _USE_EXTENSION:
+             buf_array = (ctypes.c_char * len(buffer)).from_buffer(buffer)
+             addr = ctypes.addressof(buf_array) + offset
+             return _atomic_ops_ext.atomic_load_64(addr)
+
+         # On x86-64 and ARM64, naturally aligned 64-bit reads are atomic
+         value = struct.unpack_from("<Q", buffer, offset)[0]
+         _memory_fence_acquire()
+         return value
+
+ else:
+     # Unsupported platform
+     _USE_EXTENSION = False
+
+     def _platform_cas_64(*args, **kwargs):
+         raise RuntimeError(f"64-bit atomic CAS not supported on {sys.platform}")
+
+     def _memory_fence_acquire():
+         pass
+
+     def _memory_fence_release():
+         pass
+
+     def _atomic_store_64(*args, **kwargs):
+         raise RuntimeError(f"64-bit atomic store not supported on {sys.platform}")
+
+     def _atomic_load_64(*args, **kwargs):
+         raise RuntimeError(f"64-bit atomic load not supported on {sys.platform}")
+
+
+ class AtomicReservedInfo:
+     """
+     Atomic operations on reserved_info (uint64_t with packed index/size).
+
+     As of slick-queue v1.2.0, reserved_info is a packed uint64_t:
+     - Bits 0-15: size (16-bit, max 65535)
+     - Bits 16-63: index (48-bit, max ~281 trillion)
+
+     Memory layout:
+     - Offset 0-7: std::atomic<uint64_t> (single 64-bit value)
+     """
+
+     # Single uint64_t at offset 0 (matches C++ std::atomic<uint64_t>)
+     RESERVED_INFO_FMT = "Q"  # 8 bytes
+
+     def __init__(self, buffer: memoryview, offset: int = 0):
+         """
+         Initialize atomic reserved_info wrapper.
+
+         Args:
+             buffer: Memory buffer (typically SharedMemory.buf)
+             offset: Byte offset in buffer (typically 0 for header)
+         """
+         # Hold a plain reference to the buffer; call release() before closing
+         # the shared memory, or cleanup fails with "exported pointers exist"
+         self.buffer = buffer
+         self.offset = offset
+
+         # Verify platform support
+         supported, msg = check_platform_support()
+         if not supported:
+             raise RuntimeError(f"Platform not supported for atomic operations: {msg}")
+
+     def release(self):
+         """Release buffer reference to allow proper cleanup."""
+         self.buffer = None
+
+     def load(self) -> Tuple[int, int]:
+         """
+         Load reserved_info atomically (an acquire load here, at least as strong
+         as the C++ memory_order_relaxed load it mirrors).
+
+         Returns:
+             Tuple of (index: int, size: int)
+         """
+         # Use atomic load to avoid torn reads during concurrent updates
+         packed = _atomic_load_64(self.buffer, self.offset)
+         return get_index(packed), get_size(packed)
+
+     def compare_exchange_weak(
+         self,
+         expected: Tuple[int, int],
+         desired: Tuple[int, int]
+     ) -> Tuple[bool, Tuple[int, int]]:
+         """
+         Atomic compare-and-swap with memory_order_release on success,
+         memory_order_relaxed on failure (matching C++ queue.h:201).
+
+         This implements the weak version (may spuriously fail) to match
+         the C++ compare_exchange_weak semantics.
+
+         Args:
+             expected: Tuple of (expected_index, expected_size)
+             desired: Tuple of (desired_index, desired_size)
+
+         Returns:
+             Tuple of (success: bool, actual: Tuple[int, int])
+             If success is False, actual contains the current value.
+         """
+         expected_index, expected_size = expected
+         desired_index, desired_size = desired
+
+         # Pack to uint64_t (48-bit index in upper bits, 16-bit size in lower bits)
+         expected_packed = make_reserved_info(expected_index, expected_size)
+         desired_packed = make_reserved_info(desired_index, desired_size)
+
+         # Perform platform-specific 64-bit CAS
+         success, actual_packed = _platform_cas_64(
+             self.buffer, self.offset,
+             expected_packed, desired_packed
+         )
+
+         # Unpack actual value
+         actual_index = get_index(actual_packed)
+         actual_size = get_size(actual_packed)
+
+         return success, (actual_index, actual_size)
+
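+ # Example (illustrative sketch; the exact index-advance rule is queue-specific
+ # and assumed here only for demonstration, and `shm` is assumed to be an open
+ # SharedMemory object): a producer retries the CAS until it wins a reservation.
+ #
+ #     info = AtomicReservedInfo(shm.buf, 0)
+ #     while True:
+ #         index, size = info.load()
+ #         ok, (index, size) = info.compare_exchange_weak(
+ #             (index, size), (index + size, 1))
+ #         if ok:
+ #             break  # this producer now owns slot `index`
+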
+
+ class AtomicUInt64:
+     """
+     Atomic operations on an 8-byte uint64_t value.
+
+     This matches the C++ std::atomic<uint64_t> used for slot data_index
+     in queue.h.
+     """
+
+     def __init__(self, buffer: memoryview, offset: int):
+         """
+         Initialize atomic uint64_t wrapper.
+
+         Args:
+             buffer: Memory buffer (typically SharedMemory.buf)
+             offset: Byte offset in buffer
+         """
+         self.buffer = buffer
+         self.offset = offset
+
+     def release(self):
+         """Release buffer reference to allow proper cleanup."""
+         self.buffer = None
+
+     def load_acquire(self) -> int:
+         """
+         Load with memory_order_acquire (matching C++ queue.h:256, 292).
+
+         Acquire semantics ensure that subsequent loads/stores cannot be
+         reordered before this load.
+
+         Returns:
+             uint64_t value
+         """
+         return _atomic_load_64(self.buffer, self.offset)
+
+     def store_release(self, value: int) -> None:
+         """
+         Store with memory_order_release (matching C++ queue.h:211, 242).
+
+         Release semantics ensure that prior loads/stores cannot be
+         reordered after this store.
+
+         Args:
+             value: uint64_t value to store
+         """
+         _atomic_store_64(self.buffer, self.offset, value)
+
+     def compare_exchange_weak(
+         self,
+         expected: int,
+         desired: int
+     ) -> Tuple[bool, int]:
+         """
+         Atomic compare-and-swap with memory_order_release on success,
+         memory_order_relaxed on failure (matching C++ queue.h:306, 312).
+
+         Args:
+             expected: Expected uint64_t value
+             desired: Desired uint64_t value
+
+         Returns:
+             Tuple of (success: bool, actual: int)
+             If success is False, actual contains the current value.
+         """
+         success, actual = _platform_cas_64(self.buffer, self.offset, expected, desired)
+         return success, actual
+
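+ # Example (illustrative; `shm` and the offset 16 are assumptions of the
+ # example, not part of the module): compare_exchange_weak composes into
+ # higher-level read-modify-write operations such as fetch-and-add.
+ #
+ #     counter = AtomicUInt64(shm.buf, 16)
+ #     while True:
+ #         cur = counter.load_acquire()
+ #         ok, cur = counter.compare_exchange_weak(cur, cur + 1)
+ #         if ok:
+ #             break  # incremented exactly once, even under contention
+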
+
+ class AtomicCursor:
+     """
+     An atomic cursor for multi-consumer work-stealing patterns.
+
+     This class wraps an atomic uint64_t that can be used for coordinating multiple
+     consumers. Each consumer atomically claims items to process, ensuring each item
+     is consumed exactly once.
+
+     Matches C++ std::atomic<uint64_t>& parameter in queue.h:283.
+
+     Supports two modes:
+     - **Local mode**: Pass a bytearray for single-process multi-threaded usage
+     - **Shared memory mode**: Pass SharedMemory.buf for multi-process usage
+
+     Examples:
+         # Local mode (multi-threading in single process)
+         from atomic_ops import AtomicCursor
+
+         cursor_buf = bytearray(8)
+         cursor = AtomicCursor(cursor_buf, 0)
+         cursor.store(0)
+         # Multiple threads can share this cursor
+
+         # Shared memory mode (multi-process)
+         from multiprocessing.shared_memory import SharedMemory
+         from atomic_ops import AtomicCursor
+
+         cursor_shm = SharedMemory(name='cursor', create=True, size=8)
+         cursor = AtomicCursor(cursor_shm.buf, 0)
+         cursor.store(0)
+         # Multiple processes can share this cursor
+     """
+
+     def __init__(self, buffer: Union[memoryview, bytearray], offset: int = 0):
+         """
+         Initialize atomic cursor wrapper.
+
+         Args:
+             buffer: Memory buffer (SharedMemory.buf for shared memory mode,
+                     or bytearray for local mode)
+             offset: Byte offset in buffer (default 0)
+         """
+         # Convert bytearray to memoryview for consistency
+         if isinstance(buffer, bytearray):
+             buffer = memoryview(buffer)
+         self._atomic: Optional[AtomicUInt64] = AtomicUInt64(buffer, offset)
+
+     def release(self):
+         """Release buffer reference to allow proper cleanup."""
+         if self._atomic:
+             self._atomic.release()
+             self._atomic = None
+
+     def load(self) -> int:
+         """
+         Load cursor value with memory_order_acquire (matching C++ queue.h:285).
+
+         Returns:
+             Current cursor value
+         """
+         if self._atomic is None:
+             raise RuntimeError("AtomicCursor has been released")
+         return self._atomic.load_acquire()
+
+     def store(self, value: int) -> None:
+         """
+         Store cursor value with memory_order_release (matching C++ queue.h:292).
+
+         Args:
+             value: New cursor value
+         """
+         if self._atomic is None:
+             raise RuntimeError("AtomicCursor has been released")
+         self._atomic.store_release(value)
+
+     def compare_exchange_weak(self, expected: int, desired: int) -> Tuple[bool, int]:
+         """
+         Atomic compare-and-swap with memory_order_release on success,
+         memory_order_relaxed on failure (matching C++ queue.h:302, 308).
+
+         Args:
+             expected: Expected cursor value
+             desired: Desired cursor value
+
+         Returns:
+             Tuple of (success: bool, actual: int)
+             If success is False, actual contains the current value.
+         """
+         if self._atomic is None:
+             raise RuntimeError("AtomicCursor has been released")
+         return self._atomic.compare_exchange_weak(expected, desired)
+
+
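+ # Example (illustrative; process_item is a hypothetical callback, and `cursor`
+ # is an AtomicCursor set up as in the class docstring): consumers claim indices
+ # with a CAS loop so each item is handled exactly once.
+ #
+ #     while True:
+ #         idx = cursor.load()
+ #         ok, idx = cursor.compare_exchange_weak(idx, idx + 1)
+ #         if ok:
+ #             process_item(idx)  # this consumer owns item `idx`
+
+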
+ __all__ = [
+     'AtomicReservedInfo',
+     'AtomicUInt64',
+     'AtomicCursor',
+     'MemoryOrder',
+     'check_platform_support',
+     'make_reserved_info',
+     'get_index',
+     'get_size'
+ ]