zae_limiter-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zae_limiter/limiter.py ADDED
@@ -0,0 +1,925 @@
+ """Main RateLimiter implementation."""
+
+ import asyncio
+ import time
+ from collections.abc import AsyncIterator, Coroutine, Iterator
+ from contextlib import asynccontextmanager, contextmanager
+ from enum import Enum
+ from typing import Any, TypeVar
+
+ from .bucket import (
+     calculate_available,
+     calculate_time_until_available,
+     try_consume,
+ )
+ from .exceptions import (
+     IncompatibleSchemaError,
+     RateLimiterUnavailable,
+     RateLimitExceeded,
+     VersionMismatchError,
+ )
+ from .lease import Lease, LeaseEntry, SyncLease
+ from .models import (
+     BucketState,
+     Entity,
+     EntityCapacity,
+     Limit,
+     LimitStatus,
+     ResourceCapacity,
+ )
+ from .repository import Repository
+ from .schema import DEFAULT_RESOURCE
+
+ _T = TypeVar("_T")
+
+
+ class FailureMode(Enum):
+     """Behavior when DynamoDB is unavailable."""
+
+     FAIL_OPEN = "open"  # Allow requests
+     FAIL_CLOSED = "closed"  # Reject requests
+
+
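+ # Illustrative sketch of the intended call shape (table name, region, and the
+ # failure-mode choice below are placeholders, not defaults of this package):
+ #
+ #     limiter = RateLimiter(
+ #         table_name="rate-limits",
+ #         region="us-east-1",
+ #         failure_mode=FailureMode.FAIL_OPEN,  # or FAIL_CLOSED to reject on outage
+ #     )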
+ class RateLimiter:
+     """
+     Async rate limiter backed by DynamoDB.
+
+     Implements token bucket algorithm with support for:
+     - Multiple limits per entity/resource
+     - Two-level hierarchy (parent/child entities)
+     - Cascade mode (consume from entity + parent)
+     - Stored limit configs
+     - Usage analytics
+     """
+
+     def __init__(
+         self,
+         table_name: str,
+         region: str | None = None,
+         endpoint_url: str | None = None,
+         create_table: bool = False,
+         create_stack: bool = False,
+         stack_parameters: dict[str, str] | None = None,
+         failure_mode: FailureMode = FailureMode.FAIL_CLOSED,
+         auto_update: bool = True,
+         strict_version: bool = True,
+         skip_version_check: bool = False,
+     ) -> None:
+         """
+         Initialize the rate limiter.
+
+         Args:
+             table_name: DynamoDB table name
+             region: AWS region
+             endpoint_url: DynamoDB endpoint URL (for local development)
+             create_table: Deprecated, use create_stack instead
+             create_stack: Create CloudFormation stack if it doesn't exist
+             stack_parameters: Parameters for CloudFormation stack
+             failure_mode: Behavior when DynamoDB is unavailable
+             auto_update: Auto-update Lambda when version mismatch detected
+             strict_version: Fail if version mismatch (when auto_update is False)
+             skip_version_check: Skip all version checks (dangerous)
+         """
+         self.table_name = table_name
+         self.failure_mode = failure_mode
+         self._auto_update = auto_update
+         self._strict_version = strict_version
+         self._skip_version_check = skip_version_check
+
+         # Handle deprecation: create_table -> create_stack
+         if create_table and not create_stack:
+             import warnings
+
+             warnings.warn(
+                 "create_table is deprecated, use create_stack instead",
+                 DeprecationWarning,
+                 stacklevel=2,
+             )
+             create_stack = create_table
+
+         self._create_stack = create_stack
+         self._stack_parameters = stack_parameters or {}
+         self._repository = Repository(
+             table_name=table_name,
+             region=region,
+             endpoint_url=endpoint_url,
+         )
+         self._initialized = False
+
+     async def _ensure_initialized(self) -> None:
+         """Ensure infrastructure exists and version is compatible."""
+         if self._initialized:
+             return
+
+         if self._create_stack:
+             await self._repository.create_table_or_stack(
+                 use_cloudformation=True,
+                 stack_parameters=self._stack_parameters,
+             )
+
+         # Version check (skip for local DynamoDB without CloudFormation)
+         if not self._skip_version_check:
+             await self._check_and_update_version()
+
+         self._initialized = True
+
+     async def _check_and_update_version(self) -> None:
+         """Check version compatibility and update Lambda if needed."""
+         from . import __version__
+         from .version import (
+             InfrastructureVersion,
+             check_compatibility,
+         )
+
+         # Get current infrastructure version
+         version_record = await self._repository.get_version_record()
+
+         if version_record is None:
+             # First time setup or legacy infrastructure - initialize version record
+             await self._initialize_version_record()
+             return
+
+         infra_version = InfrastructureVersion.from_record(version_record)
+         compatibility = check_compatibility(__version__, infra_version)
+
+         if compatibility.is_compatible and not compatibility.requires_lambda_update:
+             return
+
+         if compatibility.requires_schema_migration:
+             raise IncompatibleSchemaError(
+                 client_version=__version__,
+                 schema_version=infra_version.schema_version,
+                 message=compatibility.message,
+             )
+
+         if compatibility.requires_lambda_update:
+             if self._auto_update and not self._repository.endpoint_url:
+                 # Auto-update Lambda (skip for local DynamoDB)
+                 await self._perform_lambda_update()
+             elif self._strict_version:
+                 raise VersionMismatchError(
+                     client_version=__version__,
+                     schema_version=infra_version.schema_version,
+                     lambda_version=infra_version.lambda_version,
+                     message=compatibility.message,
+                     can_auto_update=not self._repository.endpoint_url,
+                 )
+             # else: continue with version mismatch (not strict)
+
+     async def _initialize_version_record(self) -> None:
+         """Initialize the version record for first-time setup."""
+         from . import __version__
+         from .version import get_schema_version
+
+         lambda_version = __version__ if not self._repository.endpoint_url else None
+
+         await self._repository.set_version_record(
+             schema_version=get_schema_version(),
+             lambda_version=lambda_version,
+             client_min_version="0.0.0",
+             updated_by=f"client:{__version__}",
+         )
+
+     async def _perform_lambda_update(self) -> None:
+         """Update Lambda code to match client version."""
+         from . import __version__
+         from .infra.stack_manager import StackManager
+         from .version import get_schema_version
+
+         async with StackManager(
+             self.table_name,
+             self._repository.region,
+             self._repository.endpoint_url,
+         ) as manager:
+             # Deploy updated Lambda code
+             await manager.deploy_lambda_code()
+
+             # Update version record in DynamoDB
+             await self._repository.set_version_record(
+                 schema_version=get_schema_version(),
+                 lambda_version=__version__,
+                 client_min_version="0.0.0",
+                 updated_by=f"client:{__version__}",
+             )
+
+     async def close(self) -> None:
+         """Close the underlying connections."""
+         await self._repository.close()
+
+     async def __aenter__(self) -> "RateLimiter":
+         await self._ensure_initialized()
+         return self
+
+     async def __aexit__(self, *args: Any) -> None:
+         await self.close()
+
+     # -------------------------------------------------------------------------
+     # Entity management
+     # -------------------------------------------------------------------------
+
+     async def create_entity(
+         self,
+         entity_id: str,
+         name: str | None = None,
+         parent_id: str | None = None,
+         metadata: dict[str, str] | None = None,
+     ) -> Entity:
+         """
+         Create a new entity.
+
+         Args:
+             entity_id: Unique identifier for the entity
+             name: Human-readable name (defaults to entity_id)
+             parent_id: Parent entity ID (None for root/project entities)
+             metadata: Additional metadata to store
+
+         Returns:
+             The created Entity
+
+         Raises:
+             EntityExistsError: If entity already exists
+         """
+         await self._ensure_initialized()
+         return await self._repository.create_entity(
+             entity_id=entity_id,
+             name=name,
+             parent_id=parent_id,
+             metadata=metadata,
+         )
+
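+     # Example (illustrative; IDs are placeholders): a two-level hierarchy for
+     # cascade mode, with a root entity and a child referencing it via parent_id.
+     #
+     #     await limiter.create_entity("org-acme", name="Acme Corp")
+     #     await limiter.create_entity(
+     #         "org-acme:team-search", parent_id="org-acme", metadata={"tier": "pro"}
+     #     )
+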
+     async def get_entity(self, entity_id: str) -> Entity | None:
+         """Get an entity by ID."""
+         await self._ensure_initialized()
+         return await self._repository.get_entity(entity_id)
+
+     async def delete_entity(self, entity_id: str) -> None:
+         """Delete an entity and all its related data."""
+         await self._ensure_initialized()
+         await self._repository.delete_entity(entity_id)
+
+     async def get_children(self, parent_id: str) -> list[Entity]:
+         """Get all children of a parent entity."""
+         await self._ensure_initialized()
+         return await self._repository.get_children(parent_id)
+
+     # -------------------------------------------------------------------------
+     # Rate limiting
+     # -------------------------------------------------------------------------
+
+     @asynccontextmanager
+     async def acquire(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         consume: dict[str, int],
+         cascade: bool = False,
+         use_stored_limits: bool = False,
+         failure_mode: FailureMode | None = None,
+     ) -> AsyncIterator[Lease]:
+         """
+         Acquire rate limit capacity.
+
+         Args:
+             entity_id: Entity to acquire capacity for
+             resource: Resource being accessed (e.g., "gpt-4")
+             limits: Default limits to apply
+             consume: Amounts to consume by limit name
+             cascade: If True, also consume from parent entity
+             use_stored_limits: If True, use stored limits if available
+             failure_mode: Override default failure mode
+
+         Yields:
+             Lease for managing additional consumption
+
+         Raises:
+             RateLimitExceeded: If any limit would be exceeded
+             RateLimiterUnavailable: If DynamoDB unavailable and FAIL_CLOSED
+         """
+         await self._ensure_initialized()
+         mode = failure_mode or self.failure_mode
+
+         # Acquire the lease (this may fail due to rate limit or infrastructure)
+         try:
+             lease = await self._do_acquire(
+                 entity_id=entity_id,
+                 resource=resource,
+                 limits=limits,
+                 consume=consume,
+                 cascade=cascade,
+                 use_stored_limits=use_stored_limits,
+             )
+         except RateLimitExceeded:
+             raise
+         except Exception as e:
+             if mode == FailureMode.FAIL_OPEN:
+                 # Return a no-op lease
+                 yield Lease(repository=self._repository)
+                 return
+             else:
+                 raise RateLimiterUnavailable(str(e), cause=e) from e
+
+         # Lease acquired successfully - manage the context
+         try:
+             yield lease
+             await lease._commit()
+         except Exception:
+             await lease._rollback()
+             raise
+
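+     # Example (illustrative): consuming capacity around a model call. The Limit
+     # constructor fields are defined in models.py and are elided here; the
+     # helper call_model() is hypothetical.
+     #
+     #     limits = [Limit(name="tokens", ...), Limit(name="requests", ...)]
+     #     try:
+     #         async with limiter.acquire(
+     #             entity_id="org-acme:team-search",
+     #             resource="gpt-4",
+     #             limits=limits,
+     #             consume={"tokens": 1500, "requests": 1},
+     #             cascade=True,
+     #         ) as lease:
+     #             response = await call_model(...)
+     #     except RateLimitExceeded:
+     #         ...  # back off, e.g. using time_until_available()
+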
+     async def _do_acquire(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         consume: dict[str, int],
+         cascade: bool,
+         use_stored_limits: bool,
+     ) -> Lease:
+         """Internal acquire implementation."""
+         now_ms = int(time.time() * 1000)
+
+         # Determine which entities to check
+         entity_ids = [entity_id]
+         if cascade:
+             entity = await self._repository.get_entity(entity_id)
+             if entity and entity.parent_id:
+                 entity_ids.append(entity.parent_id)
+
+         # Resolve limits for each entity
+         entity_limits: dict[str, list[Limit]] = {}
+         for eid in entity_ids:
+             if use_stored_limits:
+                 stored = await self._repository.get_limits(eid, resource)
+                 if not stored:
+                     stored = await self._repository.get_limits(eid, DEFAULT_RESOURCE)
+                 entity_limits[eid] = stored if stored else limits
+             else:
+                 entity_limits[eid] = limits
+
+         # Get or create buckets for each entity/limit
+         entries: list[LeaseEntry] = []
+         statuses: list[LimitStatus] = []
+
+         for eid in entity_ids:
+             for limit in entity_limits[eid]:
+                 # Get existing bucket or create new one
+                 state = await self._repository.get_bucket(eid, resource, limit.name)
+                 if state is None:
+                     state = BucketState.from_limit(eid, resource, limit, now_ms)
+
+                 # Try to consume
+                 amount = consume.get(limit.name, 0)
+                 result = try_consume(state, amount, now_ms)
+
+                 status = LimitStatus(
+                     entity_id=eid,
+                     resource=resource,
+                     limit_name=limit.name,
+                     limit=limit,
+                     available=result.available,
+                     requested=amount,
+                     exceeded=not result.success,
+                     retry_after_seconds=result.retry_after_seconds,
+                 )
+                 statuses.append(status)
+
+                 if result.success:
+                     # Update local state
+                     state.tokens_milli = result.new_tokens_milli
+                     state.last_refill_ms = result.new_last_refill_ms
+
+                 entries.append(
+                     LeaseEntry(
+                         entity_id=eid,
+                         resource=resource,
+                         limit=limit,
+                         state=state,
+                         consumed=amount if result.success else 0,
+                     )
+                 )
+
+         # Check for any violations
+         violations = [s for s in statuses if s.exceeded]
+         if violations:
+             raise RateLimitExceeded(statuses)
+
+         return Lease(repository=self._repository, entries=entries)
+
+     async def available(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         use_stored_limits: bool = False,
+     ) -> dict[str, int]:
+         """
+         Check available capacity without consuming.
+
+         Returns minimum available across entity (and parent if cascade).
+         Can return negative values if bucket is in debt.
+
+         Args:
+             entity_id: Entity to check
+             resource: Resource to check
+             limits: Default limits
+             use_stored_limits: Use stored limits if available
+
+         Returns:
+             Dict mapping limit_name -> available tokens
+         """
+         await self._ensure_initialized()
+         now_ms = int(time.time() * 1000)
+
+         # Resolve limits
+         if use_stored_limits:
+             stored = await self._repository.get_limits(entity_id, resource)
+             if not stored:
+                 stored = await self._repository.get_limits(entity_id, DEFAULT_RESOURCE)
+             limits = stored if stored else limits
+
+         result: dict[str, int] = {}
+         for limit in limits:
+             state = await self._repository.get_bucket(entity_id, resource, limit.name)
+             if state is None:
+                 result[limit.name] = limit.burst
+             else:
+                 result[limit.name] = calculate_available(state, now_ms)
+
+         return result
+
+     async def time_until_available(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         needed: dict[str, int],
+         use_stored_limits: bool = False,
+     ) -> float:
+         """
+         Calculate seconds until requested capacity is available.
+
+         Args:
+             entity_id: Entity to check
+             resource: Resource to check
+             limits: Default limits
+             needed: Required amounts by limit name
+             use_stored_limits: Use stored limits if available
+
+         Returns:
+             Seconds until available (0.0 if already available)
+         """
+         await self._ensure_initialized()
+         now_ms = int(time.time() * 1000)
+
+         # Resolve limits
+         if use_stored_limits:
+             stored = await self._repository.get_limits(entity_id, resource)
+             if not stored:
+                 stored = await self._repository.get_limits(entity_id, DEFAULT_RESOURCE)
+             limits = stored if stored else limits
+
+         max_wait = 0.0
+         for limit in limits:
+             amount = needed.get(limit.name, 0)
+             if amount <= 0:
+                 continue
+
+             state = await self._repository.get_bucket(entity_id, resource, limit.name)
+             if state is None:
+                 continue  # New bucket, will have full capacity
+
+             wait = calculate_time_until_available(state, amount, now_ms)
+             max_wait = max(max_wait, wait)
+
+         return max_wait
+
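+     # Example (illustrative): sleep until capacity should exist, then acquire.
+     # The limits list and amounts mirror the sketch above acquire().
+     #
+     #     wait = await limiter.time_until_available(
+     #         "org-acme:team-search", "gpt-4", limits, needed={"tokens": 1500}
+     #     )
+     #     if wait > 0:
+     #         await asyncio.sleep(wait)
+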
+     # -------------------------------------------------------------------------
+     # Stored limits management
+     # -------------------------------------------------------------------------
+
+     async def set_limits(
+         self,
+         entity_id: str,
+         limits: list[Limit],
+         resource: str = DEFAULT_RESOURCE,
+     ) -> None:
+         """
+         Store limit configs for an entity.
+
+         Args:
+             entity_id: Entity to set limits for
+             limits: Limits to store
+             resource: Resource these limits apply to (or _default_)
+         """
+         await self._ensure_initialized()
+         await self._repository.set_limits(entity_id, limits, resource)
+
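+     # Example (illustrative): store limits once per entity, then have acquire()
+     # resolve them with use_stored_limits=True. Limit fields are elided as above.
+     #
+     #     await limiter.set_limits("org-acme", limits=[Limit(name="tokens", ...)])
+     #     async with limiter.acquire(
+     #         "org-acme", "gpt-4", limits=[], consume={"tokens": 500},
+     #         use_stored_limits=True,
+     #     ) as lease:
+     #         ...
+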
+     async def get_limits(
+         self,
+         entity_id: str,
+         resource: str = DEFAULT_RESOURCE,
+     ) -> list[Limit]:
+         """
+         Get stored limit configs for an entity.
+
+         Args:
+             entity_id: Entity to get limits for
+             resource: Resource to get limits for
+
+         Returns:
+             List of stored limits (empty if none)
+         """
+         await self._ensure_initialized()
+         return await self._repository.get_limits(entity_id, resource)
+
+     async def delete_limits(
+         self,
+         entity_id: str,
+         resource: str = DEFAULT_RESOURCE,
+     ) -> None:
+         """
+         Delete stored limit configs for an entity.
+
+         Args:
+             entity_id: Entity to delete limits for
+             resource: Resource to delete limits for
+         """
+         await self._ensure_initialized()
+         await self._repository.delete_limits(entity_id, resource)
+
+     # -------------------------------------------------------------------------
+     # Capacity queries
+     # -------------------------------------------------------------------------
+
+     async def get_resource_capacity(
+         self,
+         resource: str,
+         limit_name: str,
+         parents_only: bool = False,
+     ) -> ResourceCapacity:
+         """
+         Get aggregated capacity for a resource across all entities.
+
+         Args:
+             resource: Resource to query
+             limit_name: Limit name to query
+             parents_only: If True, only include parent entities
+
+         Returns:
+             ResourceCapacity with aggregated data
+         """
+         await self._ensure_initialized()
+         now_ms = int(time.time() * 1000)
+
+         buckets = await self._repository.get_resource_buckets(resource, limit_name)
+
+         # Filter to parents only if requested
+         if parents_only:
+             parent_ids = set()
+             for bucket in buckets:
+                 entity = await self._repository.get_entity(bucket.entity_id)
+                 if entity and entity.is_parent:
+                     parent_ids.add(bucket.entity_id)
+             buckets = [b for b in buckets if b.entity_id in parent_ids]
+
+         entities: list[EntityCapacity] = []
+         total_capacity = 0
+         total_available = 0
+
+         for bucket in buckets:
+             available = calculate_available(bucket, now_ms)
+             capacity = bucket.capacity
+
+             total_capacity += capacity
+             total_available += available
+
+             entities.append(
+                 EntityCapacity(
+                     entity_id=bucket.entity_id,
+                     capacity=capacity,
+                     available=available,
+                     utilization_pct=(
+                         ((capacity - available) / capacity * 100) if capacity > 0 else 0
+                     ),
+                 )
+             )
+
+         return ResourceCapacity(
+             resource=resource,
+             limit_name=limit_name,
+             total_capacity=total_capacity,
+             total_available=total_available,
+             utilization_pct=(
+                 ((total_capacity - total_available) / total_capacity * 100)
+                 if total_capacity > 0
+                 else 0
+             ),
+             entities=entities,
+         )
+
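+     # Example (illustrative): aggregate utilization for one resource/limit pair.
+     #
+     #     cap = await limiter.get_resource_capacity("gpt-4", "tokens", parents_only=True)
+     #     print(cap.total_available, f"{cap.utilization_pct:.1f}%")
+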
+     # -------------------------------------------------------------------------
+     # Table management
+     # -------------------------------------------------------------------------
+
+     async def create_table(self) -> None:
+         """Create the DynamoDB table if it doesn't exist."""
+         await self._repository.create_table()
+         self._initialized = True
+
+     async def delete_table(self) -> None:
+         """Delete the DynamoDB table."""
+         await self._repository.delete_table()
+         self._initialized = False
+
+     async def create_stack(
+         self,
+         stack_name: str | None = None,
+         parameters: dict[str, str] | None = None,
+     ) -> dict[str, Any]:
+         """
+         Create CloudFormation stack for infrastructure.
+
+         Args:
+             stack_name: Override stack name (default: auto-generated)
+             parameters: Stack parameters dict (e.g.,
+                 {'snapshot_windows': 'hourly,daily', 'retention_days': '90'})
+
+         Returns:
+             Dict with stack_id, stack_name, and status
+
+         Raises:
+             StackCreationError: If stack creation fails
+         """
+         from .infra.stack_manager import StackManager
+
+         async with StackManager(
+             self.table_name, self._repository.region, self._repository.endpoint_url
+         ) as manager:
+             return await manager.create_stack(stack_name, parameters)
+
+     async def delete_stack(self, stack_name: str | None = None) -> None:
+         """
+         Delete CloudFormation stack.
+
+         Args:
+             stack_name: Stack name (default: auto-generated from table name)
+
+         Raises:
+             StackCreationError: If deletion fails
+         """
+         from .infra.stack_manager import StackManager
+
+         async with StackManager(
+             self.table_name, self._repository.region, self._repository.endpoint_url
+         ) as manager:
+             stack_name = stack_name or manager.get_stack_name(self.table_name)
+             await manager.delete_stack(stack_name)
+
+
+ class SyncRateLimiter:
+     """
+     Synchronous rate limiter backed by DynamoDB.
+
+     Wraps RateLimiter, running async operations in an event loop.
+     """
+
+     def __init__(
+         self,
+         table_name: str,
+         region: str | None = None,
+         endpoint_url: str | None = None,
+         create_table: bool = False,
+         create_stack: bool = False,
+         stack_parameters: dict[str, str] | None = None,
+         failure_mode: FailureMode = FailureMode.FAIL_CLOSED,
+         auto_update: bool = True,
+         strict_version: bool = True,
+         skip_version_check: bool = False,
+     ) -> None:
+         self._limiter = RateLimiter(
+             table_name=table_name,
+             region=region,
+             endpoint_url=endpoint_url,
+             create_table=create_table,
+             create_stack=create_stack,
+             stack_parameters=stack_parameters,
+             failure_mode=failure_mode,
+             auto_update=auto_update,
+             strict_version=strict_version,
+             skip_version_check=skip_version_check,
+         )
+         self._loop: asyncio.AbstractEventLoop | None = None
+
+     def _get_loop(self) -> asyncio.AbstractEventLoop:
+         """Get or create an event loop."""
+         if self._loop is None or self._loop.is_closed():
+             try:
+                 self._loop = asyncio.get_event_loop()
+             except RuntimeError:
+                 self._loop = asyncio.new_event_loop()
+                 asyncio.set_event_loop(self._loop)
+         return self._loop
+
+     def _run(self, coro: Coroutine[Any, Any, _T]) -> _T:
+         """Run a coroutine in the event loop."""
+         return self._get_loop().run_until_complete(coro)
+
+     def close(self) -> None:
+         """Close the underlying connections."""
+         self._run(self._limiter.close())
+
+     def __enter__(self) -> "SyncRateLimiter":
+         self._run(self._limiter._ensure_initialized())
+         return self
+
+     def __exit__(self, *args: Any) -> None:
+         self.close()
+
+     # -------------------------------------------------------------------------
+     # Entity management
+     # -------------------------------------------------------------------------
+
+     def create_entity(
+         self,
+         entity_id: str,
+         name: str | None = None,
+         parent_id: str | None = None,
+         metadata: dict[str, str] | None = None,
+     ) -> Entity:
+         """Create a new entity."""
+         return self._run(
+             self._limiter.create_entity(
+                 entity_id=entity_id,
+                 name=name,
+                 parent_id=parent_id,
+                 metadata=metadata,
+             )
+         )
+
+     def get_entity(self, entity_id: str) -> Entity | None:
+         """Get an entity by ID."""
+         return self._run(self._limiter.get_entity(entity_id))
+
+     def delete_entity(self, entity_id: str) -> None:
+         """Delete an entity and all its related data."""
+         self._run(self._limiter.delete_entity(entity_id))
+
+     def get_children(self, parent_id: str) -> list[Entity]:
+         """Get all children of a parent entity."""
+         return self._run(self._limiter.get_children(parent_id))
+
+     # -------------------------------------------------------------------------
+     # Rate limiting
+     # -------------------------------------------------------------------------
+
+     @contextmanager
+     def acquire(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         consume: dict[str, int],
+         cascade: bool = False,
+         use_stored_limits: bool = False,
+         failure_mode: FailureMode | None = None,
+     ) -> Iterator[SyncLease]:
+         """Acquire rate limit capacity (synchronous)."""
+         loop = self._get_loop()
+
+         async def do_acquire() -> tuple[Lease, bool]:
+             ctx = self._limiter.acquire(
+                 entity_id=entity_id,
+                 resource=resource,
+                 limits=limits,
+                 consume=consume,
+                 cascade=cascade,
+                 use_stored_limits=use_stored_limits,
+                 failure_mode=failure_mode,
+             )
+             lease = await ctx.__aenter__()
+             return lease, True
+
+         async def do_commit(lease: Lease) -> None:
+             await lease._commit()
+
+         async def do_rollback(lease: Lease) -> None:
+             await lease._rollback()
+
+         lease, _ = loop.run_until_complete(do_acquire())
+         sync_lease = SyncLease(lease, loop)
+
+         try:
+             yield sync_lease
+             loop.run_until_complete(do_commit(lease))
+         except Exception:
+             loop.run_until_complete(do_rollback(lease))
+             raise
+
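+     # Example (illustrative): the synchronous wrapper mirrors the async API;
+     # the table name and limits below are placeholders.
+     #
+     #     with SyncRateLimiter(table_name="rate-limits") as limiter:
+     #         with limiter.acquire(
+     #             "org-acme", "gpt-4", limits=limits, consume={"requests": 1}
+     #         ) as lease:
+     #             ...
+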
+     def available(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         use_stored_limits: bool = False,
+     ) -> dict[str, int]:
+         """Check available capacity without consuming."""
+         return self._run(
+             self._limiter.available(
+                 entity_id=entity_id,
+                 resource=resource,
+                 limits=limits,
+                 use_stored_limits=use_stored_limits,
+             )
+         )
+
+     def time_until_available(
+         self,
+         entity_id: str,
+         resource: str,
+         limits: list[Limit],
+         needed: dict[str, int],
+         use_stored_limits: bool = False,
+     ) -> float:
+         """Calculate seconds until requested capacity is available."""
+         return self._run(
+             self._limiter.time_until_available(
+                 entity_id=entity_id,
+                 resource=resource,
+                 limits=limits,
+                 needed=needed,
+                 use_stored_limits=use_stored_limits,
+             )
+         )
+
+     # -------------------------------------------------------------------------
+     # Stored limits management
+     # -------------------------------------------------------------------------
+
+     def set_limits(
+         self,
+         entity_id: str,
+         limits: list[Limit],
+         resource: str = DEFAULT_RESOURCE,
+     ) -> None:
+         """Store limit configs for an entity."""
+         self._run(self._limiter.set_limits(entity_id, limits, resource))
+
+     def get_limits(
+         self,
+         entity_id: str,
+         resource: str = DEFAULT_RESOURCE,
+     ) -> list[Limit]:
+         """Get stored limit configs for an entity."""
+         return self._run(self._limiter.get_limits(entity_id, resource))
+
+     def delete_limits(
+         self,
+         entity_id: str,
+         resource: str = DEFAULT_RESOURCE,
+     ) -> None:
+         """Delete stored limit configs for an entity."""
+         self._run(self._limiter.delete_limits(entity_id, resource))
+
+     # -------------------------------------------------------------------------
+     # Capacity queries
+     # -------------------------------------------------------------------------
+
+     def get_resource_capacity(
+         self,
+         resource: str,
+         limit_name: str,
+         parents_only: bool = False,
+     ) -> ResourceCapacity:
+         """Get aggregated capacity for a resource across all entities."""
+         return self._run(
+             self._limiter.get_resource_capacity(
+                 resource=resource,
+                 limit_name=limit_name,
+                 parents_only=parents_only,
+             )
+         )
+
+     # -------------------------------------------------------------------------
+     # Table management
+     # -------------------------------------------------------------------------
+
+     def create_table(self) -> None:
+         """Create the DynamoDB table if it doesn't exist."""
+         self._run(self._limiter.create_table())
+
+     def delete_table(self) -> None:
+         """Delete the DynamoDB table."""
+         self._run(self._limiter.delete_table())
+
+     def create_stack(
+         self,
+         stack_name: str | None = None,
+         parameters: dict[str, str] | None = None,
+     ) -> dict[str, Any]:
+         """Create CloudFormation stack for infrastructure."""
+         return self._run(self._limiter.create_stack(stack_name, parameters))
+
+     def delete_stack(self, stack_name: str | None = None) -> None:
+         """Delete CloudFormation stack."""
+         self._run(self._limiter.delete_stack(stack_name))