zae-limiter 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zae_limiter/__init__.py +130 -0
- zae_limiter/aggregator/__init__.py +11 -0
- zae_limiter/aggregator/handler.py +54 -0
- zae_limiter/aggregator/processor.py +270 -0
- zae_limiter/bucket.py +291 -0
- zae_limiter/cli.py +608 -0
- zae_limiter/exceptions.py +214 -0
- zae_limiter/infra/__init__.py +10 -0
- zae_limiter/infra/cfn_template.yaml +255 -0
- zae_limiter/infra/lambda_builder.py +85 -0
- zae_limiter/infra/stack_manager.py +536 -0
- zae_limiter/lease.py +196 -0
- zae_limiter/limiter.py +925 -0
- zae_limiter/migrations/__init__.py +114 -0
- zae_limiter/migrations/v1_0_0.py +55 -0
- zae_limiter/models.py +302 -0
- zae_limiter/repository.py +656 -0
- zae_limiter/schema.py +163 -0
- zae_limiter/version.py +214 -0
- zae_limiter-0.1.0.dist-info/METADATA +470 -0
- zae_limiter-0.1.0.dist-info/RECORD +24 -0
- zae_limiter-0.1.0.dist-info/WHEEL +4 -0
- zae_limiter-0.1.0.dist-info/entry_points.txt +2 -0
- zae_limiter-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Migration framework for zae-limiter schema changes.
|
|
3
|
+
|
|
4
|
+
This module provides infrastructure for managing schema migrations
|
|
5
|
+
when upgrading between major versions of zae-limiter.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
from typing import TYPE_CHECKING, Protocol
|
|
12
|
+
|
|
13
|
+
from ..version import parse_version
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from ..repository import Repository
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class MigrationFunc(Protocol):
    """Protocol for migration functions.

    Structural interface: any async callable that accepts a Repository
    and returns None satisfies it (used for both `migrate` and `rollback`
    on Migration).
    """

    async def __call__(self, repository: Repository) -> None:
        """Execute the migration against *repository*; raise on failure."""
        ...
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class Migration:
    """Represents a single schema migration step.

    Instances are placed in the module-level registry via
    `register_migration` and applied in version order by
    `apply_migrations`.
    """

    version: str  # Target schema version this step upgrades to (e.g., "1.1.0")
    description: str  # Human-readable description
    reversible: bool  # Can this migration be rolled back?
    migrate: MigrationFunc  # Forward migration function
    rollback: MigrationFunc | None = None  # Rollback function (only if reversible)
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Registry of all migrations, kept sorted by version.
# register_migration() re-sorts after every insert, using parse_version
# as the sort key, so iteration order is always upgrade order.
_MIGRATIONS: list[Migration] = []
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def register_migration(migration: Migration) -> None:
    """Insert *migration* into the global registry, keeping version order."""
    _MIGRATIONS.append(migration)
    # Re-sort in place (slice assignment preserves the list identity for
    # any code already holding a reference to the registry).
    _MIGRATIONS[:] = sorted(_MIGRATIONS, key=lambda entry: parse_version(entry.version))
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def get_migrations() -> list[Migration]:
    """Return a shallow snapshot of all registered migrations, in version order."""
    return list(_MIGRATIONS)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def get_migrations_between(from_version: str, to_version: str) -> list[Migration]:
    """
    Get migrations needed to upgrade from one version to another.

    Args:
        from_version: Current schema version
        to_version: Target schema version

    Returns:
        List of migrations to apply in order; empty when *to_version*
        is not strictly newer than *from_version*.
    """
    lower = parse_version(from_version)
    upper = parse_version(to_version)

    # Downgrades and no-ops need no migrations.
    if lower >= upper:
        return []

    # Registry is already version-sorted, so a filtered pass preserves order.
    return [
        candidate
        for candidate in _MIGRATIONS
        if lower < parse_version(candidate.version) <= upper
    ]
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
async def apply_migrations(
    repository: Repository,
    from_version: str,
    to_version: str,
) -> list[str]:
    """
    Apply every pending migration between two schema versions, in order.

    Args:
        repository: Repository instance handed to each migration
        from_version: Current schema version
        to_version: Target schema version

    Returns:
        Versions of the migrations that were applied, in order

    Raises:
        RuntimeError: If a migration fails; the original exception is
            chained and the message lists the migrations that had
            already succeeded.
    """
    applied: list[str] = []
    for migration in get_migrations_between(from_version, to_version):
        try:
            await migration.migrate(repository)
        except Exception as e:
            raise RuntimeError(
                f"Migration to {migration.version} failed: {e}. Applied migrations: {applied}"
            ) from e
        # Record success only after the step completed without raising.
        applied.append(migration.version)
    return applied
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# Import built-in migrations to register them
|
|
114
|
+
from . import v1_0_0 as _v1_0_0 # noqa: F401, E402
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Migration: v1.0.0 (Initial schema)
|
|
3
|
+
|
|
4
|
+
This is the baseline migration that represents the initial schema version.
|
|
5
|
+
It does not perform any actual migration - it simply documents the initial
|
|
6
|
+
schema structure.
|
|
7
|
+
|
|
8
|
+
Schema v1.0.0 includes:
|
|
9
|
+
- DynamoDB table with PAY_PER_REQUEST billing
|
|
10
|
+
- Primary key: PK (partition) + SK (sort)
|
|
11
|
+
- GSI1: Parent -> Children lookups
|
|
12
|
+
- GSI2: Resource aggregation
|
|
13
|
+
- TTL on 'ttl' attribute
|
|
14
|
+
- Streams enabled with NEW_AND_OLD_IMAGES
|
|
15
|
+
|
|
16
|
+
Key patterns:
|
|
17
|
+
- Entity metadata: PK=ENTITY#{id}, SK=#META
|
|
18
|
+
- Buckets: PK=ENTITY#{id}, SK=#BUCKET#{resource}#{limit_name}
|
|
19
|
+
- Limits: PK=ENTITY#{id}, SK=#LIMIT#{resource}#{limit_name}
|
|
20
|
+
- Usage: PK=ENTITY#{id}, SK=#USAGE#{resource}#{window_key}
|
|
21
|
+
- Version: PK=SYSTEM#, SK=#VERSION
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
from __future__ import annotations
|
|
25
|
+
|
|
26
|
+
from typing import TYPE_CHECKING
|
|
27
|
+
|
|
28
|
+
from . import Migration, register_migration
|
|
29
|
+
|
|
30
|
+
if TYPE_CHECKING:
|
|
31
|
+
from ..repository import Repository
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
async def migrate_v1_0_0(repository: Repository) -> None:
    """
    Baseline migration for schema v1.0.0 — intentionally a no-op.

    The initial schema is created by infrastructure deployment rather
    than by a migration; this function only documents the baseline and
    anchors the registry's version ordering for future migrations.
    """
    return None
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# Register the baseline migration at import time so the registry always
# contains a v1.0.0 entry (irreversible: there is nothing before it).
register_migration(
    Migration(
        version="1.0.0",
        description="Initial schema (baseline)",
        reversible=False,
        migrate=migrate_v1_0_0,
        rollback=None,
    )
)
|
zae_limiter/models.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
"""Core models for zae-limiter."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(frozen=True)
|
|
8
|
+
class Limit:
|
|
9
|
+
"""
|
|
10
|
+
Token bucket rate limit configuration.
|
|
11
|
+
|
|
12
|
+
Refill rate is stored as a fraction (refill_amount / refill_period_seconds)
|
|
13
|
+
to avoid floating point precision issues.
|
|
14
|
+
|
|
15
|
+
Attributes:
|
|
16
|
+
name: Unique identifier for this limit type (e.g., "rpm", "tpm")
|
|
17
|
+
capacity: Max tokens that refill over the period (sustained rate)
|
|
18
|
+
burst: Max tokens in bucket (>= capacity, allows bursting)
|
|
19
|
+
refill_amount: Numerator of refill rate
|
|
20
|
+
refill_period_seconds: Denominator of refill rate
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
name: str
|
|
24
|
+
capacity: int
|
|
25
|
+
burst: int
|
|
26
|
+
refill_amount: int
|
|
27
|
+
refill_period_seconds: int
|
|
28
|
+
|
|
29
|
+
def __post_init__(self) -> None:
|
|
30
|
+
if self.capacity <= 0:
|
|
31
|
+
raise ValueError("capacity must be positive")
|
|
32
|
+
if self.burst < self.capacity:
|
|
33
|
+
raise ValueError("burst must be >= capacity")
|
|
34
|
+
if self.refill_amount <= 0:
|
|
35
|
+
raise ValueError("refill_amount must be positive")
|
|
36
|
+
if self.refill_period_seconds <= 0:
|
|
37
|
+
raise ValueError("refill_period_seconds must be positive")
|
|
38
|
+
|
|
39
|
+
@classmethod
|
|
40
|
+
def per_second(
|
|
41
|
+
cls,
|
|
42
|
+
name: str,
|
|
43
|
+
capacity: int,
|
|
44
|
+
burst: int | None = None,
|
|
45
|
+
) -> "Limit":
|
|
46
|
+
"""Create a limit that refills `capacity` tokens per second."""
|
|
47
|
+
return cls(
|
|
48
|
+
name=name,
|
|
49
|
+
capacity=capacity,
|
|
50
|
+
burst=burst if burst is not None else capacity,
|
|
51
|
+
refill_amount=capacity,
|
|
52
|
+
refill_period_seconds=1,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
@classmethod
|
|
56
|
+
def per_minute(
|
|
57
|
+
cls,
|
|
58
|
+
name: str,
|
|
59
|
+
capacity: int,
|
|
60
|
+
burst: int | None = None,
|
|
61
|
+
) -> "Limit":
|
|
62
|
+
"""Create a limit that refills `capacity` tokens per minute."""
|
|
63
|
+
return cls(
|
|
64
|
+
name=name,
|
|
65
|
+
capacity=capacity,
|
|
66
|
+
burst=burst if burst is not None else capacity,
|
|
67
|
+
refill_amount=capacity,
|
|
68
|
+
refill_period_seconds=60,
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
@classmethod
|
|
72
|
+
def per_hour(
|
|
73
|
+
cls,
|
|
74
|
+
name: str,
|
|
75
|
+
capacity: int,
|
|
76
|
+
burst: int | None = None,
|
|
77
|
+
) -> "Limit":
|
|
78
|
+
"""Create a limit that refills `capacity` tokens per hour."""
|
|
79
|
+
return cls(
|
|
80
|
+
name=name,
|
|
81
|
+
capacity=capacity,
|
|
82
|
+
burst=burst if burst is not None else capacity,
|
|
83
|
+
refill_amount=capacity,
|
|
84
|
+
refill_period_seconds=3600,
|
|
85
|
+
)
|
|
86
|
+
|
|
87
|
+
@classmethod
|
|
88
|
+
def per_day(
|
|
89
|
+
cls,
|
|
90
|
+
name: str,
|
|
91
|
+
capacity: int,
|
|
92
|
+
burst: int | None = None,
|
|
93
|
+
) -> "Limit":
|
|
94
|
+
"""Create a limit that refills `capacity` tokens per day."""
|
|
95
|
+
return cls(
|
|
96
|
+
name=name,
|
|
97
|
+
capacity=capacity,
|
|
98
|
+
burst=burst if burst is not None else capacity,
|
|
99
|
+
refill_amount=capacity,
|
|
100
|
+
refill_period_seconds=86400,
|
|
101
|
+
)
|
|
102
|
+
|
|
103
|
+
@classmethod
|
|
104
|
+
def custom(
|
|
105
|
+
cls,
|
|
106
|
+
name: str,
|
|
107
|
+
capacity: int,
|
|
108
|
+
refill_amount: int,
|
|
109
|
+
refill_period_seconds: int,
|
|
110
|
+
burst: int | None = None,
|
|
111
|
+
) -> "Limit":
|
|
112
|
+
"""
|
|
113
|
+
Create a custom limit with explicit refill rate.
|
|
114
|
+
|
|
115
|
+
Example: Sustain 100/sec with burst of 1000
|
|
116
|
+
Limit.custom("requests", capacity=100, refill_amount=100,
|
|
117
|
+
refill_period_seconds=1, burst=1000)
|
|
118
|
+
"""
|
|
119
|
+
return cls(
|
|
120
|
+
name=name,
|
|
121
|
+
capacity=capacity,
|
|
122
|
+
burst=burst if burst is not None else capacity,
|
|
123
|
+
refill_amount=refill_amount,
|
|
124
|
+
refill_period_seconds=refill_period_seconds,
|
|
125
|
+
)
|
|
126
|
+
|
|
127
|
+
@property
|
|
128
|
+
def refill_rate(self) -> float:
|
|
129
|
+
"""Tokens per second (for display/debugging)."""
|
|
130
|
+
return self.refill_amount / self.refill_period_seconds
|
|
131
|
+
|
|
132
|
+
def to_dict(self) -> dict[str, str | int]:
|
|
133
|
+
"""Serialize to dictionary for storage."""
|
|
134
|
+
return {
|
|
135
|
+
"name": self.name,
|
|
136
|
+
"capacity": self.capacity,
|
|
137
|
+
"burst": self.burst,
|
|
138
|
+
"refill_amount": self.refill_amount,
|
|
139
|
+
"refill_period_seconds": self.refill_period_seconds,
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
@classmethod
|
|
143
|
+
def from_dict(cls, data: dict[str, Any]) -> "Limit":
|
|
144
|
+
"""Deserialize from dictionary."""
|
|
145
|
+
return cls(
|
|
146
|
+
name=data["name"],
|
|
147
|
+
capacity=data["capacity"],
|
|
148
|
+
burst=data["burst"],
|
|
149
|
+
refill_amount=data["refill_amount"],
|
|
150
|
+
refill_period_seconds=data["refill_period_seconds"],
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
@dataclass
|
|
155
|
+
class Entity:
|
|
156
|
+
"""
|
|
157
|
+
An entity that can have rate limits applied.
|
|
158
|
+
|
|
159
|
+
Entities can be parents (projects) or children (API keys).
|
|
160
|
+
Children have a parent_id reference.
|
|
161
|
+
"""
|
|
162
|
+
|
|
163
|
+
id: str
|
|
164
|
+
name: str | None = None
|
|
165
|
+
parent_id: str | None = None
|
|
166
|
+
metadata: dict[str, str] = field(default_factory=dict)
|
|
167
|
+
created_at: str | None = None
|
|
168
|
+
|
|
169
|
+
@property
|
|
170
|
+
def is_parent(self) -> bool:
|
|
171
|
+
"""True if this entity has no parent (is a root/project)."""
|
|
172
|
+
return self.parent_id is None
|
|
173
|
+
|
|
174
|
+
@property
|
|
175
|
+
def is_child(self) -> bool:
|
|
176
|
+
"""True if this entity has a parent."""
|
|
177
|
+
return self.parent_id is not None
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
@dataclass
class LimitStatus:
    """
    Status of a specific limit check.

    Returned in RateLimitExceeded to provide full visibility into
    all limits that were checked.
    """

    entity_id: str
    resource: str
    limit_name: str
    limit: Limit
    available: int  # current available (can be negative)
    requested: int  # amount requested
    exceeded: bool  # True if this limit was exceeded
    retry_after_seconds: float  # time until `requested` is available (0 if not exceeded)

    @property
    def deficit(self) -> int:
        """How many tokens short we are (0 if not exceeded)."""
        shortfall = self.requested - self.available
        return shortfall if shortfall > 0 else 0
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
@dataclass
class BucketState:
    """
    Internal state of a token bucket.

    All token values are stored in millitokens (x1000) for precision.
    """

    entity_id: str
    resource: str
    limit_name: str
    tokens_milli: int  # current tokens (in millitokens)
    last_refill_ms: int  # epoch milliseconds
    capacity_milli: int  # max sustained (in millitokens)
    burst_milli: int  # max burst (in millitokens)
    refill_amount_milli: int  # refill numerator (in millitokens)
    refill_period_ms: int  # refill denominator (in milliseconds)

    @property
    def tokens(self) -> int:
        """Current whole tokens (floor of millitokens / 1000)."""
        whole, _rem = divmod(self.tokens_milli, 1000)
        return whole

    @property
    def capacity(self) -> int:
        """Sustained capacity in whole tokens."""
        whole, _rem = divmod(self.capacity_milli, 1000)
        return whole

    @property
    def burst(self) -> int:
        """Burst capacity in whole tokens."""
        whole, _rem = divmod(self.burst_milli, 1000)
        return whole

    @classmethod
    def from_limit(
        cls,
        entity_id: str,
        resource: str,
        limit: Limit,
        now_ms: int,
    ) -> "BucketState":
        """Create a new bucket, filled to burst capacity, from a Limit."""
        scale = 1000  # tokens/seconds -> millitokens/milliseconds
        return cls(
            entity_id=entity_id,
            resource=resource,
            limit_name=limit.name,
            tokens_milli=limit.burst * scale,  # start full (at burst)
            last_refill_ms=now_ms,
            capacity_milli=limit.capacity * scale,
            burst_milli=limit.burst * scale,
            refill_amount_milli=limit.refill_amount * scale,
            refill_period_ms=limit.refill_period_seconds * scale,
        )
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
@dataclass
class UsageSnapshot:
    """Aggregated usage for a time window (one entity/resource pair)."""

    entity_id: str
    resource: str
    window_start: str  # ISO timestamp (start of window)
    window_end: str  # ISO timestamp (end of window)
    window_type: str  # "hourly", "daily"
    counters: dict[str, int]  # limit_name -> total consumed
    total_events: int  # count of events folded into this window
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
@dataclass
class ResourceCapacity:
    """Aggregated capacity info for a resource across entities."""

    resource: str
    limit_name: str
    total_capacity: int  # aggregate capacity across `entities`
    total_available: int  # aggregate available tokens across `entities`
    utilization_pct: float  # overall utilization percentage
    entities: list["EntityCapacity"]  # per-entity breakdown
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
@dataclass
class EntityCapacity:
    """Capacity info for a single entity (one row of ResourceCapacity)."""

    entity_id: str
    capacity: int  # this entity's configured capacity
    available: int  # tokens currently available to this entity
    utilization_pct: float  # this entity's utilization percentage
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
class LimitName:
    """Common limit name constants.

    Plain string constants (not an Enum) so they can be passed anywhere
    a limit-name string is accepted.
    """

    RPM = "rpm"  # requests per minute
    RPH = "rph"  # requests per hour
    RPD = "rpd"  # requests per day
    TPM = "tpm"  # tokens per minute
    TPH = "tph"  # tokens per hour
    TPD = "tpd"  # tokens per day
|