zae-limiter 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zae_limiter/__init__.py +130 -0
- zae_limiter/aggregator/__init__.py +11 -0
- zae_limiter/aggregator/handler.py +54 -0
- zae_limiter/aggregator/processor.py +270 -0
- zae_limiter/bucket.py +291 -0
- zae_limiter/cli.py +608 -0
- zae_limiter/exceptions.py +214 -0
- zae_limiter/infra/__init__.py +10 -0
- zae_limiter/infra/cfn_template.yaml +255 -0
- zae_limiter/infra/lambda_builder.py +85 -0
- zae_limiter/infra/stack_manager.py +536 -0
- zae_limiter/lease.py +196 -0
- zae_limiter/limiter.py +925 -0
- zae_limiter/migrations/__init__.py +114 -0
- zae_limiter/migrations/v1_0_0.py +55 -0
- zae_limiter/models.py +302 -0
- zae_limiter/repository.py +656 -0
- zae_limiter/schema.py +163 -0
- zae_limiter/version.py +214 -0
- zae_limiter-0.1.0.dist-info/METADATA +470 -0
- zae_limiter-0.1.0.dist-info/RECORD +24 -0
- zae_limiter-0.1.0.dist-info/WHEEL +4 -0
- zae_limiter-0.1.0.dist-info/entry_points.txt +2 -0
- zae_limiter-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
"""Exceptions for zae-limiter."""
|
|
2
|
+
|
|
3
|
+
from typing import TYPE_CHECKING, Any
|
|
4
|
+
|
|
5
|
+
if TYPE_CHECKING:
|
|
6
|
+
from .models import LimitStatus
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RateLimitError(Exception):
    """Base exception for rate limiting errors.

    All library-specific exceptions (except stack-deployment errors)
    derive from this class so callers can catch one root type.
    """
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class RateLimitExceeded(RateLimitError):  # noqa: N818
    """
    Raised when one or more rate limits would be exceeded.

    Provides full visibility into ALL limits that were checked,
    both passed and failed, to help callers understand the full picture.

    Attributes:
        statuses: Status of ALL limits checked (both passed and failed)
        violations: Only the limits that were exceeded
        passed: Only the limits that passed
        retry_after_seconds: Time until ALL requested capacity is available
        primary_violation: The violation with longest retry time (bottleneck)
    """

    def __init__(self, statuses: list["LimitStatus"]) -> None:
        self.statuses = statuses

        # Partition into failed vs. passed limits in a single pass.
        violations: list["LimitStatus"] = []
        passed: list["LimitStatus"] = []
        for status in statuses:
            (violations if status.exceeded else passed).append(status)
        self.violations = violations
        self.passed = passed

        if not violations:
            raise ValueError("RateLimitExceeded requires at least one violation")

        # The bottleneck is the violation that takes longest to clear.
        # (First-wins on ties, matching max() semantics.)
        bottleneck = violations[0]
        for candidate in violations[1:]:
            if candidate.retry_after_seconds > bottleneck.retry_after_seconds:
                bottleneck = candidate
        self.primary_violation = bottleneck
        self.retry_after_seconds = bottleneck.retry_after_seconds

        super().__init__(self._format_message())

    def _format_message(self) -> str:
        """Build the human-readable exception message."""
        worst = self.primary_violation
        names = ", ".join(status.limit_name for status in self.violations)
        return (
            f"Rate limit exceeded for {worst.entity_id}/{worst.resource}: "
            f"[{names}]. "
            f"Retry after {self.retry_after_seconds:.1f}s"
        )

    def as_dict(self) -> dict[str, Any]:
        """
        Serialize for JSON API responses.

        Returns a dictionary suitable for returning in a 429 response body.
        """
        payload: dict[str, Any] = {
            "error": "rate_limit_exceeded",
            "message": str(self),
            "retry_after_seconds": self.retry_after_seconds,
            "retry_after_ms": int(self.retry_after_seconds * 1000),
        }
        payload["limits"] = [self._status_entry(status) for status in self.statuses]
        return payload

    @staticmethod
    def _status_entry(status: "LimitStatus") -> dict[str, Any]:
        """Flatten one LimitStatus into a JSON-safe dict."""
        return {
            "entity_id": status.entity_id,
            "resource": status.resource,
            "limit_name": status.limit_name,
            "capacity": status.limit.capacity,
            "burst": status.limit.burst,
            "available": status.available,
            "requested": status.requested,
            "exceeded": status.exceeded,
            "retry_after_seconds": status.retry_after_seconds,
        }

    @property
    def retry_after_header(self) -> str:
        """Value for HTTP Retry-After header (integer seconds)."""
        # Truncate then add one so clients never retry too early.
        whole_seconds = int(self.retry_after_seconds)
        return str(whole_seconds + 1)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class RateLimiterUnavailable(RateLimitError):  # noqa: N818
    """
    Raised when DynamoDB is unavailable and failure_mode=FAIL_CLOSED.

    This indicates a transient infrastructure issue, not a rate limit.
    """

    def __init__(self, message: str, cause: Exception | None = None) -> None:
        super().__init__(message)
        # Keep the underlying error around for logging / debugging.
        self.cause = cause
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class EntityNotFoundError(RateLimitError):
    """Raised when an entity is not found."""

    def __init__(self, entity_id: str) -> None:
        super().__init__(f"Entity not found: {entity_id}")
        # Kept for programmatic access by handlers.
        self.entity_id = entity_id
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class EntityExistsError(RateLimitError):
    """Raised when trying to create an entity that already exists."""

    def __init__(self, entity_id: str) -> None:
        super().__init__(f"Entity already exists: {entity_id}")
        # Kept for programmatic access by handlers.
        self.entity_id = entity_id
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
class StackCreationError(Exception):
|
|
114
|
+
"""Raised when CloudFormation stack creation fails."""
|
|
115
|
+
|
|
116
|
+
def __init__(
|
|
117
|
+
self, stack_name: str, reason: str, events: list[dict[str, Any]] | None = None
|
|
118
|
+
) -> None:
|
|
119
|
+
self.stack_name = stack_name
|
|
120
|
+
self.reason = reason
|
|
121
|
+
self.events = events or []
|
|
122
|
+
super().__init__(f"Stack {stack_name} creation failed: {reason}")
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class StackAlreadyExistsError(StackCreationError):
    """Raised when stack already exists (informational)."""
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
# ---------------------------------------------------------------------------
|
|
132
|
+
# Version-related exceptions
|
|
133
|
+
# ---------------------------------------------------------------------------
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class VersionError(RateLimitError):
    """Base class for version-related errors."""
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
class VersionMismatchError(VersionError):
    """
    Raised when client and infrastructure versions are incompatible.

    This error indicates that the client library version doesn't match
    the deployed infrastructure and auto-update is disabled or failed.
    """

    def __init__(
        self,
        client_version: str,
        schema_version: str,
        lambda_version: str | None,
        message: str,
        can_auto_update: bool = False,
    ) -> None:
        self.client_version = client_version
        self.schema_version = schema_version
        self.lambda_version = lambda_version
        self.can_auto_update = can_auto_update
        super().__init__(self._format_message(message))

    def _format_message(self, message: str) -> str:
        """Prefix *message* with the three version identifiers."""
        # A missing Lambda version is reported as the literal "unknown".
        lambda_label = self.lambda_version or "unknown"
        prefix = (
            f"Version mismatch: client={self.client_version}, "
            f"schema={self.schema_version}, "
            f"lambda={lambda_label}."
        )
        return f"{prefix} {message}"
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
class IncompatibleSchemaError(VersionError):
    """
    Raised when schema version requires manual migration.

    This indicates a major version difference that cannot be
    automatically reconciled.
    """

    def __init__(
        self,
        client_version: str,
        schema_version: str,
        message: str,
        migration_guide_url: str | None = None,
    ) -> None:
        self.client_version = client_version
        self.schema_version = schema_version
        self.migration_guide_url = migration_guide_url

        # Assemble the message; the migration link is optional.
        parts = [
            f"Incompatible schema: client {client_version} is not compatible "
            f"with schema {schema_version}. {message}"
        ]
        if migration_guide_url:
            parts.append(f" See: {migration_guide_url}")
        super().__init__("".join(parts))
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class InfrastructureNotFoundError(VersionError):
    """
    Raised when expected infrastructure doesn't exist.

    This typically means the CloudFormation stack or DynamoDB table
    hasn't been deployed yet.
    """

    def __init__(self, table_name: str, stack_name: str | None = None) -> None:
        self.table_name = table_name
        self.stack_name = stack_name
        # Mention the stack only when the caller knows which one it was.
        stack_detail = f" (stack: {stack_name})" if stack_name else ""
        super().__init__(
            f"Infrastructure not found for table '{table_name}'"
            f"{stack_detail}"
            ". Run 'zae-limiter deploy' or use create_stack=True."
        )
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
"""Infrastructure as code for zae-limiter."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
# Template ships inside the installed package, next to this module.
CFN_TEMPLATE_PATH = Path(__file__).parent / "cfn_template.yaml"


def get_cfn_template() -> str:
    """
    Get the CloudFormation template as a string.

    Returns:
        Raw YAML text of the bundled ``cfn_template.yaml``.

    Raises:
        FileNotFoundError: If the template is missing from the installed
            package (e.g. a broken or partial install).
    """
    # Read as UTF-8 explicitly: Path.read_text() otherwise uses the locale
    # encoding, which can mangle the template on non-UTF-8 systems.
    return CFN_TEMPLATE_PATH.read_text(encoding="utf-8")
|
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
AWSTemplateFormatVersion: '2010-09-09'
Description: >
  zae-limiter infrastructure - DynamoDB table with streams and
  Lambda aggregator for usage snapshots.

Parameters:
  TableName:
    Type: String
    Default: rate_limits
    Description: Name of the DynamoDB table

  SnapshotWindows:
    Type: String
    Default: hourly,daily
    Description: Comma-separated list of snapshot windows (hourly, daily, monthly)

  SnapshotRetentionDays:
    Type: Number
    Default: 90
    Description: Number of days to retain usage snapshots
    MinValue: 1
    MaxValue: 3650

  LambdaMemorySize:
    Type: Number
    Default: 256
    Description: Memory size for the aggregator Lambda
    MinValue: 128
    MaxValue: 3008

  LambdaTimeout:
    Type: Number
    Default: 60
    Description: Timeout for the aggregator Lambda in seconds
    MinValue: 1
    MaxValue: 900

  EnableAggregator:
    Type: String
    Default: 'true'
    AllowedValues:
      - 'true'
      - 'false'
    Description: Whether to deploy the aggregator Lambda

  SchemaVersion:
    Type: String
    Default: '1.0.0'
    Description: Schema version for the rate limiter infrastructure

Conditions:
  # Gates every aggregator-related resource and output below.
  DeployAggregator: !Equals [!Ref EnableAggregator, 'true']

Resources:
  # -------------------------------------------------------------------------
  # DynamoDB Table
  # -------------------------------------------------------------------------
  RateLimitsTable:
    Type: AWS::DynamoDB::Table
    Properties:
      TableName: !Ref TableName
      BillingMode: PAY_PER_REQUEST

      # Single-table design: generic PK/SK plus two GSIs for alternate access.
      AttributeDefinitions:
        - AttributeName: PK
          AttributeType: S
        - AttributeName: SK
          AttributeType: S
        - AttributeName: GSI1PK
          AttributeType: S
        - AttributeName: GSI1SK
          AttributeType: S
        - AttributeName: GSI2PK
          AttributeType: S
        - AttributeName: GSI2SK
          AttributeType: S

      KeySchema:
        - AttributeName: PK
          KeyType: HASH
        - AttributeName: SK
          KeyType: RANGE

      GlobalSecondaryIndexes:
        - IndexName: GSI1
          KeySchema:
            - AttributeName: GSI1PK
              KeyType: HASH
            - AttributeName: GSI1SK
              KeyType: RANGE
          Projection:
            ProjectionType: ALL

        - IndexName: GSI2
          KeySchema:
            - AttributeName: GSI2PK
              KeyType: HASH
            - AttributeName: GSI2SK
              KeyType: RANGE
          Projection:
            ProjectionType: ALL

      # Items with a numeric 'ttl' attribute expire automatically.
      TimeToLiveSpecification:
        AttributeName: ttl
        Enabled: true

      # NOTE(review): the AWS::DynamoDB::Table docs list only StreamViewType
      # under StreamSpecification; confirm 'StreamEnabled' is accepted here.
      StreamSpecification:
        StreamEnabled: true
        StreamViewType: NEW_AND_OLD_IMAGES

      Tags:
        - Key: Application
          Value: zae-limiter

  # -------------------------------------------------------------------------
  # Lambda Aggregator
  # -------------------------------------------------------------------------
  AggregatorLogGroup:
    Type: AWS::Logs::LogGroup
    Condition: DeployAggregator
    Properties:
      LogGroupName: !Sub /aws/lambda/${TableName}-aggregator
      RetentionInDays: 30

  AggregatorRole:
    Type: AWS::IAM::Role
    Condition: DeployAggregator
    Properties:
      RoleName: !Sub ${TableName}-aggregator-role
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: sts:AssumeRole

      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole

      Policies:
        - PolicyName: DynamoDBAccess
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              # Read/write access to table
              - Effect: Allow
                Action:
                  - dynamodb:GetItem
                  - dynamodb:PutItem
                  - dynamodb:UpdateItem
                  - dynamodb:Query
                Resource:
                  - !GetAtt RateLimitsTable.Arn
                  - !Sub ${RateLimitsTable.Arn}/index/*

              # Stream access
              - Effect: Allow
                Action:
                  - dynamodb:GetRecords
                  - dynamodb:GetShardIterator
                  - dynamodb:DescribeStream
                  - dynamodb:ListStreams
                Resource:
                  - !GetAtt RateLimitsTable.StreamArn

  AggregatorFunction:
    Type: AWS::Lambda::Function
    Condition: DeployAggregator
    # Explicit DependsOn so Lambda logs into our managed group (with retention)
    # instead of auto-creating a never-expiring one.
    DependsOn: AggregatorLogGroup
    Properties:
      FunctionName: !Sub ${TableName}-aggregator
      Description: Aggregates rate limiter usage into hourly/daily snapshots
      Runtime: python3.12
      Handler: zae_limiter.aggregator.handler.handler
      MemorySize: !Ref LambdaMemorySize
      Timeout: !Ref LambdaTimeout
      Role: !GetAtt AggregatorRole.Arn

      Environment:
        Variables:
          TABLE_NAME: !Ref TableName
          SNAPSHOT_WINDOWS: !Ref SnapshotWindows
          SNAPSHOT_TTL_DAYS: !Ref SnapshotRetentionDays
          ZAE_LIMITER_SCHEMA_VERSION: !Ref SchemaVersion

      # Note: Code must be deployed separately via SAM, CDK, or manual upload
      # This is a placeholder that will need to be replaced
      Code:
        ZipFile: |
          def handler(event, context):
              # Placeholder - replace with actual deployment
              return {"statusCode": 200, "body": "Placeholder"}

      Tags:
        - Key: Application
          Value: zae-limiter

  AggregatorEventSourceMapping:
    Type: AWS::Lambda::EventSourceMapping
    Condition: DeployAggregator
    Properties:
      EventSourceArn: !GetAtt RateLimitsTable.StreamArn
      FunctionName: !Ref AggregatorFunction
      StartingPosition: LATEST
      BatchSize: 100
      MaximumBatchingWindowInSeconds: 5

      # Filter to only process MODIFY events on BUCKET records
      FilterCriteria:
        Filters:
          - Pattern: >-
              {
                "eventName": ["MODIFY"],
                "dynamodb": {
                  "NewImage": {
                    "SK": {
                      "S": [{"prefix": "#BUCKET#"}]
                    }
                  }
                }
              }

Outputs:
  TableName:
    Description: DynamoDB table name
    Value: !Ref RateLimitsTable
    Export:
      Name: !Sub ${AWS::StackName}-TableName

  TableArn:
    Description: DynamoDB table ARN
    Value: !GetAtt RateLimitsTable.Arn
    Export:
      Name: !Sub ${AWS::StackName}-TableArn

  StreamArn:
    Description: DynamoDB stream ARN
    Value: !GetAtt RateLimitsTable.StreamArn
    Export:
      Name: !Sub ${AWS::StackName}-StreamArn

  AggregatorFunctionArn:
    Condition: DeployAggregator
    Description: Aggregator Lambda function ARN
    Value: !GetAtt AggregatorFunction.Arn
    Export:
      Name: !Sub ${AWS::StackName}-AggregatorArn

  AggregatorFunctionName:
    Condition: DeployAggregator
    Description: Aggregator Lambda function name
    Value: !Ref AggregatorFunction
    Export:
      Name: !Sub ${AWS::StackName}-AggregatorName
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""Build Lambda deployment packages for zae-limiter aggregator."""
|
|
2
|
+
|
|
3
|
+
import io
|
|
4
|
+
import zipfile
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def build_lambda_package() -> bytes:
    """
    Build Lambda deployment package from installed zae_limiter package.

    Creates a zip file containing the entire zae_limiter package, which
    includes all necessary code for the aggregator Lambda function.

    The package only depends on boto3, which is provided by the Lambda
    runtime, so no external dependencies need to be bundled.

    Returns:
        Zip file contents as bytes
    """
    import zae_limiter

    # Resolve the on-disk location of the installed package; archiving
    # relative to its parent keeps the 'zae_limiter/' prefix inside the zip.
    package_root = Path(zae_limiter.__file__).parent
    archive_root = package_root.parent

    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as archive:
        # Every Python module in the package.
        for source_file in package_root.rglob("*.py"):
            archive.write(source_file, source_file.relative_to(archive_root))

        # Ship the CloudFormation template too; the Lambda never reads it,
        # but having it in the artifact helps with debugging.
        template = package_root / "infra" / "cfn_template.yaml"
        if template.exists():
            archive.write(template, template.relative_to(archive_root))

    return buffer.getvalue()
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def write_lambda_package(output_path: str | Path) -> int:
    """
    Build and write Lambda package to a file.

    Args:
        output_path: Path where to write the zip file

    Returns:
        Size of the written file in bytes
    """
    payload = build_lambda_package()
    destination = Path(output_path)

    # Create intermediate directories so callers can pass any target path.
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_bytes(payload)
    return len(payload)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_package_info() -> dict[str, str | int]:
    """
    Get information about the Lambda package without building it.

    Returns:
        Dict with package metadata
    """
    import zae_limiter

    package_root = Path(zae_limiter.__file__).parent

    # Tally modules and their on-disk (uncompressed) footprint.
    modules = list(package_root.rglob("*.py"))

    return {
        "package_path": str(package_root),
        "python_files": len(modules),
        "uncompressed_size": sum(module.stat().st_size for module in modules),
        "handler": "zae_limiter.aggregator.handler.handler",
    }
|