localstack-core 4.3.1.dev6__py3-none-any.whl → 4.3.1.dev28__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- localstack/aws/api/ec2/__init__.py +597 -0
- localstack/aws/api/events/__init__.py +18 -12
- localstack/aws/api/route53/__init__.py +2 -0
- localstack/aws/api/s3control/__init__.py +89 -0
- localstack/aws/api/transcribe/__init__.py +1 -0
- localstack/services/cloudformation/engine/entities.py +18 -1
- localstack/services/cloudformation/engine/template_deployer.py +0 -9
- localstack/services/cloudformation/engine/v2/change_set_model.py +164 -35
- localstack/services/cloudformation/engine/v2/change_set_model_describer.py +143 -69
- localstack/services/cloudformation/engine/v2/change_set_model_executor.py +170 -0
- localstack/services/cloudformation/engine/v2/change_set_model_visitor.py +8 -0
- localstack/services/cloudformation/v2/provider.py +72 -6
- localstack/services/ec2/patches.py +31 -3
- localstack/services/events/provider.py +6 -1
- localstack/services/kms/models.py +1 -1
- localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py +2 -0
- localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py +2 -0
- localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py +4 -2
- localstack/services/lambda_/invocation/assignment.py +4 -2
- localstack/services/lambda_/invocation/execution_environment.py +16 -4
- localstack/services/lambda_/invocation/logs.py +28 -4
- localstack/services/lambda_/provider.py +18 -3
- localstack/services/lambda_/runtimes.py +15 -2
- localstack/services/s3/presigned_url.py +15 -11
- localstack/services/secretsmanager/provider.py +13 -4
- localstack/services/sqs/models.py +22 -3
- localstack/services/sqs/utils.py +16 -7
- localstack/services/ssm/resource_providers/aws_ssm_parameter.py +1 -5
- localstack/services/stepfunctions/asl/utils/json_path.py +9 -0
- localstack/testing/snapshots/transformer_utility.py +13 -0
- localstack/utils/aws/client_types.py +8 -0
- localstack/utils/docker_utils.py +2 -2
- localstack/version.py +2 -2
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/METADATA +5 -5
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/RECORD +43 -42
- localstack_core-4.3.1.dev28.dist-info/plux.json +1 -0
- localstack_core-4.3.1.dev6.dist-info/plux.json +0 -1
- {localstack_core-4.3.1.dev6.data → localstack_core-4.3.1.dev28.data}/scripts/localstack +0 -0
- {localstack_core-4.3.1.dev6.data → localstack_core-4.3.1.dev28.data}/scripts/localstack-supervisor +0 -0
- {localstack_core-4.3.1.dev6.data → localstack_core-4.3.1.dev28.data}/scripts/localstack.bat +0 -0
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/WHEEL +0 -0
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/entry_points.txt +0 -0
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/licenses/LICENSE.txt +0 -0
- {localstack_core-4.3.1.dev6.dist-info → localstack_core-4.3.1.dev28.dist-info}/top_level.txt +0 -0
localstack/services/cloudformation/engine/v2/change_set_model_executor.py (new file)
@@ -0,0 +1,170 @@
+import logging
+import uuid
+from typing import Final
+
+from localstack.aws.api.cloudformation import ChangeAction
+from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY
+from localstack.services.cloudformation.engine.v2.change_set_model import (
+    NodeIntrinsicFunction,
+    NodeResource,
+    NodeTemplate,
+    TerminalValue,
+)
+from localstack.services.cloudformation.engine.v2.change_set_model_describer import (
+    ChangeSetModelDescriber,
+    DescribeUnit,
+)
+from localstack.services.cloudformation.resource_provider import (
+    Credentials,
+    OperationStatus,
+    ProgressEvent,
+    ResourceProviderExecutor,
+    ResourceProviderPayload,
+    get_resource_type,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class ChangeSetModelExecutor(ChangeSetModelDescriber):
+    account_id: Final[str]
+    region: Final[str]
+
+    def __init__(
+        self,
+        node_template: NodeTemplate,
+        account_id: str,
+        region: str,
+        stack_name: str,
+        stack_id: str,
+    ):
+        super().__init__(node_template)
+        self.account_id = account_id
+        self.region = region
+        self.stack_name = stack_name
+        self.stack_id = stack_id
+        self.resources = {}
+
+    def execute(self) -> dict:
+        self.visit(self._node_template)
+        return self.resources
+
+    def visit_node_resource(self, node_resource: NodeResource) -> DescribeUnit:
+        resource_provider_executor = ResourceProviderExecutor(
+            stack_name=self.stack_name, stack_id=self.stack_id
+        )
+
+        # TODO: investigate effects on type changes
+        properties_describe_unit = self.visit_node_properties(node_resource.properties)
+        LOG.info("SRW: describe unit: %s", properties_describe_unit)
+
+        action = node_resource.change_type.to_action()
+        if action is None:
+            raise RuntimeError(
+                f"Action should always be present, got change type: {node_resource.change_type}"
+            )
+
+        # TODO
+        resource_type = get_resource_type({"Type": "AWS::SSM::Parameter"})
+        payload = self.create_resource_provider_payload(
+            properties_describe_unit,
+            action,
+            node_resource.name,
+            resource_type,
+        )
+        resource_provider = resource_provider_executor.try_load_resource_provider(resource_type)
+
+        extra_resource_properties = {}
+        if resource_provider is not None:
+            # TODO: stack events
+            event = resource_provider_executor.deploy_loop(
+                resource_provider, extra_resource_properties, payload
+            )
+        else:
+            event = ProgressEvent(OperationStatus.SUCCESS, resource_model={})
+
+        self.resources.setdefault(node_resource.name, {"Properties": {}})
+        match event.status:
+            case OperationStatus.SUCCESS:
+                # merge the resources state with the external state
+                # TODO: this is likely a duplicate of updating from extra_resource_properties
+                self.resources[node_resource.name]["Properties"].update(event.resource_model)
+                self.resources[node_resource.name].update(extra_resource_properties)
+                # XXX for legacy delete_stack compatibility
+                self.resources[node_resource.name]["LogicalResourceId"] = node_resource.name
+                self.resources[node_resource.name]["Type"] = resource_type
+            case any:
+                raise NotImplementedError(f"Event status '{any}' not handled")
+
+        return DescribeUnit(before_context=None, after_context={})
+
+    def visit_node_intrinsic_function_fn_get_att(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> DescribeUnit:
+        arguments_unit = self.visit(node_intrinsic_function.arguments)
+        before_arguments_list = arguments_unit.before_context
+        after_arguments_list = arguments_unit.after_context
+        if before_arguments_list:
+            logical_name_of_resource = before_arguments_list[0]
+            attribute_name = before_arguments_list[1]
+            before_node_resource = self._get_node_resource_for(
+                resource_name=logical_name_of_resource, node_template=self._node_template
+            )
+            node_property: TerminalValue = self._get_node_property_for(
+                property_name=attribute_name, node_resource=before_node_resource
+            )
+            before_context = self.visit(node_property.value).before_context
+        else:
+            before_context = None
+
+        if after_arguments_list:
+            logical_name_of_resource = after_arguments_list[0]
+            attribute_name = after_arguments_list[1]
+            after_node_resource = self._get_node_resource_for(
+                resource_name=logical_name_of_resource, node_template=self._node_template
+            )
+            node_property: TerminalValue = self._get_node_property_for(
+                property_name=attribute_name, node_resource=after_node_resource
+            )
+            after_context = self.visit(node_property.value).after_context
+        else:
+            after_context = None
+
+        return DescribeUnit(before_context=before_context, after_context=after_context)
+
+    def create_resource_provider_payload(
+        self,
+        describe_unit: DescribeUnit,
+        action: ChangeAction,
+        logical_resource_id: str,
+        resource_type: str,
+    ) -> ResourceProviderPayload:
+        # FIXME: use proper credentials
+        creds: Credentials = {
+            "accessKeyId": self.account_id,
+            "secretAccessKey": INTERNAL_AWS_SECRET_ACCESS_KEY,
+            "sessionToken": "",
+        }
+        resource_provider_payload: ResourceProviderPayload = {
+            "awsAccountId": self.account_id,
+            "callbackContext": {},
+            "stackId": self.stack_name,
+            "resourceType": resource_type,
+            "resourceTypeVersion": "000000",
+            # TODO: not actually a UUID
+            "bearerToken": str(uuid.uuid4()),
+            "region": self.region,
+            "action": str(action),
+            "requestData": {
+                "logicalResourceId": logical_resource_id,
+                "resourceProperties": describe_unit.after_context["Properties"],
+                "previousResourceProperties": describe_unit.before_context["Properties"],
+                "callerCredentials": creds,
+                "providerCredentials": creds,
+                "systemTags": {},
+                "previousSystemTags": {},
+                "stackTags": {},
+                "previousStackTags": {},
+            },
+        }
+        return resource_provider_payload
localstack/services/cloudformation/engine/v2/change_set_model_visitor.py
@@ -10,6 +10,8 @@ from localstack.services.cloudformation.engine.v2.change_set_model import (
     NodeMapping,
     NodeMappings,
     NodeObject,
+    NodeOutput,
+    NodeOutputs,
     NodeParameter,
     NodeParameters,
     NodeProperties,
@@ -53,6 +55,12 @@ class ChangeSetModelVisitor(abc.ABC):
     def visit_node_mappings(self, node_mappings: NodeMappings):
         self.visit_children(node_mappings)
 
+    def visit_node_outputs(self, node_outputs: NodeOutputs):
+        self.visit_children(node_outputs)
+
+    def visit_node_output(self, node_output: NodeOutput):
+        self.visit_children(node_output)
+
     def visit_node_parameters(self, node_parameters: NodeParameters):
         self.visit_children(node_parameters)
 
localstack/services/cloudformation/v2/provider.py
@@ -1,3 +1,4 @@
+import logging
 from copy import deepcopy
 
 from localstack.aws.api import RequestContext, handler
@@ -5,12 +6,18 @@ from localstack.aws.api.cloudformation import (
     ChangeSetNameOrId,
     ChangeSetNotFoundException,
     ChangeSetType,
+    ClientRequestToken,
     CreateChangeSetInput,
     CreateChangeSetOutput,
     DescribeChangeSetOutput,
+    DisableRollback,
+    ExecuteChangeSetOutput,
+    ExecutionStatus,
     IncludePropertyValues,
+    InvalidChangeSetStatusException,
     NextToken,
     Parameter,
+    RetainExceptOnCreate,
     StackNameOrId,
     StackStatus,
 )
@@ -27,6 +34,9 @@ from localstack.services.cloudformation.engine.template_utils import resolve_sta
 from localstack.services.cloudformation.engine.v2.change_set_model_describer import (
     ChangeSetModelDescriber,
 )
+from localstack.services.cloudformation.engine.v2.change_set_model_executor import (
+    ChangeSetModelExecutor,
+)
 from localstack.services.cloudformation.engine.validations import ValidationError
 from localstack.services.cloudformation.provider import (
     ARN_CHANGESET_REGEX,
@@ -41,6 +51,8 @@ from localstack.services.cloudformation.stores import (
 )
 from localstack.utils.collections import remove_attributes
 
+LOG = logging.getLogger(__name__)
+
 
 class CloudformationProviderV2(CloudformationProvider):
     @handler("CreateChangeSet", expand=False)
@@ -178,7 +190,12 @@ class CloudformationProviderV2(CloudformationProvider):
 
         # create change set for the stack and apply changes
         change_set = StackChangeSet(
-            context.account_id,
+            context.account_id,
+            context.region,
+            stack,
+            req_params,
+            transformed_template,
+            change_set_type=change_set_type,
         )
         # only set parameters for the changeset, then switch to stack on execute_change_set
         change_set.template_body = template_body
@@ -233,14 +250,61 @@ class CloudformationProviderV2(CloudformationProvider):
 
         return CreateChangeSetOutput(StackId=change_set.stack_id, Id=change_set.change_set_id)
 
+    @handler("ExecuteChangeSet")
+    def execute_change_set(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        disable_rollback: DisableRollback | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> ExecuteChangeSetOutput:
+        change_set = find_change_set(
+            context.account_id,
+            context.region,
+            change_set_name,
+            stack_name=stack_name,
+            active_only=True,
+        )
+        if not change_set:
+            raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
+        if change_set.metadata.get("ExecutionStatus") != ExecutionStatus.AVAILABLE:
+            LOG.debug("Change set %s not in execution status 'AVAILABLE'", change_set_name)
+            raise InvalidChangeSetStatusException(
+                f"ChangeSet [{change_set.metadata['ChangeSetId']}] cannot be executed in its current status of [{change_set.metadata.get('Status')}]"
+            )
+        stack_name = change_set.stack.stack_name
+        LOG.debug(
+            'Executing change set "%s" for stack "%s" with %s resources ...',
+            change_set_name,
+            stack_name,
+            len(change_set.template_resources),
+        )
+        if not change_set.update_graph:
+            raise RuntimeError("Programming error: no update graph found for change set")
+
+        change_set_executor = ChangeSetModelExecutor(
+            change_set.update_graph,
+            account_id=context.account_id,
+            region=context.region,
+            stack_name=change_set.stack.stack_name,
+            stack_id=change_set.stack.stack_id,
+        )
+        new_resources = change_set_executor.execute()
+        change_set.stack.set_stack_status(f"{change_set.change_set_type or 'UPDATE'}_COMPLETE")
+        change_set.stack.resources = new_resources
+        return ExecuteChangeSetOutput()
+
     @handler("DescribeChangeSet")
     def describe_change_set(
         self,
         context: RequestContext,
         change_set_name: ChangeSetNameOrId,
-        stack_name: StackNameOrId = None,
-        next_token: NextToken = None,
-        include_property_values: IncludePropertyValues = None,
+        stack_name: StackNameOrId | None = None,
+        next_token: NextToken | None = None,
+        include_property_values: IncludePropertyValues | None = None,
         **kwargs,
     ) -> DescribeChangeSetOutput:
         # TODO add support for include_property_values
@@ -261,8 +325,10 @@ class CloudformationProviderV2(CloudformationProvider):
         if not change_set:
             raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
 
-        change_set_describer = ChangeSetModelDescriber(
-
+        change_set_describer = ChangeSetModelDescriber(
+            node_template=change_set.update_graph, include_property_values=include_property_values
+        )
+
         resource_changes = change_set_describer.get_changes()
 
         attrs = [
             "ChangeSetType",
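The ExecuteChangeSet handler added above can be exercised with any AWS SDK once a change set exists. A minimal sketch using boto3, assuming LocalStack is listening on http://localhost:4566 with the v2 CloudFormation provider active (the endpoint, test credentials, and stack/change-set names are placeholders, not values from this diff):

    import boto3

    cfn = boto3.client(
        "cloudformation",
        endpoint_url="http://localhost:4566",  # assumed LocalStack endpoint
        region_name="us-east-1",
        aws_access_key_id="test",
        aws_secret_access_key="test",
    )

    # Create a change set for a new stack; the template uses the SSM parameter
    # resource type that the new executor currently handles.
    cfn.create_change_set(
        StackName="demo-stack",
        ChangeSetName="demo-change-set",
        ChangeSetType="CREATE",
        TemplateBody='{"Resources": {"Param": {"Type": "AWS::SSM::Parameter",'
        ' "Properties": {"Type": "String", "Value": "example"}}}}',
    )
    cfn.get_waiter("change_set_create_complete").wait(
        StackName="demo-stack", ChangeSetName="demo-change-set"
    )

    # Runs through the new handler and deploys the resources in the change set.
    cfn.execute_change_set(StackName="demo-stack", ChangeSetName="demo-change-set")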
localstack/services/ec2/patches.py
@@ -78,15 +78,22 @@ def apply_patches():
         tags: Optional[dict[str, str]] = None,
         **kwargs,
     ):
+        # Patch this method so that we can create a subnet with a specific "custom"
+        # ID. The custom ID that we will use is contained within a special tag.
         vpc_id: str = args[0] if len(args) >= 1 else kwargs["vpc_id"]
         cidr_block: str = args[1] if len(args) >= 1 else kwargs["cidr_block"]
         resource_identifier = SubnetIdentifier(
             self.account_id, self.region_name, vpc_id, cidr_block
         )
-
+
+        # tags has the format: {"subnet": {"Key": ..., "Value": ...}}, but we need
+        # to pass this to the generate method as {"Key": ..., "Value": ...}. Take
+        # care not to alter the original tags dict otherwise moto will not be able
+        # to understand it.
+        subnet_tags = None
         if tags is not None:
-
-            custom_id = resource_identifier.generate(tags=
+            subnet_tags = tags.get("subnet", tags)
+            custom_id = resource_identifier.generate(tags=subnet_tags)
 
         if custom_id:
             # Check if custom id is unique within a given VPC
@@ -102,9 +109,16 @@
         if custom_id:
             # Remove the subnet from the default dict and add it back with the custom id
             self.subnets[availability_zone].pop(result.id)
+            old_id = result.id
             result.id = custom_id
             self.subnets[availability_zone][custom_id] = result
 
+            # Tags are not stored in the Subnet object, but instead stored in a separate
+            # dict in the EC2 backend, keyed by subnet id. That therefore requires
+            # updating as well.
+            if old_id in self.tags:
+                self.tags[custom_id] = self.tags.pop(old_id)
+
         # Return the subnet with the patched custom id
         return result
 
@@ -132,9 +146,16 @@
         if custom_id:
             # Remove the security group from the default dict and add it back with the custom id
             self.groups[result.vpc_id].pop(result.group_id)
+            old_id = result.group_id
             result.group_id = result.id = custom_id
             self.groups[result.vpc_id][custom_id] = result
 
+            # Tags are not stored in the Security Group object, but instead are stored in a
+            # separate dict in the EC2 backend, keyed by id. That therefore requires
+            # updating as well.
+            if old_id in self.tags:
+                self.tags[custom_id] = self.tags.pop(old_id)
+
         return result
 
     @patch(ec2_models.vpcs.VPCBackend.create_vpc)
@@ -175,9 +196,16 @@
 
             # Remove the VPC from the default dict and add it back with the custom id
             self.vpcs.pop(vpc_id)
+            old_id = result.id
             result.id = custom_id
             self.vpcs[custom_id] = result
 
+            # Tags are not stored in the VPC object, but instead stored in a separate
+            # dict in the EC2 backend, keyed by VPC id. That therefore requires
+            # updating as well.
+            if old_id in self.tags:
+                self.tags[custom_id] = self.tags.pop(old_id)
+
             # Create default network ACL, route table, and security group for custom ID VPC
             self.create_route_table(
                 vpc_id=custom_id,
localstack/services/events/provider.py
@@ -44,6 +44,7 @@ from localstack.aws.api.events import (
     DescribeReplayResponse,
     DescribeRuleResponse,
     EndpointId,
+    EventBusArn,
     EventBusDescription,
     EventBusList,
     EventBusName,
@@ -921,12 +922,14 @@ class EventsProvider(EventsApi, ServiceLifecycleHook):
         self,
         context: RequestContext,
         archive_name: ArchiveName,
-        event_source_arn:
+        event_source_arn: EventBusArn,
         description: ArchiveDescription = None,
         event_pattern: EventPattern = None,
         retention_days: RetentionDays = None,
+        kms_key_identifier: KmsKeyIdentifier = None,
         **kwargs,
     ) -> CreateArchiveResponse:
+        # TODO add support for kms_key_identifier
         region = context.region
         account_id = context.account_id
         store = self.get_store(region, account_id)
@@ -1022,8 +1025,10 @@ class EventsProvider(EventsApi, ServiceLifecycleHook):
         description: ArchiveDescription = None,
         event_pattern: EventPattern = None,
         retention_days: RetentionDays = None,
+        kms_key_identifier: KmsKeyIdentifier = None,
         **kwargs,
     ) -> UpdateArchiveResponse:
+        # TODO add support for kms_key_identifier
         region = context.region
         account_id = context.account_id
         store = self.get_store(region, account_id)
localstack/services/kms/models.py
@@ -476,7 +476,7 @@ class KmsKey:
         if "PKCS" in signing_algorithm:
             return padding.PKCS1v15()
         elif "PSS" in signing_algorithm:
-            return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.
+            return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.DIGEST_LENGTH)
         else:
             LOG.warning("Unsupported padding in SigningAlgorithm '%s'", signing_algorithm)
 
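The KMS change above pins the PSS salt length to DIGEST_LENGTH, the salt-length convention AWS KMS uses for its RSASSA_PSS_* signing algorithms. A standalone sketch of the same padding configuration with the cryptography library (key size and digest are illustrative, not taken from this diff):

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding, rsa

    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    message = b"example payload"

    # Salt length equal to the digest length, as in the patched signing path.
    pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.DIGEST_LENGTH)
    signature = private_key.sign(message, pss, hashes.SHA256())

    # verify() raises InvalidSignature on mismatch; returning silently means both
    # sides agree on the salt-length convention.
    private_key.public_key().verify(signature, message, pss, hashes.SHA256())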
localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py
@@ -61,6 +61,8 @@ class DynamoDBPoller(StreamPoller):
                 **kwargs,
             )
             shards[shard_id] = get_shard_iterator_response["ShardIterator"]
+
+        LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
         return shards
 
     def stream_arn_param(self) -> dict:
localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py
@@ -84,6 +84,8 @@ class KinesisPoller(StreamPoller):
                 **kwargs,
             )
             shards[shard_id] = get_shard_iterator_response["ShardIterator"]
+
+        LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
         return shards
 
     def stream_arn_param(self) -> dict:
localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py
@@ -154,7 +154,6 @@ class StreamPoller(Poller):
             LOG.debug("No shards found for %s.", self.source_arn)
             raise EmptyPollResultsException(service=self.event_source(), source_arn=self.source_arn)
         else:
-            LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
             # Remove all shard batchers without corresponding shards
             for shard_id in self.shard_batcher.keys() - self.shards.keys():
                 self.shard_batcher.pop(shard_id, None)
@@ -185,7 +184,10 @@
     def poll_events_from_shard(self, shard_id: str, shard_iterator: str):
         get_records_response = self.get_records(shard_iterator)
         records: list[dict] = get_records_response.get("Records", [])
-        next_shard_iterator
+        if not (next_shard_iterator := get_records_response.get("NextShardIterator")):
+            # If the next shard iterator is None, we can assume the shard is closed or
+            # has expired on the DynamoDB Local server, hence we should re-initialize.
+            self.shards = self.initialize_shards()
 
         # We cannot reliably back-off when no records found since an iterator
         # may have to move multiple times until records are returned.
localstack/services/lambda_/invocation/assignment.py
@@ -86,7 +86,9 @@ class AssignmentService(OtherServiceEndpoint):
         except InvalidStatusException as invalid_e:
             LOG.error("InvalidStatusException: %s", invalid_e)
         except Exception as e:
-            LOG.error(
+            LOG.error(
+                "Failed invocation <%s>: %s", type(e), e, exc_info=LOG.isEnabledFor(logging.DEBUG)
+            )
             self.stop_environment(execution_environment)
             raise e
 
@@ -107,7 +109,7 @@
         except EnvironmentStartupTimeoutException:
             raise
         except Exception as e:
-            message = f"Could not start new environment: {e}"
+            message = f"Could not start new environment: {type(e).__name__}:{e}"
             raise AssignmentException(message) from e
         return execution_environment
 
localstack/services/lambda_/invocation/execution_environment.py
@@ -37,10 +37,11 @@ class RuntimeStatus(Enum):
     INACTIVE = auto()
     STARTING = auto()
     READY = auto()
-
+    INVOKING = auto()
     STARTUP_FAILED = auto()
     STARTUP_TIMED_OUT = auto()
     STOPPED = auto()
+    TIMING_OUT = auto()
 
 
 class InvalidStatusException(Exception):
@@ -246,7 +247,7 @@ class ExecutionEnvironment:
     def release(self) -> None:
         self.last_returned = datetime.now()
         with self.status_lock:
-            if self.status != RuntimeStatus.
+            if self.status != RuntimeStatus.INVOKING:
                 raise InvalidStatusException(
                     f"Execution environment {self.id} can only be set to status ready while running."
                     f" Current status: {self.status}"
@@ -264,7 +265,7 @@ class ExecutionEnvironment:
                 f"Execution environment {self.id} can only be reserved if ready. "
                 f" Current status: {self.status}"
             )
-        self.status = RuntimeStatus.
+        self.status = RuntimeStatus.INVOKING
 
         self.keepalive_timer.cancel()
 
@@ -274,6 +275,17 @@
             self.id,
             self.function_version.qualified_arn,
         )
+        # The stop() method allows to interrupt invocations (on purpose), which might cancel running invocations
+        # which we should not do when the keepalive timer passed.
+        # The new TIMING_OUT state prevents this race condition
+        with self.status_lock:
+            if self.status != RuntimeStatus.READY:
+                LOG.debug(
+                    "Keepalive timer passed, but current runtime status is %s. Aborting keepalive stop.",
+                    self.status,
+                )
+                return
+            self.status = RuntimeStatus.TIMING_OUT
         self.stop()
         # Notify assignment service via callback to remove from environments list
         self.on_timeout(self.version_manager_id, self.id)
@@ -340,7 +352,7 @@
         return f"{prefix}{prefixed_logs}"
 
     def invoke(self, invocation: Invocation) -> InvocationResult:
-        assert self.status == RuntimeStatus.
+        assert self.status == RuntimeStatus.INVOKING
         # Async/event invokes might miss an aws_trace_header, then we need to create a new root trace id.
         aws_trace_header = (
             invocation.trace_context.get("aws_trace_header") or TraceHeader().ensure_root_exists()
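The INVOKING and TIMING_OUT statuses introduced above close a race between the keepalive timeout and a concurrent reservation: the timeout path now re-checks the status under the lock and backs off if an invocation has already claimed the environment. A minimal, self-contained sketch of that check-then-transition pattern (names are illustrative, not the actual LocalStack classes):

    import threading
    from enum import Enum, auto


    class Status(Enum):
        READY = auto()
        INVOKING = auto()
        TIMING_OUT = auto()


    class Environment:
        def __init__(self) -> None:
            self.status = Status.READY
            self._lock = threading.Lock()

        def reserve(self) -> bool:
            # An invocation claims the environment; the keepalive path must not stop it anymore.
            with self._lock:
                if self.status is not Status.READY:
                    return False
                self.status = Status.INVOKING
                return True

        def on_keepalive_timeout(self) -> None:
            # Re-check under the lock: only a READY environment may move to TIMING_OUT.
            with self._lock:
                if self.status is not Status.READY:
                    return  # an invocation won the race; abort the stop
                self.status = Status.TIMING_OUT
            self.stop()

        def stop(self) -> None:
            pass  # environment teardown would happen here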
localstack/services/lambda_/invocation/logs.py
@@ -1,13 +1,13 @@
 import dataclasses
 import logging
 import threading
+import time
 from queue import Queue
 from typing import Optional, Union
 
 from localstack.aws.connect import connect_to
 from localstack.utils.aws.client_types import ServicePrincipal
 from localstack.utils.bootstrap import is_api_enabled
-from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
 from localstack.utils.threads import FuncThread
 
 LOG = logging.getLogger(__name__)
@@ -50,10 +50,34 @@ class LogHandler:
             log_item = self.log_queue.get()
             if log_item is QUEUE_SHUTDOWN:
                 return
+            # we need to split by newline - but keep the newlines in the strings
+            # strips empty lines, as they are not accepted by cloudwatch
+            logs = [line + "\n" for line in log_item.logs.split("\n") if line]
+            # until we have a better way to have timestamps, log events have the same time for a single invocation
+            log_events = [
+                {"timestamp": int(time.time() * 1000), "message": log_line} for log_line in logs
+            ]
             try:
-
-                logs_client
-
+                try:
+                    logs_client.put_log_events(
+                        logGroupName=log_item.log_group,
+                        logStreamName=log_item.log_stream,
+                        logEvents=log_events,
+                    )
+                except logs_client.exceptions.ResourceNotFoundException:
+                    # create new log group
+                    try:
+                        logs_client.create_log_group(logGroupName=log_item.log_group)
+                    except logs_client.exceptions.ResourceAlreadyExistsException:
+                        pass
+                    logs_client.create_log_stream(
+                        logGroupName=log_item.log_group, logStreamName=log_item.log_stream
+                    )
+                    logs_client.put_log_events(
+                        logGroupName=log_item.log_group,
+                        logStreamName=log_item.log_stream,
+                        logEvents=log_events,
+                    )
             except Exception as e:
                 LOG.warning(
                     "Error saving logs to group %s in region %s: %s",
localstack/services/lambda_/provider.py
@@ -223,6 +223,7 @@ from localstack.services.lambda_.runtimes import (
     DEPRECATED_RUNTIMES,
     DEPRECATED_RUNTIMES_UPGRADES,
     RUNTIMES_AGGREGATED,
+    SNAP_START_SUPPORTED_RUNTIMES,
     VALID_RUNTIMES,
 )
 from localstack.services.lambda_.urlrouter import FunctionUrlRouter
@@ -718,6 +719,11 @@ class LambdaProvider(LambdaApi, ServiceLifecycleHook):
                 f"1 validation error detected: Value '{apply_on}' at 'snapStart.applyOn' failed to satisfy constraint: Member must satisfy enum value set: [PublishedVersions, None]"
             )
 
+        if runtime not in SNAP_START_SUPPORTED_RUNTIMES:
+            raise InvalidParameterValueException(
+                f"{runtime} is not supported for SnapStart enabled functions.", Type="User"
+            )
+
     def _validate_layers(self, new_layers: list[str], region: str, account_id: str):
         if len(new_layers) > LAMBDA_LAYERS_LIMIT_PER_FUNCTION:
             raise InvalidParameterValueException(
@@ -1597,10 +1603,19 @@
         except ServiceException:
             raise
         except EnvironmentStartupTimeoutException as e:
-            raise LambdaServiceException(
+            raise LambdaServiceException(
+                f"[{context.request_id}] Timeout while starting up lambda environment for function {function_name}:{qualifier}"
+            ) from e
         except Exception as e:
-            LOG.error(
-
+            LOG.error(
+                "[%s] Error while invoking lambda %s",
+                context.request_id,
+                function_name,
+                exc_info=LOG.isEnabledFor(logging.DEBUG),
+            )
+            raise LambdaServiceException(
+                f"[{context.request_id}] Internal error while executing lambda {function_name}:{qualifier}. Caused by {type(e).__name__}: {e}"
+            ) from e
 
         if invocation_type == InvocationType.Event:
             # This happens when invocation type is event