localstack-core 4.3.1.dev5__py3-none-any.whl → 4.3.1.dev27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. localstack/services/cloudformation/engine/entities.py +18 -1
  2. localstack/services/cloudformation/engine/template_deployer.py +0 -9
  3. localstack/services/cloudformation/engine/v2/change_set_model.py +281 -36
  4. localstack/services/cloudformation/engine/v2/change_set_model_describer.py +187 -70
  5. localstack/services/cloudformation/engine/v2/change_set_model_executor.py +170 -0
  6. localstack/services/cloudformation/engine/v2/change_set_model_visitor.py +21 -0
  7. localstack/services/cloudformation/v2/provider.py +72 -6
  8. localstack/services/ec2/patches.py +31 -3
  9. localstack/services/kms/models.py +1 -1
  10. localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py +2 -0
  11. localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py +2 -0
  12. localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py +4 -2
  13. localstack/services/lambda_/invocation/assignment.py +4 -2
  14. localstack/services/lambda_/invocation/execution_environment.py +16 -4
  15. localstack/services/lambda_/invocation/logs.py +28 -4
  16. localstack/services/lambda_/provider.py +18 -3
  17. localstack/services/lambda_/runtimes.py +15 -2
  18. localstack/services/s3/presigned_url.py +15 -11
  19. localstack/services/secretsmanager/provider.py +13 -4
  20. localstack/services/sqs/models.py +22 -3
  21. localstack/services/sqs/utils.py +16 -7
  22. localstack/services/ssm/resource_providers/aws_ssm_parameter.py +1 -5
  23. localstack/services/stepfunctions/asl/utils/json_path.py +9 -0
  24. localstack/testing/snapshots/transformer_utility.py +13 -0
  25. localstack/utils/aws/client_types.py +8 -0
  26. localstack/utils/docker_utils.py +2 -2
  27. localstack/version.py +2 -2
  28. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/METADATA +3 -3
  29. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/RECORD +37 -36
  30. localstack_core-4.3.1.dev27.dist-info/plux.json +1 -0
  31. localstack_core-4.3.1.dev5.dist-info/plux.json +0 -1
  32. {localstack_core-4.3.1.dev5.data → localstack_core-4.3.1.dev27.data}/scripts/localstack +0 -0
  33. {localstack_core-4.3.1.dev5.data → localstack_core-4.3.1.dev27.data}/scripts/localstack-supervisor +0 -0
  34. {localstack_core-4.3.1.dev5.data → localstack_core-4.3.1.dev27.data}/scripts/localstack.bat +0 -0
  35. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/WHEEL +0 -0
  36. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/entry_points.txt +0 -0
  37. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/licenses/LICENSE.txt +0 -0
  38. {localstack_core-4.3.1.dev5.dist-info → localstack_core-4.3.1.dev27.dist-info}/top_level.txt +0 -0
@@ -1,3 +1,4 @@
1
+ import logging
1
2
  from copy import deepcopy
2
3
 
3
4
  from localstack.aws.api import RequestContext, handler
@@ -5,12 +6,18 @@ from localstack.aws.api.cloudformation import (
5
6
  ChangeSetNameOrId,
6
7
  ChangeSetNotFoundException,
7
8
  ChangeSetType,
9
+ ClientRequestToken,
8
10
  CreateChangeSetInput,
9
11
  CreateChangeSetOutput,
10
12
  DescribeChangeSetOutput,
13
+ DisableRollback,
14
+ ExecuteChangeSetOutput,
15
+ ExecutionStatus,
11
16
  IncludePropertyValues,
17
+ InvalidChangeSetStatusException,
12
18
  NextToken,
13
19
  Parameter,
20
+ RetainExceptOnCreate,
14
21
  StackNameOrId,
15
22
  StackStatus,
16
23
  )
@@ -27,6 +34,9 @@ from localstack.services.cloudformation.engine.template_utils import resolve_sta
27
34
  from localstack.services.cloudformation.engine.v2.change_set_model_describer import (
28
35
  ChangeSetModelDescriber,
29
36
  )
37
+ from localstack.services.cloudformation.engine.v2.change_set_model_executor import (
38
+ ChangeSetModelExecutor,
39
+ )
30
40
  from localstack.services.cloudformation.engine.validations import ValidationError
31
41
  from localstack.services.cloudformation.provider import (
32
42
  ARN_CHANGESET_REGEX,
@@ -41,6 +51,8 @@ from localstack.services.cloudformation.stores import (
41
51
  )
42
52
  from localstack.utils.collections import remove_attributes
43
53
 
54
+ LOG = logging.getLogger(__name__)
55
+
44
56
 
45
57
  class CloudformationProviderV2(CloudformationProvider):
46
58
  @handler("CreateChangeSet", expand=False)
@@ -178,7 +190,12 @@ class CloudformationProviderV2(CloudformationProvider):
178
190
 
179
191
  # create change set for the stack and apply changes
180
192
  change_set = StackChangeSet(
181
- context.account_id, context.region, stack, req_params, transformed_template
193
+ context.account_id,
194
+ context.region,
195
+ stack,
196
+ req_params,
197
+ transformed_template,
198
+ change_set_type=change_set_type,
182
199
  )
183
200
  # only set parameters for the changeset, then switch to stack on execute_change_set
184
201
  change_set.template_body = template_body
@@ -233,14 +250,61 @@ class CloudformationProviderV2(CloudformationProvider):
233
250
 
234
251
  return CreateChangeSetOutput(StackId=change_set.stack_id, Id=change_set.change_set_id)
235
252
 
253
+ @handler("ExecuteChangeSet")
254
+ def execute_change_set(
255
+ self,
256
+ context: RequestContext,
257
+ change_set_name: ChangeSetNameOrId,
258
+ stack_name: StackNameOrId | None = None,
259
+ client_request_token: ClientRequestToken | None = None,
260
+ disable_rollback: DisableRollback | None = None,
261
+ retain_except_on_create: RetainExceptOnCreate | None = None,
262
+ **kwargs,
263
+ ) -> ExecuteChangeSetOutput:
264
+ change_set = find_change_set(
265
+ context.account_id,
266
+ context.region,
267
+ change_set_name,
268
+ stack_name=stack_name,
269
+ active_only=True,
270
+ )
271
+ if not change_set:
272
+ raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
273
+ if change_set.metadata.get("ExecutionStatus") != ExecutionStatus.AVAILABLE:
274
+ LOG.debug("Change set %s not in execution status 'AVAILABLE'", change_set_name)
275
+ raise InvalidChangeSetStatusException(
276
+ f"ChangeSet [{change_set.metadata['ChangeSetId']}] cannot be executed in its current status of [{change_set.metadata.get('Status')}]"
277
+ )
278
+ stack_name = change_set.stack.stack_name
279
+ LOG.debug(
280
+ 'Executing change set "%s" for stack "%s" with %s resources ...',
281
+ change_set_name,
282
+ stack_name,
283
+ len(change_set.template_resources),
284
+ )
285
+ if not change_set.update_graph:
286
+ raise RuntimeError("Programming error: no update graph found for change set")
287
+
288
+ change_set_executor = ChangeSetModelExecutor(
289
+ change_set.update_graph,
290
+ account_id=context.account_id,
291
+ region=context.region,
292
+ stack_name=change_set.stack.stack_name,
293
+ stack_id=change_set.stack.stack_id,
294
+ )
295
+ new_resources = change_set_executor.execute()
296
+ change_set.stack.set_stack_status(f"{change_set.change_set_type or 'UPDATE'}_COMPLETE")
297
+ change_set.stack.resources = new_resources
298
+ return ExecuteChangeSetOutput()
299
+
236
300
  @handler("DescribeChangeSet")
237
301
  def describe_change_set(
238
302
  self,
239
303
  context: RequestContext,
240
304
  change_set_name: ChangeSetNameOrId,
241
- stack_name: StackNameOrId = None,
242
- next_token: NextToken = None,
243
- include_property_values: IncludePropertyValues = None,
305
+ stack_name: StackNameOrId | None = None,
306
+ next_token: NextToken | None = None,
307
+ include_property_values: IncludePropertyValues | None = None,
244
308
  **kwargs,
245
309
  ) -> DescribeChangeSetOutput:
246
310
  # TODO add support for include_property_values
@@ -261,8 +325,10 @@ class CloudformationProviderV2(CloudformationProvider):
261
325
  if not change_set:
262
326
  raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
263
327
 
264
- change_set_describer = ChangeSetModelDescriber(node_template=change_set.update_graph)
265
- resource_changes = change_set_describer.get_resource_changes()
328
+ change_set_describer = ChangeSetModelDescriber(
329
+ node_template=change_set.update_graph, include_property_values=include_property_values
330
+ )
331
+ resource_changes = change_set_describer.get_changes()
266
332
 
267
333
  attrs = [
268
334
  "ChangeSetType",
@@ -78,15 +78,22 @@ def apply_patches():
78
78
  tags: Optional[dict[str, str]] = None,
79
79
  **kwargs,
80
80
  ):
81
+ # Patch this method so that we can create a subnet with a specific "custom"
82
+ # ID. The custom ID that we will use is contained within a special tag.
81
83
  vpc_id: str = args[0] if len(args) >= 1 else kwargs["vpc_id"]
82
84
  cidr_block: str = args[1] if len(args) >= 1 else kwargs["cidr_block"]
83
85
  resource_identifier = SubnetIdentifier(
84
86
  self.account_id, self.region_name, vpc_id, cidr_block
85
87
  )
86
- # tags has the format: {"subnet": {"Key": ..., "Value": ...}}
88
+
89
+ # tags has the format: {"subnet": {"Key": ..., "Value": ...}}, but we need
90
+ # to pass this to the generate method as {"Key": ..., "Value": ...}. Take
91
+ # care not to alter the original tags dict otherwise moto will not be able
92
+ # to understand it.
93
+ subnet_tags = None
87
94
  if tags is not None:
88
- tags = tags.get("subnet", tags)
89
- custom_id = resource_identifier.generate(tags=tags)
95
+ subnet_tags = tags.get("subnet", tags)
96
+ custom_id = resource_identifier.generate(tags=subnet_tags)
90
97
 
91
98
  if custom_id:
92
99
  # Check if custom id is unique within a given VPC
@@ -102,9 +109,16 @@ def apply_patches():
102
109
  if custom_id:
103
110
  # Remove the subnet from the default dict and add it back with the custom id
104
111
  self.subnets[availability_zone].pop(result.id)
112
+ old_id = result.id
105
113
  result.id = custom_id
106
114
  self.subnets[availability_zone][custom_id] = result
107
115
 
116
+ # Tags are not stored in the Subnet object, but instead stored in a separate
117
+ # dict in the EC2 backend, keyed by subnet id. That therefore requires
118
+ # updating as well.
119
+ if old_id in self.tags:
120
+ self.tags[custom_id] = self.tags.pop(old_id)
121
+
108
122
  # Return the subnet with the patched custom id
109
123
  return result
110
124
 
@@ -132,9 +146,16 @@ def apply_patches():
132
146
  if custom_id:
133
147
  # Remove the security group from the default dict and add it back with the custom id
134
148
  self.groups[result.vpc_id].pop(result.group_id)
149
+ old_id = result.group_id
135
150
  result.group_id = result.id = custom_id
136
151
  self.groups[result.vpc_id][custom_id] = result
137
152
 
153
+ # Tags are not stored in the Security Group object, but instead are stored in a
154
+ # separate dict in the EC2 backend, keyed by id. That therefore requires
155
+ # updating as well.
156
+ if old_id in self.tags:
157
+ self.tags[custom_id] = self.tags.pop(old_id)
158
+
138
159
  return result
139
160
 
140
161
  @patch(ec2_models.vpcs.VPCBackend.create_vpc)
@@ -175,9 +196,16 @@ def apply_patches():
175
196
 
176
197
  # Remove the VPC from the default dict and add it back with the custom id
177
198
  self.vpcs.pop(vpc_id)
199
+ old_id = result.id
178
200
  result.id = custom_id
179
201
  self.vpcs[custom_id] = result
180
202
 
203
+ # Tags are not stored in the VPC object, but instead stored in a separate
204
+ # dict in the EC2 backend, keyed by VPC id. That therefore requires
205
+ # updating as well.
206
+ if old_id in self.tags:
207
+ self.tags[custom_id] = self.tags.pop(old_id)
208
+
181
209
  # Create default network ACL, route table, and security group for custom ID VPC
182
210
  self.create_route_table(
183
211
  vpc_id=custom_id,
@@ -476,7 +476,7 @@ class KmsKey:
476
476
  if "PKCS" in signing_algorithm:
477
477
  return padding.PKCS1v15()
478
478
  elif "PSS" in signing_algorithm:
479
- return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.MAX_LENGTH)
479
+ return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.DIGEST_LENGTH)
480
480
  else:
481
481
  LOG.warning("Unsupported padding in SigningAlgorithm '%s'", signing_algorithm)
482
482
 
@@ -61,6 +61,8 @@ class DynamoDBPoller(StreamPoller):
61
61
  **kwargs,
62
62
  )
63
63
  shards[shard_id] = get_shard_iterator_response["ShardIterator"]
64
+
65
+ LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
64
66
  return shards
65
67
 
66
68
  def stream_arn_param(self) -> dict:
@@ -84,6 +84,8 @@ class KinesisPoller(StreamPoller):
84
84
  **kwargs,
85
85
  )
86
86
  shards[shard_id] = get_shard_iterator_response["ShardIterator"]
87
+
88
+ LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
87
89
  return shards
88
90
 
89
91
  def stream_arn_param(self) -> dict:
@@ -154,7 +154,6 @@ class StreamPoller(Poller):
154
154
  LOG.debug("No shards found for %s.", self.source_arn)
155
155
  raise EmptyPollResultsException(service=self.event_source(), source_arn=self.source_arn)
156
156
  else:
157
- LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
158
157
  # Remove all shard batchers without corresponding shards
159
158
  for shard_id in self.shard_batcher.keys() - self.shards.keys():
160
159
  self.shard_batcher.pop(shard_id, None)
@@ -185,7 +184,10 @@ class StreamPoller(Poller):
185
184
  def poll_events_from_shard(self, shard_id: str, shard_iterator: str):
186
185
  get_records_response = self.get_records(shard_iterator)
187
186
  records: list[dict] = get_records_response.get("Records", [])
188
- next_shard_iterator = get_records_response["NextShardIterator"]
187
+ if not (next_shard_iterator := get_records_response.get("NextShardIterator")):
188
+ # If the next shard iterator is None, we can assume the shard is closed or
189
+ # has expired on the DynamoDB Local server, hence we should re-initialize.
190
+ self.shards = self.initialize_shards()
189
191
 
190
192
  # We cannot reliably back-off when no records found since an iterator
191
193
  # may have to move multiple times until records are returned.
@@ -86,7 +86,9 @@ class AssignmentService(OtherServiceEndpoint):
86
86
  except InvalidStatusException as invalid_e:
87
87
  LOG.error("InvalidStatusException: %s", invalid_e)
88
88
  except Exception as e:
89
- LOG.error("Failed invocation %s", e)
89
+ LOG.error(
90
+ "Failed invocation <%s>: %s", type(e), e, exc_info=LOG.isEnabledFor(logging.DEBUG)
91
+ )
90
92
  self.stop_environment(execution_environment)
91
93
  raise e
92
94
 
@@ -107,7 +109,7 @@ class AssignmentService(OtherServiceEndpoint):
107
109
  except EnvironmentStartupTimeoutException:
108
110
  raise
109
111
  except Exception as e:
110
- message = f"Could not start new environment: {e}"
112
+ message = f"Could not start new environment: {type(e).__name__}:{e}"
111
113
  raise AssignmentException(message) from e
112
114
  return execution_environment
113
115
 
@@ -37,10 +37,11 @@ class RuntimeStatus(Enum):
37
37
  INACTIVE = auto()
38
38
  STARTING = auto()
39
39
  READY = auto()
40
- RUNNING = auto()
40
+ INVOKING = auto()
41
41
  STARTUP_FAILED = auto()
42
42
  STARTUP_TIMED_OUT = auto()
43
43
  STOPPED = auto()
44
+ TIMING_OUT = auto()
44
45
 
45
46
 
46
47
  class InvalidStatusException(Exception):
@@ -246,7 +247,7 @@ class ExecutionEnvironment:
246
247
  def release(self) -> None:
247
248
  self.last_returned = datetime.now()
248
249
  with self.status_lock:
249
- if self.status != RuntimeStatus.RUNNING:
250
+ if self.status != RuntimeStatus.INVOKING:
250
251
  raise InvalidStatusException(
251
252
  f"Execution environment {self.id} can only be set to status ready while running."
252
253
  f" Current status: {self.status}"
@@ -264,7 +265,7 @@ class ExecutionEnvironment:
264
265
  f"Execution environment {self.id} can only be reserved if ready. "
265
266
  f" Current status: {self.status}"
266
267
  )
267
- self.status = RuntimeStatus.RUNNING
268
+ self.status = RuntimeStatus.INVOKING
268
269
 
269
270
  self.keepalive_timer.cancel()
270
271
 
@@ -274,6 +275,17 @@ class ExecutionEnvironment:
274
275
  self.id,
275
276
  self.function_version.qualified_arn,
276
277
  )
278
+ # The stop() method allows to interrupt invocations (on purpose), which might cancel running invocations
279
+ # which we should not do when the keepalive timer passed.
280
+ # The new TIMING_OUT state prevents this race condition
281
+ with self.status_lock:
282
+ if self.status != RuntimeStatus.READY:
283
+ LOG.debug(
284
+ "Keepalive timer passed, but current runtime status is %s. Aborting keepalive stop.",
285
+ self.status,
286
+ )
287
+ return
288
+ self.status = RuntimeStatus.TIMING_OUT
277
289
  self.stop()
278
290
  # Notify assignment service via callback to remove from environments list
279
291
  self.on_timeout(self.version_manager_id, self.id)
@@ -340,7 +352,7 @@ class ExecutionEnvironment:
340
352
  return f"{prefix}{prefixed_logs}"
341
353
 
342
354
  def invoke(self, invocation: Invocation) -> InvocationResult:
343
- assert self.status == RuntimeStatus.RUNNING
355
+ assert self.status == RuntimeStatus.INVOKING
344
356
  # Async/event invokes might miss an aws_trace_header, then we need to create a new root trace id.
345
357
  aws_trace_header = (
346
358
  invocation.trace_context.get("aws_trace_header") or TraceHeader().ensure_root_exists()
@@ -1,13 +1,13 @@
1
1
  import dataclasses
2
2
  import logging
3
3
  import threading
4
+ import time
4
5
  from queue import Queue
5
6
  from typing import Optional, Union
6
7
 
7
8
  from localstack.aws.connect import connect_to
8
9
  from localstack.utils.aws.client_types import ServicePrincipal
9
10
  from localstack.utils.bootstrap import is_api_enabled
10
- from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
11
11
  from localstack.utils.threads import FuncThread
12
12
 
13
13
  LOG = logging.getLogger(__name__)
@@ -50,10 +50,34 @@ class LogHandler:
50
50
  log_item = self.log_queue.get()
51
51
  if log_item is QUEUE_SHUTDOWN:
52
52
  return
53
+ # we need to split by newline - but keep the newlines in the strings
54
+ # strips empty lines, as they are not accepted by cloudwatch
55
+ logs = [line + "\n" for line in log_item.logs.split("\n") if line]
56
+ # until we have a better way to have timestamps, log events have the same time for a single invocation
57
+ log_events = [
58
+ {"timestamp": int(time.time() * 1000), "message": log_line} for log_line in logs
59
+ ]
53
60
  try:
54
- store_cloudwatch_logs(
55
- logs_client, log_item.log_group, log_item.log_stream, log_item.logs
56
- )
61
+ try:
62
+ logs_client.put_log_events(
63
+ logGroupName=log_item.log_group,
64
+ logStreamName=log_item.log_stream,
65
+ logEvents=log_events,
66
+ )
67
+ except logs_client.exceptions.ResourceNotFoundException:
68
+ # create new log group
69
+ try:
70
+ logs_client.create_log_group(logGroupName=log_item.log_group)
71
+ except logs_client.exceptions.ResourceAlreadyExistsException:
72
+ pass
73
+ logs_client.create_log_stream(
74
+ logGroupName=log_item.log_group, logStreamName=log_item.log_stream
75
+ )
76
+ logs_client.put_log_events(
77
+ logGroupName=log_item.log_group,
78
+ logStreamName=log_item.log_stream,
79
+ logEvents=log_events,
80
+ )
57
81
  except Exception as e:
58
82
  LOG.warning(
59
83
  "Error saving logs to group %s in region %s: %s",
@@ -223,6 +223,7 @@ from localstack.services.lambda_.runtimes import (
223
223
  DEPRECATED_RUNTIMES,
224
224
  DEPRECATED_RUNTIMES_UPGRADES,
225
225
  RUNTIMES_AGGREGATED,
226
+ SNAP_START_SUPPORTED_RUNTIMES,
226
227
  VALID_RUNTIMES,
227
228
  )
228
229
  from localstack.services.lambda_.urlrouter import FunctionUrlRouter
@@ -718,6 +719,11 @@ class LambdaProvider(LambdaApi, ServiceLifecycleHook):
718
719
  f"1 validation error detected: Value '{apply_on}' at 'snapStart.applyOn' failed to satisfy constraint: Member must satisfy enum value set: [PublishedVersions, None]"
719
720
  )
720
721
 
722
+ if runtime not in SNAP_START_SUPPORTED_RUNTIMES:
723
+ raise InvalidParameterValueException(
724
+ f"{runtime} is not supported for SnapStart enabled functions.", Type="User"
725
+ )
726
+
721
727
  def _validate_layers(self, new_layers: list[str], region: str, account_id: str):
722
728
  if len(new_layers) > LAMBDA_LAYERS_LIMIT_PER_FUNCTION:
723
729
  raise InvalidParameterValueException(
@@ -1597,10 +1603,19 @@ class LambdaProvider(LambdaApi, ServiceLifecycleHook):
1597
1603
  except ServiceException:
1598
1604
  raise
1599
1605
  except EnvironmentStartupTimeoutException as e:
1600
- raise LambdaServiceException("Internal error while executing lambda") from e
1606
+ raise LambdaServiceException(
1607
+ f"[{context.request_id}] Timeout while starting up lambda environment for function {function_name}:{qualifier}"
1608
+ ) from e
1601
1609
  except Exception as e:
1602
- LOG.error("Error while invoking lambda", exc_info=e)
1603
- raise LambdaServiceException("Internal error while executing lambda") from e
1610
+ LOG.error(
1611
+ "[%s] Error while invoking lambda %s",
1612
+ context.request_id,
1613
+ function_name,
1614
+ exc_info=LOG.isEnabledFor(logging.DEBUG),
1615
+ )
1616
+ raise LambdaServiceException(
1617
+ f"[{context.request_id}] Internal error while executing lambda {function_name}:{qualifier}. Caused by {type(e).__name__}: {e}"
1618
+ ) from e
1604
1619
 
1605
1620
  if invocation_type == InvocationType.Event:
1606
1621
  # This happens when invocation type is event
@@ -59,6 +59,7 @@ IMAGE_MAPPING: dict[Runtime, str] = {
59
59
  Runtime.dotnet6: "dotnet:6",
60
60
  Runtime.dotnetcore3_1: "dotnet:core3.1", # deprecated Apr 3, 2023 => Apr 3, 2023 => May 3, 2023
61
61
  Runtime.go1_x: "go:1", # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024
62
+ Runtime.ruby3_4: "ruby:3.4",
62
63
  Runtime.ruby3_3: "ruby:3.3",
63
64
  Runtime.ruby3_2: "ruby:3.2",
64
65
  Runtime.ruby2_7: "ruby:2.7", # deprecated Dec 7, 2023 => Jan 9, 2024 => Feb 8, 2024
@@ -133,6 +134,7 @@ RUNTIMES_AGGREGATED = {
133
134
  "ruby": [
134
135
  Runtime.ruby3_2,
135
136
  Runtime.ruby3_3,
137
+ Runtime.ruby3_4,
136
138
  ],
137
139
  "dotnet": [
138
140
  Runtime.dotnet6,
@@ -149,7 +151,18 @@ TESTED_RUNTIMES: list[Runtime] = [
149
151
  runtime for runtime_group in RUNTIMES_AGGREGATED.values() for runtime in runtime_group
150
152
  ]
151
153
 
154
+ # An unordered list of snapstart-enabled runtimes. Related to snapshots in test_snapstart_exceptions
155
+ # https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html
156
+ SNAP_START_SUPPORTED_RUNTIMES = [
157
+ Runtime.java11,
158
+ Runtime.java17,
159
+ Runtime.java21,
160
+ Runtime.python3_12,
161
+ Runtime.python3_13,
162
+ Runtime.dotnet8,
163
+ ]
164
+
152
165
  # An ordered list of all Lambda runtimes considered valid by AWS. Matching snapshots in test_create_lambda_exceptions
153
- VALID_RUNTIMES: str = "[nodejs20.x, provided.al2023, python3.12, python3.13, nodejs22.x, java17, nodejs16.x, dotnet8, python3.10, java11, python3.11, dotnet6, java21, nodejs18.x, provided.al2, ruby3.3, java8.al2, ruby3.2, python3.8, python3.9]"
166
+ VALID_RUNTIMES: str = "[nodejs20.x, provided.al2023, python3.12, python3.13, nodejs22.x, java17, nodejs16.x, dotnet8, python3.10, java11, python3.11, dotnet6, java21, nodejs18.x, provided.al2, ruby3.3, ruby3.4, java8.al2, ruby3.2, python3.8, python3.9]"
154
167
  # An ordered list of all Lambda runtimes for layers considered valid by AWS. Matching snapshots in test_layer_exceptions
155
- VALID_LAYER_RUNTIMES: str = "[ruby2.6, dotnetcore1.0, python3.7, nodejs8.10, nasa, ruby2.7, python2.7-greengrass, dotnetcore2.0, python3.8, java21, dotnet6, dotnetcore2.1, python3.9, java11, nodejs6.10, provided, dotnetcore3.1, dotnet8, java17, nodejs, nodejs4.3, java8.al2, go1.x, nodejs20.x, go1.9, byol, nodejs10.x, provided.al2023, nodejs22.x, python3.10, java8, nodejs12.x, python3.11, nodejs8.x, python3.12, nodejs14.x, nodejs8.9, python3.13, nodejs16.x, provided.al2, nodejs4.3-edge, nodejs18.x, ruby3.2, python3.4, ruby3.3, ruby2.5, python3.6, python2.7]"
168
+ VALID_LAYER_RUNTIMES: str = "[ruby2.6, dotnetcore1.0, python3.7, nodejs8.10, nasa, ruby2.7, python2.7-greengrass, dotnetcore2.0, python3.8, java21, dotnet6, dotnetcore2.1, python3.9, java11, nodejs6.10, provided, dotnetcore3.1, dotnet8, java25, java17, nodejs, nodejs4.3, java8.al2, go1.x, dotnet10, nodejs20.x, go1.9, byol, nodejs10.x, provided.al2023, nodejs22.x, python3.10, java8, nodejs12.x, python3.11, nodejs24.x, nodejs8.x, python3.12, nodejs14.x, nodejs8.9, python3.13, python3.14, nodejs16.x, provided.al2, nodejs4.3-edge, nodejs18.x, ruby3.2, python3.4, ruby3.3, ruby3.4, ruby2.5, python3.6, python2.7]"
@@ -60,7 +60,7 @@ LOG = logging.getLogger(__name__)
60
60
 
61
61
  SIGNATURE_V2_POST_FIELDS = [
62
62
  "signature",
63
- "AWSAccessKeyId",
63
+ "awsaccesskeyid",
64
64
  ]
65
65
 
66
66
  SIGNATURE_V4_POST_FIELDS = [
@@ -768,13 +768,17 @@ def validate_post_policy(
768
768
  )
769
769
  raise ex
770
770
 
771
- if not (policy := request_form.get("policy")):
771
+ form_dict = {k.lower(): v for k, v in request_form.items()}
772
+
773
+ policy = form_dict.get("policy")
774
+ if not policy:
772
775
  # A POST request needs a policy except if the bucket is publicly writable
773
776
  return
774
777
 
775
778
  # TODO: this does validation of fields only for now
776
- is_v4 = _is_match_with_signature_fields(request_form, SIGNATURE_V4_POST_FIELDS)
777
- is_v2 = _is_match_with_signature_fields(request_form, SIGNATURE_V2_POST_FIELDS)
779
+ is_v4 = _is_match_with_signature_fields(form_dict, SIGNATURE_V4_POST_FIELDS)
780
+ is_v2 = _is_match_with_signature_fields(form_dict, SIGNATURE_V2_POST_FIELDS)
781
+
778
782
  if not is_v2 and not is_v4:
779
783
  ex: AccessDenied = AccessDenied("Access Denied")
780
784
  ex.HostId = FAKE_HOST_ID
@@ -784,7 +788,7 @@ def validate_post_policy(
784
788
  policy_decoded = json.loads(base64.b64decode(policy).decode("utf-8"))
785
789
  except ValueError:
786
790
  # this means the policy has been tampered with
787
- signature = request_form.get("signature") if is_v2 else request_form.get("x-amz-signature")
791
+ signature = form_dict.get("signature") if is_v2 else form_dict.get("x-amz-signature")
788
792
  credentials = get_credentials_from_parameters(request_form, "us-east-1")
789
793
  ex: SignatureDoesNotMatch = create_signature_does_not_match_sig_v2(
790
794
  request_signature=signature,
@@ -813,7 +817,6 @@ def validate_post_policy(
813
817
  return
814
818
 
815
819
  conditions = policy_decoded.get("conditions", [])
816
- form_dict = {k.lower(): v for k, v in request_form.items()}
817
820
  for condition in conditions:
818
821
  if not _verify_condition(condition, form_dict, additional_policy_metadata):
819
822
  str_condition = str(condition).replace("'", '"')
@@ -896,7 +899,7 @@ def _parse_policy_expiration_date(expiration_string: str) -> datetime.datetime:
896
899
 
897
900
 
898
901
  def _is_match_with_signature_fields(
899
- request_form: ImmutableMultiDict, signature_fields: list[str]
902
+ request_form: dict[str, str], signature_fields: list[str]
900
903
  ) -> bool:
901
904
  """
902
905
  Checks if the form contains at least one of the required fields passed in `signature_fields`
@@ -910,12 +913,13 @@ def _is_match_with_signature_fields(
910
913
  for p in signature_fields:
911
914
  if p not in request_form:
912
915
  LOG.info("POST pre-sign missing fields")
913
- # .capitalize() does not work here, because of AWSAccessKeyId casing
914
916
  argument_name = (
915
- capitalize_header_name_from_snake_case(p)
916
- if "-" in p
917
- else f"{p[0].upper()}{p[1:]}"
917
+ capitalize_header_name_from_snake_case(p) if "-" in p else p.capitalize()
918
918
  )
919
+ # AWSAccessKeyId is a special case
920
+ if argument_name == "Awsaccesskeyid":
921
+ argument_name = "AWSAccessKeyId"
922
+
919
923
  ex: InvalidArgument = _create_invalid_argument_exc(
920
924
  message=f"Bucket POST must contain a field named '{argument_name}'. If it is specified, please check the order of the fields.",
921
925
  name=argument_name,
@@ -729,17 +729,28 @@ def backend_rotate_secret(
729
729
  if not self._is_valid_identifier(secret_id):
730
730
  raise SecretNotFoundException()
731
731
 
732
- if self.secrets[secret_id].is_deleted():
732
+ secret = self.secrets[secret_id]
733
+ if secret.is_deleted():
733
734
  raise InvalidRequestException(
734
735
  "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
735
736
  perform the operation on a secret that's currently marked deleted."
736
737
  )
738
+ # Resolve rotation_lambda_arn and fallback to previous value if its missing
739
+ # from the current request
740
+ rotation_lambda_arn = rotation_lambda_arn or secret.rotation_lambda_arn
741
+ if not rotation_lambda_arn:
742
+ raise InvalidRequestException(
743
+ "No Lambda rotation function ARN is associated with this secret."
744
+ )
737
745
 
738
746
  if rotation_lambda_arn:
739
747
  if len(rotation_lambda_arn) > 2048:
740
748
  msg = "RotationLambdaARN must <= 2048 characters long."
741
749
  raise InvalidParameterException(msg)
742
750
 
751
+ # In case rotation_period is not provided, resolve auto_rotate_after_days
752
+ # and fallback to previous value if its missing from the current request.
753
+ rotation_period = secret.auto_rotate_after_days or 0
743
754
  if rotation_rules:
744
755
  if rotation_days in rotation_rules:
745
756
  rotation_period = rotation_rules[rotation_days]
@@ -753,8 +764,6 @@ def backend_rotate_secret(
753
764
  except Exception:
754
765
  raise ResourceNotFoundException("Lambda does not exist or could not be accessed")
755
766
 
756
- secret = self.secrets[secret_id]
757
-
758
767
  # The rotation function must end with the versions of the secret in
759
768
  # one of two states:
760
769
  #
@@ -782,7 +791,7 @@ def backend_rotate_secret(
782
791
  pass
783
792
 
784
793
  secret.rotation_lambda_arn = rotation_lambda_arn
785
- secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0)
794
+ secret.auto_rotate_after_days = rotation_period
786
795
  if secret.auto_rotate_after_days > 0:
787
796
  wait_interval_s = int(rotation_period) * 86400
788
797
  secret.next_rotation_date = int(time.time()) + wait_interval_s
@@ -30,9 +30,9 @@ from localstack.services.sqs.exceptions import (
30
30
  )
31
31
  from localstack.services.sqs.queue import InterruptiblePriorityQueue, InterruptibleQueue
32
32
  from localstack.services.sqs.utils import (
33
- decode_receipt_handle,
34
33
  encode_move_task_handle,
35
34
  encode_receipt_handle,
35
+ extract_receipt_handle_info,
36
36
  global_message_sequence,
37
37
  guess_endpoint_strategy_and_host,
38
38
  is_message_deduplication_id_required,
@@ -445,7 +445,7 @@ class SqsQueue:
445
445
  return len(self.delayed)
446
446
 
447
447
  def validate_receipt_handle(self, receipt_handle: str):
448
- if self.arn != decode_receipt_handle(receipt_handle):
448
+ if self.arn != extract_receipt_handle_info(receipt_handle).queue_arn:
449
449
  raise ReceiptHandleIsInvalid(
450
450
  f'The input receipt handle "{receipt_handle}" is not a valid receipt handle.'
451
451
  )
@@ -490,6 +490,7 @@ class SqsQueue:
490
490
  return
491
491
 
492
492
  standard_message = self.receipts[receipt_handle]
493
+ self._pre_delete_checks(standard_message, receipt_handle)
493
494
  standard_message.deleted = True
494
495
  LOG.debug(
495
496
  "deleting message %s from queue %s",
@@ -724,6 +725,18 @@ class SqsQueue:
724
725
 
725
726
  return expired
726
727
 
728
+ def _pre_delete_checks(self, standard_message: SqsMessage, receipt_handle: str) -> None:
729
+ """
730
+ Runs any potential checks if a message that has been successfully identified via a receipt handle
731
+ is indeed supposed to be deleted.
732
+ For example, a receipt handle that has expired might not lead to deletion.
733
+
734
+ :param standard_message: The message to be deleted
735
+ :param receipt_handle: The handle associated with the message
736
+ :return: None. Potential violations raise errors.
737
+ """
738
+ pass
739
+
727
740
 
728
741
  class StandardQueue(SqsQueue):
729
742
  visible: InterruptiblePriorityQueue[SqsMessage]
@@ -1001,9 +1014,15 @@ class FifoQueue(SqsQueue):
1001
1014
  for message in self.delayed:
1002
1015
  message.delay_seconds = value
1003
1016
 
1017
+ def _pre_delete_checks(self, message: SqsMessage, receipt_handle: str) -> None:
1018
+ _, _, _, last_received = extract_receipt_handle_info(receipt_handle)
1019
+ if time.time() - float(last_received) > message.visibility_timeout:
1020
+ raise InvalidParameterValueException(
1021
+ f"Value {receipt_handle} for parameter ReceiptHandle is invalid. Reason: The receipt handle has expired."
1022
+ )
1023
+
1004
1024
  def remove(self, receipt_handle: str):
1005
1025
  self.validate_receipt_handle(receipt_handle)
1006
- decode_receipt_handle(receipt_handle)
1007
1026
 
1008
1027
  super().remove(receipt_handle)
1009
1028