localstack-core 4.10.1.dev42__py3-none-any.whl → 4.11.2.dev14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. localstack/aws/api/apigateway/__init__.py +42 -0
  2. localstack/aws/api/cloudformation/__init__.py +161 -0
  3. localstack/aws/api/ec2/__init__.py +1165 -12
  4. localstack/aws/api/iam/__init__.py +227 -0
  5. localstack/aws/api/kms/__init__.py +1 -0
  6. localstack/aws/api/lambda_/__init__.py +418 -66
  7. localstack/aws/api/logs/__init__.py +312 -0
  8. localstack/aws/api/opensearch/__init__.py +89 -0
  9. localstack/aws/api/redshift/__init__.py +69 -0
  10. localstack/aws/api/resourcegroupstaggingapi/__init__.py +36 -0
  11. localstack/aws/api/route53/__init__.py +42 -0
  12. localstack/aws/api/route53resolver/__init__.py +1 -0
  13. localstack/aws/api/s3/__init__.py +62 -0
  14. localstack/aws/api/secretsmanager/__init__.py +28 -23
  15. localstack/aws/api/stepfunctions/__init__.py +52 -10
  16. localstack/aws/api/sts/__init__.py +52 -0
  17. localstack/aws/handlers/logging.py +8 -4
  18. localstack/aws/handlers/service.py +11 -2
  19. localstack/aws/protocol/serializer.py +1 -1
  20. localstack/deprecations.py +0 -6
  21. localstack/services/acm/provider.py +4 -0
  22. localstack/services/apigateway/legacy/provider.py +28 -15
  23. localstack/services/cloudformation/engine/template_preparer.py +6 -2
  24. localstack/services/cloudformation/engine/v2/change_set_model_preproc.py +12 -0
  25. localstack/services/cloudwatch/provider.py +10 -3
  26. localstack/services/cloudwatch/provider_v2.py +6 -3
  27. localstack/services/configservice/provider.py +5 -1
  28. localstack/services/dynamodb/provider.py +1 -0
  29. localstack/services/dynamodb/v2/provider.py +1 -0
  30. localstack/services/dynamodbstreams/provider.py +6 -0
  31. localstack/services/dynamodbstreams/v2/provider.py +6 -0
  32. localstack/services/ec2/provider.py +6 -0
  33. localstack/services/es/provider.py +6 -0
  34. localstack/services/events/provider.py +4 -0
  35. localstack/services/events/v1/provider.py +9 -0
  36. localstack/services/firehose/provider.py +5 -0
  37. localstack/services/iam/provider.py +4 -0
  38. localstack/services/kms/models.py +10 -20
  39. localstack/services/kms/provider.py +4 -0
  40. localstack/services/lambda_/api_utils.py +37 -20
  41. localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py +1 -1
  42. localstack/services/lambda_/invocation/assignment.py +4 -1
  43. localstack/services/lambda_/invocation/execution_environment.py +21 -2
  44. localstack/services/lambda_/invocation/lambda_models.py +27 -2
  45. localstack/services/lambda_/invocation/lambda_service.py +51 -3
  46. localstack/services/lambda_/invocation/models.py +9 -1
  47. localstack/services/lambda_/invocation/version_manager.py +18 -3
  48. localstack/services/lambda_/provider.py +239 -95
  49. localstack/services/lambda_/resource_providers/aws_lambda_function.py +33 -1
  50. localstack/services/lambda_/runtimes.py +3 -1
  51. localstack/services/logs/provider.py +9 -0
  52. localstack/services/opensearch/provider.py +53 -3
  53. localstack/services/resource_groups/provider.py +5 -1
  54. localstack/services/resourcegroupstaggingapi/provider.py +6 -1
  55. localstack/services/s3/provider.py +28 -15
  56. localstack/services/s3/utils.py +35 -14
  57. localstack/services/s3control/provider.py +101 -2
  58. localstack/services/s3control/validation.py +50 -0
  59. localstack/services/sns/constants.py +3 -1
  60. localstack/services/sns/publisher.py +15 -6
  61. localstack/services/sns/v2/models.py +6 -0
  62. localstack/services/sns/v2/provider.py +650 -19
  63. localstack/services/sns/v2/utils.py +12 -0
  64. localstack/services/stepfunctions/asl/component/common/path/result_path.py +1 -1
  65. localstack/services/stepfunctions/asl/component/state/state_execution/execute_state.py +0 -1
  66. localstack/services/stepfunctions/asl/component/state/state_execution/state_map/state_map.py +0 -1
  67. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py +8 -8
  68. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/{mock_eval_utils.py → local_mock_eval_utils.py} +13 -9
  69. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py +6 -6
  70. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py +1 -1
  71. localstack/services/stepfunctions/asl/component/state/state_fail/state_fail.py +4 -0
  72. localstack/services/stepfunctions/asl/component/test_state/state/base_mock.py +118 -0
  73. localstack/services/stepfunctions/asl/component/test_state/state/common.py +82 -0
  74. localstack/services/stepfunctions/asl/component/test_state/state/execution.py +139 -0
  75. localstack/services/stepfunctions/asl/component/test_state/state/map.py +77 -0
  76. localstack/services/stepfunctions/asl/component/test_state/state/task.py +44 -0
  77. localstack/services/stepfunctions/asl/eval/environment.py +30 -22
  78. localstack/services/stepfunctions/asl/eval/states.py +1 -1
  79. localstack/services/stepfunctions/asl/eval/test_state/environment.py +49 -9
  80. localstack/services/stepfunctions/asl/eval/test_state/program_state.py +22 -0
  81. localstack/services/stepfunctions/asl/jsonata/jsonata.py +5 -1
  82. localstack/services/stepfunctions/asl/parse/preprocessor.py +67 -24
  83. localstack/services/stepfunctions/asl/parse/test_state/asl_parser.py +5 -4
  84. localstack/services/stepfunctions/asl/parse/test_state/preprocessor.py +222 -31
  85. localstack/services/stepfunctions/asl/static_analyser/test_state/test_state_analyser.py +170 -22
  86. localstack/services/stepfunctions/backend/execution.py +6 -6
  87. localstack/services/stepfunctions/backend/execution_worker.py +5 -5
  88. localstack/services/stepfunctions/backend/test_state/execution.py +36 -0
  89. localstack/services/stepfunctions/backend/test_state/execution_worker.py +33 -1
  90. localstack/services/stepfunctions/backend/test_state/test_state_mock.py +127 -0
  91. localstack/services/stepfunctions/local_mocking/__init__.py +9 -0
  92. localstack/services/stepfunctions/{mocking → local_mocking}/mock_config.py +24 -17
  93. localstack/services/stepfunctions/provider.py +78 -27
  94. localstack/services/stepfunctions/test_state/mock_config.py +47 -0
  95. localstack/testing/pytest/fixtures.py +28 -0
  96. localstack/testing/snapshots/transformer_utility.py +5 -0
  97. localstack/utils/analytics/publisher.py +37 -155
  98. localstack/utils/analytics/service_request_aggregator.py +6 -4
  99. localstack/utils/aws/arns.py +7 -0
  100. localstack/utils/batching.py +258 -0
  101. localstack/utils/collections.py +23 -11
  102. localstack/version.py +2 -2
  103. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/METADATA +5 -5
  104. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/RECORD +113 -105
  105. localstack_core-4.11.2.dev14.dist-info/plux.json +1 -0
  106. localstack/services/stepfunctions/mocking/__init__.py +0 -0
  107. localstack/utils/batch_policy.py +0 -124
  108. localstack_core-4.10.1.dev42.dist-info/plux.json +0 -1
  109. /localstack/services/stepfunctions/{mocking → local_mocking}/mock_config_file.py +0 -0
  110. {localstack_core-4.10.1.dev42.data → localstack_core-4.11.2.dev14.data}/scripts/localstack +0 -0
  111. {localstack_core-4.10.1.dev42.data → localstack_core-4.11.2.dev14.data}/scripts/localstack-supervisor +0 -0
  112. {localstack_core-4.10.1.dev42.data → localstack_core-4.11.2.dev14.data}/scripts/localstack.bat +0 -0
  113. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/WHEEL +0 -0
  114. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/entry_points.txt +0 -0
  115. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/licenses/LICENSE.txt +0 -0
  116. {localstack_core-4.10.1.dev42.dist-info → localstack_core-4.11.2.dev14.dist-info}/top_level.txt +0 -0
localstack/testing/pytest/fixtures.py CHANGED
@@ -1426,6 +1426,34 @@ def create_lambda_function(aws_client, wait_until_lambda_ready, lambda_su_role):
             LOG.debug("Unable to delete log group %s in cleanup", log_group_name)
 
 
+@pytest.fixture
+def lambda_is_function_deleted(aws_client):
+    """Example usage:
+    wait_until(lambda_is_function_deleted(function_name))
+    wait_until(lambda_is_function_deleted(function_name, Qualifier="my-alias"))
+
+    function_name can be a function name, function ARN, or partial function ARN.
+    """
+    return _lambda_is_function_deleted(aws_client.lambda_)
+
+
+def _lambda_is_function_deleted(lambda_client):
+    def _is_function_deleted(
+        function_name: str,
+        **kwargs,
+    ) -> Callable[[], bool]:
+        def _inner() -> bool:
+            try:
+                lambda_client.get_function(FunctionName=function_name, **kwargs)
+                return False
+            except lambda_client.exceptions.ResourceNotFoundException:
+                return True
+
+        return _inner
+
+    return _is_function_deleted
+
+
 @pytest.fixture
 def create_echo_http_server(aws_client, create_lambda_function):
     from localstack.aws.api.lambda_ import Runtime
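
The new `lambda_is_function_deleted` fixture returns a factory whose result is a zero-argument predicate, which pairs naturally with polling helpers. A minimal, hedged usage sketch (the function name is hypothetical; `wait_until` is assumed to come from `localstack.utils.sync`, as implied by the fixture docstring):

```python
from localstack.utils.sync import wait_until  # assumed polling helper


def test_delete_function_waits_for_removal(aws_client, lambda_is_function_deleted):
    aws_client.lambda_.delete_function(FunctionName="my-function")  # hypothetical function name
    # polls get_function until it raises ResourceNotFoundException, or the wait times out
    assert wait_until(lambda_is_function_deleted("my-function"))
```
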
localstack/testing/snapshots/transformer_utility.py CHANGED
@@ -791,6 +791,11 @@ class TransformerUtility:
                "x-amzn-RequestId",
                replace_reference=False,
            ),
+           JsonpathTransformer(
+               "$..x-amzn-requestid",
+               "x-amzn-requestid",
+               replace_reference=False,
+           ),
            KeyValueBasedTransformer(_transform_stepfunctions_cause_details, "json-input"),
        ]
 
localstack/utils/analytics/publisher.py CHANGED
@@ -2,11 +2,10 @@ import abc
 import atexit
 import logging
 import threading
-import time
-from queue import Full, Queue
 
 from localstack import config
-from localstack.utils.threads import start_thread, start_worker_thread
+from localstack.utils.batching import AsyncBatcher
+from localstack.utils.threads import FuncThread, start_thread, start_worker_thread
 
 from .client import AnalyticsClient
 from .events import Event, EventHandler
@@ -51,139 +50,36 @@ class Printer(Publisher):
         print(event.asdict())
 
 
-class PublisherBuffer(EventHandler):
-    """
-    A PublisherBuffer is an EventHandler that collects events into a buffer until a flush condition is
-    met, and then flushes the buffer to a Publisher. The condition is either a given buffer size or
-    a time interval, whatever occurs first. The buffer is also flushed when the recorder is stopped
-    via `close()`. Internally it uses a simple event-loop mechanism to multiplex commands on a
-    single thread.
-    """
-
-    flush_size: int
-    flush_interval: float
-
-    _cmd_flush = "__FLUSH__"
-    _cmd_stop = "__STOP__"
-
-    # FIXME: figure out good default values
-    def __init__(
-        self, publisher: Publisher, flush_size: int = 20, flush_interval: float = 10, maxsize=0
-    ):
-        self._publisher = publisher
-        self._queue = Queue(maxsize=maxsize)
-        self._command_queue = Queue()
-
-        self.flush_size = flush_size
-        self.flush_interval = flush_interval
-
-        self._last_flush = time.time()
-        self._stopping = threading.Event()
-        self._stopped = threading.Event()
-
-    def handle(self, event: Event):
-        self._queue.put_nowait(event)
-        self.checked_flush()
-
-    def close(self):
-        if self._stopping.is_set():
-            return
-
-        self._stopping.set()
-        self._command_queue.put(self._cmd_stop)
+class GlobalAnalyticsBus(EventHandler):
+    _batcher: AsyncBatcher[Event]
+    _client: AnalyticsClient
+    _worker_thread: FuncThread | None
 
-    def close_sync(self, timeout: float | None = None):
-        self.close()
-        return self._stopped.wait(timeout)
-
-    def flush(self):
-        self._command_queue.put(self._cmd_flush)
-        self._last_flush = time.time()
-
-    def checked_flush(self):
-        """
-        Runs flush only if a flush condition is met.
-        """
-        if config.DEBUG_ANALYTICS:
-            LOG.debug(
-                "analytics queue size: %d, command queue size: %d, time since last flush: %.1fs",
-                self._queue.qsize(),
-                self._command_queue.qsize(),
-                time.time() - self._last_flush,
-            )
-
-        if self._queue.qsize() >= self.flush_size:
-            self.flush()
-            return
-        if time.time() - self._last_flush >= self.flush_interval:
-            self.flush()
-            return
-
-    def _run_flush_schedule(self, *_):
-        while True:
-            if self._stopping.wait(self.flush_interval):
-                return
-            self.checked_flush()
-
-    def run(self, *_):
-        flush_scheduler = start_thread(self._run_flush_schedule, name="analytics-publishbuffer")
-
-        try:
-            while True:
-                command = self._command_queue.get()
-
-                if command is self._cmd_flush or command is self._cmd_stop:
-                    try:
-                        self._do_flush()
-                    except Exception:
-                        if config.DEBUG_ANALYTICS:
-                            LOG.exception("error while flushing events")
-
-                if command is self._cmd_stop:
-                    return
-        finally:
-            self._stopped.set()
-            flush_scheduler.stop()
-            self._publisher.close()
-            if config.DEBUG_ANALYTICS:
-                LOG.debug("Exit analytics publisher")
-
-    def _do_flush(self):
-        queue = self._queue
-        events = []
-
-        for _ in range(queue.qsize()):
-            event = queue.get_nowait()
-            events.append(event)
-
-        if config.DEBUG_ANALYTICS:
-            LOG.debug("collected %d events to publish", len(events))
-
-        self._publisher.publish(events)
-
-
-class GlobalAnalyticsBus(PublisherBuffer):
-    def __init__(
-        self, client: AnalyticsClient = None, flush_size=20, flush_interval=10, max_buffer_size=1000
-    ) -> None:
+    def __init__(self, client: AnalyticsClient = None, flush_size=20, flush_interval=10) -> None:
         self._client = client or AnalyticsClient()
         self._publisher = AnalyticsClientPublisher(self._client)
-
-        super().__init__(
-            self._publisher,
-            flush_size=flush_size,
-            flush_interval=flush_interval,
-            maxsize=max_buffer_size,
+        self._batcher = AsyncBatcher(
+            self._handle_batch,
+            max_batch_size=flush_size,
+            max_flush_interval=flush_interval,
        )
 
         self._started = False
-        self._startup_complete = False
         self._startup_mutex = threading.Lock()
-        self._buffer_thread = None
+        self._worker_thread = None
 
         self.force_tracking = False  # allow class to ignore all other tracking config
         self.tracking_disabled = False  # disables tracking if global config would otherwise track
 
+    def _handle_batch(self, batch: list[Event]):
+        """Method that satisfies the BatchHandler[Event] protocol and is passed to AsyncBatcher."""
+        try:
+            self._publisher.publish(batch)
+        except Exception:
+            # currently we're just dropping events if something goes wrong during publishing
+            if config.DEBUG_ANALYTICS:
+                LOG.exception("error while publishing analytics events")
+
     @property
     def is_tracking_disabled(self):
         if self.force_tracking:
@@ -200,44 +96,20 @@ class GlobalAnalyticsBus(PublisherBuffer):
 
         return False
 
-    def _do_flush(self):
-        if self.tracking_disabled:
-            # flushing although tracking has been disabled most likely means that _do_start_retry
-            # has failed, tracking is now disabled, and the system tries to flush the queued
-            # events. we use this opportunity to shut down the tracker and clear the queue, since
-            # no tracking should happen from this point on.
-            if config.DEBUG_ANALYTICS:
-                LOG.debug("attempting to flush while tracking is disabled, shutting down tracker")
-            self.close_sync(timeout=10)
-            self._queue.queue.clear()
-            return
-
-        super()._do_flush()
-
-    def flush(self):
-        if not self._startup_complete:
-            # don't flush until _do_start_retry has completed (command queue would fill up)
-            return
-
-        super().flush()
-
 
     def handle(self, event: Event):
         """
         Publish an event to the global analytics event publisher.
         """
         if self.is_tracking_disabled:
-            LOG.debug("skipping event %s", event)
+            LOG.debug("tracking disabled, skipping event %s", event)
             return
 
         if not self._started:
+            # we make sure the batching worker is started
            self._start()
 
-        try:
-            super().handle(event)
-        except Full:
-            if config.DEBUG_ANALYTICS:
-                LOG.warning("event queue is full, dropping event %s", event)
+        self._batcher.add(event)
 
     def _start(self):
         with self._startup_mutex:
@@ -267,12 +139,22 @@ class GlobalAnalyticsBus(PublisherBuffer):
             if config.DEBUG_ANALYTICS:
                 LOG.exception("error while registering session. disabling tracking")
             return
-        finally:
-            self._startup_complete = True
 
-        start_thread(self.run, name="global-analytics-bus")
+        self._worker_thread = start_thread(self._run, name="global-analytics-bus")
 
+        # given the "Global" nature of this class, we register a global atexit hook to make sure all events are flushed
+        # when localstack shuts down.
        def _do_close():
            self.close_sync(timeout=2)
 
        atexit.register(_do_close)
+
+    def _run(self, *_):
+        # main control loop, simply runs the batcher
+        self._batcher.run()
+
+    def close_sync(self, timeout=None):
+        self._batcher.close()
+
+        if self._worker_thread:
+            self._worker_thread.join(timeout=timeout)
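
The rewrite drops `PublisherBuffer`'s hand-rolled command-queue event loop and delegates buffering to the new `AsyncBatcher` from `localstack/utils/batching.py` (added further down in this diff). A hedged sketch of the resulting flow, using a stand-in publish function in place of the real `AnalyticsClientPublisher`:

```python
from localstack.utils.batching import AsyncBatcher
from localstack.utils.threads import start_thread


def publish_batch(batch: list[dict]) -> None:
    # stand-in for AnalyticsClientPublisher.publish(batch)
    print(f"publishing {len(batch)} analytics events")


batcher: AsyncBatcher[dict] = AsyncBatcher(publish_batch, max_batch_size=20, max_flush_interval=10)
worker = start_thread(lambda *_: batcher.run(), name="global-analytics-bus")

batcher.add({"event": "example"})  # flushed once 20 events accumulate or 10 seconds pass

batcher.close()         # triggers one final flush, then the run() loop exits
worker.join(timeout=2)  # mirrors what GlobalAnalyticsBus.close_sync() does
```
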
localstack/utils/analytics/service_request_aggregator.py CHANGED
@@ -34,7 +34,7 @@ class ServiceRequestAggregator:
         self._flush_interval = flush_interval
         self._flush_scheduler = Scheduler()
         self._mutex = threading.RLock()
-        self._period_start_time = datetime.datetime.utcnow()
+        self._period_start_time = datetime.datetime.now(datetime.UTC)
         self._is_started = False
         self._is_shutdown = False
 
@@ -101,12 +101,14 @@
             self._emit_payload(analytics_payload)
             self.counter.clear()
         finally:
-            self._period_start_time = datetime.datetime.utcnow()
+            self._period_start_time = datetime.datetime.now(datetime.UTC)
 
     def _create_analytics_payload(self):
         return {
-            "period_start_time": self._period_start_time.isoformat() + "Z",
-            "period_end_time": datetime.datetime.utcnow().isoformat() + "Z",
+            "period_start_time": self._period_start_time.isoformat().replace("+00:00", "Z"),
+            "period_end_time": datetime.datetime.now(datetime.UTC)
+            .isoformat()
+            .replace("+00:00", "Z"),
             "api_calls": self._aggregate_api_calls(self.counter),
         }
 
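
The move from the deprecated `datetime.utcnow()` to the timezone-aware `datetime.now(datetime.UTC)` is also why the payload now uses `.replace("+00:00", "Z")`: aware datetimes serialize their UTC offset, so simply appending `"Z"` as before would produce an invalid timestamp. For illustration (output values are examples):

```python
import datetime

naive = datetime.datetime.utcnow()           # naive UTC; deprecated since Python 3.12
aware = datetime.datetime.now(datetime.UTC)  # timezone-aware replacement

print(naive.isoformat())                         # e.g. 2024-01-01T12:00:00.000000 (no offset)
print(aware.isoformat())                         # e.g. 2024-01-01T12:00:00.000000+00:00
print(aware.isoformat().replace("+00:00", "Z"))  # e.g. 2024-01-01T12:00:00.000000Z
```
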
localstack/utils/aws/arns.py CHANGED
@@ -284,6 +284,13 @@ def lambda_event_source_mapping_arn(uuid: str, account_id: str, region_name: str
     return _resource_arn(uuid, pattern, account_id=account_id, region_name=region_name)
 
 
+def capacity_provider_arn(capacity_provider_name: str, account_id: str, region_name: str) -> str:
+    pattern = "arn:%s:lambda:%s:%s:capacity-provider:%s"
+    return _resource_arn(
+        capacity_provider_name, pattern, account_id=account_id, region_name=region_name
+    )
+
+
 def lambda_function_or_layer_arn(
     type: str,
     entity_name: str,
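
`capacity_provider_arn` delegates to the module's existing `_resource_arn` helper; assuming the pattern placeholders are filled in the usual ARN order (partition, region, account, resource name), the result should look like this sketch:

```python
from localstack.utils.aws.arns import capacity_provider_arn

arn = capacity_provider_arn("my-provider", account_id="000000000000", region_name="us-east-1")
# expected (hedged): "arn:aws:lambda:us-east-1:000000000000:capacity-provider:my-provider"
print(arn)
```
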
localstack/utils/batching.py ADDED
@@ -0,0 +1,258 @@
+import copy
+import logging
+import threading
+import time
+from typing import Generic, Protocol, TypeVar, overload
+
+LOG = logging.getLogger(__name__)
+
+T = TypeVar("T")
+
+# alias to signify whether a batch policy has been triggered
+BatchPolicyTriggered = bool
+
+
+# TODO: Add batching on bytes as well.
+class Batcher(Generic[T]):
+    """
+    A utility for collecting items into batches and flushing them when one or more batch policy conditions are met.
+
+    The batch policy can be created to trigger on:
+    - max_count: Maximum number of items added
+    - max_window: Maximum time window (in seconds)
+
+    If no limits are specified, the batcher is always in triggered state.
+
+    Example usage:
+
+        import time
+
+        # Triggers when 2 (or more) items are added
+        batcher = Batcher(max_count=2)
+        assert batcher.add(["item1", "item2", "item3"])
+        assert batcher.flush() == ["item1", "item2", "item3"]
+
+        # Triggers partially when 2 (or more) items are added
+        batcher = Batcher(max_count=2)
+        assert batcher.add(["item1", "item2", "item3"])
+        assert batcher.flush(partial=True) == ["item1", "item2"]
+        assert batcher.add("item4")
+        assert batcher.flush(partial=True) == ["item3", "item4"]
+
+        # Trigger 2 seconds after the first add
+        batcher = Batcher(max_window=2.0)
+        assert not batcher.add(["item1", "item2", "item3"])
+        time.sleep(2.1)
+        assert not batcher.add(["item4"])
+        assert batcher.flush() == ["item1", "item2", "item3", "item4"]
+    """
+
+    max_count: int | None
+    """
+    Maximum number of items, must be None or positive.
+    """
+
+    max_window: float | None
+    """
+    Maximum time window in seconds, must be None or positive.
+    """
+
+    _triggered: bool
+    _last_batch_time: float
+    _batch: list[T]
+
+    def __init__(self, max_count: int | None = None, max_window: float | None = None):
+        """
+        Initialize a new Batcher instance.
+
+        :param max_count: Maximum number of items that be None or positive.
+        :param max_window: Maximum time window in seconds that must be None or positive.
+        """
+        self.max_count = max_count
+        self.max_window = max_window
+
+        self._triggered = False
+        self._last_batch_time = time.monotonic()
+        self._batch = []
+
+    @property
+    def period(self) -> float:
+        return time.monotonic() - self._last_batch_time
+
+    def _check_batch_policy(self) -> bool:
+        """Check if any batch policy conditions are met"""
+        if self.max_count is not None and len(self._batch) >= self.max_count:
+            self._triggered = True
+        elif self.max_window is not None and self.period >= self.max_window:
+            self._triggered = True
+        elif not self.max_count and not self.max_window:
+            # always return true
+            self._triggered = True
+
+        return self._triggered
+
+    @overload
+    def add(self, item: T, *, deep_copy: bool = False) -> BatchPolicyTriggered: ...
+
+    @overload
+    def add(self, items: list[T], *, deep_copy: bool = False) -> BatchPolicyTriggered: ...
+
+    def add(self, item_or_items: T | list[T], *, deep_copy: bool = False) -> BatchPolicyTriggered:
+        """
+        Add an item or list of items to the collected batch.
+
+        Returns:
+            BatchPolicyTriggered: True if the batch policy was triggered during addition, False otherwise.
+        """
+        if deep_copy:
+            item_or_items = copy.deepcopy(item_or_items)
+
+        if isinstance(item_or_items, list):
+            self._batch.extend(item_or_items)
+        else:
+            self._batch.append(item_or_items)
+
+        # Check if the last addition triggered the batch policy
+        return self.is_triggered()
+
+    def flush(self, *, partial=False) -> list[T]:
+        result = []
+        if not partial or not self.max_count:
+            result = self._batch.copy()
+            self._batch.clear()
+        else:
+            batch_size = min(self.max_count, len(self._batch))
+            result = self._batch[:batch_size].copy()
+            self._batch = self._batch[batch_size:]
+
+        self._last_batch_time = time.monotonic()
+        self._triggered = False
+        self._check_batch_policy()
+
+        return result
+
+    def duration_until_next_batch(self) -> float:
+        if not self.max_window:
+            return -1
+        return max(self.max_window - self.period, -1)
+
+    def get_current_size(self) -> int:
+        return len(self._batch)
+
+    def is_triggered(self):
+        return self._triggered or self._check_batch_policy()
+
+
+class BatchHandler(Protocol[T]):
+    """
+    A BatchHandler is a callable that processes a list of items handed down by the AsyncBatcher.
+    """
+
+    def __call__(self, batch: list[T]) -> None: ...
+
+
+class AsyncBatcher(Generic[T]):
+    """
+    Class for managing asynchronous batching of items.
+
+    This class allows for efficient buffering and processing of items in batches by
+    periodically flushing the buffer to a given handler, or by automatically flushing
+    when the maximum batch size is reached. It is designed to be used in asynchronous
+    scenarios where the caller does not execute the flushing IO call itself, like with ``Batcher``.
+
+    :ivar max_flush_interval: Maximum time interval in seconds between
+        automatic flushes, regardless of the batch size.
+    :ivar max_batch_size: Maximum number of items in a batch. When reached,
+        the batch is flushed automatically.
+    :ivar handler: Callable handler that processes each flushed batch. The handler must
+        be provided during initialization and must accept a list of items as input.
+    """
+
+    max_flush_interval: float
+    max_batch_size: int
+    handler: BatchHandler[T]
+
+    _buffer: list[T]
+    _flush_lock: threading.Condition
+    _closed: bool
+
+    def __init__(
+        self,
+        handler: BatchHandler[T],
+        max_flush_interval: float = 10,
+        max_batch_size: int = 20,
+    ):
+        self.handler = handler
+        self.max_flush_interval = max_flush_interval
+        self.max_batch_size = max_batch_size
+
+        self._buffer = []
+        self._flush_lock = threading.Condition()
+        self._closed = False
+
+    def add(self, item: T):
+        """
+        Adds an item to the buffer.
+
+        :param item: the item to add
+        """
+        with self._flush_lock:
+            if self._closed:
+                raise ValueError("Batcher is stopped, can no longer add items")
+
+            self._buffer.append(item)
+
+            if len(self._buffer) >= self.max_batch_size:
+                self._flush_lock.notify_all()
+
+    @property
+    def current_batch_size(self) -> int:
+        """
+        Returns the current number of items in the buffer waiting to be flushed.
+        """
+        return len(self._buffer)
+
+    def run(self):
+        """
+        Runs the event loop that flushes the buffer to the handler based on the configured rules, and blocks until
+        ``close()`` is called. This method is meant to be run in a separate thread.
+        """
+        while not self._closed:
+            with self._flush_lock:
+                # wait returns once either the condition is notified (in which case wait returns True, indicating that
+                # something has triggered a flush manually), or the timeout expires (in which case wait returns False)
+                self._flush_lock.wait(self.max_flush_interval)
+
+                # if _flush_condition was notified because close() was called, we should still make sure we flush the
+                # last batch
+
+                # perform the flush, if there are any items in the buffer
+                if not self._buffer:
+                    continue
+
+                batch = self._buffer.copy()
+                self._buffer.clear()
+
+            # we can call the processor outside the lock so we can continue adding items into the next batch without
+            # waiting on the processor to return.
+            try:
+                self.handler(batch)
+            except Exception as e:
+                LOG.error(
+                    "Unhandled exception while processing a batch: %s",
+                    e,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+
+        # this marks that the main control loop is done
+        return
+
+    def close(self):
+        """
+        Triggers a close of the batcher, which will cause one last flush, and then end the main event loop.
+        """
+        with self._flush_lock:
+            if self._closed:
+                return
+            self._closed = True
+            self._flush_lock.notify_all()
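
The new `localstack/utils/batching.py` module lands alongside the removal of `localstack/utils/batch_policy.py` (item 107 in the file list above), which it appears to supersede. Besides the `AsyncBatcher` used by the analytics bus, the synchronous `Batcher` can drive poll loops that flush on either an item count or a time window. A hedged sketch with a hypothetical record source and sink:

```python
import time

from localstack.utils.batching import Batcher


def poll_records() -> list[dict]:
    return [{"ts": time.time()}]  # stand-in for reading from a stream shard


def forward(records: list[dict]) -> None:
    print(f"forwarding {len(records)} records")  # stand-in for the downstream call


batcher: Batcher[dict] = Batcher(max_count=10, max_window=5.0)
for _ in range(5):
    if batcher.add(poll_records()):  # True once max_count or max_window is hit
        forward(batcher.flush())
    # sleep at most until the time-window policy would trigger again (never negative)
    time.sleep(max(batcher.duration_until_next_batch(), 0))
forward(batcher.flush())  # drain whatever is left
```
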
localstack/utils/collections.py CHANGED
@@ -5,7 +5,7 @@ and manipulate python collection (dicts, list, sets).
 
 import logging
 import re
-from collections.abc import Callable, Iterable, Iterator, Mapping, Sized
+from collections.abc import Callable, Generator, Iterable, Iterator, Mapping, Sized
 from typing import (
     Any,
     Optional,
@@ -24,6 +24,9 @@ LOG = logging.getLogger(__name__)
 # default regex to match an item in a comma-separated list string
 DEFAULT_REGEX_LIST_ITEM = r"[\w-]+"
 
+_E = TypeVar("_E")
+"""TypeVar var used internally for container type parameters."""
+
 
 class AccessTrackingDict(dict):
     """
@@ -101,21 +104,18 @@ class HashableJsonDict(ImmutableDict):
         return hash(canonical_json(self._dict))
 
 
-_ListType = TypeVar("_ListType")
-
-
-class PaginatedList(list[_ListType]):
+class PaginatedList(list[_E]):
     """List which can be paginated and filtered. For usage in AWS APIs with paginated responses"""
 
     DEFAULT_PAGE_SIZE = 50
 
     def get_page(
         self,
-        token_generator: Callable[[_ListType], str],
+        token_generator: Callable[[_E], str],
         next_token: str = None,
         page_size: int = None,
-        filter_function: Callable[[_ListType], bool] = None,
-    ) -> tuple[list[_ListType], str | None]:
+        filter_function: Callable[[_E], bool] = None,
+    ) -> tuple[list[_E], str | None]:
         if filter_function is not None:
             result_list = list(filter(filter_function, self))
         else:
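
For reference, a hedged sketch of how `PaginatedList.get_page` is typically driven; the names are illustrative, and the token generator maps an item to the opaque marker that is handed back as `next_token`:

```python
from localstack.utils.collections import PaginatedList

names = PaginatedList(f"function-{i}" for i in range(120))

page, token = names.get_page(token_generator=lambda name: name, page_size=50)
# page holds the first 50 names; token marks where the following call resumes
page_2, token = names.get_page(token_generator=lambda name: name, next_token=token, page_size=50)
```
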
@@ -528,9 +528,6 @@ def is_comma_delimited_list(string: str, item_regex: str | None = None) -> bool:
     return True
 
 
-_E = TypeVar("_E")
-
-
 def optional_list(condition: bool, items: Iterable[_E]) -> list[_E]:
     """
     Given an iterable, either create a list out of the entire iterable (if `condition` is `True`), or return the empty list.
@@ -540,3 +537,18 @@ def optional_list(condition: bool, items: Iterable[_E]) -> list[_E]:
     []
     """
     return list(filter(lambda _: condition, items))
+
+
+def iter_chunks(items: list[_E], chunk_size: int) -> Generator[list[_E], None, None]:
+    """
+    Split a list into smaller chunks of a specified size and iterate over them.
+
+    It is implemented as a generator and yields each chunk as needed, making it memory-efficient for large lists.
+
+    :param items: A list of elements to be divided into chunks.
+    :param chunk_size: The maximum number of elements that a single chunk can contain.
+    :return: A generator that yields chunks (sublists) of the original list. Each chunk contains up to `chunk_size`
+        elements.
+    """
+    for i in range(0, len(items), chunk_size):
+        yield items[i : i + chunk_size]
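
`iter_chunks` is a plain slicing generator; the final chunk may be shorter than `chunk_size`:

```python
from localstack.utils.collections import iter_chunks

assert list(iter_chunks(list(range(7)), chunk_size=3)) == [[0, 1, 2], [3, 4, 5], [6]]
```
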
localstack/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '4.10.1.dev42'
-__version_tuple__ = version_tuple = (4, 10, 1, 'dev42')
+__version__ = version = '4.11.2.dev14'
+__version_tuple__ = version_tuple = (4, 11, 2, 'dev14')
 
 __commit_id__ = commit_id = None