localstack-core 4.10.1.dev7__py3-none-any.whl → 4.11.2.dev14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of localstack-core might be problematic.

Files changed (152)
  1. localstack/aws/api/acm/__init__.py +122 -122
  2. localstack/aws/api/apigateway/__init__.py +604 -561
  3. localstack/aws/api/cloudcontrol/__init__.py +63 -63
  4. localstack/aws/api/cloudformation/__init__.py +1201 -969
  5. localstack/aws/api/cloudwatch/__init__.py +375 -375
  6. localstack/aws/api/config/__init__.py +784 -786
  7. localstack/aws/api/dynamodb/__init__.py +753 -759
  8. localstack/aws/api/dynamodbstreams/__init__.py +74 -74
  9. localstack/aws/api/ec2/__init__.py +10062 -8826
  10. localstack/aws/api/es/__init__.py +453 -453
  11. localstack/aws/api/events/__init__.py +552 -552
  12. localstack/aws/api/firehose/__init__.py +541 -543
  13. localstack/aws/api/iam/__init__.py +866 -572
  14. localstack/aws/api/kinesis/__init__.py +235 -147
  15. localstack/aws/api/kms/__init__.py +341 -336
  16. localstack/aws/api/lambda_/__init__.py +974 -621
  17. localstack/aws/api/logs/__init__.py +988 -675
  18. localstack/aws/api/opensearch/__init__.py +903 -785
  19. localstack/aws/api/pipes/__init__.py +336 -336
  20. localstack/aws/api/redshift/__init__.py +1257 -1166
  21. localstack/aws/api/resource_groups/__init__.py +175 -175
  22. localstack/aws/api/resourcegroupstaggingapi/__init__.py +103 -67
  23. localstack/aws/api/route53/__init__.py +296 -254
  24. localstack/aws/api/route53resolver/__init__.py +397 -396
  25. localstack/aws/api/s3/__init__.py +1412 -1349
  26. localstack/aws/api/s3control/__init__.py +594 -594
  27. localstack/aws/api/scheduler/__init__.py +118 -118
  28. localstack/aws/api/secretsmanager/__init__.py +221 -216
  29. localstack/aws/api/ses/__init__.py +227 -227
  30. localstack/aws/api/sns/__init__.py +115 -115
  31. localstack/aws/api/sqs/__init__.py +100 -100
  32. localstack/aws/api/ssm/__init__.py +1977 -1971
  33. localstack/aws/api/stepfunctions/__init__.py +375 -333
  34. localstack/aws/api/sts/__init__.py +142 -66
  35. localstack/aws/api/support/__init__.py +112 -112
  36. localstack/aws/api/swf/__init__.py +378 -386
  37. localstack/aws/api/transcribe/__init__.py +425 -425
  38. localstack/aws/handlers/logging.py +8 -4
  39. localstack/aws/handlers/service.py +22 -3
  40. localstack/aws/protocol/parser.py +1 -1
  41. localstack/aws/protocol/serializer.py +1 -1
  42. localstack/aws/scaffold.py +15 -17
  43. localstack/cli/localstack.py +6 -1
  44. localstack/deprecations.py +0 -6
  45. localstack/dev/kubernetes/__main__.py +38 -3
  46. localstack/services/acm/provider.py +4 -0
  47. localstack/services/apigateway/helpers.py +5 -9
  48. localstack/services/apigateway/legacy/provider.py +60 -24
  49. localstack/services/apigateway/patches.py +0 -9
  50. localstack/services/cloudformation/engine/template_preparer.py +6 -2
  51. localstack/services/cloudformation/engine/v2/change_set_model_preproc.py +12 -0
  52. localstack/services/cloudformation/provider.py +2 -2
  53. localstack/services/cloudformation/v2/provider.py +6 -6
  54. localstack/services/cloudwatch/provider.py +10 -3
  55. localstack/services/cloudwatch/provider_v2.py +6 -3
  56. localstack/services/configservice/provider.py +5 -1
  57. localstack/services/dynamodb/provider.py +1 -0
  58. localstack/services/dynamodb/v2/provider.py +1 -0
  59. localstack/services/dynamodbstreams/provider.py +6 -0
  60. localstack/services/dynamodbstreams/v2/provider.py +6 -0
  61. localstack/services/ec2/provider.py +6 -0
  62. localstack/services/es/provider.py +6 -0
  63. localstack/services/events/provider.py +4 -0
  64. localstack/services/events/v1/provider.py +9 -0
  65. localstack/services/firehose/provider.py +5 -0
  66. localstack/services/iam/provider.py +4 -0
  67. localstack/services/kinesis/packages.py +1 -1
  68. localstack/services/kms/models.py +44 -24
  69. localstack/services/kms/provider.py +97 -16
  70. localstack/services/lambda_/api_utils.py +40 -21
  71. localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py +1 -1
  72. localstack/services/lambda_/invocation/assignment.py +4 -1
  73. localstack/services/lambda_/invocation/execution_environment.py +21 -2
  74. localstack/services/lambda_/invocation/lambda_models.py +27 -2
  75. localstack/services/lambda_/invocation/lambda_service.py +51 -3
  76. localstack/services/lambda_/invocation/models.py +9 -1
  77. localstack/services/lambda_/invocation/version_manager.py +18 -3
  78. localstack/services/lambda_/packages.py +1 -1
  79. localstack/services/lambda_/provider.py +240 -96
  80. localstack/services/lambda_/resource_providers/aws_lambda_function.py +33 -1
  81. localstack/services/lambda_/runtimes.py +10 -3
  82. localstack/services/logs/provider.py +45 -19
  83. localstack/services/opensearch/provider.py +53 -3
  84. localstack/services/resource_groups/provider.py +5 -1
  85. localstack/services/resourcegroupstaggingapi/provider.py +6 -1
  86. localstack/services/s3/provider.py +29 -16
  87. localstack/services/s3/utils.py +35 -14
  88. localstack/services/s3control/provider.py +101 -2
  89. localstack/services/s3control/validation.py +50 -0
  90. localstack/services/sns/constants.py +3 -1
  91. localstack/services/sns/publisher.py +15 -6
  92. localstack/services/sns/v2/models.py +30 -1
  93. localstack/services/sns/v2/provider.py +794 -31
  94. localstack/services/sns/v2/utils.py +20 -0
  95. localstack/services/sqs/models.py +37 -10
  96. localstack/services/stepfunctions/asl/component/common/path/result_path.py +1 -1
  97. localstack/services/stepfunctions/asl/component/state/state_execution/execute_state.py +0 -1
  98. localstack/services/stepfunctions/asl/component/state/state_execution/state_map/state_map.py +0 -1
  99. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py +8 -8
  100. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/{mock_eval_utils.py → local_mock_eval_utils.py} +13 -9
  101. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py +6 -6
  102. localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py +1 -1
  103. localstack/services/stepfunctions/asl/component/state/state_fail/state_fail.py +4 -0
  104. localstack/services/stepfunctions/asl/component/test_state/state/base_mock.py +118 -0
  105. localstack/services/stepfunctions/asl/component/test_state/state/common.py +82 -0
  106. localstack/services/stepfunctions/asl/component/test_state/state/execution.py +139 -0
  107. localstack/services/stepfunctions/asl/component/test_state/state/map.py +77 -0
  108. localstack/services/stepfunctions/asl/component/test_state/state/task.py +44 -0
  109. localstack/services/stepfunctions/asl/eval/environment.py +30 -22
  110. localstack/services/stepfunctions/asl/eval/states.py +1 -1
  111. localstack/services/stepfunctions/asl/eval/test_state/environment.py +49 -9
  112. localstack/services/stepfunctions/asl/eval/test_state/program_state.py +22 -0
  113. localstack/services/stepfunctions/asl/jsonata/jsonata.py +5 -1
  114. localstack/services/stepfunctions/asl/parse/preprocessor.py +67 -24
  115. localstack/services/stepfunctions/asl/parse/test_state/asl_parser.py +5 -4
  116. localstack/services/stepfunctions/asl/parse/test_state/preprocessor.py +222 -31
  117. localstack/services/stepfunctions/asl/static_analyser/test_state/test_state_analyser.py +170 -22
  118. localstack/services/stepfunctions/backend/execution.py +6 -6
  119. localstack/services/stepfunctions/backend/execution_worker.py +5 -5
  120. localstack/services/stepfunctions/backend/test_state/execution.py +36 -0
  121. localstack/services/stepfunctions/backend/test_state/execution_worker.py +33 -1
  122. localstack/services/stepfunctions/backend/test_state/test_state_mock.py +127 -0
  123. localstack/services/stepfunctions/local_mocking/__init__.py +9 -0
  124. localstack/services/stepfunctions/{mocking → local_mocking}/mock_config.py +24 -17
  125. localstack/services/stepfunctions/provider.py +78 -27
  126. localstack/services/stepfunctions/test_state/mock_config.py +47 -0
  127. localstack/testing/pytest/fixtures.py +28 -0
  128. localstack/testing/snapshots/transformer_utility.py +7 -0
  129. localstack/testing/testselection/matching.py +0 -1
  130. localstack/utils/analytics/publisher.py +37 -155
  131. localstack/utils/analytics/service_request_aggregator.py +6 -4
  132. localstack/utils/aws/arns.py +7 -0
  133. localstack/utils/aws/client_types.py +0 -8
  134. localstack/utils/batching.py +258 -0
  135. localstack/utils/catalog/catalog_loader.py +111 -3
  136. localstack/utils/collections.py +23 -11
  137. localstack/utils/crypto.py +109 -0
  138. localstack/version.py +2 -2
  139. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/METADATA +7 -6
  140. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/RECORD +149 -141
  141. localstack_core-4.11.2.dev14.dist-info/plux.json +1 -0
  142. localstack/services/stepfunctions/mocking/__init__.py +0 -0
  143. localstack/utils/batch_policy.py +0 -124
  144. localstack_core-4.10.1.dev7.dist-info/plux.json +0 -1
  145. /localstack/services/stepfunctions/{mocking → local_mocking}/mock_config_file.py +0 -0
  146. {localstack_core-4.10.1.dev7.data → localstack_core-4.11.2.dev14.data}/scripts/localstack +0 -0
  147. {localstack_core-4.10.1.dev7.data → localstack_core-4.11.2.dev14.data}/scripts/localstack-supervisor +0 -0
  148. {localstack_core-4.10.1.dev7.data → localstack_core-4.11.2.dev14.data}/scripts/localstack.bat +0 -0
  149. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/WHEEL +0 -0
  150. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/entry_points.txt +0 -0
  151. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/licenses/LICENSE.txt +0 -0
  152. {localstack_core-4.10.1.dev7.dist-info → localstack_core-4.11.2.dev14.dist-info}/top_level.txt +0 -0
@@ -1426,6 +1426,34 @@ def create_lambda_function(aws_client, wait_until_lambda_ready, lambda_su_role):
             LOG.debug("Unable to delete log group %s in cleanup", log_group_name)
 
 
+@pytest.fixture
+def lambda_is_function_deleted(aws_client):
+    """Example usage:
+    wait_until(lambda_is_function_deleted(function_name))
+    wait_until(lambda_is_function_deleted(function_name, Qualifier="my-alias"))
+
+    function_name can be a function name, function ARN, or partial function ARN.
+    """
+    return _lambda_is_function_deleted(aws_client.lambda_)
+
+
+def _lambda_is_function_deleted(lambda_client):
+    def _is_function_deleted(
+        function_name: str,
+        **kwargs,
+    ) -> Callable[[], bool]:
+        def _inner() -> bool:
+            try:
+                lambda_client.get_function(FunctionName=function_name, **kwargs)
+                return False
+            except lambda_client.exceptions.ResourceNotFoundException:
+                return True
+
+        return _inner
+
+    return _is_function_deleted
+
+
 @pytest.fixture
 def create_echo_http_server(aws_client, create_lambda_function):
     from localstack.aws.api.lambda_ import Runtime
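
For context: the new fixture composes with LocalStack's `wait_until` helper. A minimal usage sketch (the test body and function name are illustrative and not part of this diff; `wait_until` is assumed to come from `localstack.utils.sync`):

    from localstack.utils.sync import wait_until

    def test_function_is_gone_after_delete(aws_client, lambda_is_function_deleted):
        function_name = "my-test-function"  # hypothetical; assumes the function was created earlier

        aws_client.lambda_.delete_function(FunctionName=function_name)

        # polls get_function() until it raises ResourceNotFoundException
        assert wait_until(lambda_is_function_deleted(function_name))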
@@ -566,6 +566,8 @@ class TransformerUtility:
         """
         return [
             TransformerUtility.key_value("KeyId"),
+            TransformerUtility.key_value("KeyMaterialId"),
+            TransformerUtility.key_value("CurrentKeyMaterialId"),
             TransformerUtility.jsonpath(
                 jsonpath="$..Signature",
                 value_replacement="<signature>",
@@ -789,6 +791,11 @@ class TransformerUtility:
                 "x-amzn-RequestId",
                 replace_reference=False,
             ),
+            JsonpathTransformer(
+                "$..x-amzn-requestid",
+                "x-amzn-requestid",
+                replace_reference=False,
+            ),
             KeyValueBasedTransformer(_transform_stepfunctions_cause_details, "json-input"),
         ]
 
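
These transformer lists are registered in snapshot tests to replace volatile values with stable placeholders. A hedged sketch of how the new KMS entries would be exercised (the hunk does not show the enclosing method's name; `snapshot.transform.kms_api()` is an assumption based on TransformerUtility's usual per-service factory methods):

    def test_create_key(aws_client, snapshot):
        # assumption: the first hunk above extends the KMS transformer list
        snapshot.add_transformer(snapshot.transform.kms_api())

        key = aws_client.kms.create_key()
        # KeyMaterialId / CurrentKeyMaterialId values are now replaced by stable placeholders
        snapshot.match("create-key", key)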
@@ -181,7 +181,6 @@ MATCHING_RULES: list[MatchingRule] = [
     ).passthrough(),  # changes in a test file should always at least test that file
     # CI
     Matchers.glob(".github/**").full_suite(),
-    Matchers.glob(".circleci/**").full_suite(),
     # dependencies / project setup
     Matchers.glob("requirements*.txt").full_suite(),
     Matchers.glob("setup.cfg").full_suite(),
@@ -2,11 +2,10 @@ import abc
 import atexit
 import logging
 import threading
-import time
-from queue import Full, Queue
 
 from localstack import config
-from localstack.utils.threads import start_thread, start_worker_thread
+from localstack.utils.batching import AsyncBatcher
+from localstack.utils.threads import FuncThread, start_thread, start_worker_thread
 
 from .client import AnalyticsClient
 from .events import Event, EventHandler
@@ -51,139 +50,36 @@ class Printer(Publisher):
         print(event.asdict())
 
 
-class PublisherBuffer(EventHandler):
-    """
-    A PublisherBuffer is an EventHandler that collects events into a buffer until a flush condition is
-    met, and then flushes the buffer to a Publisher. The condition is either a given buffer size or
-    a time interval, whatever occurs first. The buffer is also flushed when the recorder is stopped
-    via `close()`. Internally it uses a simple event-loop mechanism to multiplex commands on a
-    single thread.
-    """
-
-    flush_size: int
-    flush_interval: float
-
-    _cmd_flush = "__FLUSH__"
-    _cmd_stop = "__STOP__"
-
-    # FIXME: figure out good default values
-    def __init__(
-        self, publisher: Publisher, flush_size: int = 20, flush_interval: float = 10, maxsize=0
-    ):
-        self._publisher = publisher
-        self._queue = Queue(maxsize=maxsize)
-        self._command_queue = Queue()
-
-        self.flush_size = flush_size
-        self.flush_interval = flush_interval
-
-        self._last_flush = time.time()
-        self._stopping = threading.Event()
-        self._stopped = threading.Event()
-
-    def handle(self, event: Event):
-        self._queue.put_nowait(event)
-        self.checked_flush()
-
-    def close(self):
-        if self._stopping.is_set():
-            return
-
-        self._stopping.set()
-        self._command_queue.put(self._cmd_stop)
+class GlobalAnalyticsBus(EventHandler):
+    _batcher: AsyncBatcher[Event]
+    _client: AnalyticsClient
+    _worker_thread: FuncThread | None
 
-    def close_sync(self, timeout: float | None = None):
-        self.close()
-        return self._stopped.wait(timeout)
-
-    def flush(self):
-        self._command_queue.put(self._cmd_flush)
-        self._last_flush = time.time()
-
-    def checked_flush(self):
-        """
-        Runs flush only if a flush condition is met.
-        """
-        if config.DEBUG_ANALYTICS:
-            LOG.debug(
-                "analytics queue size: %d, command queue size: %d, time since last flush: %.1fs",
-                self._queue.qsize(),
-                self._command_queue.qsize(),
-                time.time() - self._last_flush,
-            )
-
-        if self._queue.qsize() >= self.flush_size:
-            self.flush()
-            return
-        if time.time() - self._last_flush >= self.flush_interval:
-            self.flush()
-            return
-
-    def _run_flush_schedule(self, *_):
-        while True:
-            if self._stopping.wait(self.flush_interval):
-                return
-            self.checked_flush()
-
-    def run(self, *_):
-        flush_scheduler = start_thread(self._run_flush_schedule, name="analytics-publishbuffer")
-
-        try:
-            while True:
-                command = self._command_queue.get()
-
-                if command is self._cmd_flush or command is self._cmd_stop:
-                    try:
-                        self._do_flush()
-                    except Exception:
-                        if config.DEBUG_ANALYTICS:
-                            LOG.exception("error while flushing events")
-
-                if command is self._cmd_stop:
-                    return
-        finally:
-            self._stopped.set()
-            flush_scheduler.stop()
-            self._publisher.close()
-            if config.DEBUG_ANALYTICS:
-                LOG.debug("Exit analytics publisher")
-
-    def _do_flush(self):
-        queue = self._queue
-        events = []
-
-        for _ in range(queue.qsize()):
-            event = queue.get_nowait()
-            events.append(event)
-
-        if config.DEBUG_ANALYTICS:
-            LOG.debug("collected %d events to publish", len(events))
-
-        self._publisher.publish(events)
-
-
-class GlobalAnalyticsBus(PublisherBuffer):
-    def __init__(
-        self, client: AnalyticsClient = None, flush_size=20, flush_interval=10, max_buffer_size=1000
-    ) -> None:
+    def __init__(self, client: AnalyticsClient = None, flush_size=20, flush_interval=10) -> None:
         self._client = client or AnalyticsClient()
         self._publisher = AnalyticsClientPublisher(self._client)
-
-        super().__init__(
-            self._publisher,
-            flush_size=flush_size,
-            flush_interval=flush_interval,
-            maxsize=max_buffer_size,
+        self._batcher = AsyncBatcher(
+            self._handle_batch,
+            max_batch_size=flush_size,
+            max_flush_interval=flush_interval,
         )
 
         self._started = False
-        self._startup_complete = False
         self._startup_mutex = threading.Lock()
-        self._buffer_thread = None
+        self._worker_thread = None
 
         self.force_tracking = False  # allow class to ignore all other tracking config
         self.tracking_disabled = False  # disables tracking if global config would otherwise track
 
+    def _handle_batch(self, batch: list[Event]):
+        """Method that satisfies the BatchHandler[Event] protocol and is passed to AsyncBatcher."""
+        try:
+            self._publisher.publish(batch)
+        except Exception:
+            # currently we're just dropping events if something goes wrong during publishing
+            if config.DEBUG_ANALYTICS:
+                LOG.exception("error while publishing analytics events")
+
     @property
     def is_tracking_disabled(self):
         if self.force_tracking:
@@ -200,44 +96,20 @@ class GlobalAnalyticsBus(PublisherBuffer):
 
         return False
 
-    def _do_flush(self):
-        if self.tracking_disabled:
-            # flushing although tracking has been disabled most likely means that _do_start_retry
-            # has failed, tracking is now disabled, and the system tries to flush the queued
-            # events. we use this opportunity to shut down the tracker and clear the queue, since
-            # no tracking should happen from this point on.
-            if config.DEBUG_ANALYTICS:
-                LOG.debug("attempting to flush while tracking is disabled, shutting down tracker")
-            self.close_sync(timeout=10)
-            self._queue.queue.clear()
-            return
-
-        super()._do_flush()
-
-    def flush(self):
-        if not self._startup_complete:
-            # don't flush until _do_start_retry has completed (command queue would fill up)
-            return
-
-        super().flush()
-
     def handle(self, event: Event):
         """
         Publish an event to the global analytics event publisher.
         """
         if self.is_tracking_disabled:
             if config.DEBUG_ANALYTICS:
-                LOG.debug("skipping event %s", event)
+                LOG.debug("tracking disabled, skipping event %s", event)
             return
 
         if not self._started:
+            # we make sure the batching worker is started
             self._start()
 
-        try:
-            super().handle(event)
-        except Full:
-            if config.DEBUG_ANALYTICS:
-                LOG.warning("event queue is full, dropping event %s", event)
+        self._batcher.add(event)
 
     def _start(self):
         with self._startup_mutex:
@@ -267,12 +139,22 @@ class GlobalAnalyticsBus(PublisherBuffer):
             if config.DEBUG_ANALYTICS:
                 LOG.exception("error while registering session. disabling tracking")
             return
-        finally:
-            self._startup_complete = True
 
-        start_thread(self.run, name="global-analytics-bus")
+        self._worker_thread = start_thread(self._run, name="global-analytics-bus")
 
+        # given the "Global" nature of this class, we register a global atexit hook to make sure all events are flushed
+        # when localstack shuts down.
         def _do_close():
             self.close_sync(timeout=2)
 
         atexit.register(_do_close)
+
+    def _run(self, *_):
+        # main control loop, simply runs the batcher
+        self._batcher.run()
+
+    def close_sync(self, timeout=None):
+        self._batcher.close()
+
+        if self._worker_thread:
+            self._worker_thread.join(timeout=timeout)
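
The shutdown ordering is the subtle part of this refactoring: `close()` wakes the batcher's blocking `run()` loop for one final flush, and only then is the worker thread joined. A condensed, self-contained sketch of that lifecycle using only APIs visible in this diff (the string payloads and handler are illustrative):

    from localstack.utils.batching import AsyncBatcher
    from localstack.utils.threads import start_thread

    def handle_batch(batch: list[str]) -> None:
        print(f"flushing {len(batch)} events")  # stand-in for the publisher call

    batcher: AsyncBatcher[str] = AsyncBatcher(handle_batch, max_batch_size=20, max_flush_interval=10)

    # start_thread invokes its target with an extra argument (hence the *_ signature
    # of _run above) and returns a FuncThread that can be joined with a timeout
    worker = start_thread(lambda *_: batcher.run(), name="global-analytics-bus")

    batcher.add("event-1")  # buffered until a size- or interval-based flush

    batcher.close()         # wakes run(), flushes the last batch, ends the loop
    worker.join(timeout=2)  # mirrors close_sync()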
@@ -34,7 +34,7 @@ class ServiceRequestAggregator:
         self._flush_interval = flush_interval
         self._flush_scheduler = Scheduler()
         self._mutex = threading.RLock()
-        self._period_start_time = datetime.datetime.utcnow()
+        self._period_start_time = datetime.datetime.now(datetime.UTC)
         self._is_started = False
         self._is_shutdown = False
 
@@ -101,12 +101,14 @@ class ServiceRequestAggregator:
             self._emit_payload(analytics_payload)
             self.counter.clear()
         finally:
-            self._period_start_time = datetime.datetime.utcnow()
+            self._period_start_time = datetime.datetime.now(datetime.UTC)
 
     def _create_analytics_payload(self):
         return {
-            "period_start_time": self._period_start_time.isoformat() + "Z",
-            "period_end_time": datetime.datetime.utcnow().isoformat() + "Z",
+            "period_start_time": self._period_start_time.isoformat().replace("+00:00", "Z"),
+            "period_end_time": datetime.datetime.now(datetime.UTC)
+            .isoformat()
+            .replace("+00:00", "Z"),
             "api_calls": self._aggregate_api_calls(self.counter),
         }
 
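
The timestamp change is the standard migration off the naive `utcnow()` (deprecated since Python 3.12): a naive datetime renders no UTC offset in `isoformat()`, hence the old manual `+ "Z"`, while an aware `now(datetime.UTC)` renders "+00:00", which the new code rewrites to the "Z" suffix. Standard-library behavior, for illustration:

    import datetime

    naive = datetime.datetime.utcnow()
    print(naive.isoformat())                         # e.g. 2024-01-01T12:00:00.000000 (no offset)

    aware = datetime.datetime.now(datetime.UTC)
    print(aware.isoformat())                         # e.g. 2024-01-01T12:00:00.000000+00:00
    print(aware.isoformat().replace("+00:00", "Z"))  # e.g. 2024-01-01T12:00:00.000000Z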
@@ -284,6 +284,13 @@ def lambda_event_source_mapping_arn(uuid: str, account_id: str, region_name: str
     return _resource_arn(uuid, pattern, account_id=account_id, region_name=region_name)
 
 
+def capacity_provider_arn(capacity_provider_name: str, account_id: str, region_name: str) -> str:
+    pattern = "arn:%s:lambda:%s:%s:capacity-provider:%s"
+    return _resource_arn(
+        capacity_provider_name, pattern, account_id=account_id, region_name=region_name
+    )
+
+
 def lambda_function_or_layer_arn(
     type: str,
     entity_name: str,
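
Assuming `_resource_arn` substitutes the pattern placeholders as (partition, region, account, resource name), consistent with the sibling helpers in this module, the new helper yields ARNs of the following shape (values are illustrative):

    from localstack.utils.aws.arns import capacity_provider_arn

    arn = capacity_provider_arn("my-provider", account_id="000000000000", region_name="us-east-1")
    print(arn)  # arn:aws:lambda:us-east-1:000000000000:capacity-provider:my-provider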
@@ -65,7 +65,6 @@ if TYPE_CHECKING:
     from mypy_boto3_iotwireless import IoTWirelessClient
     from mypy_boto3_kafka import KafkaClient
     from mypy_boto3_kinesis import KinesisClient
-    from mypy_boto3_kinesisanalytics import KinesisAnalyticsClient
     from mypy_boto3_kinesisanalyticsv2 import KinesisAnalyticsV2Client
     from mypy_boto3_kms import KMSClient
     from mypy_boto3_lakeformation import LakeFormationClient
@@ -82,8 +81,6 @@ if TYPE_CHECKING:
     from mypy_boto3_pi import PIClient
     from mypy_boto3_pinpoint import PinpointClient
     from mypy_boto3_pipes import EventBridgePipesClient
-    from mypy_boto3_qldb import QLDBClient
-    from mypy_boto3_qldb_session import QLDBSessionClient
     from mypy_boto3_rds import RDSClient
     from mypy_boto3_rds_data import RDSDataServiceClient
     from mypy_boto3_redshift import RedshiftClient
@@ -191,9 +188,6 @@ class TypedServiceClientFactory(abc.ABC):
     iotwireless: Union["IoTWirelessClient", "MetadataRequestInjector[IoTWirelessClient]"]
     kafka: Union["KafkaClient", "MetadataRequestInjector[KafkaClient]"]
     kinesis: Union["KinesisClient", "MetadataRequestInjector[KinesisClient]"]
-    kinesisanalytics: Union[
-        "KinesisAnalyticsClient", "MetadataRequestInjector[KinesisAnalyticsClient]"
-    ]
     kinesisanalyticsv2: Union[
         "KinesisAnalyticsV2Client", "MetadataRequestInjector[KinesisAnalyticsV2Client]"
     ]
@@ -214,8 +208,6 @@ class TypedServiceClientFactory(abc.ABC):
     pi: Union["PIClient", "MetadataRequestInjector[PIClient]"]
     pinpoint: Union["PinpointClient", "MetadataRequestInjector[PinpointClient]"]
     pipes: Union["EventBridgePipesClient", "MetadataRequestInjector[EventBridgePipesClient]"]
-    qldb: Union["QLDBClient", "MetadataRequestInjector[QLDBClient]"]
-    qldb_session: Union["QLDBSessionClient", "MetadataRequestInjector[QLDBSessionClient]"]
    rds: Union["RDSClient", "MetadataRequestInjector[RDSClient]"]
    rds_data: Union["RDSDataServiceClient", "MetadataRequestInjector[RDSDataServiceClient]"]
    redshift: Union["RedshiftClient", "MetadataRequestInjector[RedshiftClient]"]
@@ -0,0 +1,258 @@
+import copy
+import logging
+import threading
+import time
+from typing import Generic, Protocol, TypeVar, overload
+
+LOG = logging.getLogger(__name__)
+
+T = TypeVar("T")
+
+# alias to signify whether a batch policy has been triggered
+BatchPolicyTriggered = bool
+
+
+# TODO: Add batching on bytes as well.
+class Batcher(Generic[T]):
+    """
+    A utility for collecting items into batches and flushing them when one or more batch policy conditions are met.
+
+    The batch policy can be created to trigger on:
+    - max_count: Maximum number of items added
+    - max_window: Maximum time window (in seconds)
+
+    If no limits are specified, the batcher is always in triggered state.
+
+    Example usage:
+
+        import time
+
+        # Triggers when 2 (or more) items are added
+        batcher = Batcher(max_count=2)
+        assert batcher.add(["item1", "item2", "item3"])
+        assert batcher.flush() == ["item1", "item2", "item3"]
+
+        # Triggers partially when 2 (or more) items are added
+        batcher = Batcher(max_count=2)
+        assert batcher.add(["item1", "item2", "item3"])
+        assert batcher.flush(partial=True) == ["item1", "item2"]
+        assert batcher.add("item4")
+        assert batcher.flush(partial=True) == ["item3", "item4"]
+
+        # Trigger 2 seconds after the first add
+        batcher = Batcher(max_window=2.0)
+        assert not batcher.add(["item1", "item2", "item3"])
+        time.sleep(2.1)
+        assert not batcher.add(["item4"])
+        assert batcher.flush() == ["item1", "item2", "item3", "item4"]
+    """
+
+    max_count: int | None
+    """
+    Maximum number of items, must be None or positive.
+    """
+
+    max_window: float | None
+    """
+    Maximum time window in seconds, must be None or positive.
+    """
+
+    _triggered: bool
+    _last_batch_time: float
+    _batch: list[T]
+
+    def __init__(self, max_count: int | None = None, max_window: float | None = None):
+        """
+        Initialize a new Batcher instance.
+
+        :param max_count: Maximum number of items, must be None or positive.
+        :param max_window: Maximum time window in seconds, must be None or positive.
+        """
+        self.max_count = max_count
+        self.max_window = max_window
+
+        self._triggered = False
+        self._last_batch_time = time.monotonic()
+        self._batch = []
+
+    @property
+    def period(self) -> float:
+        return time.monotonic() - self._last_batch_time
+
+    def _check_batch_policy(self) -> bool:
+        """Check if any batch policy conditions are met"""
+        if self.max_count is not None and len(self._batch) >= self.max_count:
+            self._triggered = True
+        elif self.max_window is not None and self.period >= self.max_window:
+            self._triggered = True
+        elif not self.max_count and not self.max_window:
+            # always return true
+            self._triggered = True
+
+        return self._triggered
+
+    @overload
+    def add(self, item: T, *, deep_copy: bool = False) -> BatchPolicyTriggered: ...
+
+    @overload
+    def add(self, items: list[T], *, deep_copy: bool = False) -> BatchPolicyTriggered: ...
+
+    def add(self, item_or_items: T | list[T], *, deep_copy: bool = False) -> BatchPolicyTriggered:
+        """
+        Add an item or list of items to the collected batch.
+
+        Returns:
+            BatchPolicyTriggered: True if the batch policy was triggered during addition, False otherwise.
+        """
+        if deep_copy:
+            item_or_items = copy.deepcopy(item_or_items)
+
+        if isinstance(item_or_items, list):
+            self._batch.extend(item_or_items)
+        else:
+            self._batch.append(item_or_items)
+
+        # Check if the last addition triggered the batch policy
+        return self.is_triggered()
+
+    def flush(self, *, partial=False) -> list[T]:
+        result = []
+        if not partial or not self.max_count:
+            result = self._batch.copy()
+            self._batch.clear()
+        else:
+            batch_size = min(self.max_count, len(self._batch))
+            result = self._batch[:batch_size].copy()
+            self._batch = self._batch[batch_size:]
+
+        self._last_batch_time = time.monotonic()
+        self._triggered = False
+        self._check_batch_policy()
+
+        return result
+
+    def duration_until_next_batch(self) -> float:
+        if not self.max_window:
+            return -1
+        return max(self.max_window - self.period, -1)
+
+    def get_current_size(self) -> int:
+        return len(self._batch)
+
+    def is_triggered(self):
+        return self._triggered or self._check_batch_policy()
+
+
+class BatchHandler(Protocol[T]):
+    """
+    A BatchHandler is a callable that processes a list of items handed down by the AsyncBatcher.
+    """
+
+    def __call__(self, batch: list[T]) -> None: ...
+
+
+class AsyncBatcher(Generic[T]):
+    """
+    Class for managing asynchronous batching of items.
+
+    This class allows for efficient buffering and processing of items in batches by
+    periodically flushing the buffer to a given handler, or by automatically flushing
+    when the maximum batch size is reached. It is designed to be used in asynchronous
+    scenarios where the caller does not execute the flushing IO call itself, as it would with ``Batcher``.
+
+    :ivar max_flush_interval: Maximum time interval in seconds between
+        automatic flushes, regardless of the batch size.
+    :ivar max_batch_size: Maximum number of items in a batch. When reached,
+        the batch is flushed automatically.
+    :ivar handler: Callable handler that processes each flushed batch. The handler must
+        be provided during initialization and must accept a list of items as input.
+    """
+
+    max_flush_interval: float
+    max_batch_size: int
+    handler: BatchHandler[T]
+
+    _buffer: list[T]
+    _flush_lock: threading.Condition
+    _closed: bool
+
+    def __init__(
+        self,
+        handler: BatchHandler[T],
+        max_flush_interval: float = 10,
+        max_batch_size: int = 20,
+    ):
+        self.handler = handler
+        self.max_flush_interval = max_flush_interval
+        self.max_batch_size = max_batch_size
+
+        self._buffer = []
+        self._flush_lock = threading.Condition()
+        self._closed = False
+
+    def add(self, item: T):
+        """
+        Adds an item to the buffer.
+
+        :param item: the item to add
+        """
+        with self._flush_lock:
+            if self._closed:
+                raise ValueError("Batcher is stopped, can no longer add items")
+
+            self._buffer.append(item)
+
+            if len(self._buffer) >= self.max_batch_size:
+                self._flush_lock.notify_all()
+
+    @property
+    def current_batch_size(self) -> int:
+        """
+        Returns the current number of items in the buffer waiting to be flushed.
+        """
+        return len(self._buffer)
+
+    def run(self):
+        """
+        Runs the event loop that flushes the buffer to the handler based on the configured rules, and blocks until
+        ``close()`` is called. This method is meant to be run in a separate thread.
+        """
+        while not self._closed:
+            with self._flush_lock:
+                # wait returns once either the condition is notified (in which case wait returns True, indicating that
+                # something has triggered a flush manually), or the timeout expires (in which case wait returns False)
+                self._flush_lock.wait(self.max_flush_interval)
+
+                # if _flush_lock was notified because close() was called, we should still make sure we flush the
+                # last batch
+
+                # perform the flush, if there are any items in the buffer
+                if not self._buffer:
+                    continue
+
+                batch = self._buffer.copy()
+                self._buffer.clear()
+
+            # we can call the processor outside the lock so we can continue adding items into the next batch without
+            # waiting on the processor to return.
+            try:
+                self.handler(batch)
+            except Exception as e:
+                LOG.error(
+                    "Unhandled exception while processing a batch: %s",
+                    e,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+
+        # this marks that the main control loop is done
+        return
+
+    def close(self):
+        """
+        Triggers a close of the batcher, which will cause one last flush, and then end the main event loop.
+        """
+        with self._flush_lock:
+            if self._closed:
+                return
+            self._closed = True
+            self._flush_lock.notify_all()
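
The docstring examples above cover `Batcher`'s count and window policies, but not `duration_until_next_batch()`. A small sketch of the polling pattern that method supports (the source function and loop bound are illustrative):

    import time

    from localstack.utils.batching import Batcher

    def poll_source() -> list[str]:
        return ["record"]  # stand-in for reading from an upstream source

    batcher: Batcher[str] = Batcher(max_count=10, max_window=2.0)

    deadline = time.monotonic() + 6  # bounded loop for the demo
    while time.monotonic() < deadline:
        if batcher.add(poll_source()):
            # a policy (count or window) triggered; drain up to max_count items
            print("flushing:", batcher.flush(partial=True))
        # sleep at most until the time-window policy would trigger again
        time.sleep(max(batcher.duration_until_next_batch(), 0.1))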