sentry-arroyo 2.32.2__tar.gz → 2.32.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sentry_arroyo-2.32.2/sentry_arroyo.egg-info → sentry_arroyo-2.32.4}/PKG-INFO +1 -1
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/kafka/configuration.py +3 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/kafka/consumer.py +40 -9
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/metricDefs.json +1 -1
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/metric_defs.py +3 -1
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4/sentry_arroyo.egg-info}/PKG-INFO +1 -1
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/sentry_arroyo.egg-info/SOURCES.txt +1 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/setup.py +1 -1
- sentry_arroyo-2.32.4/tests/backends/test_kafka_commit_callback.py +69 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/LICENSE +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/MANIFEST.in +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/README.md +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/abstract.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/kafka/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/kafka/commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/local/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/local/backend.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/local/storages/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/local/storages/abstract.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/backends/local/storages/memory.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/dlq.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/errors.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/processor.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/abstract.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/batching.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/buffer.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/filter.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/guard.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/healthcheck.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/noop.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/produce.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/reduce.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/run_task.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/run_task_in_threads.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/run_task_with_multiprocessing.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/unfold.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/py.typed +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/types.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/clock.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/codecs.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/concurrent.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/logging.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/metrics.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/profiler.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/utils/retries.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/examples/transform_and_produce/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/examples/transform_and_produce/batched.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/examples/transform_and_produce/script.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/examples/transform_and_produce/simple.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/requirements.txt +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/sentry_arroyo.egg-info/dependency_links.txt +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/sentry_arroyo.egg-info/not-zip-safe +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/sentry_arroyo.egg-info/requires.txt +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/sentry_arroyo.egg-info/top_level.txt +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/setup.cfg +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/mixins.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/test_commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/test_confluent_producer.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/test_kafka.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/test_kafka_producer.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/backends/test_local.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_all.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_batching.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_buffer.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_filter.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_guard.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_noop.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_produce.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_reduce.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_run_task.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_run_task_in_threads.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_run_task_with_multiprocessing.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/strategies/test_unfold.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/processing/test_processor.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/test_commit.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/test_dlq.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/test_kip848_e2e.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/test_types.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/utils/__init__.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/utils/test_concurrent.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/utils/test_metrics.py +0 -0
- {sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/tests/utils/test_retries.py +0 -0
|
@@ -238,6 +238,7 @@ def build_kafka_consumer_configuration(
|
|
|
238
238
|
override_params: Optional[Mapping[str, Any]] = None,
|
|
239
239
|
strict_offset_reset: Optional[bool] = None,
|
|
240
240
|
enable_auto_commit: bool = False,
|
|
241
|
+
retry_handle_destroyed: bool = False,
|
|
241
242
|
) -> KafkaBrokerConfig:
|
|
242
243
|
|
|
243
244
|
if auto_offset_reset is None:
|
|
@@ -263,6 +264,8 @@ def build_kafka_consumer_configuration(
|
|
|
263
264
|
"arroyo.strict.offset.reset": strict_offset_reset,
|
|
264
265
|
# this is an arroyo specific flag to enable auto-commit mode
|
|
265
266
|
"arroyo.enable.auto.commit": enable_auto_commit,
|
|
267
|
+
# arroyo specific flag to enable retries when hitting `KafkaError._DESTROY` while committing
|
|
268
|
+
"arroyo.retry.broker.handle.destroyed": retry_handle_destroyed,
|
|
266
269
|
# overridden to reduce memory usage when there's a large backlog
|
|
267
270
|
"queued.max.messages.kbytes": queued_max_messages_kbytes,
|
|
268
271
|
"queued.min.messages": queued_min_messages,
|
|
@@ -157,21 +157,32 @@ class KafkaConsumer(Consumer[KafkaPayload]):
|
|
|
157
157
|
self,
|
|
158
158
|
configuration: Mapping[str, Any],
|
|
159
159
|
) -> None:
|
|
160
|
+
configuration = dict(configuration)
|
|
161
|
+
|
|
162
|
+
# Feature flag to enable retrying on `Broker handle destroyed` errors
|
|
163
|
+
# which can occur if we attempt to commit during a rebalance when
|
|
164
|
+
# the consumer group coordinator changed
|
|
165
|
+
self.__retry_handle_destroyed = as_kafka_configuration_bool(
|
|
166
|
+
configuration.pop("arroyo.retry.broker.handle.destroyed", False)
|
|
167
|
+
)
|
|
168
|
+
|
|
169
|
+
retryable_errors: Tuple[int, ...] = (
|
|
170
|
+
KafkaError.REQUEST_TIMED_OUT,
|
|
171
|
+
KafkaError.NOT_COORDINATOR,
|
|
172
|
+
KafkaError._WAIT_COORD,
|
|
173
|
+
KafkaError.STALE_MEMBER_EPOCH, # kip-848
|
|
174
|
+
KafkaError.COORDINATOR_LOAD_IN_PROGRESS,
|
|
175
|
+
)
|
|
176
|
+
if self.__retry_handle_destroyed:
|
|
177
|
+
retryable_errors += (KafkaError._DESTROY,)
|
|
178
|
+
|
|
160
179
|
commit_retry_policy = BasicRetryPolicy(
|
|
161
180
|
3,
|
|
162
181
|
1,
|
|
163
182
|
lambda e: isinstance(e, KafkaException)
|
|
164
|
-
and e.args[0].code()
|
|
165
|
-
in (
|
|
166
|
-
KafkaError.REQUEST_TIMED_OUT,
|
|
167
|
-
KafkaError.NOT_COORDINATOR,
|
|
168
|
-
KafkaError._WAIT_COORD,
|
|
169
|
-
KafkaError.STALE_MEMBER_EPOCH, # kip-848
|
|
170
|
-
KafkaError.COORDINATOR_LOAD_IN_PROGRESS,
|
|
171
|
-
),
|
|
183
|
+
and e.args[0].code() in retryable_errors,
|
|
172
184
|
)
|
|
173
185
|
|
|
174
|
-
configuration = dict(configuration)
|
|
175
186
|
self.__is_cooperative_sticky = (
|
|
176
187
|
configuration.get("partition.assignment.strategy") == "cooperative-sticky"
|
|
177
188
|
)
|
|
@@ -253,6 +264,9 @@ class KafkaConsumer(Consumer[KafkaPayload]):
|
|
|
253
264
|
|
|
254
265
|
self.__state = KafkaConsumerState.CONSUMING
|
|
255
266
|
|
|
267
|
+
self.__metrics = get_metrics()
|
|
268
|
+
self.__group_id = configuration.get("group.id")
|
|
269
|
+
|
|
256
270
|
def __on_commit_callback(
|
|
257
271
|
self,
|
|
258
272
|
error: Optional[KafkaException],
|
|
@@ -265,6 +279,23 @@ class KafkaConsumer(Consumer[KafkaPayload]):
|
|
|
265
279
|
error,
|
|
266
280
|
partition_info,
|
|
267
281
|
)
|
|
282
|
+
tags = {"status": "error"}
|
|
283
|
+
if self.__group_id:
|
|
284
|
+
tags["group_id"] = self.__group_id
|
|
285
|
+
self.__metrics.increment(
|
|
286
|
+
name="arroyo.consumer.commit_status",
|
|
287
|
+
value=1,
|
|
288
|
+
tags=tags,
|
|
289
|
+
)
|
|
290
|
+
else:
|
|
291
|
+
tags = {"status": "success"}
|
|
292
|
+
if self.__group_id:
|
|
293
|
+
tags["group_id"] = self.__group_id
|
|
294
|
+
self.__metrics.increment(
|
|
295
|
+
name="arroyo.consumer.commit_status",
|
|
296
|
+
value=1,
|
|
297
|
+
tags=tags,
|
|
298
|
+
)
|
|
268
299
|
|
|
269
300
|
def __resolve_partition_offset_earliest(
|
|
270
301
|
self, partition: ConfluentTopicPartition
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"arroyo.strategies.run_task_with_multiprocessing.batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time", "type": "Time", "description": "How long it took to submit a batch to multiprocessing"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch after the message transformation"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch after the message transformation"}, "arroyo.consumer.run.count": {"name": "arroyo.consumer.run.count", "type": "Counter", "description": "Number of times the consumer is spinning"}, "arroyo.consumer.invalid_message.count": {"name": "arroyo.consumer.invalid_message.count", "type": "Counter", "description": "Number of times the consumer encountered an invalid message."}, "arroyo.strategies.reduce.batch_time": {"name": "arroyo.strategies.reduce.batch_time", "type": "Time", "description": "How long it took the Reduce step to fill up a batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure", "type": "Counter", "description": "Incremented when a strategy after multiprocessing applies\nbackpressure to 
multiprocessing. May be a reason why CPU cannot be\nsaturated."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot fill the input batch\nbecause not enough memory was allocated. This results in batches smaller\nthan configured. Increase `input_block_size` to fix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot pull results in batches\nequal to the input batch size, because not enough memory was allocated.\nThis can be devastating for throughput. Increase `output_block_size` to\nfix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat input\nbuffer overflow. This behavior can be disabled by explicitly setting\n`input_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat output\nbuffer overflow. 
This behavior can be disabled by explicitly setting\n`output_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress": {"name": "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress", "type": "Gauge", "description": "How many batches are being processed in parallel by multiprocessing."}, "arroyo.strategies.run_task_with_multiprocessing.processes": {"name": "arroyo.strategies.run_task_with_multiprocessing.processes", "type": "Gauge", "description": "Shows the total number of available processes in the pool."}, "arroyo.strategies.run_task_with_multiprocessing.pool.create": {"name": "arroyo.strategies.run_task_with_multiprocessing.pool.create", "type": "Counter", "description": "A subprocess by multiprocessing unexpectedly died.\n\"sigchld.detected\",\nCounter: Incremented when the multiprocessing pool is created (or re-created)."}, "arroyo.consumer.poll.time": {"name": "arroyo.consumer.poll.time", "type": "Time", "description": "(unitless) spent polling librdkafka for new messages."}, "arroyo.consumer.processing.time": {"name": "arroyo.consumer.processing.time", "type": "Time", "description": "(unitless) spent in strategies (blocking in strategy.submit or\nstrategy.poll)"}, "arroyo.consumer.backpressure.time": {"name": "arroyo.consumer.backpressure.time", "type": "Time", "description": "(unitless) spent pausing the consumer due to backpressure (MessageRejected)"}, "arroyo.consumer.dlq.time": {"name": "arroyo.consumer.dlq.time", "type": "Time", "description": "(unitless) spent in handling `InvalidMessage` exceptions and sending\nmessages to the the DLQ."}, "arroyo.consumer.join.time": {"name": "arroyo.consumer.join.time", "type": "Time", "description": "(unitless) spent in waiting for the strategy to exit, such as during\nshutdown or rebalancing."}, "arroyo.consumer.callback.time": {"name": "arroyo.consumer.callback.time", "type": "Time", "description": "(unitless) spent in 
librdkafka callbacks. This metric's timings\noverlap other timings, and might spike at the same time."}, "arroyo.consumer.shutdown.time": {"name": "arroyo.consumer.shutdown.time", "type": "Time", "description": "(unitless) spent in shutting down the consumer. This metric's\ntimings overlap other timings, and might spike at the same time."}, "arroyo.consumer.run.callback": {"name": "arroyo.consumer.run.callback", "type": "Time", "description": "A regular duration metric where each datapoint is measuring the time it\ntook to execute a single callback. This metric is distinct from the\narroyo.consumer.*.time metrics as it does not attempt to accumulate time\nspent per second in an attempt to keep monitoring overhead low.\nThe metric is tagged by the name of the internal callback function being\nexecuted, as 'callback_name'. Possible values are on_partitions_assigned\nand on_partitions_revoked."}, "arroyo.consumer.run.close_strategy": {"name": "arroyo.consumer.run.close_strategy", "type": "Time", "description": "Duration metric measuring the time it took to flush in-flight messages\nand shut down the strategies."}, "arroyo.consumer.run.create_strategy": {"name": "arroyo.consumer.run.create_strategy", "type": "Time", "description": "Duration metric measuring the time it took to create the processing strategy."}, "arroyo.consumer.partitions_revoked.count": {"name": "arroyo.consumer.partitions_revoked.count", "type": "Counter", "description": "How many partitions have been revoked just now."}, "arroyo.consumer.partitions_assigned.count": {"name": "arroyo.consumer.partitions_assigned.count", "type": "Counter", "description": "How many partitions have been assigned just now."}, "arroyo.consumer.latency": {"name": "arroyo.consumer.latency", "type": "Time", "description": "Consumer latency in seconds. 
Recorded by the commit offsets strategy."}, "arroyo.consumer.pause": {"name": "arroyo.consumer.pause", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being paused.\nThis flushes internal prefetch buffers."}, "arroyo.consumer.resume": {"name": "arroyo.consumer.resume", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being resumed.\nThis might cause increased network usage as messages are being re-fetched."}, "arroyo.consumer.librdkafka.total_queue_size": {"name": "arroyo.consumer.librdkafka.total_queue_size", "type": "Gauge", "description": "Queue size of background queue that librdkafka uses to prefetch messages."}, "arroyo.processing.strategies.healthcheck.touch": {"name": "arroyo.processing.strategies.healthcheck.touch", "type": "Counter", "description": "Counter metric to measure how often the healthcheck file has been touched."}, "arroyo.strategies.filter.dropped_messages": {"name": "arroyo.strategies.filter.dropped_messages", "type": "Counter", "description": "Number of messages dropped in the FilterStep strategy"}, "arroyo.consumer.dlq.dropped_messages": {"name": "arroyo.consumer.dlq.dropped_messages", "type": "Counter", "description": "how many messages are dropped due to errors producing to the dlq"}, "arroyo.consumer.dlq_buffer.len": {"name": "arroyo.consumer.dlq_buffer.len", "type": "Gauge", "description": "Current length of the DLQ buffer deque"}, "arroyo.consumer.dlq_buffer.exceeded": {"name": "arroyo.consumer.dlq_buffer.exceeded", "type": "Counter", "description": "Number of times the DLQ buffer size has been exceeded, causing messages to be dropped"}, "arroyo.consumer.dlq_buffer.assigned_partitions": {"name": "arroyo.consumer.dlq_buffer.assigned_partitions", "type": "Gauge", "description": "Number of partitions being tracked in the DLQ buffer"}, "arroyo.producer.librdkafka.p99_int_latency": {"name": "arroyo.producer.librdkafka.p99_int_latency", "type": "Time", "description": 
"Internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_outbuf_latency": {"name": "arroyo.producer.librdkafka.p99_outbuf_latency", "type": "Time", "description": "Output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_rtt": {"name": "arroyo.producer.librdkafka.p99_rtt", "type": "Time", "description": "Round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_int_latency": {"name": "arroyo.producer.librdkafka.avg_int_latency", "type": "Time", "description": "Average internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_outbuf_latency": {"name": "arroyo.producer.librdkafka.avg_outbuf_latency", "type": "Time", "description": "Average output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_rtt": {"name": "arroyo.producer.librdkafka.avg_rtt", "type": "Time", "description": "Average round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.produce_status": {"name": "arroyo.producer.produce_status", "type": "Counter", "description": "Number of times the produce strategy failed to produce a message"}, "arroyo.producer.librdkafka.message_count": {"name": "arroyo.producer.librdkafka.message_count", "type": "Gauge", "description": "Producer message count metric from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_count_max": {"name": "arroyo.producer.librdkafka.message_count_max", "type": "Gauge", "description": "Maximum producer message count from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size": {"name": "arroyo.producer.librdkafka.message_size", "type": "Gauge", "description": "Producer message size from librdkafka statistics\nTagged by producer_name"}, 
"arroyo.producer.librdkafka.message_size_max": {"name": "arroyo.producer.librdkafka.message_size_max", "type": "Gauge", "description": "Maximum producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.txmsgs": {"name": "arroyo.producer.librdkafka.txmsgs", "type": "Gauge", "description": "Total number of messages transmitted from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.broker_tx": {"name": "arroyo.producer.librdkafka.broker_tx", "type": "Gauge", "description": "Total number of transmission requests from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txbytes": {"name": "arroyo.producer.librdkafka.broker_txbytes", "type": "Gauge", "description": "Total number of bytes transmitted from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_requests": {"name": "arroyo.producer.librdkafka.broker_outbuf_requests", "type": "Gauge", "description": "Number of requests awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_messages": {"name": "arroyo.producer.librdkafka.broker_outbuf_messages", "type": "Gauge", "description": "Number of messages awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_connects": {"name": "arroyo.producer.librdkafka.broker_connects", "type": "Gauge", "description": "Number of connection attempts to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_disconnects": {"name": "arroyo.producer.librdkafka.broker_disconnects", "type": "Gauge", "description": "Number of disconnections from broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txerrs": {"name": "arroyo.producer.librdkafka.broker_txerrs", 
"type": "Gauge", "description": "Total number of transmission errors from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txretries": {"name": "arroyo.producer.librdkafka.broker_txretries", "type": "Gauge", "description": "Total number of request retries from librdkafka statistics\nTagged by broker_id, producer_name"}}
|
|
1
|
+
{"arroyo.strategies.run_task_with_multiprocessing.batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time", "type": "Time", "description": "How long it took to submit a batch to multiprocessing"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch after the message transformation"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch after the message transformation"}, "arroyo.consumer.run.count": {"name": "arroyo.consumer.run.count", "type": "Counter", "description": "Number of times the consumer is spinning"}, "arroyo.consumer.invalid_message.count": {"name": "arroyo.consumer.invalid_message.count", "type": "Counter", "description": "Number of times the consumer encountered an invalid message."}, "arroyo.strategies.reduce.batch_time": {"name": "arroyo.strategies.reduce.batch_time", "type": "Time", "description": "How long it took the Reduce step to fill up a batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure", "type": "Counter", "description": "Incremented when a strategy after multiprocessing applies\nbackpressure to 
multiprocessing. May be a reason why CPU cannot be\nsaturated."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot fill the input batch\nbecause not enough memory was allocated. This results in batches smaller\nthan configured. Increase `input_block_size` to fix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot pull results in batches\nequal to the input batch size, because not enough memory was allocated.\nThis can be devastating for throughput. Increase `output_block_size` to\nfix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat input\nbuffer overflow. This behavior can be disabled by explicitly setting\n`input_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat output\nbuffer overflow. 
This behavior can be disabled by explicitly setting\n`output_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress": {"name": "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress", "type": "Gauge", "description": "How many batches are being processed in parallel by multiprocessing."}, "arroyo.strategies.run_task_with_multiprocessing.processes": {"name": "arroyo.strategies.run_task_with_multiprocessing.processes", "type": "Gauge", "description": "Shows the total number of available processes in the pool."}, "arroyo.strategies.run_task_with_multiprocessing.pool.create": {"name": "arroyo.strategies.run_task_with_multiprocessing.pool.create", "type": "Counter", "description": "A subprocess by multiprocessing unexpectedly died.\n\"sigchld.detected\",\nCounter: Incremented when the multiprocessing pool is created (or re-created)."}, "arroyo.consumer.poll.time": {"name": "arroyo.consumer.poll.time", "type": "Time", "description": "(unitless) spent polling librdkafka for new messages."}, "arroyo.consumer.processing.time": {"name": "arroyo.consumer.processing.time", "type": "Time", "description": "(unitless) spent in strategies (blocking in strategy.submit or\nstrategy.poll)"}, "arroyo.consumer.backpressure.time": {"name": "arroyo.consumer.backpressure.time", "type": "Time", "description": "(unitless) spent pausing the consumer due to backpressure (MessageRejected)"}, "arroyo.consumer.dlq.time": {"name": "arroyo.consumer.dlq.time", "type": "Time", "description": "(unitless) spent in handling `InvalidMessage` exceptions and sending\nmessages to the the DLQ."}, "arroyo.consumer.join.time": {"name": "arroyo.consumer.join.time", "type": "Time", "description": "(unitless) spent in waiting for the strategy to exit, such as during\nshutdown or rebalancing."}, "arroyo.consumer.callback.time": {"name": "arroyo.consumer.callback.time", "type": "Time", "description": "(unitless) spent in 
librdkafka callbacks. This metric's timings\noverlap other timings, and might spike at the same time."}, "arroyo.consumer.shutdown.time": {"name": "arroyo.consumer.shutdown.time", "type": "Time", "description": "(unitless) spent in shutting down the consumer. This metric's\ntimings overlap other timings, and might spike at the same time."}, "arroyo.consumer.run.callback": {"name": "arroyo.consumer.run.callback", "type": "Time", "description": "A regular duration metric where each datapoint is measuring the time it\ntook to execute a single callback. This metric is distinct from the\narroyo.consumer.*.time metrics as it does not attempt to accumulate time\nspent per second in an attempt to keep monitoring overhead low.\nThe metric is tagged by the name of the internal callback function being\nexecuted, as 'callback_name'. Possible values are on_partitions_assigned\nand on_partitions_revoked."}, "arroyo.consumer.run.close_strategy": {"name": "arroyo.consumer.run.close_strategy", "type": "Time", "description": "Duration metric measuring the time it took to flush in-flight messages\nand shut down the strategies."}, "arroyo.consumer.run.create_strategy": {"name": "arroyo.consumer.run.create_strategy", "type": "Time", "description": "Duration metric measuring the time it took to create the processing strategy."}, "arroyo.consumer.partitions_revoked.count": {"name": "arroyo.consumer.partitions_revoked.count", "type": "Counter", "description": "How many partitions have been revoked just now."}, "arroyo.consumer.partitions_assigned.count": {"name": "arroyo.consumer.partitions_assigned.count", "type": "Counter", "description": "How many partitions have been assigned just now."}, "arroyo.consumer.latency": {"name": "arroyo.consumer.latency", "type": "Time", "description": "Consumer latency in seconds. 
Recorded by the commit offsets strategy."}, "arroyo.consumer.pause": {"name": "arroyo.consumer.pause", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being paused.\nThis flushes internal prefetch buffers."}, "arroyo.consumer.resume": {"name": "arroyo.consumer.resume", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being resumed.\nThis might cause increased network usage as messages are being re-fetched."}, "arroyo.consumer.librdkafka.total_queue_size": {"name": "arroyo.consumer.librdkafka.total_queue_size", "type": "Gauge", "description": "Queue size of background queue that librdkafka uses to prefetch messages."}, "arroyo.processing.strategies.healthcheck.touch": {"name": "arroyo.processing.strategies.healthcheck.touch", "type": "Counter", "description": "Counter metric to measure how often the healthcheck file has been touched."}, "arroyo.strategies.filter.dropped_messages": {"name": "arroyo.strategies.filter.dropped_messages", "type": "Counter", "description": "Number of messages dropped in the FilterStep strategy"}, "arroyo.consumer.dlq.dropped_messages": {"name": "arroyo.consumer.dlq.dropped_messages", "type": "Counter", "description": "how many messages are dropped due to errors producing to the dlq"}, "arroyo.consumer.commit_status": {"name": "arroyo.consumer.commit_status", "type": "Counter", "description": "Number of times consumer commit succeeds or fails"}, "arroyo.consumer.dlq_buffer.len": {"name": "arroyo.consumer.dlq_buffer.len", "type": "Gauge", "description": "Current length of the DLQ buffer deque"}, "arroyo.consumer.dlq_buffer.exceeded": {"name": "arroyo.consumer.dlq_buffer.exceeded", "type": "Counter", "description": "Number of times the DLQ buffer size has been exceeded, causing messages to be dropped"}, "arroyo.consumer.dlq_buffer.assigned_partitions": {"name": "arroyo.consumer.dlq_buffer.assigned_partitions", "type": "Gauge", "description": "Number of partitions 
being tracked in the DLQ buffer"}, "arroyo.producer.librdkafka.p99_int_latency": {"name": "arroyo.producer.librdkafka.p99_int_latency", "type": "Time", "description": "Internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_outbuf_latency": {"name": "arroyo.producer.librdkafka.p99_outbuf_latency", "type": "Time", "description": "Output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_rtt": {"name": "arroyo.producer.librdkafka.p99_rtt", "type": "Time", "description": "Round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_int_latency": {"name": "arroyo.producer.librdkafka.avg_int_latency", "type": "Time", "description": "Average internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_outbuf_latency": {"name": "arroyo.producer.librdkafka.avg_outbuf_latency", "type": "Time", "description": "Average output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_rtt": {"name": "arroyo.producer.librdkafka.avg_rtt", "type": "Time", "description": "Average round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.produce_status": {"name": "arroyo.producer.produce_status", "type": "Counter", "description": "Number of times the produce strategy succeeds or fails"}, "arroyo.producer.librdkafka.message_count": {"name": "arroyo.producer.librdkafka.message_count", "type": "Gauge", "description": "Producer message count metric from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_count_max": {"name": "arroyo.producer.librdkafka.message_count_max", "type": "Gauge", "description": "Maximum producer message count from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size": {"name": 
"arroyo.producer.librdkafka.message_size", "type": "Gauge", "description": "Producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size_max": {"name": "arroyo.producer.librdkafka.message_size_max", "type": "Gauge", "description": "Maximum producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.txmsgs": {"name": "arroyo.producer.librdkafka.txmsgs", "type": "Gauge", "description": "Total number of messages transmitted from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.broker_tx": {"name": "arroyo.producer.librdkafka.broker_tx", "type": "Gauge", "description": "Total number of transmission requests from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txbytes": {"name": "arroyo.producer.librdkafka.broker_txbytes", "type": "Gauge", "description": "Total number of bytes transmitted from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_requests": {"name": "arroyo.producer.librdkafka.broker_outbuf_requests", "type": "Gauge", "description": "Number of requests awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_messages": {"name": "arroyo.producer.librdkafka.broker_outbuf_messages", "type": "Gauge", "description": "Number of messages awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_connects": {"name": "arroyo.producer.librdkafka.broker_connects", "type": "Gauge", "description": "Number of connection attempts to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_disconnects": {"name": "arroyo.producer.librdkafka.broker_disconnects", "type": "Gauge", "description": "Number of disconnections from broker from 
librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txerrs": {"name": "arroyo.producer.librdkafka.broker_txerrs", "type": "Gauge", "description": "Total number of transmission errors from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txretries": {"name": "arroyo.producer.librdkafka.broker_txretries", "type": "Gauge", "description": "Total number of request retries from librdkafka statistics\nTagged by broker_id, producer_name"}}
|
|
@@ -101,6 +101,8 @@ MetricName = Literal[
|
|
|
101
101
|
"arroyo.strategies.filter.dropped_messages",
|
|
102
102
|
# Counter: how many messages are dropped due to errors producing to the dlq
|
|
103
103
|
"arroyo.consumer.dlq.dropped_messages",
|
|
104
|
+
# Counter: Number of times consumer commit succeeds or fails
|
|
105
|
+
"arroyo.consumer.commit_status",
|
|
104
106
|
# Gauge: Current length of the DLQ buffer deque
|
|
105
107
|
"arroyo.consumer.dlq_buffer.len",
|
|
106
108
|
# Counter: Number of times the DLQ buffer size has been exceeded, causing messages to be dropped
|
|
@@ -125,7 +127,7 @@ MetricName = Literal[
|
|
|
125
127
|
# Time: Average round-trip time to brokers from librdkafka statistics.
|
|
126
128
|
# Tagged by broker_id.
|
|
127
129
|
"arroyo.producer.librdkafka.avg_rtt",
|
|
128
|
-
# Counter: Number of times the produce strategy
|
|
130
|
+
# Counter: Number of times the produce strategy succeeds or fails
|
|
129
131
|
"arroyo.producer.produce_status",
|
|
130
132
|
# Gauge: Producer message count metric from librdkafka statistics
|
|
131
133
|
# Tagged by producer_name
|
|
@@ -66,6 +66,7 @@ tests/backends/mixins.py
|
|
|
66
66
|
tests/backends/test_commit.py
|
|
67
67
|
tests/backends/test_confluent_producer.py
|
|
68
68
|
tests/backends/test_kafka.py
|
|
69
|
+
tests/backends/test_kafka_commit_callback.py
|
|
69
70
|
tests/backends/test_kafka_producer.py
|
|
70
71
|
tests/backends/test_local.py
|
|
71
72
|
tests/processing/__init__.py
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
from confluent_kafka import KafkaError, KafkaException
|
|
2
|
+
from confluent_kafka import TopicPartition as ConfluentTopicPartition
|
|
3
|
+
|
|
4
|
+
from arroyo.backends.kafka import KafkaConsumer
|
|
5
|
+
from arroyo.backends.kafka.configuration import build_kafka_configuration
|
|
6
|
+
from tests.metrics import Increment, TestingMetricsBackend
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def test_commit_callback_success_metric() -> None:
    """Test that successful commits emit a metric with status=success."""
    config = build_kafka_configuration(
        {
            "bootstrap.servers": "localhost:9092",
            "group.id": "test-group",
            "arroyo.enable.auto.commit": True,
        }
    )
    consumer = KafkaConsumer(config)

    # Start from a clean slate so only the callback's increment is recorded.
    TestingMetricsBackend.calls.clear()

    committed = [
        ConfluentTopicPartition("test-topic", 0, 10),
        ConfluentTopicPartition("test-topic", 1, 20),
    ]

    # Invoke the name-mangled private commit callback directly: error=None
    # simulates a commit that librdkafka reported as successful.
    consumer._KafkaConsumer__on_commit_callback(None, committed)  # type: ignore[attr-defined]

    expected = Increment(
        "arroyo.consumer.commit_status",
        1,
        {"group_id": "test-group", "status": "success"},
    )
    assert expected in TestingMetricsBackend.calls
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def test_commit_callback_error_metric() -> None:
    """Test that failed commits emit a metric with status=error"""
    config = build_kafka_configuration(
        {
            "bootstrap.servers": "localhost:9092",
            "group.id": "test-group",
            "arroyo.enable.auto.commit": True,
        }
    )
    consumer = KafkaConsumer(config)

    # Start from a clean slate so only the callback's increment is recorded.
    TestingMetricsBackend.calls.clear()

    # Wrap a raw KafkaError the way librdkafka surfaces commit failures.
    failure = KafkaException(KafkaError(1, "Test commit error"))

    committed = [
        ConfluentTopicPartition("test-topic", 0, 10),
    ]

    # Invoke the name-mangled private commit callback directly with an error.
    consumer._KafkaConsumer__on_commit_callback(failure, committed)  # type: ignore[attr-defined]

    expected = Increment(
        "arroyo.consumer.commit_status",
        1,
        {"group_id": "test-group", "status": "error"},
    )
    assert expected in TestingMetricsBackend.calls
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{sentry_arroyo-2.32.2 → sentry_arroyo-2.32.4}/arroyo/processing/strategies/run_task_in_threads.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|