sentry-arroyo 2.33.0__tar.gz → 2.34.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sentry_arroyo-2.33.0/sentry_arroyo.egg-info → sentry_arroyo-2.34.0}/PKG-INFO +1 -1
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/processor.py +59 -3
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/metricDefs.json +1 -1
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/metric_defs.py +2 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0/sentry_arroyo.egg-info}/PKG-INFO +1 -1
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/setup.py +1 -1
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/test_processor.py +42 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/LICENSE +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/MANIFEST.in +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/README.md +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/abstract.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/kafka/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/kafka/commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/kafka/configuration.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/kafka/consumer.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/local/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/local/backend.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/local/storages/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/local/storages/abstract.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/backends/local/storages/memory.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/dlq.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/errors.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/abstract.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/batching.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/buffer.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/filter.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/guard.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/healthcheck.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/noop.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/produce.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/reduce.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/run_task.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/run_task_in_threads.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/run_task_with_multiprocessing.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/strategies/unfold.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/py.typed +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/types.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/clock.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/codecs.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/concurrent.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/logging.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/metrics.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/profiler.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/retries.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/examples/transform_and_produce/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/examples/transform_and_produce/batched.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/examples/transform_and_produce/script.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/examples/transform_and_produce/simple.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/requirements.txt +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/sentry_arroyo.egg-info/SOURCES.txt +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/sentry_arroyo.egg-info/dependency_links.txt +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/sentry_arroyo.egg-info/not-zip-safe +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/sentry_arroyo.egg-info/requires.txt +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/sentry_arroyo.egg-info/top_level.txt +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/setup.cfg +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/mixins.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_confluent_producer.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_kafka.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_kafka_commit_callback.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_kafka_producer.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/backends/test_local.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_all.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_batching.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_buffer.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_filter.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_guard.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_noop.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_produce.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_reduce.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_run_task.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_run_task_in_threads.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_run_task_with_multiprocessing.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/strategies/test_unfold.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/test_commit.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/test_dlq.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/test_kip848_e2e.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/test_types.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/utils/__init__.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/utils/test_concurrent.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/utils/test_metrics.py +0 -0
- {sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/utils/test_retries.py +0 -0
{sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/processing/processor.py

@@ -40,6 +40,25 @@ DEFAULT_JOIN_TIMEOUT = 25.0  # In seconds
 F = TypeVar("F", bound=Callable[[Any], Any])
 
 
+def get_all_thread_stacks() -> str:
+    """Get stack traces from all threads without using signals."""
+    import sys
+    import threading
+    import traceback
+
+    stacks = []
+    frames = sys._current_frames()
+    threads_by_id = {t.ident: t for t in threading.enumerate()}
+
+    for thread_id, frame in frames.items():
+        thread = threads_by_id.get(thread_id)
+        thread_name = thread.name if thread else f"Unknown-{thread_id}"
+        stack = "".join(traceback.format_stack(frame))
+        stacks.append(f"Thread {thread_name} ({thread_id}):\n{stack}")
+
+    return "\n\n".join(stacks)
+
+
 def _rdkafka_callback(metrics: MetricsBuffer) -> Callable[[F], F]:
     def decorator(f: F) -> F:
         @functools.wraps(f)
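Note: the helper added above uses only the standard library. As a rough standalone sketch of the same signal-free stack-dumping pattern (illustrative only, not part of the package), it can be exercised like this:

import sys
import threading
import time
import traceback


def dump_all_stacks() -> str:
    # CPython-specific: sys._current_frames() returns one frame per live thread,
    # so no signal handlers are needed to capture stacks from a helper thread.
    frames = sys._current_frames()
    threads_by_id = {t.ident: t for t in threading.enumerate()}
    parts = []
    for thread_id, frame in frames.items():
        thread = threads_by_id.get(thread_id)
        name = thread.name if thread else f"Unknown-{thread_id}"
        parts.append(f"Thread {name} ({thread_id}):\n" + "".join(traceback.format_stack(frame)))
    return "\n\n".join(parts)


if __name__ == "__main__":
    # A sleeping worker thread shows up in the dump alongside the main thread.
    threading.Thread(target=time.sleep, args=(5,), daemon=True, name="sleeper").start()
    time.sleep(0.1)
    print(dump_all_stacks())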
@@ -86,6 +105,7 @@ ConsumerCounter = Literal[
     "arroyo.consumer.pause",
     "arroyo.consumer.resume",
     "arroyo.consumer.dlq.dropped_messages",
+    "arroyo.consumer.stuck",
 ]
 
 
@@ -140,6 +160,7 @@ class StreamProcessor(Generic[TStrategyPayload]):
         commit_policy: CommitPolicy = ONCE_PER_SECOND,
         dlq_policy: Optional[DlqPolicy[TStrategyPayload]] = None,
         join_timeout: Optional[float] = None,
+        stuck_detector_timeout: Optional[int] = None,
     ) -> None:
         self.__consumer = consumer
         self.__processor_factory = processor_factory
@@ -164,6 +185,7 @@ class StreamProcessor(Generic[TStrategyPayload]):
         )
 
         self.__shutdown_requested = False
+        self.__shutdown_done = False
 
         # Buffers messages for DLQ. Messages are added when they are submitted for processing and
         # removed once the commit callback is fired as they are guaranteed to be valid at that point.
@@ -175,6 +197,11 @@ class StreamProcessor(Generic[TStrategyPayload]):
             DlqPolicyWrapper(dlq_policy) if dlq_policy is not None else None
         )
 
+        self.__last_run = time.time()
+
+        if stuck_detector_timeout:
+            self.stuck_detector_run(stuck_detector_timeout)
+
         def _close_strategy() -> None:
             self._close_processing_strategy()
 
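Note: based on the constructor change above and the call shape in the new test further down, opting in looks roughly like the sketch below. The consumer and factory are stand-in mocks so the sketch runs (a real caller passes a KafkaConsumer and a processing strategy factory), and the timeout value is illustrative:

from unittest import mock

from arroyo.commit import IMMEDIATE
from arroyo.processing.processor import StreamProcessor
from arroyo.types import Topic

# Stand-ins so the sketch runs; replace with a real consumer and strategy factory.
consumer = mock.Mock()
consumer.tell.return_value = {}
factory = mock.Mock()

processor: StreamProcessor[int] = StreamProcessor(
    consumer,
    Topic("my-topic"),
    factory,
    IMMEDIATE,
    stuck_detector_timeout=300,  # warn and emit arroyo.consumer.stuck after ~300s without a run-loop tick
)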
@@ -355,6 +382,31 @@ class StreamProcessor(Generic[TStrategyPayload]):
             self.__processor_factory.shutdown()
             logger.info("Processor terminated")
             raise
+        finally:
+            self.__shutdown_done = True
+
+    def stuck_detector_run(self, stuck_detector_timeout: int) -> None:
+        import threading
+
+        def f() -> None:
+            while not self.__shutdown_done:
+                time_since_last_run = time.time() - self.__last_run
+                if time_since_last_run > stuck_detector_timeout:
+                    stack_traces = get_all_thread_stacks()
+                    logger.warning(
+                        "main thread stuck for more than %s seconds, stacks: %s",
+                        stuck_detector_timeout,
+                        stack_traces,
+                    )
+                    self.__metrics_buffer.incr_counter("arroyo.consumer.stuck", 1)
+                    self.__metrics_buffer.flush()
+                    return
+
+                time.sleep(1)
+
+        t = threading.Thread(target=f)
+        t.daemon = True
+        t.start()
 
     def _clear_backpressure(self) -> None:
         if self.__backpressure_timestamp is not None:
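Note: the method above is a heartbeat-plus-watchdog pattern: the run loop refreshes a timestamp (see the _run_once hunk below) and a daemon thread fires once when that timestamp goes stale. A self-contained sketch of the same idea, with illustrative names that are not arroyo APIs:

import logging
import threading
import time

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("watchdog_sketch")


class Worker:
    def __init__(self, stuck_timeout: float) -> None:
        self._last_run = time.time()
        self._shutdown = False
        # Daemon thread: it never blocks interpreter shutdown.
        threading.Thread(target=self._watchdog, args=(stuck_timeout,), daemon=True).start()

    def _watchdog(self, stuck_timeout: float) -> None:
        while not self._shutdown:
            if time.time() - self._last_run > stuck_timeout:
                logger.warning("main loop stuck for more than %s seconds", stuck_timeout)
                return  # fire once and exit, like the detector above
            time.sleep(0.1)

    def run_once(self) -> None:
        self._last_run = time.time()  # heartbeat refreshed on every loop iteration


worker = Worker(stuck_timeout=0.5)
worker.run_once()
time.sleep(1.0)  # simulate a blocked main loop; the watchdog fires after ~0.5s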
@@ -381,9 +433,12 @@ class StreamProcessor(Generic[TStrategyPayload]):
         start_dlq = time.time()
         invalid_message = self.__buffered_messages.pop(exc.partition, exc.offset)
         if invalid_message is None:
-
-
-
+            logger.error(
+                "Invalid message not found in buffer %s %s",
+                exc.partition,
+                exc.offset,
+            )
+            return
 
         # XXX: This blocks if there are more than MAX_PENDING_FUTURES in the queue.
         try:
@@ -402,6 +457,7 @@ class StreamProcessor(Generic[TStrategyPayload]):
 
     def _run_once(self) -> None:
         self.__metrics_buffer.incr_counter("arroyo.consumer.run.count", 1)
+        self.__last_run = time.time()
 
         message_carried_over = self.__message is not None
 
{sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/metricDefs.json

@@ -1 +1 @@
-
{"arroyo.strategies.run_task_with_multiprocessing.batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time", "type": "Time", "description": "How long it took to submit a batch to multiprocessing"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch after the message transformation"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch after the message transformation"}, "arroyo.consumer.run.count": {"name": "arroyo.consumer.run.count", "type": "Counter", "description": "Number of times the consumer is spinning"}, "arroyo.consumer.invalid_message.count": {"name": "arroyo.consumer.invalid_message.count", "type": "Counter", "description": "Number of times the consumer encountered an invalid message."}, "arroyo.strategies.reduce.batch_time": {"name": "arroyo.strategies.reduce.batch_time", "type": "Time", "description": "How long it took the Reduce step to fill up a batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure", "type": "Counter", "description": "Incremented when a strategy after multiprocessing applies\nbackpressure to multiprocessing. May be a reason why CPU cannot be\nsaturated."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot fill the input batch\nbecause not enough memory was allocated. This results in batches smaller\nthan configured. Increase `input_block_size` to fix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot pull results in batches\nequal to the input batch size, because not enough memory was allocated.\nThis can be devastating for throughput. Increase `output_block_size` to\nfix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat input\nbuffer overflow. This behavior can be disabled by explicitly setting\n`input_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat output\nbuffer overflow. 
This behavior can be disabled by explicitly setting\n`output_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress": {"name": "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress", "type": "Gauge", "description": "How many batches are being processed in parallel by multiprocessing."}, "arroyo.strategies.run_task_with_multiprocessing.processes": {"name": "arroyo.strategies.run_task_with_multiprocessing.processes", "type": "Gauge", "description": "Shows the total number of available processes in the pool."}, "arroyo.strategies.run_task_with_multiprocessing.pool.create": {"name": "arroyo.strategies.run_task_with_multiprocessing.pool.create", "type": "Counter", "description": "A subprocess by multiprocessing unexpectedly died.\n\"sigchld.detected\",\nCounter: Incremented when the multiprocessing pool is created (or re-created)."}, "arroyo.consumer.poll.time": {"name": "arroyo.consumer.poll.time", "type": "Time", "description": "(unitless) spent polling librdkafka for new messages."}, "arroyo.consumer.processing.time": {"name": "arroyo.consumer.processing.time", "type": "Time", "description": "(unitless) spent in strategies (blocking in strategy.submit or\nstrategy.poll)"}, "arroyo.consumer.backpressure.time": {"name": "arroyo.consumer.backpressure.time", "type": "Time", "description": "(unitless) spent pausing the consumer due to backpressure (MessageRejected)"}, "arroyo.consumer.dlq.time": {"name": "arroyo.consumer.dlq.time", "type": "Time", "description": "(unitless) spent in handling `InvalidMessage` exceptions and sending\nmessages to the the DLQ."}, "arroyo.consumer.join.time": {"name": "arroyo.consumer.join.time", "type": "Time", "description": "(unitless) spent in waiting for the strategy to exit, such as during\nshutdown or rebalancing."}, "arroyo.consumer.callback.time": {"name": "arroyo.consumer.callback.time", "type": "Time", "description": "(unitless) spent in librdkafka callbacks. This metric's timings\noverlap other timings, and might spike at the same time."}, "arroyo.consumer.shutdown.time": {"name": "arroyo.consumer.shutdown.time", "type": "Time", "description": "(unitless) spent in shutting down the consumer. This metric's\ntimings overlap other timings, and might spike at the same time."}, "arroyo.consumer.run.callback": {"name": "arroyo.consumer.run.callback", "type": "Time", "description": "A regular duration metric where each datapoint is measuring the time it\ntook to execute a single callback. This metric is distinct from the\narroyo.consumer.*.time metrics as it does not attempt to accumulate time\nspent per second in an attempt to keep monitoring overhead low.\nThe metric is tagged by the name of the internal callback function being\nexecuted, as 'callback_name'. 
Possible values are on_partitions_assigned\nand on_partitions_revoked."}, "arroyo.consumer.run.close_strategy": {"name": "arroyo.consumer.run.close_strategy", "type": "Time", "description": "Duration metric measuring the time it took to flush in-flight messages\nand shut down the strategies."}, "arroyo.consumer.run.create_strategy": {"name": "arroyo.consumer.run.create_strategy", "type": "Time", "description": "Duration metric measuring the time it took to create the processing strategy."}, "arroyo.consumer.partitions_revoked.count": {"name": "arroyo.consumer.partitions_revoked.count", "type": "Counter", "description": "How many partitions have been revoked just now."}, "arroyo.consumer.partitions_assigned.count": {"name": "arroyo.consumer.partitions_assigned.count", "type": "Counter", "description": "How many partitions have been assigned just now."}, "arroyo.consumer.latency": {"name": "arroyo.consumer.latency", "type": "Time", "description": "Consumer latency in seconds. Recorded by the commit offsets strategy."}, "arroyo.consumer.pause": {"name": "arroyo.consumer.pause", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being paused.\nThis flushes internal prefetch buffers."}, "arroyo.consumer.resume": {"name": "arroyo.consumer.resume", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being resumed.\nThis might cause increased network usage as messages are being re-fetched."}, "arroyo.consumer.librdkafka.total_queue_size": {"name": "arroyo.consumer.librdkafka.total_queue_size", "type": "Gauge", "description": "Queue size of background queue that librdkafka uses to prefetch messages."}, "arroyo.processing.strategies.healthcheck.touch": {"name": "arroyo.processing.strategies.healthcheck.touch", "type": "Counter", "description": "Counter metric to measure how often the healthcheck file has been touched."}, "arroyo.strategies.filter.dropped_messages": {"name": "arroyo.strategies.filter.dropped_messages", "type": "Counter", "description": "Number of messages dropped in the FilterStep strategy"}, "arroyo.consumer.dlq.dropped_messages": {"name": "arroyo.consumer.dlq.dropped_messages", "type": "Counter", "description": "how many messages are dropped due to errors producing to the dlq"}, "arroyo.consumer.commit_status": {"name": "arroyo.consumer.commit_status", "type": "Counter", "description": "Number of times consumer commit succeeds or fails"}, "arroyo.consumer.dlq_buffer.len": {"name": "arroyo.consumer.dlq_buffer.len", "type": "Gauge", "description": "Current length of the DLQ buffer deque"}, "arroyo.consumer.dlq_buffer.exceeded": {"name": "arroyo.consumer.dlq_buffer.exceeded", "type": "Counter", "description": "Number of times the DLQ buffer size has been exceeded, causing messages to be dropped"}, "arroyo.consumer.dlq_buffer.assigned_partitions": {"name": "arroyo.consumer.dlq_buffer.assigned_partitions", "type": "Gauge", "description": "Number of partitions being tracked in the DLQ buffer"}, "arroyo.producer.librdkafka.p99_int_latency": {"name": "arroyo.producer.librdkafka.p99_int_latency", "type": "Time", "description": "Internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_outbuf_latency": {"name": "arroyo.producer.librdkafka.p99_outbuf_latency", "type": "Time", "description": "Output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_rtt": {"name": "arroyo.producer.librdkafka.p99_rtt", "type": "Time", 
"description": "Round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_int_latency": {"name": "arroyo.producer.librdkafka.avg_int_latency", "type": "Time", "description": "Average internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_outbuf_latency": {"name": "arroyo.producer.librdkafka.avg_outbuf_latency", "type": "Time", "description": "Average output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_rtt": {"name": "arroyo.producer.librdkafka.avg_rtt", "type": "Time", "description": "Average round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.produce_status": {"name": "arroyo.producer.produce_status", "type": "Counter", "description": "Number of times the produce strategy succeeds or fails"}, "arroyo.producer.librdkafka.message_count": {"name": "arroyo.producer.librdkafka.message_count", "type": "Gauge", "description": "Producer message count metric from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_count_max": {"name": "arroyo.producer.librdkafka.message_count_max", "type": "Gauge", "description": "Maximum producer message count from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size": {"name": "arroyo.producer.librdkafka.message_size", "type": "Gauge", "description": "Producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size_max": {"name": "arroyo.producer.librdkafka.message_size_max", "type": "Gauge", "description": "Maximum producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.txmsgs": {"name": "arroyo.producer.librdkafka.txmsgs", "type": "Gauge", "description": "Total number of messages transmitted from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.broker_tx": {"name": "arroyo.producer.librdkafka.broker_tx", "type": "Gauge", "description": "Total number of transmission requests from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txbytes": {"name": "arroyo.producer.librdkafka.broker_txbytes", "type": "Gauge", "description": "Total number of bytes transmitted from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_requests": {"name": "arroyo.producer.librdkafka.broker_outbuf_requests", "type": "Gauge", "description": "Number of requests awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_messages": {"name": "arroyo.producer.librdkafka.broker_outbuf_messages", "type": "Gauge", "description": "Number of messages awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_connects": {"name": "arroyo.producer.librdkafka.broker_connects", "type": "Gauge", "description": "Number of connection attempts to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_disconnects": {"name": "arroyo.producer.librdkafka.broker_disconnects", "type": "Gauge", "description": "Number of disconnections from broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txerrs": {"name": 
"arroyo.producer.librdkafka.broker_txerrs", "type": "Gauge", "description": "Total number of transmission errors from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txretries": {"name": "arroyo.producer.librdkafka.broker_txretries", "type": "Gauge", "description": "Total number of request retries from librdkafka statistics\nTagged by broker_id, producer_name"}}

+
{"arroyo.strategies.run_task_with_multiprocessing.batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.submit.time", "type": "Time", "description": "How long it took to submit a batch to multiprocessing"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.msg", "type": "Time", "description": "Number of messages in a multiprocessing batch after the message transformation"}, "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes": {"name": "arroyo.strategies.run_task_with_multiprocessing.output_batch.size.bytes", "type": "Time", "description": "Number of bytes in a multiprocessing batch after the message transformation"}, "arroyo.consumer.run.count": {"name": "arroyo.consumer.run.count", "type": "Counter", "description": "Number of times the consumer is spinning"}, "arroyo.consumer.invalid_message.count": {"name": "arroyo.consumer.invalid_message.count", "type": "Counter", "description": "Number of times the consumer encountered an invalid message."}, "arroyo.strategies.reduce.batch_time": {"name": "arroyo.strategies.reduce.batch_time", "type": "Time", "description": "How long it took the Reduce step to fill up a batch"}, "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.backpressure", "type": "Counter", "description": "Incremented when a strategy after multiprocessing applies\nbackpressure to multiprocessing. May be a reason why CPU cannot be\nsaturated."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot fill the input batch\nbecause not enough memory was allocated. This results in batches smaller\nthan configured. Increase `input_block_size` to fix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.overflow", "type": "Counter", "description": "Incremented when multiprocessing cannot pull results in batches\nequal to the input batch size, because not enough memory was allocated.\nThis can be devastating for throughput. Increase `output_block_size` to\nfix."}, "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.input.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat input\nbuffer overflow. This behavior can be disabled by explicitly setting\n`input_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize": {"name": "arroyo.strategies.run_task_with_multiprocessing.batch.output.resize", "type": "Counter", "description": "Arroyo has decided to re-allocate a block in order to combat output\nbuffer overflow. 
This behavior can be disabled by explicitly setting\n`output_block_size` to a not-None value in `RunTaskWithMultiprocessing`."}, "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress": {"name": "arroyo.strategies.run_task_with_multiprocessing.batches_in_progress", "type": "Gauge", "description": "How many batches are being processed in parallel by multiprocessing."}, "arroyo.strategies.run_task_with_multiprocessing.processes": {"name": "arroyo.strategies.run_task_with_multiprocessing.processes", "type": "Gauge", "description": "Shows the total number of available processes in the pool."}, "arroyo.strategies.run_task_with_multiprocessing.pool.create": {"name": "arroyo.strategies.run_task_with_multiprocessing.pool.create", "type": "Counter", "description": "A subprocess by multiprocessing unexpectedly died.\n\"sigchld.detected\",\nCounter: Incremented when the multiprocessing pool is created (or re-created)."}, "arroyo.consumer.poll.time": {"name": "arroyo.consumer.poll.time", "type": "Time", "description": "(unitless) spent polling librdkafka for new messages."}, "arroyo.consumer.processing.time": {"name": "arroyo.consumer.processing.time", "type": "Time", "description": "(unitless) spent in strategies (blocking in strategy.submit or\nstrategy.poll)"}, "arroyo.consumer.backpressure.time": {"name": "arroyo.consumer.backpressure.time", "type": "Time", "description": "(unitless) spent pausing the consumer due to backpressure (MessageRejected)"}, "arroyo.consumer.dlq.time": {"name": "arroyo.consumer.dlq.time", "type": "Time", "description": "(unitless) spent in handling `InvalidMessage` exceptions and sending\nmessages to the the DLQ."}, "arroyo.consumer.join.time": {"name": "arroyo.consumer.join.time", "type": "Time", "description": "(unitless) spent in waiting for the strategy to exit, such as during\nshutdown or rebalancing."}, "arroyo.consumer.callback.time": {"name": "arroyo.consumer.callback.time", "type": "Time", "description": "(unitless) spent in librdkafka callbacks. This metric's timings\noverlap other timings, and might spike at the same time."}, "arroyo.consumer.shutdown.time": {"name": "arroyo.consumer.shutdown.time", "type": "Time", "description": "(unitless) spent in shutting down the consumer. This metric's\ntimings overlap other timings, and might spike at the same time."}, "arroyo.consumer.run.callback": {"name": "arroyo.consumer.run.callback", "type": "Time", "description": "A regular duration metric where each datapoint is measuring the time it\ntook to execute a single callback. This metric is distinct from the\narroyo.consumer.*.time metrics as it does not attempt to accumulate time\nspent per second in an attempt to keep monitoring overhead low.\nThe metric is tagged by the name of the internal callback function being\nexecuted, as 'callback_name'. 
Possible values are on_partitions_assigned\nand on_partitions_revoked."}, "arroyo.consumer.run.close_strategy": {"name": "arroyo.consumer.run.close_strategy", "type": "Time", "description": "Duration metric measuring the time it took to flush in-flight messages\nand shut down the strategies."}, "arroyo.consumer.run.create_strategy": {"name": "arroyo.consumer.run.create_strategy", "type": "Time", "description": "Duration metric measuring the time it took to create the processing strategy."}, "arroyo.consumer.partitions_revoked.count": {"name": "arroyo.consumer.partitions_revoked.count", "type": "Counter", "description": "How many partitions have been revoked just now."}, "arroyo.consumer.partitions_assigned.count": {"name": "arroyo.consumer.partitions_assigned.count", "type": "Counter", "description": "How many partitions have been assigned just now."}, "arroyo.consumer.latency": {"name": "arroyo.consumer.latency", "type": "Time", "description": "Consumer latency in seconds. Recorded by the commit offsets strategy."}, "arroyo.consumer.pause": {"name": "arroyo.consumer.pause", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being paused.\nThis flushes internal prefetch buffers."}, "arroyo.consumer.resume": {"name": "arroyo.consumer.resume", "type": "Counter", "description": "Metric for when the underlying rdkafka consumer is being resumed.\nThis might cause increased network usage as messages are being re-fetched."}, "arroyo.consumer.stuck": {"name": "arroyo.consumer.stuck", "type": "Counter", "description": "Incremented when the consumer main thread is stuck and not processing messages."}, "arroyo.consumer.librdkafka.total_queue_size": {"name": "arroyo.consumer.librdkafka.total_queue_size", "type": "Gauge", "description": "Queue size of background queue that librdkafka uses to prefetch messages."}, "arroyo.processing.strategies.healthcheck.touch": {"name": "arroyo.processing.strategies.healthcheck.touch", "type": "Counter", "description": "Counter metric to measure how often the healthcheck file has been touched."}, "arroyo.strategies.filter.dropped_messages": {"name": "arroyo.strategies.filter.dropped_messages", "type": "Counter", "description": "Number of messages dropped in the FilterStep strategy"}, "arroyo.consumer.dlq.dropped_messages": {"name": "arroyo.consumer.dlq.dropped_messages", "type": "Counter", "description": "how many messages are dropped due to errors producing to the dlq"}, "arroyo.consumer.commit_status": {"name": "arroyo.consumer.commit_status", "type": "Counter", "description": "Number of times consumer commit succeeds or fails"}, "arroyo.consumer.dlq_buffer.len": {"name": "arroyo.consumer.dlq_buffer.len", "type": "Gauge", "description": "Current length of the DLQ buffer deque"}, "arroyo.consumer.dlq_buffer.exceeded": {"name": "arroyo.consumer.dlq_buffer.exceeded", "type": "Counter", "description": "Number of times the DLQ buffer size has been exceeded, causing messages to be dropped"}, "arroyo.consumer.dlq_buffer.assigned_partitions": {"name": "arroyo.consumer.dlq_buffer.assigned_partitions", "type": "Gauge", "description": "Number of partitions being tracked in the DLQ buffer"}, "arroyo.producer.librdkafka.p99_int_latency": {"name": "arroyo.producer.librdkafka.p99_int_latency", "type": "Time", "description": "Internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_outbuf_latency": {"name": "arroyo.producer.librdkafka.p99_outbuf_latency", "type": "Time", "description": 
"Output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.p99_rtt": {"name": "arroyo.producer.librdkafka.p99_rtt", "type": "Time", "description": "Round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_int_latency": {"name": "arroyo.producer.librdkafka.avg_int_latency", "type": "Time", "description": "Average internal producer queue latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_outbuf_latency": {"name": "arroyo.producer.librdkafka.avg_outbuf_latency", "type": "Time", "description": "Average output buffer latency from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.librdkafka.avg_rtt": {"name": "arroyo.producer.librdkafka.avg_rtt", "type": "Time", "description": "Average round-trip time to brokers from librdkafka statistics.\nTagged by broker_id."}, "arroyo.producer.produce_status": {"name": "arroyo.producer.produce_status", "type": "Counter", "description": "Number of times the produce strategy succeeds or fails"}, "arroyo.producer.librdkafka.message_count": {"name": "arroyo.producer.librdkafka.message_count", "type": "Gauge", "description": "Producer message count metric from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_count_max": {"name": "arroyo.producer.librdkafka.message_count_max", "type": "Gauge", "description": "Maximum producer message count from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size": {"name": "arroyo.producer.librdkafka.message_size", "type": "Gauge", "description": "Producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.message_size_max": {"name": "arroyo.producer.librdkafka.message_size_max", "type": "Gauge", "description": "Maximum producer message size from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.txmsgs": {"name": "arroyo.producer.librdkafka.txmsgs", "type": "Gauge", "description": "Total number of messages transmitted from librdkafka statistics\nTagged by producer_name"}, "arroyo.producer.librdkafka.broker_tx": {"name": "arroyo.producer.librdkafka.broker_tx", "type": "Gauge", "description": "Total number of transmission requests from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txbytes": {"name": "arroyo.producer.librdkafka.broker_txbytes", "type": "Gauge", "description": "Total number of bytes transmitted from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_requests": {"name": "arroyo.producer.librdkafka.broker_outbuf_requests", "type": "Gauge", "description": "Number of requests awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_outbuf_messages": {"name": "arroyo.producer.librdkafka.broker_outbuf_messages", "type": "Gauge", "description": "Number of messages awaiting transmission to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_connects": {"name": "arroyo.producer.librdkafka.broker_connects", "type": "Gauge", "description": "Number of connection attempts to broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_disconnects": {"name": "arroyo.producer.librdkafka.broker_disconnects", "type": "Gauge", "description": "Number of 
disconnections from broker from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txerrs": {"name": "arroyo.producer.librdkafka.broker_txerrs", "type": "Gauge", "description": "Total number of transmission errors from librdkafka statistics\nTagged by broker_id, producer_name"}, "arroyo.producer.librdkafka.broker_txretries": {"name": "arroyo.producer.librdkafka.broker_txretries", "type": "Gauge", "description": "Total number of request retries from librdkafka statistics\nTagged by broker_id, producer_name"}}

{sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/arroyo/utils/metric_defs.py

@@ -93,6 +93,8 @@ MetricName = Literal[
     #
     # This might cause increased network usage as messages are being re-fetched.
     "arroyo.consumer.resume",
+    # Counter: Incremented when the consumer main thread is stuck and not processing messages.
+    "arroyo.consumer.stuck",
     # Gauge: Queue size of background queue that librdkafka uses to prefetch messages.
     "arroyo.consumer.librdkafka.total_queue_size",
    # Counter: Counter metric to measure how often the healthcheck file has been touched.
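Note: the new counter surfaces through whatever metrics backend is configured. A rough sketch, assuming the increment/gauge/timing shape of arroyo's Metrics interface and the configure_metrics helper in arroyo.utils.metrics (verify the exact signatures against your arroyo version):

from typing import Mapping, Optional, Union

from arroyo.utils.metrics import Metrics, configure_metrics


class StuckAlertingMetrics(Metrics):
    # Toy backend: ignore everything except the new stuck counter.
    def increment(
        self, name: str, value: Union[int, float] = 1, tags: Optional[Mapping[str, str]] = None
    ) -> None:
        if name == "arroyo.consumer.stuck":
            print(f"ALERT: consumer main thread appears stuck (value={value}, tags={tags})")

    def gauge(
        self, name: str, value: Union[int, float], tags: Optional[Mapping[str, str]] = None
    ) -> None:
        pass

    def timing(
        self, name: str, value: Union[int, float], tags: Optional[Mapping[str, str]] = None
    ) -> None:
        pass


configure_metrics(StuckAlertingMetrics())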
{sentry_arroyo-2.33.0 → sentry_arroyo-2.34.0}/tests/processing/test_processor.py

@@ -763,3 +763,45 @@ def test_processor_poll_while_paused() -> None:
 
     processor._run_once()
     assert strategy.submit.call_args_list[-1] == mock.call(new_message)
+
+
+def test_stuck_detector(request: pytest.FixtureRequest) -> None:
+    """Test that stuck detector emits a metric when strategy blocks."""
+    topic = Topic("topic")
+    partition = Partition(topic, 0)
+
+    consumer = mock.Mock()
+    consumer.tell.return_value = {}
+
+    strategy = mock.Mock()
+    real_sleep = time.sleep
+
+    factory = mock.Mock()
+    factory.create_with_partitions.return_value = strategy
+
+    TestingMetricsBackend.calls.clear()
+
+    with mock.patch("time.time", return_value=0.0) as mock_time:
+        with mock.patch("time.sleep", side_effect=lambda s: real_sleep(0.01)):
+            processor: StreamProcessor[int] = StreamProcessor(
+                consumer, topic, factory, IMMEDIATE, stuck_detector_timeout=2
+            )
+
+            request.addfinalizer(processor.signal_shutdown)
+
+            assignment_callback = consumer.subscribe.call_args.kwargs["on_assign"]
+            assignment_callback({partition: 0})
+
+            consumer.poll.return_value = BrokerValue(0, partition, 0, datetime.now())
+            processor._run_once()
+
+            mock_time.return_value = 5.0
+            real_sleep(0.2)
+
+            stuck_metrics = [
+                call
+                for call in TestingMetricsBackend.calls
+                if isinstance(call, Increment) and call.name == "arroyo.consumer.stuck"
+            ]
+            assert len(stuck_metrics) == 1
+            assert stuck_metrics[0].value == 1