omnibase_infra 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. omnibase_infra/__init__.py +1 -1
  2. omnibase_infra/enums/__init__.py +4 -0
  3. omnibase_infra/enums/enum_declarative_node_violation.py +102 -0
  4. omnibase_infra/event_bus/adapters/__init__.py +31 -0
  5. omnibase_infra/event_bus/adapters/adapter_protocol_event_publisher_kafka.py +517 -0
  6. omnibase_infra/mixins/mixin_async_circuit_breaker.py +113 -1
  7. omnibase_infra/models/__init__.py +9 -0
  8. omnibase_infra/models/event_bus/__init__.py +22 -0
  9. omnibase_infra/models/event_bus/model_consumer_retry_config.py +367 -0
  10. omnibase_infra/models/event_bus/model_dlq_config.py +177 -0
  11. omnibase_infra/models/event_bus/model_idempotency_config.py +131 -0
  12. omnibase_infra/models/event_bus/model_offset_policy_config.py +107 -0
  13. omnibase_infra/models/resilience/model_circuit_breaker_config.py +15 -0
  14. omnibase_infra/models/validation/__init__.py +8 -0
  15. omnibase_infra/models/validation/model_declarative_node_validation_result.py +139 -0
  16. omnibase_infra/models/validation/model_declarative_node_violation.py +169 -0
  17. omnibase_infra/nodes/architecture_validator/__init__.py +28 -7
  18. omnibase_infra/nodes/architecture_validator/constants.py +36 -0
  19. omnibase_infra/nodes/architecture_validator/handlers/__init__.py +28 -0
  20. omnibase_infra/nodes/architecture_validator/handlers/contract.yaml +120 -0
  21. omnibase_infra/nodes/architecture_validator/handlers/handler_architecture_validation.py +359 -0
  22. omnibase_infra/nodes/architecture_validator/node.py +1 -0
  23. omnibase_infra/nodes/architecture_validator/node_architecture_validator.py +48 -336
  24. omnibase_infra/nodes/node_ledger_projection_compute/__init__.py +16 -2
  25. omnibase_infra/nodes/node_ledger_projection_compute/contract.yaml +14 -4
  26. omnibase_infra/nodes/node_ledger_projection_compute/handlers/__init__.py +18 -0
  27. omnibase_infra/nodes/node_ledger_projection_compute/handlers/contract.yaml +53 -0
  28. omnibase_infra/nodes/node_ledger_projection_compute/handlers/handler_ledger_projection.py +354 -0
  29. omnibase_infra/nodes/node_ledger_projection_compute/node.py +20 -256
  30. omnibase_infra/nodes/node_registry_effect/node.py +20 -73
  31. omnibase_infra/protocols/protocol_dispatch_engine.py +90 -0
  32. omnibase_infra/runtime/__init__.py +11 -0
  33. omnibase_infra/runtime/baseline_subscriptions.py +150 -0
  34. omnibase_infra/runtime/event_bus_subcontract_wiring.py +455 -24
  35. omnibase_infra/runtime/kafka_contract_source.py +13 -5
  36. omnibase_infra/runtime/service_message_dispatch_engine.py +112 -0
  37. omnibase_infra/runtime/service_runtime_host_process.py +6 -11
  38. omnibase_infra/services/__init__.py +36 -0
  39. omnibase_infra/services/contract_publisher/__init__.py +95 -0
  40. omnibase_infra/services/contract_publisher/config.py +199 -0
  41. omnibase_infra/services/contract_publisher/errors.py +243 -0
  42. omnibase_infra/services/contract_publisher/models/__init__.py +28 -0
  43. omnibase_infra/services/contract_publisher/models/model_contract_error.py +67 -0
  44. omnibase_infra/services/contract_publisher/models/model_infra_error.py +62 -0
  45. omnibase_infra/services/contract_publisher/models/model_publish_result.py +112 -0
  46. omnibase_infra/services/contract_publisher/models/model_publish_stats.py +79 -0
  47. omnibase_infra/services/contract_publisher/service.py +617 -0
  48. omnibase_infra/services/contract_publisher/sources/__init__.py +52 -0
  49. omnibase_infra/services/contract_publisher/sources/model_discovered.py +155 -0
  50. omnibase_infra/services/contract_publisher/sources/protocol.py +101 -0
  51. omnibase_infra/services/contract_publisher/sources/source_composite.py +309 -0
  52. omnibase_infra/services/contract_publisher/sources/source_filesystem.py +174 -0
  53. omnibase_infra/services/contract_publisher/sources/source_package.py +221 -0
  54. omnibase_infra/services/observability/__init__.py +40 -0
  55. omnibase_infra/services/observability/agent_actions/__init__.py +64 -0
  56. omnibase_infra/services/observability/agent_actions/config.py +209 -0
  57. omnibase_infra/services/observability/agent_actions/consumer.py +1320 -0
  58. omnibase_infra/services/observability/agent_actions/models/__init__.py +87 -0
  59. omnibase_infra/services/observability/agent_actions/models/model_agent_action.py +142 -0
  60. omnibase_infra/services/observability/agent_actions/models/model_detection_failure.py +125 -0
  61. omnibase_infra/services/observability/agent_actions/models/model_envelope.py +85 -0
  62. omnibase_infra/services/observability/agent_actions/models/model_execution_log.py +159 -0
  63. omnibase_infra/services/observability/agent_actions/models/model_performance_metric.py +130 -0
  64. omnibase_infra/services/observability/agent_actions/models/model_routing_decision.py +138 -0
  65. omnibase_infra/services/observability/agent_actions/models/model_transformation_event.py +124 -0
  66. omnibase_infra/services/observability/agent_actions/tests/__init__.py +20 -0
  67. omnibase_infra/services/observability/agent_actions/tests/test_consumer.py +1154 -0
  68. omnibase_infra/services/observability/agent_actions/tests/test_models.py +645 -0
  69. omnibase_infra/services/observability/agent_actions/tests/test_writer.py +709 -0
  70. omnibase_infra/services/observability/agent_actions/writer_postgres.py +926 -0
  71. omnibase_infra/validation/__init__.py +12 -0
  72. omnibase_infra/validation/contracts/declarative_node.validation.yaml +143 -0
  73. omnibase_infra/validation/validation_exemptions.yaml +93 -0
  74. omnibase_infra/validation/validator_declarative_node.py +850 -0
  75. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/METADATA +3 -3
  76. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/RECORD +79 -27
  77. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/WHEEL +0 -0
  78. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/entry_points.txt +0 -0
  79. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1154 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2025 OmniNode Team
3
+ """Unit tests for AgentActionsConsumer.
4
+
5
+ This module tests:
6
+ - Batch processing (message parsing, model validation, routing)
7
+ - Offset tracking (per-partition, only on success)
8
+ - Health check endpoint (status transitions)
9
+ - Consumer lifecycle (start, stop, context manager)
10
+
11
+ All tests mock aiokafka and asyncpg - no real Kafka/PostgreSQL required.
12
+
13
+ Related Tickets:
14
+ - OMN-1743: Migrate agent_actions_consumer to omnibase_infra
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ from datetime import UTC, datetime
21
+ from typing import TYPE_CHECKING
22
+ from unittest.mock import AsyncMock, MagicMock, patch
23
+ from uuid import uuid4
24
+
25
+ import pytest
26
+ from aiohttp.test_utils import TestClient
27
+
28
+ from omnibase_core.errors import OnexError
29
+ from omnibase_infra.services.observability.agent_actions.config import (
30
+ ConfigAgentActionsConsumer,
31
+ )
32
+ from omnibase_infra.services.observability.agent_actions.consumer import (
33
+ TOPIC_TO_MODEL,
34
+ TOPIC_TO_WRITER_METHOD,
35
+ AgentActionsConsumer,
36
+ ConsumerMetrics,
37
+ EnumHealthStatus,
38
+ mask_dsn_password,
39
+ )
40
+
41
+ if TYPE_CHECKING:
42
+ from aiokafka import TopicPartition
43
+
44
+
45
+ # =============================================================================
46
+ # Fixtures
47
+ # =============================================================================
48
+
49
+
50
@pytest.fixture
def mock_config() -> ConfigAgentActionsConsumer:
    """Build a consumer configuration suitable for unit tests.

    Returns:
        A ``ConfigAgentActionsConsumer`` pointing at local Kafka/Postgres
        endpoints; nothing is actually contacted in these tests.
    """
    # Port 18087 is deliberately non-standard so parallel test runs do not
    # collide with a locally running consumer's health endpoint.
    config = ConfigAgentActionsConsumer(
        kafka_bootstrap_servers="localhost:9092",
        postgres_dsn="postgresql://test:test@localhost:5432/test",
        batch_size=10,
        batch_timeout_ms=500,
        health_check_port=18087,
    )
    return config
60
+
61
+
62
@pytest.fixture
def consumer(mock_config: ConfigAgentActionsConsumer) -> AgentActionsConsumer:
    """Provide a fresh, un-started consumer wired to the test config."""
    instance = AgentActionsConsumer(mock_config)
    return instance
66
+
67
+
68
@pytest.fixture
def sample_agent_action_payload() -> dict[str, object]:
    """Return a minimal, valid agent-action payload as decoded JSON."""
    # Fresh UUIDs per invocation keep records unique across tests.
    created = datetime.now(UTC).isoformat()
    return dict(
        id=str(uuid4()),
        correlation_id=str(uuid4()),
        agent_name="test-agent",
        action_type="tool_call",
        action_name="Read",
        created_at=created,
    )
79
+
80
+
81
@pytest.fixture
def sample_routing_decision_payload() -> dict[str, object]:
    """Return a minimal, valid routing-decision payload as decoded JSON."""
    created = datetime.now(UTC).isoformat()
    return dict(
        id=str(uuid4()),
        correlation_id=str(uuid4()),
        selected_agent="api-architect",
        confidence_score=0.95,
        created_at=created,
    )
91
+
92
+
93
def make_mock_consumer_record(
    topic: str,
    partition: int,
    offset: int,
    value: dict[str, object],
) -> MagicMock:
    """Build a MagicMock that mimics an aiokafka ConsumerRecord.

    Args:
        topic: Kafka topic name the record claims to come from.
        partition: Partition number.
        offset: Record offset within the partition.
        value: Payload dict; serialized to UTF-8 JSON bytes, matching what
            a real Kafka consumer would deliver.

    Returns:
        A mock exposing ``topic``, ``partition``, ``offset`` and ``value``.
    """
    fake = MagicMock()
    fake.topic = topic
    fake.partition = partition
    fake.offset = offset
    fake.value = json.dumps(value).encode("utf-8")
    return fake
106
+
107
+
108
+ # =============================================================================
109
+ # Topic/Model Mapping Tests
110
+ # =============================================================================
111
+
112
+
113
class TestTopicModelMapping:
    """Test topic to model and writer method mappings."""

    # Single source of truth for the topic names the consumer must support.
    # Previously this set was duplicated verbatim in two tests, so the two
    # copies could silently drift apart; both tests now compare against it.
    EXPECTED_TOPICS: frozenset[str] = frozenset(
        {
            "agent-actions",
            "agent-routing-decisions",
            "agent-transformation-events",
            "router-performance-metrics",
            "agent-detection-failures",
            "agent-execution-logs",
        }
    )

    def test_all_topics_have_models(self) -> None:
        """All configured topics should have corresponding models."""
        assert set(TOPIC_TO_MODEL.keys()) == self.EXPECTED_TOPICS

    def test_all_topics_have_writer_methods(self) -> None:
        """All configured topics should have corresponding writer methods."""
        assert set(TOPIC_TO_WRITER_METHOD.keys()) == self.EXPECTED_TOPICS

    def test_topic_to_model_mapping_correct(self) -> None:
        """Topic to model mapping should be correct."""
        # Imported locally to avoid widening the module's import surface.
        from omnibase_infra.services.observability.agent_actions.models import (
            ModelAgentAction,
            ModelDetectionFailure,
            ModelExecutionLog,
            ModelPerformanceMetric,
            ModelRoutingDecision,
            ModelTransformationEvent,
        )

        assert TOPIC_TO_MODEL["agent-actions"] is ModelAgentAction
        assert TOPIC_TO_MODEL["agent-routing-decisions"] is ModelRoutingDecision
        assert TOPIC_TO_MODEL["agent-transformation-events"] is ModelTransformationEvent
        assert TOPIC_TO_MODEL["router-performance-metrics"] is ModelPerformanceMetric
        assert TOPIC_TO_MODEL["agent-detection-failures"] is ModelDetectionFailure
        assert TOPIC_TO_MODEL["agent-execution-logs"] is ModelExecutionLog
157
+
158
+
159
+ # =============================================================================
160
+ # DSN Password Masking Tests
161
+ # =============================================================================
162
+
163
+
164
class TestMaskDsnPassword:
    """Test DSN password masking utility function."""

    def test_mask_standard_dsn_with_password(self) -> None:
        """Standard DSN with password should have password masked."""
        masked = mask_dsn_password("postgresql://user:secret@localhost:5432/db")
        assert masked == "postgresql://user:***@localhost:5432/db"

    def test_mask_dsn_without_port(self) -> None:
        """DSN without explicit port should be handled correctly."""
        masked = mask_dsn_password("postgresql://user:password@localhost/db")
        assert masked == "postgresql://user:***@localhost/db"

    def test_mask_dsn_without_password(self) -> None:
        """DSN without password should be returned unchanged."""
        original = "postgresql://user@localhost:5432/db"
        assert mask_dsn_password(original) == original

    def test_mask_dsn_with_complex_password(self) -> None:
        """DSN with special characters in password should be masked."""
        # Password contains percent-encoded '@' and '/'.
        masked = mask_dsn_password("postgresql://user:p%40ss%2Fword@localhost:5432/db")
        assert masked == "postgresql://user:***@localhost:5432/db"

    def test_mask_dsn_with_query_params(self) -> None:
        """DSN with query parameters should preserve them."""
        masked = mask_dsn_password(
            "postgresql://user:secret@localhost:5432/db?sslmode=require"
        )
        assert masked == "postgresql://user:***@localhost:5432/db?sslmode=require"

    def test_mask_invalid_dsn_returns_original(self) -> None:
        """Invalid DSN should be returned unchanged."""
        assert mask_dsn_password("not-a-valid-dsn") == "not-a-valid-dsn"

    def test_mask_empty_string(self) -> None:
        """Empty string should be returned unchanged."""
        assert mask_dsn_password("") == ""

    def test_mask_dsn_ipv4_host(self) -> None:
        """DSN with IPv4 host should be handled correctly."""
        masked = mask_dsn_password(
            "postgresql://postgres:mysecret@192.168.1.100:5436/omninode_bridge"
        )
        assert masked == "postgresql://postgres:***@192.168.1.100:5436/omninode_bridge"
213
+
214
+
215
+ # =============================================================================
216
+ # Consumer Metrics Tests
217
+ # =============================================================================
218
+
219
+
220
class TestConsumerMetrics:
    """Test ConsumerMetrics tracking."""

    @pytest.mark.asyncio
    async def test_initial_metrics_zero(self) -> None:
        """Metrics should start at zero."""
        m = ConsumerMetrics()

        for counter in (
            m.messages_received,
            m.messages_processed,
            m.messages_failed,
            m.messages_skipped,
            m.batches_processed,
        ):
            assert counter == 0
        assert m.last_poll_at is None
        assert m.last_successful_write_at is None

    @pytest.mark.asyncio
    async def test_record_received_increments(self) -> None:
        """record_received should increment counter and update timestamp."""
        m = ConsumerMetrics()
        await m.record_received(5)

        assert m.messages_received == 5
        assert m.last_poll_at is not None

    @pytest.mark.asyncio
    async def test_record_processed_increments(self) -> None:
        """record_processed should increment counter and update timestamp."""
        m = ConsumerMetrics()
        await m.record_processed(10)

        assert m.messages_processed == 10
        assert m.last_successful_write_at is not None

    @pytest.mark.asyncio
    async def test_record_failed_increments(self) -> None:
        """record_failed should increment counter."""
        m = ConsumerMetrics()
        await m.record_failed(3)

        assert m.messages_failed == 3

    @pytest.mark.asyncio
    async def test_record_skipped_increments(self) -> None:
        """record_skipped should increment counter."""
        m = ConsumerMetrics()
        await m.record_skipped(2)

        assert m.messages_skipped == 2

    @pytest.mark.asyncio
    async def test_record_batch_processed_increments(self) -> None:
        """record_batch_processed should increment counter."""
        m = ConsumerMetrics()
        for _ in range(2):
            await m.record_batch_processed()

        assert m.batches_processed == 2

    @pytest.mark.asyncio
    async def test_record_polled_updates_last_poll_at(self) -> None:
        """record_polled should update last_poll_at timestamp.

        This ensures empty polls still update the timestamp, preventing
        false DEGRADED health status on low-traffic topics.
        """
        m = ConsumerMetrics()
        assert m.last_poll_at is None

        await m.record_polled()

        assert m.last_poll_at is not None

    @pytest.mark.asyncio
    async def test_record_polled_does_not_increment_received(self) -> None:
        """record_polled should NOT increment messages_received counter.

        This distinguishes it from record_received() which increments
        the counter AND updates the timestamp.
        """
        m = ConsumerMetrics()
        await m.record_polled()

        assert m.messages_received == 0
        assert m.last_poll_at is not None

    @pytest.mark.asyncio
    async def test_snapshot_returns_dict(self) -> None:
        """snapshot should return dictionary with all metrics."""
        m = ConsumerMetrics()
        await m.record_received(10)
        await m.record_processed(8)
        await m.record_failed(1)
        await m.record_skipped(1)
        await m.record_batch_processed()

        snap = await m.snapshot()

        expected_counts = {
            "messages_received": 10,
            "messages_processed": 8,
            "messages_failed": 1,
            "messages_skipped": 1,
            "batches_processed": 1,
        }
        for key, value in expected_counts.items():
            assert snap[key] == value
        assert snap["last_poll_at"] is not None
        assert snap["last_successful_write_at"] is not None
332
+
333
+
334
+ # =============================================================================
335
+ # Health Status Enum Tests
336
+ # =============================================================================
337
+
338
+
339
class TestEnumHealthStatus:
    """Test health status enum values."""

    def test_health_status_values(self) -> None:
        """Health status should have expected values."""
        expected = {
            EnumHealthStatus.HEALTHY: "healthy",
            EnumHealthStatus.DEGRADED: "degraded",
            EnumHealthStatus.UNHEALTHY: "unhealthy",
        }
        for member, value in expected.items():
            assert member.value == value
347
+
348
+
349
+ # =============================================================================
350
+ # Consumer Initialization Tests
351
+ # =============================================================================
352
+
353
+
354
class TestConsumerInitialization:
    """Test consumer initialization."""

    def test_consumer_not_running_initially(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Consumer should not be running after initialization."""
        assert consumer.is_running is False

    def test_consumer_has_unique_id(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Consumer should have a unique consumer_id."""
        prefix = "agent-actions-consumer-"
        cid = consumer.consumer_id
        # The id carries the fixed prefix plus a non-empty unique suffix.
        assert cid.startswith(prefix)
        assert len(cid) > len(prefix)

    def test_consumer_metrics_initialized(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Consumer should have initialized metrics."""
        metrics = consumer.metrics
        assert metrics is not None
        assert metrics.messages_received == 0
379
+
380
+
381
+ # =============================================================================
382
+ # Batch Processing Tests
383
+ # =============================================================================
384
+
385
+
386
class TestBatchProcessing:
    """Test batch processing logic."""

    @staticmethod
    def _arm(consumer: AgentActionsConsumer, writer: AsyncMock) -> None:
        """Install a mock writer and flip the consumer into the running state."""
        consumer._writer = writer
        consumer._running = True

    @staticmethod
    def _raw_record(
        topic: str, partition: int, offset: int, payload: bytes | None
    ) -> MagicMock:
        """Build a mock record whose value is raw bytes (or a None tombstone)."""
        rec = MagicMock()
        rec.topic = topic
        rec.partition = partition
        rec.offset = offset
        rec.value = payload
        return rec

    @pytest.mark.asyncio
    async def test_process_batch_parses_json_messages(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Batch processing should parse JSON messages correctly."""
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(return_value=1)
        self._arm(consumer, writer)

        record = make_mock_consumer_record(
            topic="agent-actions",
            partition=0,
            offset=100,
            value=sample_agent_action_payload,
        )

        committed = await consumer._process_batch([record], uuid4())

        # The agent-actions writer method must have been invoked exactly once.
        writer.write_agent_actions.assert_called_once()

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_process_batch_routes_to_correct_writer(
        self,
        consumer: AgentActionsConsumer,
        sample_routing_decision_payload: dict[str, object],
    ) -> None:
        """Batch processing should route messages to correct writer method."""
        writer = AsyncMock()
        writer.write_routing_decisions = AsyncMock(return_value=1)
        self._arm(consumer, writer)

        record = make_mock_consumer_record(
            topic="agent-routing-decisions",
            partition=0,
            offset=50,
            value=sample_routing_decision_payload,
        )

        await consumer._process_batch([record], uuid4())

        # The routing-decisions writer, not the agent-actions one, is used.
        writer.write_routing_decisions.assert_called_once()

    @pytest.mark.asyncio
    async def test_process_batch_skips_invalid_json(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Invalid JSON messages should be skipped."""
        self._arm(consumer, AsyncMock())

        record = self._raw_record("agent-actions", 0, 100, b"not valid json")

        committed = await consumer._process_batch([record], uuid4())

        # The offset is still tracked: skip-and-continue semantics.
        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_process_batch_skips_validation_failures(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Messages failing validation should be skipped."""
        self._arm(consumer, AsyncMock())

        # Valid JSON but missing the model's required fields.
        record = make_mock_consumer_record(
            topic="agent-actions",
            partition=0,
            offset=100,
            value={"invalid": "payload"},
        )

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_process_batch_handles_unknown_topic(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Unknown topics should be skipped but offset tracked."""
        self._arm(consumer, AsyncMock())

        record = make_mock_consumer_record(
            topic="unknown-topic",
            partition=0,
            offset=100,
            value=sample_agent_action_payload,
        )

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("unknown-topic", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_process_batch_skips_tombstone_messages(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Tombstone messages (value=None) should be skipped but offset tracked."""
        self._arm(consumer, AsyncMock())

        record = self._raw_record("agent-actions", 0, 100, None)

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_process_batch_skips_invalid_utf8_messages(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Messages with invalid UTF-8 encoding should be skipped but offset tracked."""
        self._arm(consumer, AsyncMock())

        record = self._raw_record(
            "agent-actions", 0, 100, b"\xff\xfe invalid utf-8"
        )

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        assert tp in committed
        assert committed[tp] == 100

    @pytest.mark.asyncio
    async def test_skipped_offsets_preserved_on_write_failure(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Skipped message offsets should be preserved even when write fails.

        Scenario:
        - Message A (offset 100): Invalid JSON -> skipped, offset tracked
        - Message B (offset 101): Valid -> write attempt fails
        - Expected: offset 100 should still be in result (not lost)
        """
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(
            side_effect=Exception("Database error")
        )
        self._arm(consumer, writer)

        skipped = self._raw_record("agent-actions", 0, 100, b"not valid json")
        valid = make_mock_consumer_record(
            topic="agent-actions",
            partition=0,
            offset=101,
            value=sample_agent_action_payload,
        )

        committed = await consumer._process_batch([skipped, valid], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 0)
        # The skipped offset survives even though the write failed; the
        # failed message's offset (101) must NOT have been recorded.
        assert tp in committed
        assert committed[tp] == 100
622
+
623
+
624
+ # =============================================================================
625
+ # Offset Tracking Tests
626
+ # =============================================================================
627
+
628
+
629
class TestOffsetTracking:
    """Test per-partition offset tracking."""

    @pytest.mark.asyncio
    async def test_successful_write_updates_offsets(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Successful writes should update offsets for that partition."""
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(return_value=1)
        consumer._writer = writer
        consumer._running = True

        record = make_mock_consumer_record(
            topic="agent-actions",
            partition=2,
            offset=500,
            value=sample_agent_action_payload,
        )

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        tp = TopicPartition("agent-actions", 2)
        assert tp in committed
        assert committed[tp] == 500

    @pytest.mark.asyncio
    async def test_failed_write_does_not_update_offsets(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Failed writes should not update offsets for that partition."""
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(
            side_effect=Exception("Database error")
        )
        consumer._writer = writer
        consumer._running = True

        record = make_mock_consumer_record(
            topic="agent-actions",
            partition=0,
            offset=100,
            value=sample_agent_action_payload,
        )

        committed = await consumer._process_batch([record], uuid4())

        from aiokafka import TopicPartition

        # A partition whose write failed must be absent from the result.
        assert TopicPartition("agent-actions", 0) not in committed

    @pytest.mark.asyncio
    async def test_multiple_partitions_tracked_independently(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Each partition should have independent offset tracking."""
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(return_value=2)
        consumer._writer = writer
        consumer._running = True

        # One record on each of two partitions, each with a unique id.
        records = []
        for part, off in ((0, 100), (1, 200)):
            payload = dict(sample_agent_action_payload)
            payload["id"] = str(uuid4())
            records.append(
                make_mock_consumer_record(
                    topic="agent-actions",
                    partition=part,
                    offset=off,
                    value=payload,
                )
            )

        committed = await consumer._process_batch(records, uuid4())

        from aiokafka import TopicPartition

        assert committed[TopicPartition("agent-actions", 0)] == 100
        assert committed[TopicPartition("agent-actions", 1)] == 200

    @pytest.mark.asyncio
    async def test_highest_offset_per_partition_tracked(
        self,
        consumer: AgentActionsConsumer,
        sample_agent_action_payload: dict[str, object],
    ) -> None:
        """Highest offset should be tracked for each partition."""
        writer = AsyncMock()
        writer.write_agent_actions = AsyncMock(return_value=3)
        consumer._writer = writer
        consumer._running = True

        # Deliberately out-of-order offsets on the same partition.
        records = []
        for off in (100, 150, 125):
            payload = dict(sample_agent_action_payload)
            payload["id"] = str(uuid4())
            records.append(
                make_mock_consumer_record(
                    topic="agent-actions",
                    partition=0,
                    offset=off,
                    value=payload,
                )
            )

        committed = await consumer._process_batch(records, uuid4())

        from aiokafka import TopicPartition

        # The maximum offset (150) wins, regardless of arrival order.
        assert committed[TopicPartition("agent-actions", 0)] == 150
766
+
767
+
768
+ # =============================================================================
769
+ # Health Check Tests
770
+ # =============================================================================
771
+
772
+
773
class TestHealthCheck:
    """Test health check functionality."""

    @staticmethod
    def _writer_stub(state: str, failure_count: int | None = None) -> MagicMock:
        """Return a mock writer whose circuit breaker reports ``state``."""
        snapshot: dict[str, object] = {"state": state}
        if failure_count is not None:
            snapshot["failure_count"] = failure_count
        writer = MagicMock()
        writer.get_circuit_breaker_state = MagicMock(return_value=snapshot)
        return writer

    @pytest.mark.asyncio
    async def test_health_check_unhealthy_when_not_running(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return UNHEALTHY when consumer not running."""
        consumer._running = False

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.UNHEALTHY.value
        assert report["consumer_running"] is False

    @pytest.mark.asyncio
    async def test_health_check_healthy_when_running(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return HEALTHY when running with closed circuit."""
        consumer._running = True
        consumer._writer = self._writer_stub("closed", failure_count=0)

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.HEALTHY.value
        assert report["consumer_running"] is True

    @pytest.mark.asyncio
    async def test_health_check_degraded_when_circuit_open(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return DEGRADED when circuit breaker is open."""
        consumer._running = True
        consumer._writer = self._writer_stub("open", failure_count=5)

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.DEGRADED.value

    @pytest.mark.asyncio
    async def test_health_check_returns_expected_fields(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return all expected fields."""
        consumer._running = True
        consumer._writer = self._writer_stub("closed")

        report = await consumer.health_check()

        # Every key the health endpoint is expected to expose.
        for field in (
            "status",
            "consumer_running",
            "consumer_id",
            "group_id",
            "topics",
            "circuit_breaker_state",
            "metrics",
        ):
            assert field in report

    @pytest.mark.asyncio
    async def test_health_check_degraded_when_circuit_half_open(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return DEGRADED when circuit breaker is half-open."""
        consumer._running = True
        consumer._writer = self._writer_stub("half_open", failure_count=3)

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.DEGRADED.value

    @pytest.mark.asyncio
    async def test_health_check_degraded_when_poll_stale(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return DEGRADED when last poll exceeds threshold."""
        from datetime import timedelta

        consumer._running = True
        consumer._writer = self._writer_stub("closed", failure_count=0)

        # Push the last poll timestamp past the configured staleness threshold.
        consumer.metrics.last_poll_at = datetime.now(UTC) - timedelta(
            seconds=consumer._config.health_check_poll_staleness_seconds + 10
        )

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.DEGRADED.value

    @pytest.mark.asyncio
    async def test_health_check_healthy_when_poll_recent(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return HEALTHY when last poll is recent."""
        from datetime import timedelta

        consumer._running = True
        consumer._writer = self._writer_stub("closed", failure_count=0)

        # Both poll and write happened moments ago.
        fresh = datetime.now(UTC) - timedelta(seconds=5)
        consumer.metrics.last_poll_at = fresh
        consumer.metrics.last_successful_write_at = fresh

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.HEALTHY.value

    @pytest.mark.asyncio
    async def test_health_check_degraded_when_write_stale_with_traffic(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return DEGRADED when write is stale AND messages received."""
        from datetime import timedelta

        consumer._running = True
        consumer._writer = self._writer_stub("closed", failure_count=0)

        consumer.metrics.last_poll_at = datetime.now(UTC) - timedelta(seconds=5)
        # Write is stale while traffic has been flowing -> must degrade.
        consumer.metrics.last_successful_write_at = datetime.now(UTC) - timedelta(
            seconds=consumer._config.health_check_staleness_seconds + 10
        )
        consumer.metrics.messages_received = 100

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.DEGRADED.value

    @pytest.mark.asyncio
    async def test_health_check_healthy_when_write_stale_but_no_traffic(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Health check should return HEALTHY when write stale but no messages received."""
        from datetime import timedelta

        consumer._running = True
        consumer._writer = self._writer_stub("closed", failure_count=0)

        consumer.metrics.last_poll_at = datetime.now(UTC) - timedelta(seconds=5)
        consumer.metrics.last_successful_write_at = datetime.now(UTC) - timedelta(
            seconds=consumer._config.health_check_staleness_seconds + 10
        )
        # Zero traffic: with nothing to write, a stale write is expected.
        consumer.metrics.messages_received = 0

        report = await consumer.health_check()

        assert report["status"] == EnumHealthStatus.HEALTHY.value
993
+
994
+ # =============================================================================
995
+ # Consumer Lifecycle Tests
996
+ # =============================================================================
997
+
998
+
999
class TestConsumerLifecycle:
    """Test consumer lifecycle methods."""

    @pytest.mark.asyncio
    async def test_consumer_context_manager(
        self,
        mock_config: ConfigAgentActionsConsumer,
    ) -> None:
        """Consumer should work as async context manager."""
        with patch("asyncpg.create_pool", new_callable=AsyncMock) as pool_factory:
            with patch(
                "omnibase_infra.services.observability.agent_actions.consumer.AIOKafkaConsumer"
            ) as kafka_cls:
                pool_factory.return_value = AsyncMock()
                pool_factory.return_value.close = AsyncMock()
                kafka_cls.return_value = AsyncMock()

                consumer = AgentActionsConsumer(mock_config)

                # Avoid binding a real health endpoint during the test.
                object.__setattr__(consumer, "_start_health_server", AsyncMock())

                async with consumer as ctx:
                    assert ctx is consumer
                    assert ctx.is_running is True

                # Exiting the context must stop the consumer.
                assert consumer.is_running is False

    @pytest.mark.asyncio
    async def test_stop_when_not_running_is_safe(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Calling stop() when not running should be safe (no-op)."""
        consumer._running = False

        await consumer.stop()  # must not raise

        assert consumer.is_running is False

    @pytest.mark.asyncio
    async def test_start_when_already_running_logs_warning(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Calling start() when already running should log warning and return."""
        consumer._running = True

        await consumer.start()  # early return, no exception

        # The consumer stays in the running state.
        assert consumer.is_running is True
1057
+
1058
+ # =============================================================================
1059
+ # Commit Offsets Tests
1060
+ # =============================================================================
1061
+
1062
+
1063
class TestCommitOffsets:
    """Test offset commit logic."""

    @pytest.mark.asyncio
    async def test_commit_offsets_increments_offset(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Commit should use offset + 1 (next offset to consume)."""
        from aiokafka import TopicPartition

        kafka_stub = AsyncMock()
        consumer._consumer = kafka_stub

        tp0 = TopicPartition("agent-actions", 0)
        tp1 = TopicPartition("agent-actions", 1)

        await consumer._commit_offsets({tp0: 100, tp1: 200}, uuid4())

        kafka_stub.commit.assert_called_once()
        committed = kafka_stub.commit.call_args[0][0]

        # Kafka commit semantics: commit the NEXT offset to consume.
        assert committed[tp0] == 101
        assert committed[tp1] == 201

    @pytest.mark.asyncio
    async def test_commit_offsets_empty_dict_skips_commit(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Empty offsets dict should skip commit call."""
        kafka_stub = AsyncMock()
        consumer._consumer = kafka_stub

        await consumer._commit_offsets({}, uuid4())

        # Nothing to commit -> no round trip to Kafka.
        kafka_stub.commit.assert_not_called()

    @pytest.mark.asyncio
    async def test_commit_offsets_handles_kafka_error(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """Kafka commit errors should be logged but not raised."""
        from aiokafka import TopicPartition
        from aiokafka.errors import KafkaError

        kafka_stub = AsyncMock()
        kafka_stub.commit = AsyncMock(side_effect=KafkaError())
        consumer._consumer = kafka_stub

        # Must swallow the commit failure rather than propagate it.
        await consumer._commit_offsets(
            {TopicPartition("agent-actions", 0): 100}, uuid4()
        )
1123
+
1124
+ # =============================================================================
1125
+ # Run Method Tests
1126
+ # =============================================================================
1127
+
1128
+
1129
class TestRunMethod:
    """Test run() method behavior."""

    @pytest.mark.asyncio
    async def test_run_raises_when_not_started(
        self,
        consumer: AgentActionsConsumer,
    ) -> None:
        """run() should raise OnexError if not started."""
        # Calling run() before start() must fail loudly, not hang or no-op.
        with pytest.raises(OnexError, match="Consumer not started"):
            await consumer.run()
1141
+
1142
# Explicit public surface of this test module (keeps `import *` predictable).
__all__ = [
    "TestMaskDsnPassword",
    "TestTopicModelMapping",
    "TestConsumerMetrics",
    "TestEnumHealthStatus",
    "TestConsumerInitialization",
    "TestBatchProcessing",
    "TestOffsetTracking",
    "TestHealthCheck",
    "TestConsumerLifecycle",
    "TestCommitOffsets",
    "TestRunMethod",
]