claude-mpm 4.0.32__py3-none-any.whl → 4.0.34__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/templates/documentation.json +51 -34
- claude_mpm/agents/templates/research.json +0 -11
- claude_mpm/cli/__init__.py +63 -26
- claude_mpm/cli/commands/agent_manager.py +10 -8
- claude_mpm/core/framework_loader.py +173 -84
- claude_mpm/dashboard/static/css/dashboard.css +449 -0
- claude_mpm/dashboard/static/dist/components/agent-inference.js +1 -1
- claude_mpm/dashboard/static/dist/components/event-viewer.js +1 -1
- claude_mpm/dashboard/static/dist/components/file-tool-tracker.js +1 -1
- claude_mpm/dashboard/static/dist/components/module-viewer.js +1 -1
- claude_mpm/dashboard/static/dist/components/session-manager.js +1 -1
- claude_mpm/dashboard/static/dist/dashboard.js +1 -1
- claude_mpm/dashboard/static/dist/socket-client.js +1 -1
- claude_mpm/dashboard/static/js/components/agent-hierarchy.js +774 -0
- claude_mpm/dashboard/static/js/components/agent-inference.js +257 -3
- claude_mpm/dashboard/static/js/components/build-tracker.js +289 -0
- claude_mpm/dashboard/static/js/components/event-viewer.js +168 -39
- claude_mpm/dashboard/static/js/components/file-tool-tracker.js +17 -0
- claude_mpm/dashboard/static/js/components/session-manager.js +23 -3
- claude_mpm/dashboard/static/js/components/socket-manager.js +2 -0
- claude_mpm/dashboard/static/js/dashboard.js +207 -31
- claude_mpm/dashboard/static/js/socket-client.js +85 -6
- claude_mpm/dashboard/templates/index.html +1 -0
- claude_mpm/hooks/claude_hooks/connection_pool.py +12 -2
- claude_mpm/hooks/claude_hooks/event_handlers.py +81 -19
- claude_mpm/hooks/claude_hooks/hook_handler.py +72 -10
- claude_mpm/hooks/claude_hooks/hook_handler_eventbus.py +398 -0
- claude_mpm/hooks/claude_hooks/response_tracking.py +10 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +34 -48
- claude_mpm/services/agents/deployment/agent_template_builder.py +18 -10
- claude_mpm/services/agents/deployment/agents_directory_resolver.py +10 -25
- claude_mpm/services/agents/deployment/multi_source_deployment_service.py +189 -3
- claude_mpm/services/agents/deployment/pipeline/steps/target_directory_step.py +3 -2
- claude_mpm/services/agents/deployment/strategies/system_strategy.py +10 -3
- claude_mpm/services/agents/deployment/strategies/user_strategy.py +10 -14
- claude_mpm/services/agents/deployment/system_instructions_deployer.py +8 -85
- claude_mpm/services/agents/memory/content_manager.py +98 -105
- claude_mpm/services/event_bus/__init__.py +18 -0
- claude_mpm/services/event_bus/event_bus.py +334 -0
- claude_mpm/services/event_bus/relay.py +301 -0
- claude_mpm/services/events/__init__.py +44 -0
- claude_mpm/services/events/consumers/__init__.py +18 -0
- claude_mpm/services/events/consumers/dead_letter.py +296 -0
- claude_mpm/services/events/consumers/logging.py +183 -0
- claude_mpm/services/events/consumers/metrics.py +242 -0
- claude_mpm/services/events/consumers/socketio.py +376 -0
- claude_mpm/services/events/core.py +470 -0
- claude_mpm/services/events/interfaces.py +230 -0
- claude_mpm/services/events/producers/__init__.py +14 -0
- claude_mpm/services/events/producers/hook.py +269 -0
- claude_mpm/services/events/producers/system.py +327 -0
- claude_mpm/services/mcp_gateway/core/process_pool.py +411 -0
- claude_mpm/services/mcp_gateway/server/stdio_server.py +13 -0
- claude_mpm/services/monitor_build_service.py +345 -0
- claude_mpm/services/socketio/event_normalizer.py +667 -0
- claude_mpm/services/socketio/handlers/connection.py +78 -20
- claude_mpm/services/socketio/handlers/hook.py +14 -5
- claude_mpm/services/socketio/migration_utils.py +329 -0
- claude_mpm/services/socketio/server/broadcaster.py +26 -33
- claude_mpm/services/socketio/server/core.py +4 -3
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/METADATA +4 -3
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/RECORD +67 -46
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/WHEEL +0 -0
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.0.32.dist-info → claude_mpm-4.0.34.dist-info}/top_level.txt +0 -0
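The largest addition is the new `claude_mpm/services/events` package (core, interfaces, producers, consumers), a producer/consumer event pipeline. The two hunks below add consumers implementing the `IEventConsumer` interface from `claude_mpm/services/events/interfaces.py`. The interface definition itself is not part of this excerpt, so the following is only a sketch of the shape both implementations share: the `CountingConsumer` name is hypothetical, and the `ConsumerConfig`/`ConsumerPriority` usage is inferred from the hunks below.

```python
from typing import Any, Dict, List

from claude_mpm.services.events.core import Event
from claude_mpm.services.events.interfaces import (
    ConsumerConfig,
    ConsumerPriority,
    IEventConsumer,
)


class CountingConsumer(IEventConsumer):
    """Hypothetical minimal consumer mirroring the shape of the two below."""

    def __init__(self) -> None:
        self._initialized = False
        self._count = 0
        self._config = ConsumerConfig(
            name="CountingConsumer",
            topics=["**"],                  # "**" is used below as the match-all pattern
            priority=ConsumerPriority.LOW,  # run after higher-priority consumers
        )

    async def initialize(self) -> bool:
        self._initialized = True
        return True

    async def consume(self, event: Event) -> bool:
        self._count += 1
        return True

    async def consume_batch(self, events: List[Event]) -> int:
        # Same loop pattern the shipped consumers use.
        successful = 0
        for event in events:
            if await self.consume(event):
                successful += 1
        return successful

    async def shutdown(self) -> None:
        self._initialized = False

    @property
    def config(self) -> ConsumerConfig:
        return self._config

    @property
    def is_healthy(self) -> bool:
        return self._initialized

    def get_metrics(self) -> Dict[str, Any]:
        return {"events_counted": self._count}
```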
claude_mpm/services/events/consumers/dead_letter.py
@@ -0,0 +1,296 @@
+"""
+Dead Letter Queue Consumer
+=========================
+
+Handles events that failed processing in other consumers.
+"""
+
+import json
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from claude_mpm.core.logging_config import get_logger
+
+from ..core import Event
+from ..interfaces import ConsumerConfig, ConsumerPriority, IEventConsumer
+
+
+class DeadLetterConsumer(IEventConsumer):
+    """
+    Handles failed events by persisting them for later analysis.
+
+    Features:
+    - Persist failed events to disk
+    - Configurable retention policy
+    - Event replay capability
+    - Failed event analysis
+    """
+
+    def __init__(
+        self,
+        output_dir: Optional[Path] = None,
+        max_file_size: int = 10 * 1024 * 1024,  # 10MB
+        retention_days: int = 7,
+        topics: List[str] = None,
+    ):
+        """
+        Initialize dead letter consumer.
+
+        Args:
+            output_dir: Directory to store failed events
+            max_file_size: Maximum size per file (bytes)
+            retention_days: How long to keep failed events
+            topics: Topics to handle (None = all failed events)
+        """
+        self.logger = get_logger("DeadLetterConsumer")
+
+        # Configuration
+        self.output_dir = output_dir or Path.home() / ".claude-mpm" / "dead-letter"
+        self.max_file_size = max_file_size
+        self.retention_days = retention_days
+
+        # State
+        self._initialized = False
+        self._current_file: Optional[Path] = None
+        self._current_file_size = 0
+
+        # Metrics
+        self._metrics = {
+            "events_stored": 0,
+            "events_replayed": 0,
+            "files_created": 0,
+            "total_size_bytes": 0,
+        }
+
+        # Consumer configuration
+        self._config = ConsumerConfig(
+            name="DeadLetterConsumer",
+            topics=topics or ["error.**", "failed.**"],
+            priority=ConsumerPriority.CRITICAL,  # Process failed events first
+            filter_func=self._should_store,
+        )
+
+    async def initialize(self) -> bool:
+        """Initialize the dead letter consumer."""
+        try:
+            # Create output directory
+            self.output_dir.mkdir(parents=True, exist_ok=True)
+
+            # Clean old files
+            await self._cleanup_old_files()
+
+            # Initialize current file
+            self._rotate_file()
+
+            self._initialized = True
+            self.logger.info(f"Dead letter consumer initialized (output: {self.output_dir})")
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Failed to initialize dead letter consumer: {e}")
+            return False
+
+    async def consume(self, event: Event) -> bool:
+        """Store a failed event."""
+        if not self._initialized:
+            return False
+
+        try:
+            # Serialize event
+            event_data = self._serialize_event(event)
+            event_json = json.dumps(event_data) + "\n"
+            event_bytes = event_json.encode("utf-8")
+
+            # Check if rotation needed
+            if self._current_file_size + len(event_bytes) > self.max_file_size:
+                self._rotate_file()
+
+            # Write to file
+            with open(self._current_file, "a") as f:
+                f.write(event_json)
+
+            # Update metrics
+            self._current_file_size += len(event_bytes)
+            self._metrics["events_stored"] += 1
+            self._metrics["total_size_bytes"] += len(event_bytes)
+
+            self.logger.debug(
+                f"Stored failed event: {event.topic}/{event.type} "
+                f"(reason: {event.metadata.error_messages if event.metadata else 'unknown'})"
+            )
+
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error storing failed event: {e}")
+            return False
+
+    async def consume_batch(self, events: List[Event]) -> int:
+        """Store multiple failed events."""
+        successful = 0
+        for event in events:
+            if await self.consume(event):
+                successful += 1
+        return successful
+
+    async def shutdown(self) -> None:
+        """Shutdown the consumer."""
+        self.logger.info(
+            f"Dead letter consumer shutdown - stored {self._metrics['events_stored']} events"
+        )
+        self._initialized = False
+
+    async def replay_events(
+        self,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        topic_filter: Optional[str] = None,
+    ) -> List[Event]:
+        """
+        Replay stored events for reprocessing.
+
+        Args:
+            start_time: Start of time range
+            end_time: End of time range
+            topic_filter: Topic pattern to filter
+
+        Returns:
+            List of events matching criteria
+        """
+        replayed_events = []
+
+        # Find files in time range
+        for file_path in sorted(self.output_dir.glob("dead-letter-*.jsonl")):
+            try:
+                with open(file_path, "r") as f:
+                    for line in f:
+                        event_data = json.loads(line)
+
+                        # Apply filters
+                        event_time = datetime.fromisoformat(event_data["timestamp"])
+
+                        if start_time and event_time < start_time:
+                            continue
+                        if end_time and event_time > end_time:
+                            continue
+                        if topic_filter and not event_data["topic"].startswith(topic_filter):
+                            continue
+
+                        # Reconstruct event
+                        event = self._deserialize_event(event_data)
+                        replayed_events.append(event)
+
+            except Exception as e:
+                self.logger.error(f"Error replaying events from {file_path}: {e}")
+
+        self._metrics["events_replayed"] += len(replayed_events)
+        self.logger.info(f"Replayed {len(replayed_events)} events")
+
+        return replayed_events
+
+    @property
+    def config(self) -> ConsumerConfig:
+        """Get consumer configuration."""
+        return self._config
+
+    @property
+    def is_healthy(self) -> bool:
+        """Check if consumer is healthy."""
+        return self._initialized and self._current_file is not None
+
+    def get_metrics(self) -> Dict[str, Any]:
+        """Get consumer metrics."""
+        return {
+            **self._metrics,
+            "current_file": str(self._current_file) if self._current_file else None,
+            "current_file_size": self._current_file_size,
+        }
+
+    def _should_store(self, event: Event) -> bool:
+        """
+        Determine if an event should be stored.
+
+        Only store events that have actually failed processing.
+        """
+        if not event.metadata:
+            return False
+
+        # Store if any consumers failed
+        if event.metadata.consumers_failed:
+            return True
+
+        # Store if max retries exceeded
+        if event.metadata.retry_count >= event.metadata.max_retries:
+            return True
+
+        # Store if event has error messages
+        if event.metadata.error_messages:
+            return True
+
+        return False
+
+    def _serialize_event(self, event: Event) -> Dict[str, Any]:
+        """Serialize an event for storage."""
+        return {
+            "id": event.id,
+            "topic": event.topic,
+            "type": event.type,
+            "timestamp": event.timestamp.isoformat(),
+            "source": event.source,
+            "data": event.data,
+            "correlation_id": event.correlation_id,
+            "priority": event.priority.name,
+            "metadata": {
+                "retry_count": event.metadata.retry_count if event.metadata else 0,
+                "consumers_failed": list(event.metadata.consumers_failed) if event.metadata else [],
+                "error_messages": event.metadata.error_messages if event.metadata else [],
+            } if event.metadata else None,
+        }
+
+    def _deserialize_event(self, data: Dict[str, Any]) -> Event:
+        """Deserialize an event from storage."""
+        from ..core import EventMetadata, EventPriority
+
+        # Reconstruct metadata
+        metadata = None
+        if data.get("metadata"):
+            metadata = EventMetadata(
+                retry_count=data["metadata"].get("retry_count", 0),
+                consumers_failed=set(data["metadata"].get("consumers_failed", [])),
+                error_messages=data["metadata"].get("error_messages", []),
+            )
+
+        return Event(
+            id=data["id"],
+            topic=data["topic"],
+            type=data["type"],
+            timestamp=datetime.fromisoformat(data["timestamp"]),
+            source=data["source"],
+            data=data["data"],
+            metadata=metadata,
+            correlation_id=data.get("correlation_id"),
+            priority=EventPriority[data.get("priority", "NORMAL")],
+        )
+
+    def _rotate_file(self) -> None:
+        """Rotate to a new output file."""
+        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+        self._current_file = self.output_dir / f"dead-letter-{timestamp}.jsonl"
+        self._current_file_size = 0
+        self._metrics["files_created"] += 1
+
+        self.logger.debug(f"Rotated to new file: {self._current_file}")
+
+    async def _cleanup_old_files(self) -> None:
+        """Remove files older than retention period."""
+        cutoff_time = datetime.now().timestamp() - (self.retention_days * 86400)
+
+        for file_path in self.output_dir.glob("dead-letter-*.jsonl"):
+            try:
+                if file_path.stat().st_mtime < cutoff_time:
+                    file_path.unlink()
+                    self.logger.info(f"Removed old dead letter file: {file_path}")
+            except Exception as e:
+                self.logger.error(f"Error removing old file {file_path}: {e}")
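A hedged usage sketch of the consumer above. The `DeadLetterConsumer` methods (`initialize`, `consume`, `replay_events`, `get_metrics`, `shutdown`) are taken from the hunk; the `Event` and `EventMetadata` constructor calls are assumptions, with field names inferred from `_serialize_event()` and `_should_store()` (the real definitions live in `claude_mpm/services/events/core.py`, which is not part of this diff):

```python
import asyncio
from datetime import datetime, timedelta

from claude_mpm.services.events.consumers.dead_letter import DeadLetterConsumer
from claude_mpm.services.events.core import Event, EventMetadata, EventPriority


async def main() -> None:
    dlq = DeadLetterConsumer(retention_days=3)
    await dlq.initialize()

    # Hypothetical failed event: field names mirror _serialize_event(); the
    # actual Event/EventMetadata signatures are not shown in this diff.
    failed = Event(
        id="0123456789abcdef",
        topic="error.hook",
        type="tool_call_failed",
        timestamp=datetime.now(),
        source="hook_handler",
        data={"tool": "Bash", "exit_code": 1},
        metadata=EventMetadata(
            retry_count=3,
            consumers_failed={"SocketIOConsumer"},  # non-empty, so _should_store() is True
            error_messages=["connection refused"],
        ),
        correlation_id=None,
        priority=EventPriority.HIGH,
    )

    await dlq.consume(failed)  # appends one JSON line to the current .jsonl file

    # Pull stored error.* events from the last hour back for reprocessing.
    events = await dlq.replay_events(
        start_time=datetime.now() - timedelta(hours=1),
        topic_filter="error.",
    )
    print(f"replayed {len(events)}; metrics: {dlq.get_metrics()}")

    await dlq.shutdown()


asyncio.run(main())
```

Note the design choice visible in the hunk: events are stored as JSON Lines with size-based rotation and mtime-based retention, so replay is a linear scan of `dead-letter-*.jsonl` files with per-event time and topic filters.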
claude_mpm/services/events/consumers/logging.py
@@ -0,0 +1,183 @@
+"""
+Logging Event Consumer
+=====================
+
+Logs events for debugging and monitoring purposes.
+"""
+
+import json
+from typing import Any, Dict, List
+
+from claude_mpm.core.logging_config import get_logger
+
+from ..core import Event
+from ..interfaces import ConsumerConfig, ConsumerPriority, IEventConsumer
+
+
+class LoggingConsumer(IEventConsumer):
+    """
+    Logs events for debugging and monitoring.
+
+    Features:
+    - Configurable log levels per topic
+    - Structured logging with JSON support
+    - Event filtering
+    - Performance metrics
+    """
+
+    def __init__(
+        self,
+        log_level: str = "INFO",
+        topics: List[str] = None,
+        format_json: bool = True,
+        include_data: bool = True,
+        max_data_length: int = 1000,
+    ):
+        """
+        Initialize logging consumer.
+
+        Args:
+            log_level: Default log level (DEBUG, INFO, WARNING, ERROR)
+            topics: Topics to log (None = all)
+            format_json: Format data as JSON
+            include_data: Include event data in logs
+            max_data_length: Maximum data length to log
+        """
+        self.logger = get_logger("EventLogger")
+
+        # Configuration
+        self.log_level = log_level
+        self.format_json = format_json
+        self.include_data = include_data
+        self.max_data_length = max_data_length
+
+        # State
+        self._initialized = False
+
+        # Metrics
+        self._metrics = {
+            "events_logged": 0,
+            "events_filtered": 0,
+            "errors": 0,
+        }
+
+        # Consumer configuration
+        self._config = ConsumerConfig(
+            name="LoggingConsumer",
+            topics=topics or ["**"],
+            priority=ConsumerPriority.LOW,  # Log after other processing
+        )
+
+    async def initialize(self) -> bool:
+        """Initialize the logging consumer."""
+        self._initialized = True
+        self.logger.info(
+            f"Logging consumer initialized (level={self.log_level}, "
+            f"topics={self._config.topics})"
+        )
+        return True
+
+    async def consume(self, event: Event) -> bool:
+        """Log a single event."""
+        if not self._initialized:
+            return False
+
+        try:
+            # Format log message
+            message = self._format_event(event)
+
+            # Determine log level
+            level = self._get_log_level(event)
+
+            # Log the event
+            getattr(self.logger, level.lower())(message)
+
+            self._metrics["events_logged"] += 1
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error logging event: {e}")
+            self._metrics["errors"] += 1
+            return False
+
+    async def consume_batch(self, events: List[Event]) -> int:
+        """Log multiple events."""
+        successful = 0
+        for event in events:
+            if await self.consume(event):
+                successful += 1
+        return successful
+
+    async def shutdown(self) -> None:
+        """Shutdown the consumer."""
+        self.logger.info(f"Shutting down logging consumer (logged {self._metrics['events_logged']} events)")
+        self._initialized = False
+
+    @property
+    def config(self) -> ConsumerConfig:
+        """Get consumer configuration."""
+        return self._config
+
+    @property
+    def is_healthy(self) -> bool:
+        """Check if consumer is healthy."""
+        return self._initialized
+
+    def get_metrics(self) -> Dict[str, Any]:
+        """Get consumer metrics."""
+        return self._metrics
+
+    def _format_event(self, event: Event) -> str:
+        """Format an event for logging."""
+        # Build base message
+        message = (
+            f"[{event.topic}] {event.type} "
+            f"(id={event.id[:8]}, source={event.source})"
+        )
+
+        # Add data if configured
+        if self.include_data and event.data:
+            if self.format_json:
+                data_str = json.dumps(event.data, indent=2)
+            else:
+                data_str = str(event.data)
+
+            # Truncate if too long
+            if len(data_str) > self.max_data_length:
+                data_str = data_str[:self.max_data_length] + "..."
+
+            message += f"\n{data_str}"
+
+        # Add metadata if present
+        if event.metadata:
+            meta_info = []
+            if event.metadata.retry_count > 0:
+                meta_info.append(f"retries={event.metadata.retry_count}")
+            if event.metadata.consumers_failed:
+                meta_info.append(f"failed={event.metadata.consumers_failed}")
+
+            if meta_info:
+                message += f" [{', '.join(meta_info)}]"
+
+        return message
+
+    def _get_log_level(self, event: Event) -> str:
+        """Determine log level for an event."""
+        # Use ERROR for failed events
+        if event.metadata and event.metadata.consumers_failed:
+            return "ERROR"
+
+        # Use WARNING for retried events
+        if event.metadata and event.metadata.retry_count > 0:
+            return "WARNING"
+
+        # Use configured level for specific topics
+        if event.topic.startswith("error."):
+            return "ERROR"
+        elif event.topic.startswith("warning."):
+            return "WARNING"
+        elif event.topic.startswith("debug."):
+            return "DEBUG"
+
+        # Default to configured level
+        return self.log_level
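And a matching sketch of the logging consumer in isolation, showing how `_get_log_level()` routes a topic prefix to a log level and how long payloads are truncated by `max_data_length`. As above, the `Event` construction is an assumption inferred from this diff, and `make_event` is a hypothetical helper:

```python
import asyncio
from datetime import datetime

from claude_mpm.services.events.consumers.logging import LoggingConsumer
from claude_mpm.services.events.core import Event, EventPriority


async def main() -> None:
    log_consumer = LoggingConsumer(log_level="INFO", max_data_length=200)
    await log_consumer.initialize()

    def make_event(topic: str) -> Event:
        # Hypothetical Event construction; the real signature is in core.py.
        return Event(
            id="fedcba9876543210",
            topic=topic,
            type="example",
            timestamp=datetime.now(),
            source="demo",
            data={"detail": "x" * 500},  # longer than max_data_length, so truncated
            metadata=None,
            correlation_id=None,
            priority=EventPriority.NORMAL,
        )

    # Topic prefix drives the level: "debug." -> DEBUG, "error." -> ERROR,
    # anything else falls back to the configured default ("INFO" here).
    logged = await log_consumer.consume_batch(
        [make_event("debug.cache"), make_event("error.hook"), make_event("session.start")]
    )
    print(f"logged {logged} events; metrics: {log_consumer.get_metrics()}")

    await log_consumer.shutdown()


asyncio.run(main())
```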