mapify-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapify_cli/__init__.py +1946 -0
- mapify_cli/playbook_manager.py +517 -0
- mapify_cli/recitation_manager.py +551 -0
- mapify_cli/semantic_search.py +405 -0
- mapify_cli/templates/agents/CHANGELOG.md +108 -0
- mapify_cli/templates/agents/MCP-PATTERNS.md +343 -0
- mapify_cli/templates/agents/README.md +183 -0
- mapify_cli/templates/agents/actor.md +650 -0
- mapify_cli/templates/agents/curator.md +1155 -0
- mapify_cli/templates/agents/documentation-reviewer.md +1282 -0
- mapify_cli/templates/agents/evaluator.md +843 -0
- mapify_cli/templates/agents/monitor.md +977 -0
- mapify_cli/templates/agents/predictor.md +965 -0
- mapify_cli/templates/agents/reflector.md +1048 -0
- mapify_cli/templates/agents/task-decomposer.md +1169 -0
- mapify_cli/templates/agents/test-generator.md +1175 -0
- mapify_cli/templates/commands/map-debug.md +315 -0
- mapify_cli/templates/commands/map-feature.md +454 -0
- mapify_cli/templates/commands/map-refactor.md +317 -0
- mapify_cli/templates/commands/map-review.md +29 -0
- mapify_cli/templates/hooks/README.md +55 -0
- mapify_cli/templates/hooks/validate-agent-templates.sh +94 -0
- mapify_cli/templates/settings.hooks.json +20 -0
- mapify_cli/workflow_logger.py +411 -0
- mapify_cli-1.0.0.dist-info/METADATA +310 -0
- mapify_cli-1.0.0.dist-info/RECORD +28 -0
- mapify_cli-1.0.0.dist-info/WHEEL +4 -0
- mapify_cli-1.0.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,411 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow Logger for MAP Framework
|
|
3
|
+
|
|
4
|
+
Implements comprehensive workflow logging for debugging and analysis.
|
|
5
|
+
Part of Phase 1.2 (Подробное логирование) from Context Engineering improvements.
|
|
6
|
+
|
|
7
|
+
Based on: "Context Engineering for AI Agents: Lessons from Building Manus"
|
|
8
|
+
and CONTEXT-ENGINEERING-IMPROVEMENTS.md
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from datetime import datetime
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Optional, Dict, Any
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class AgentInvocation:
    """Represents a single agent invocation in the workflow.

    Instances are built by MapWorkflowLogger.log_agent_invocation and
    serialized field-by-field into a JSON-lines log entry.
    """
    agent_name: str  # Name of the invoked agent (e.g. 'actor', 'monitor')
    timestamp: str  # ISO-8601 timestamp of the invocation
    prompt_preview: str  # Truncated prompt for readability
    response_preview: str  # Truncated response
    duration_ms: Optional[float] = None  # Execution time in milliseconds, if measured
    status: str = "success"  # 'success', 'error', 'timeout'
    error_message: Optional[str] = None  # Populated when status is 'error'
    task_id: Optional[str] = None  # For correlation with RecitationManager
    subtask_id: Optional[int] = None  # Current subtask, if any
    metadata: Dict[str, Any] = field(default_factory=dict)  # Arbitrary extra context
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class MapWorkflowLogger:
    """
    Manages detailed workflow logging for MAP Framework.

    Key features:
    1. JSON Lines format (one JSON object per line) for easy parsing
    2. Logs agent invocations with timestamps, prompts, responses, timing
    3. Optional enable/disable flag for production vs debug mode
    4. Integration with RecitationManager via task_id correlation
    5. All methods are no-ops when disabled

    Storage location: .map/logs/workflow_TIMESTAMP.log
    """

    def __init__(self, project_root: Path, enabled: bool = False):
        """
        Initialize the workflow logger.

        Args:
            project_root: Root directory of the project
            enabled: Whether logging is enabled (default: False for production)
        """
        self.project_root = Path(project_root)
        self.enabled = enabled
        self.map_dir = self.project_root / ".map"
        self.logs_dir = self.map_dir / "logs"
        self.current_log_file: Optional[Path] = None
        self.session_start_time: Optional[datetime] = None
        self.task_id: Optional[str] = None

        # Create .map/logs directory if logging is enabled
        if self.enabled:
            self.logs_dir.mkdir(parents=True, exist_ok=True)

    def start_session(self, task_id: Optional[str] = None) -> Optional[Path]:
        """
        Start a new logging session with a timestamped log file.

        Args:
            task_id: Optional task identifier for correlation with RecitationManager

        Returns:
            Path to the log file if enabled, None otherwise
        """
        if not self.enabled:
            return None

        # Defensively (re-)create the logs directory: `enabled` may have been
        # flipped on after __init__ (so the directory was never created), or
        # the directory may have been removed since construction. Without
        # this, every subsequent _write_log_entry fails silently.
        self.logs_dir.mkdir(parents=True, exist_ok=True)

        self.session_start_time = datetime.now()
        self.task_id = task_id

        # Create log file with timestamp
        timestamp = self.session_start_time.strftime("%Y%m%d_%H%M%S")
        log_filename = f"workflow_{timestamp}.log"
        self.current_log_file = self.logs_dir / log_filename

        # Write session start marker
        session_info = {
            "event": "session_start",
            "timestamp": self.session_start_time.isoformat(),
            "task_id": task_id,
            "project_root": str(self.project_root)
        }
        self._write_log_entry(session_info)

        return self.current_log_file

    def end_session(self) -> None:
        """
        End the current logging session.

        Writes session summary and cleans up state.
        """
        if not self.enabled or not self.current_log_file:
            return

        session_end_time = datetime.now()
        # session_start_time may be None if state was externally reset
        duration_seconds = (
            (session_end_time - self.session_start_time).total_seconds()
            if self.session_start_time
            else None
        )

        # Write session end marker
        session_info = {
            "event": "session_end",
            "timestamp": session_end_time.isoformat(),
            "task_id": self.task_id,
            "duration_seconds": duration_seconds
        }
        self._write_log_entry(session_info)

        # Clean up state
        self.current_log_file = None
        self.session_start_time = None
        self.task_id = None

    def log_agent_invocation(
        self,
        agent_name: str,
        prompt: str,
        response: str,
        duration_ms: Optional[float] = None,
        status: str = "success",
        error_message: Optional[str] = None,
        subtask_id: Optional[int] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Log an agent invocation with full details.

        Args:
            agent_name: Name of the agent (e.g., 'actor', 'monitor')
            prompt: Full prompt sent to the agent
            response: Full response from the agent
            duration_ms: Execution time in milliseconds
            status: Invocation status ('success', 'error', 'timeout')
            error_message: Error message if status is 'error'
            subtask_id: Current subtask ID for correlation
            metadata: Additional metadata to log
        """
        if not self.enabled:
            return

        # Truncate prompt and response for preview (first 500 / 1000 chars)
        prompt_preview = self._truncate_text(prompt, max_length=500)
        response_preview = self._truncate_text(response, max_length=1000)

        invocation = AgentInvocation(
            agent_name=agent_name,
            timestamp=datetime.now().isoformat(),
            prompt_preview=prompt_preview,
            response_preview=response_preview,
            duration_ms=duration_ms,
            status=status,
            error_message=error_message,
            task_id=self.task_id,
            subtask_id=subtask_id,
            metadata=metadata or {}
        )

        # Convert to dict and write as JSON line. Unpacking __dict__ keeps
        # the dataclass field order (declaration order) after the event tag,
        # and avoids a hand-maintained field-by-field copy that would need
        # updating whenever AgentInvocation gains a field.
        log_entry = {"event": "agent_invocation", **invocation.__dict__}

        self._write_log_entry(log_entry)

    def log_error(
        self,
        error_message: str,
        agent_name: Optional[str] = None,
        subtask_id: Optional[int] = None,
        stack_trace: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Log an error that occurred during workflow execution.

        Args:
            error_message: Human-readable error message
            agent_name: Name of the agent where error occurred (if applicable)
            subtask_id: Current subtask ID (if applicable)
            stack_trace: Full stack trace (if available)
            metadata: Additional error context
        """
        if not self.enabled:
            return

        log_entry = {
            "event": "error",
            "timestamp": datetime.now().isoformat(),
            "error_message": error_message,
            "agent_name": agent_name,
            "subtask_id": subtask_id,
            "task_id": self.task_id,
            # Cap stack traces so a single entry stays a readable JSON line
            "stack_trace": self._truncate_text(stack_trace, max_length=2000) if stack_trace else None,
            "metadata": metadata or {}
        }

        self._write_log_entry(log_entry)

    def log_timing(
        self,
        operation_name: str,
        duration_ms: float,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Log timing information for performance analysis.

        Args:
            operation_name: Name of the operation being timed
            duration_ms: Duration in milliseconds
            metadata: Additional timing context
        """
        if not self.enabled:
            return

        log_entry = {
            "event": "timing",
            "timestamp": datetime.now().isoformat(),
            "operation_name": operation_name,
            "duration_ms": duration_ms,
            "task_id": self.task_id,
            "metadata": metadata or {}
        }

        self._write_log_entry(log_entry)

    def log_event(
        self,
        event_type: str,
        message: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Log a custom workflow event.

        Args:
            event_type: Type of event (e.g., 'subtask_start', 'decomposition_complete')
            message: Human-readable event message
            metadata: Additional event data
        """
        if not self.enabled:
            return

        log_entry = {
            "event": event_type,
            "timestamp": datetime.now().isoformat(),
            "message": message,
            "task_id": self.task_id,
            "metadata": metadata or {}
        }

        self._write_log_entry(log_entry)

    def _write_log_entry(self, entry: Dict[str, Any]) -> None:
        """
        Write a log entry as a JSON line (append mode).

        Args:
            entry: Dictionary to write as JSON
        """
        if not self.enabled or not self.current_log_file:
            return

        try:
            with open(self.current_log_file, "a", encoding="utf-8") as f:
                f.write(json.dumps(entry, ensure_ascii=False) + "\n")
        except Exception as e:
            # Don't fail workflow execution if logging fails.
            # Just print to stderr for debugging.
            import sys
            print(f"Warning: Failed to write log entry: {e}", file=sys.stderr)

    def _truncate_text(self, text: Optional[str], max_length: int = 500) -> Optional[str]:
        """
        Truncate text to maximum length with ellipsis.

        Args:
            text: Text to truncate
            max_length: Maximum length before truncation

        Returns:
            Truncated text or None if input is None
        """
        if text is None:
            return None

        if len(text) <= max_length:
            return text

        return text[:max_length] + "..."

    def get_log_file_path(self) -> Optional[Path]:
        """
        Get the path to the current log file.

        Returns:
            Path to current log file if logging is enabled and session started, None otherwise
        """
        return self.current_log_file if self.enabled else None

    def is_enabled(self) -> bool:
        """
        Check if logging is currently enabled.

        Returns:
            True if logging is enabled, False otherwise
        """
        return self.enabled
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
# CLI interface for testing and manual log inspection
if __name__ == "__main__":
    import sys

    # No subcommand given: print usage and exit non-zero.
    if len(sys.argv) < 2:
        print("Usage:")
        print(" python -m mapify_cli.workflow_logger test")
        print(" python -m mapify_cli.workflow_logger parse <log_file>")
        print("\nExamples:")
        print(" # Test logger functionality")
        print(" python -m mapify_cli.workflow_logger test")
        print("\n # Parse and display log file")
        print(" python -m mapify_cli.workflow_logger parse .map/logs/workflow_20251018_143022.log")
        sys.exit(1)

    command = sys.argv[1]

    if command == "test":
        # Test logger functionality: run one full session against the
        # current working directory and exercise every log_* method.
        logger = MapWorkflowLogger(Path.cwd(), enabled=True)
        log_file = logger.start_session(task_id="test_task_123")
        print(f"Started logging session: {log_file}")

        # Log some test events
        logger.log_event("test_start", "Testing workflow logger")
        logger.log_agent_invocation(
            agent_name="test-agent",
            prompt="This is a test prompt" * 50,  # Long prompt to test truncation
            response="This is a test response" * 50,
            duration_ms=123.45,
            status="success",
            subtask_id=1,
            metadata={"test_key": "test_value"}
        )
        logger.log_error(
            error_message="Test error message",
            agent_name="test-agent",
            subtask_id=1,
            metadata={"error_code": "TEST_001"}
        )
        logger.log_timing("test_operation", 456.78, metadata={"step": "initialization"})
        logger.end_session()

        # Dump the resulting log so the tester can eyeball the JSON lines.
        print(f"\nLog file created: {log_file}")
        print("\nContents:")
        if log_file and log_file.exists():
            print(log_file.read_text())

    elif command == "parse":
        # parse requires a second positional argument: the log file path.
        if len(sys.argv) < 3:
            print("Error: parse requires <log_file>")
            sys.exit(1)

        log_file_path = Path(sys.argv[2])
        if not log_file_path.exists():
            print(f"Error: Log file not found: {log_file_path}")
            sys.exit(1)

        # Parse and pretty-print log file (one JSON object per line);
        # malformed lines are reported but do not abort the loop.
        print(f"Parsing log file: {log_file_path}\n")
        with open(log_file_path, "r", encoding="utf-8") as f:
            for line_num, line in enumerate(f, 1):
                try:
                    entry = json.loads(line)
                    print(f"--- Entry {line_num} ---")
                    print(json.dumps(entry, indent=2, ensure_ascii=False))
                    print()
                except json.JSONDecodeError as e:
                    print(f"Error parsing line {line_num}: {e}")
                    print(f"Line content: {line}")
                    print()

    else:
        # Unknown subcommand.
        print(f"Error: Unknown command '{command}'")
        print("Run without arguments to see usage")
        sys.exit(1)
|