nc1709 1.15.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,541 @@
1
+ """
2
+ Multi-Step Reasoning Engine
3
+ Handles complex tasks by breaking them down into manageable steps
4
+ """
5
+ import json
6
+ import re
7
+ from typing import List, Dict, Any, Optional, Tuple
8
+ from enum import Enum
9
+
10
+ from .llm_adapter import LLMAdapter, TaskType
11
+ from .file_controller import FileController
12
+ from .executor import CommandExecutor
13
+ from .cli_ui import (
14
+ ActionSpinner, TaskDisplay, Color, Icons,
15
+ status, thinking, success, error, warning, info,
16
+ action_spinner, StreamingOutput
17
+ )
18
+
19
+
20
class StepType(Enum):
    """Types of steps in a plan.

    The string values match the "type" field the planning LLM is asked to
    emit in _create_plan, so StepType(value) can parse them directly.
    """
    ANALYZE = "analyze"                  # inspect requirements/code via LLM
    READ_FILE = "read_file"              # load a file's contents
    WRITE_FILE = "write_file"            # create or overwrite a file
    EXECUTE_COMMAND = "execute_command"  # run a shell command
    REASON = "reason"                    # free-form LLM reasoning (default/fallback)
    VERIFY = "verify"                    # LLM-backed verification pass
28
+
29
+
30
class Step:
    """Represents a single step in a plan."""

    def __init__(
        self,
        step_type: "StepType",
        description: str,
        details: Dict[str, Any],
        dependencies: Optional[List[int]] = None
    ):
        """Initialize a step

        Args:
            step_type: Type of step
            description: Human-readable description
            details: Step-specific details
            dependencies: List of step indices this depends on
        """
        self.step_type = step_type
        self.description = description
        self.details = details
        self.dependencies = dependencies or []
        # Lifecycle: pending -> running -> completed | failed
        self.status = "pending"
        self.result: Optional[Any] = None
        self.error: Optional[str] = None

    def __repr__(self) -> str:
        # Added for debuggability (plans are lists of Steps and previously
        # printed as opaque object addresses); no impact on existing callers.
        return (
            f"{type(self).__name__}(step_type={self.step_type!r}, "
            f"description={self.description!r}, status={self.status!r})"
        )
55
+
56
+
57
class ReasoningEngine:
    """Engine for multi-step reasoning and task execution"""

    # System prompt that defines NC1709's identity and capabilities.
    # NOTE: this literal is sent verbatim as the system prompt on every
    # single-step request (see _execute_single_step); edit with care.
    SYSTEM_PROMPT = """You are NC1709, a local-first AI developer assistant running entirely on the user's machine.

Your capabilities include:
- Reading and writing files in the project
- Executing shell commands safely
- Searching code semantically
- Git operations (status, diff, commit, branch, etc.)
- Docker operations (containers, images, compose)
- Scaffolding projects (FastAPI, Next.js, Django)
- MCP (Model Context Protocol) tool integration

Key principles:
- You run 100% locally - no data leaves the user's machine
- Be helpful and concise in your responses
- When asked to do something, explain what you'll do briefly, then do it
- For code tasks, provide working code with explanations
- Ask clarifying questions if the request is ambiguous

Current context:
- You are responding through the NC1709 dashboard or CLI
- The user's project is in their current working directory
- You have access to their codebase and can help with development tasks

Respond helpfully and directly to the user's request."""
85
+
86
    def __init__(self):
        """Initialize the reasoning engine"""
        # Collaborators: LLM calls, sandboxed file I/O, shell execution.
        self.llm = LLMAdapter()
        self.file_controller = FileController()
        self.executor = CommandExecutor()
        # Per-request mutable state; current_plan is set by _execute_multi_step.
        self.current_plan: Optional[List[Step]] = None
        # Shared context passed to REASON steps in _execute_step.
        self.execution_context: Dict[str, Any] = {}
93
+
94
+ def process_request(self, user_request: str, context: Optional[Dict[str, Any]] = None) -> str:
95
+ """Process a user request with multi-step reasoning
96
+
97
+ Args:
98
+ user_request: User's request
99
+ context: Additional context (e.g., current directory, files)
100
+
101
+ Returns:
102
+ Final result or response
103
+ """
104
+ # Determine if this needs multi-step reasoning
105
+ if self._needs_multi_step(user_request):
106
+ return self._execute_multi_step(user_request, context)
107
+ else:
108
+ # Simple single-step request
109
+ return self._execute_single_step(user_request)
110
+
111
+ def _needs_multi_step(self, request: str) -> bool:
112
+ """Determine if a request needs multi-step reasoning
113
+
114
+ Args:
115
+ request: User's request
116
+
117
+ Returns:
118
+ True if multi-step reasoning is needed
119
+ """
120
+ # Keywords that indicate complex, multi-step tasks
121
+ multi_step_keywords = [
122
+ "create a project", "build a", "set up", "implement",
123
+ "refactor", "migrate", "convert", "analyze and",
124
+ "first.*then", "step by step", "plan"
125
+ ]
126
+
127
+ request_lower = request.lower()
128
+ return any(re.search(keyword, request_lower) for keyword in multi_step_keywords)
129
+
130
+ def _execute_single_step(self, request: str) -> str:
131
+ """Execute a simple, single-step request
132
+
133
+ Args:
134
+ request: User's request
135
+
136
+ Returns:
137
+ Response
138
+ """
139
+ # Use spinner for visual feedback during LLM call
140
+ spinner = ActionSpinner("Processing your request")
141
+ spinner.start()
142
+
143
+ try:
144
+ spinner.update("Generating response")
145
+ response = self.llm.complete(request, system_prompt=self.SYSTEM_PROMPT)
146
+ spinner.success("Response generated")
147
+ return response
148
+ except Exception as e:
149
+ spinner.failure(f"Error: {e}")
150
+ raise
151
+
152
+ def _execute_multi_step(self, request: str, context: Optional[Dict[str, Any]] = None) -> str:
153
+ """Execute a complex, multi-step request
154
+
155
+ Args:
156
+ request: User's request
157
+ context: Additional context
158
+
159
+ Returns:
160
+ Final result
161
+ """
162
+ # Use spinner for plan creation
163
+ spinner = ActionSpinner("Analyzing request")
164
+ spinner.start()
165
+
166
+ try:
167
+ spinner.update("Creating execution plan")
168
+ plan = self._create_plan(request, context)
169
+
170
+ if not plan:
171
+ spinner.failure("Could not create plan")
172
+ return "I couldn't create a plan for this request. Please try rephrasing."
173
+
174
+ self.current_plan = plan
175
+ spinner.success(f"Created plan with {len(plan)} steps")
176
+
177
+ except Exception as e:
178
+ spinner.failure(f"Planning error: {e}")
179
+ return f"Error creating plan: {e}"
180
+
181
+ # Show plan to user
182
+ self._display_plan(plan)
183
+
184
+ # Ask for confirmation
185
+ response = input(f"\n{Color.BOLD}Proceed with this plan?{Color.RESET} [y/N]: ").strip().lower()
186
+ if response != 'y':
187
+ warning("Plan cancelled by user")
188
+ return "Plan cancelled by user."
189
+
190
+ # Execute plan with TaskDisplay
191
+ task = TaskDisplay(f"Executing {len(plan)}-step plan")
192
+ task.start()
193
+ result = self._execute_plan_with_display(plan, task)
194
+ task.finish()
195
+
196
+ return result
197
+
198
    def _create_plan(self, request: str, context: Optional[Dict[str, Any]] = None) -> List[Step]:
        """Create an execution plan for a request

        Args:
            request: User's request
            context: Additional context

        Returns:
            List of steps (empty list on any planning failure — callers
            treat [] as "could not plan")
        """
        # Use reasoning model to create a plan.
        # NOTE: the {{ }} pairs below are literal braces in this f-string;
        # they render as { } in the JSON example shown to the model.
        planning_prompt = f"""You are a task planning assistant. Break down the following request into clear, actionable steps.

User Request: {request}

Context: {json.dumps(context or {}, indent=2)}

Create a step-by-step plan. For each step, specify:
1. What type of action it is (analyze, read_file, write_file, execute_command, reason, verify)
2. A clear description
3. Specific details needed to execute the step

Format your response as a JSON array of steps:
[
    {{
        "type": "analyze",
        "description": "Understand the requirements",
        "details": {{"focus": "key requirements"}}
    }},
    {{
        "type": "write_file",
        "description": "Create main.py",
        "details": {{"file_path": "main.py", "content_description": "Python script with..."}}
    }}
]

Provide ONLY the JSON array, no additional text."""

        try:
            response = self.llm.complete(planning_prompt, task_type=TaskType.REASONING)

            # Try to extract and parse JSON from the response
            plan_data = self._extract_json_from_response(response)

            if not plan_data:
                print("⚠️ Could not extract valid JSON plan from LLM response")
                return []

            # Convert to Step objects; each malformed entry is skipped
            # individually rather than aborting the whole plan.
            steps = []
            for step_data in plan_data:
                try:
                    step_type_str = step_data.get("type", "reason")
                    # Handle case where type might not be a valid StepType
                    try:
                        step_type = StepType(step_type_str)
                    except ValueError:
                        step_type = StepType.REASON  # Default to REASON for unknown types

                    description = step_data.get("description", "Unknown step")
                    details = step_data.get("details", {})

                    # Ensure details is a dict
                    if not isinstance(details, dict):
                        details = {"value": details}

                    steps.append(Step(step_type, description, details))
                except Exception as step_error:
                    print(f"⚠️ Skipping malformed step: {step_error}")
                    continue

            return steps

        except Exception as e:
            print(f"⚠️ Error creating plan: {e}")
            return []
274
+
275
+ def _extract_json_from_response(self, response: str) -> Optional[List[Dict]]:
276
+ """Extract JSON array from LLM response with multiple fallback strategies
277
+
278
+ Args:
279
+ response: Raw LLM response text
280
+
281
+ Returns:
282
+ Parsed JSON list or None if extraction fails
283
+ """
284
+ # Strategy 1: Try to parse the entire response as JSON
285
+ try:
286
+ data = json.loads(response.strip())
287
+ if isinstance(data, list):
288
+ return data
289
+ except json.JSONDecodeError:
290
+ pass
291
+
292
+ # Strategy 2: Find JSON array using regex (handles markdown code blocks)
293
+ patterns = [
294
+ r'```json\s*(\[.*?\])\s*```', # JSON in code block
295
+ r'```\s*(\[.*?\])\s*```', # Array in generic code block
296
+ r'(\[\s*\{.*?\}\s*\])', # Bare JSON array
297
+ ]
298
+
299
+ for pattern in patterns:
300
+ match = re.search(pattern, response, re.DOTALL)
301
+ if match:
302
+ try:
303
+ return json.loads(match.group(1))
304
+ except json.JSONDecodeError:
305
+ continue
306
+
307
+ # Strategy 3: Find the first '[' and last ']' and try to parse
308
+ start = response.find('[')
309
+ end = response.rfind(']')
310
+ if start != -1 and end != -1 and end > start:
311
+ try:
312
+ return json.loads(response[start:end + 1])
313
+ except json.JSONDecodeError:
314
+ pass
315
+
316
+ # Strategy 4: Try to fix common JSON issues
317
+ try:
318
+ # Remove trailing commas before ] or }
319
+ cleaned = re.sub(r',(\s*[\]\}])', r'\1', response)
320
+ start = cleaned.find('[')
321
+ end = cleaned.rfind(']')
322
+ if start != -1 and end != -1:
323
+ return json.loads(cleaned[start:end + 1])
324
+ except json.JSONDecodeError:
325
+ pass
326
+
327
+ return None
328
+
329
+ def _display_plan(self, plan: List[Step]) -> None:
330
+ """Display the execution plan to the user
331
+
332
+ Args:
333
+ plan: List of steps
334
+ """
335
+ print(f"\n{Color.DIM}{'─'*60}{Color.RESET}")
336
+ print(f"{Color.BOLD}EXECUTION PLAN{Color.RESET}")
337
+ print(f"{Color.DIM}{'─'*60}{Color.RESET}")
338
+
339
+ for i, step in enumerate(plan, 1):
340
+ icon = self._get_step_icon(step.step_type)
341
+ print(f"{Color.CYAN}{i}.{Color.RESET} {icon} {step.description}")
342
+ if step.details:
343
+ for key, value in step.details.items():
344
+ if len(str(value)) < 100:
345
+ print(f" {Color.DIM}{Icons.TREE_BRANCH} {key}: {value}{Color.RESET}")
346
+
347
+ print(f"{Color.DIM}{'─'*60}{Color.RESET}")
348
+
349
+ def _get_step_icon(self, step_type: StepType) -> str:
350
+ """Get an icon for a step type
351
+
352
+ Args:
353
+ step_type: Type of step
354
+
355
+ Returns:
356
+ Icon string
357
+ """
358
+ icons = {
359
+ StepType.ANALYZE: "🔍",
360
+ StepType.READ_FILE: "📖",
361
+ StepType.WRITE_FILE: "✏️",
362
+ StepType.EXECUTE_COMMAND: "💻",
363
+ StepType.REASON: "🧠",
364
+ StepType.VERIFY: "✅"
365
+ }
366
+ return icons.get(step_type, "•")
367
+
368
+ def _execute_plan_with_display(self, plan: List[Step], task: TaskDisplay) -> str:
369
+ """Execute a plan with visual feedback using TaskDisplay
370
+
371
+ Args:
372
+ plan: List of steps to execute
373
+ task: TaskDisplay instance for visual feedback
374
+
375
+ Returns:
376
+ Final result
377
+ """
378
+ results = []
379
+
380
+ for i, step in enumerate(plan, 1):
381
+ step.status = "running"
382
+ task.step(f"Step {i}/{len(plan)}: {step.description}")
383
+
384
+ try:
385
+ # Add action based on step type
386
+ action_name = step.step_type.value.replace("_", " ").title()
387
+ target = None
388
+
389
+ if step.step_type == StepType.READ_FILE:
390
+ target = step.details.get("file_path", "file")
391
+ action_name = "Read"
392
+ elif step.step_type == StepType.WRITE_FILE:
393
+ target = step.details.get("file_path", "file")
394
+ action_name = "Write"
395
+ elif step.step_type == StepType.EXECUTE_COMMAND:
396
+ target = step.details.get("command", "command")[:30]
397
+ action_name = "Execute"
398
+ elif step.step_type in (StepType.ANALYZE, StepType.REASON):
399
+ action_name = "Analyze"
400
+ target = step.description[:30]
401
+
402
+ action_idx = task.action(action_name, target) if target else -1
403
+
404
+ result = self._execute_step(step)
405
+ step.result = result
406
+ step.status = "completed"
407
+ results.append(result)
408
+
409
+ if action_idx >= 0:
410
+ task.complete_action(action_idx)
411
+ task.complete_step(f"Step {i} complete")
412
+
413
+ except Exception as e:
414
+ step.error = str(e)
415
+ step.status = "failed"
416
+
417
+ if action_idx >= 0:
418
+ task.fail_action(action_idx, str(e))
419
+ task.fail_step(f"Step {i} failed: {e}")
420
+
421
+ # Ask user if they want to continue
422
+ response = input(f"\n{Color.YELLOW}Continue with remaining steps?{Color.RESET} [y/N]: ").strip().lower()
423
+ if response != 'y':
424
+ break
425
+
426
+ # Generate final summary
427
+ return self._generate_summary(plan, results)
428
+
429
+ def _execute_plan(self, plan: List[Step]) -> str:
430
+ """Execute a plan (legacy method for compatibility)
431
+
432
+ Args:
433
+ plan: List of steps to execute
434
+
435
+ Returns:
436
+ Final result
437
+ """
438
+ # Use TaskDisplay for visual feedback
439
+ task = TaskDisplay(f"Executing {len(plan)}-step plan")
440
+ task.start()
441
+ result = self._execute_plan_with_display(plan, task)
442
+ task.finish()
443
+ return result
444
+
445
+ def _execute_step(self, step: Step) -> Any:
446
+ """Execute a single step
447
+
448
+ Args:
449
+ step: Step to execute
450
+
451
+ Returns:
452
+ Step result
453
+ """
454
+ if step.step_type == StepType.ANALYZE:
455
+ # Use LLM to analyze
456
+ prompt = f"Analyze: {step.description}\nDetails: {json.dumps(step.details)}"
457
+ return self.llm.complete(prompt, task_type=TaskType.REASONING)
458
+
459
+ elif step.step_type == StepType.READ_FILE:
460
+ file_path = step.details.get("file_path")
461
+ if not file_path:
462
+ raise ValueError("No file_path specified for read_file step")
463
+ content = self.file_controller.read_file(file_path)
464
+ print(f"📖 Read {len(content)} characters from {file_path}")
465
+ return content
466
+
467
+ elif step.step_type == StepType.WRITE_FILE:
468
+ file_path = step.details.get("file_path")
469
+ content = step.details.get("content")
470
+
471
+ if not file_path:
472
+ raise ValueError("No file_path specified for write_file step")
473
+
474
+ # If content not provided, generate it with LLM
475
+ if not content:
476
+ content_description = step.details.get("content_description", "")
477
+ prompt = f"Generate content for {file_path}: {content_description}"
478
+ content = self.llm.complete(prompt, task_type=TaskType.CODING)
479
+
480
+ success = self.file_controller.write_file(file_path, content)
481
+ if not success:
482
+ raise RuntimeError(f"Failed to write file: {file_path}")
483
+
484
+ return f"File written: {file_path}"
485
+
486
+ elif step.step_type == StepType.EXECUTE_COMMAND:
487
+ command = step.details.get("command")
488
+ if not command:
489
+ raise ValueError("No command specified for execute_command step")
490
+
491
+ return_code, stdout, stderr = self.executor.execute(command, confirm=False)
492
+
493
+ if return_code != 0:
494
+ raise RuntimeError(f"Command failed: {stderr}")
495
+
496
+ return stdout
497
+
498
+ elif step.step_type == StepType.REASON:
499
+ prompt = f"{step.description}\nContext: {json.dumps(self.execution_context)}"
500
+ return self.llm.complete(prompt, task_type=TaskType.REASONING)
501
+
502
+ elif step.step_type == StepType.VERIFY:
503
+ # Verification step
504
+ verification_prompt = f"Verify: {step.description}\nDetails: {json.dumps(step.details)}"
505
+ return self.llm.complete(verification_prompt, task_type=TaskType.REASONING)
506
+
507
+ else:
508
+ raise ValueError(f"Unknown step type: {step.step_type}")
509
+
510
+ def _generate_summary(self, plan: List[Step], results: List[Any]) -> str:
511
+ """Generate a summary of plan execution
512
+
513
+ Args:
514
+ plan: Executed plan
515
+ results: Results from each step
516
+
517
+ Returns:
518
+ Summary text
519
+ """
520
+ completed = sum(1 for step in plan if step.status == "completed")
521
+ failed = sum(1 for step in plan if step.status == "failed")
522
+
523
+ summary = f"\n{Color.DIM}{'─'*60}{Color.RESET}\n"
524
+ summary += f"{Color.BOLD}EXECUTION SUMMARY{Color.RESET}\n"
525
+ summary += f"{Color.DIM}{'─'*60}{Color.RESET}\n"
526
+ summary += f"Total steps: {len(plan)}\n"
527
+ summary += f"{Color.GREEN}Completed: {completed}{Color.RESET}\n"
528
+ if failed > 0:
529
+ summary += f"{Color.RED}Failed: {failed}{Color.RESET}\n"
530
+ summary += f"{Color.DIM}{'─'*60}{Color.RESET}\n"
531
+
532
+ if failed == 0:
533
+ summary += f"{Color.GREEN}{Icons.SUCCESS} All steps completed successfully!{Color.RESET}\n"
534
+ else:
535
+ summary += f"{Color.YELLOW}{Icons.WARNING} {failed} step(s) failed{Color.RESET}\n"
536
+ for i, step in enumerate(plan, 1):
537
+ if step.status == "failed":
538
+ summary += f" {Color.RED}{Icons.FAILURE}{Color.RESET} Step {i}: {step.description}\n"
539
+ summary += f" {Color.DIM}{Icons.TREE_BRANCH} {step.error}{Color.RESET}\n"
540
+
541
+ return summary