amd-gaia 0.15.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/METADATA +223 -223
  2. amd_gaia-0.15.1.dist-info/RECORD +178 -0
  3. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/entry_points.txt +1 -0
  4. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/licenses/LICENSE.md +20 -20
  5. gaia/__init__.py +29 -29
  6. gaia/agents/__init__.py +19 -19
  7. gaia/agents/base/__init__.py +9 -9
  8. gaia/agents/base/agent.py +2177 -2177
  9. gaia/agents/base/api_agent.py +120 -120
  10. gaia/agents/base/console.py +1841 -1841
  11. gaia/agents/base/errors.py +237 -237
  12. gaia/agents/base/mcp_agent.py +86 -86
  13. gaia/agents/base/tools.py +83 -83
  14. gaia/agents/blender/agent.py +556 -556
  15. gaia/agents/blender/agent_simple.py +133 -135
  16. gaia/agents/blender/app.py +211 -211
  17. gaia/agents/blender/app_simple.py +41 -41
  18. gaia/agents/blender/core/__init__.py +16 -16
  19. gaia/agents/blender/core/materials.py +506 -506
  20. gaia/agents/blender/core/objects.py +316 -316
  21. gaia/agents/blender/core/rendering.py +225 -225
  22. gaia/agents/blender/core/scene.py +220 -220
  23. gaia/agents/blender/core/view.py +146 -146
  24. gaia/agents/chat/__init__.py +9 -9
  25. gaia/agents/chat/agent.py +835 -835
  26. gaia/agents/chat/app.py +1058 -1058
  27. gaia/agents/chat/session.py +508 -508
  28. gaia/agents/chat/tools/__init__.py +15 -15
  29. gaia/agents/chat/tools/file_tools.py +96 -96
  30. gaia/agents/chat/tools/rag_tools.py +1729 -1729
  31. gaia/agents/chat/tools/shell_tools.py +436 -436
  32. gaia/agents/code/__init__.py +7 -7
  33. gaia/agents/code/agent.py +549 -549
  34. gaia/agents/code/cli.py +377 -0
  35. gaia/agents/code/models.py +135 -135
  36. gaia/agents/code/orchestration/__init__.py +24 -24
  37. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  38. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  39. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  40. gaia/agents/code/orchestration/factories/base.py +63 -63
  41. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  42. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  43. gaia/agents/code/orchestration/orchestrator.py +841 -841
  44. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  45. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  46. gaia/agents/code/orchestration/steps/base.py +188 -188
  47. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  48. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  49. gaia/agents/code/orchestration/steps/python.py +307 -307
  50. gaia/agents/code/orchestration/template_catalog.py +469 -469
  51. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  52. gaia/agents/code/orchestration/workflows/base.py +80 -80
  53. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  54. gaia/agents/code/orchestration/workflows/python.py +94 -94
  55. gaia/agents/code/prompts/__init__.py +11 -11
  56. gaia/agents/code/prompts/base_prompt.py +77 -77
  57. gaia/agents/code/prompts/code_patterns.py +2036 -2036
  58. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  59. gaia/agents/code/prompts/python_prompt.py +109 -109
  60. gaia/agents/code/schema_inference.py +365 -365
  61. gaia/agents/code/system_prompt.py +41 -41
  62. gaia/agents/code/tools/__init__.py +42 -42
  63. gaia/agents/code/tools/cli_tools.py +1138 -1138
  64. gaia/agents/code/tools/code_formatting.py +319 -319
  65. gaia/agents/code/tools/code_tools.py +769 -769
  66. gaia/agents/code/tools/error_fixing.py +1347 -1347
  67. gaia/agents/code/tools/external_tools.py +180 -180
  68. gaia/agents/code/tools/file_io.py +845 -845
  69. gaia/agents/code/tools/prisma_tools.py +190 -190
  70. gaia/agents/code/tools/project_management.py +1016 -1016
  71. gaia/agents/code/tools/testing.py +321 -321
  72. gaia/agents/code/tools/typescript_tools.py +122 -122
  73. gaia/agents/code/tools/validation_parsing.py +461 -461
  74. gaia/agents/code/tools/validation_tools.py +806 -806
  75. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  76. gaia/agents/code/validators/__init__.py +16 -16
  77. gaia/agents/code/validators/antipattern_checker.py +241 -241
  78. gaia/agents/code/validators/ast_analyzer.py +197 -197
  79. gaia/agents/code/validators/requirements_validator.py +145 -145
  80. gaia/agents/code/validators/syntax_validator.py +171 -171
  81. gaia/agents/docker/__init__.py +7 -7
  82. gaia/agents/docker/agent.py +642 -642
  83. gaia/agents/emr/__init__.py +8 -8
  84. gaia/agents/emr/agent.py +1506 -1506
  85. gaia/agents/emr/cli.py +1322 -1322
  86. gaia/agents/emr/constants.py +475 -475
  87. gaia/agents/emr/dashboard/__init__.py +4 -4
  88. gaia/agents/emr/dashboard/server.py +1974 -1974
  89. gaia/agents/jira/__init__.py +11 -11
  90. gaia/agents/jira/agent.py +894 -894
  91. gaia/agents/jira/jql_templates.py +299 -299
  92. gaia/agents/routing/__init__.py +7 -7
  93. gaia/agents/routing/agent.py +567 -570
  94. gaia/agents/routing/system_prompt.py +75 -75
  95. gaia/agents/summarize/__init__.py +11 -0
  96. gaia/agents/summarize/agent.py +885 -0
  97. gaia/agents/summarize/prompts.py +129 -0
  98. gaia/api/__init__.py +23 -23
  99. gaia/api/agent_registry.py +238 -238
  100. gaia/api/app.py +305 -305
  101. gaia/api/openai_server.py +575 -575
  102. gaia/api/schemas.py +186 -186
  103. gaia/api/sse_handler.py +373 -373
  104. gaia/apps/__init__.py +4 -4
  105. gaia/apps/llm/__init__.py +6 -6
  106. gaia/apps/llm/app.py +173 -169
  107. gaia/apps/summarize/app.py +116 -633
  108. gaia/apps/summarize/html_viewer.py +133 -133
  109. gaia/apps/summarize/pdf_formatter.py +284 -284
  110. gaia/audio/__init__.py +2 -2
  111. gaia/audio/audio_client.py +439 -439
  112. gaia/audio/audio_recorder.py +269 -269
  113. gaia/audio/kokoro_tts.py +599 -599
  114. gaia/audio/whisper_asr.py +432 -432
  115. gaia/chat/__init__.py +16 -16
  116. gaia/chat/app.py +430 -430
  117. gaia/chat/prompts.py +522 -522
  118. gaia/chat/sdk.py +1228 -1225
  119. gaia/cli.py +5481 -5632
  120. gaia/database/__init__.py +10 -10
  121. gaia/database/agent.py +176 -176
  122. gaia/database/mixin.py +290 -290
  123. gaia/database/testing.py +64 -64
  124. gaia/eval/batch_experiment.py +2332 -2332
  125. gaia/eval/claude.py +542 -542
  126. gaia/eval/config.py +37 -37
  127. gaia/eval/email_generator.py +512 -512
  128. gaia/eval/eval.py +3179 -3179
  129. gaia/eval/groundtruth.py +1130 -1130
  130. gaia/eval/transcript_generator.py +582 -582
  131. gaia/eval/webapp/README.md +167 -167
  132. gaia/eval/webapp/package-lock.json +875 -875
  133. gaia/eval/webapp/package.json +20 -20
  134. gaia/eval/webapp/public/app.js +3402 -3402
  135. gaia/eval/webapp/public/index.html +87 -87
  136. gaia/eval/webapp/public/styles.css +3661 -3661
  137. gaia/eval/webapp/server.js +415 -415
  138. gaia/eval/webapp/test-setup.js +72 -72
  139. gaia/llm/__init__.py +9 -2
  140. gaia/llm/base_client.py +60 -0
  141. gaia/llm/exceptions.py +12 -0
  142. gaia/llm/factory.py +70 -0
  143. gaia/llm/lemonade_client.py +3236 -3221
  144. gaia/llm/lemonade_manager.py +294 -294
  145. gaia/llm/providers/__init__.py +9 -0
  146. gaia/llm/providers/claude.py +108 -0
  147. gaia/llm/providers/lemonade.py +120 -0
  148. gaia/llm/providers/openai_provider.py +79 -0
  149. gaia/llm/vlm_client.py +382 -382
  150. gaia/logger.py +189 -189
  151. gaia/mcp/agent_mcp_server.py +245 -245
  152. gaia/mcp/blender_mcp_client.py +138 -138
  153. gaia/mcp/blender_mcp_server.py +648 -648
  154. gaia/mcp/context7_cache.py +332 -332
  155. gaia/mcp/external_services.py +518 -518
  156. gaia/mcp/mcp_bridge.py +811 -550
  157. gaia/mcp/servers/__init__.py +6 -6
  158. gaia/mcp/servers/docker_mcp.py +83 -83
  159. gaia/perf_analysis.py +361 -0
  160. gaia/rag/__init__.py +10 -10
  161. gaia/rag/app.py +293 -293
  162. gaia/rag/demo.py +304 -304
  163. gaia/rag/pdf_utils.py +235 -235
  164. gaia/rag/sdk.py +2194 -2194
  165. gaia/security.py +163 -163
  166. gaia/talk/app.py +289 -289
  167. gaia/talk/sdk.py +538 -538
  168. gaia/testing/__init__.py +87 -87
  169. gaia/testing/assertions.py +330 -330
  170. gaia/testing/fixtures.py +333 -333
  171. gaia/testing/mocks.py +493 -493
  172. gaia/util.py +46 -46
  173. gaia/utils/__init__.py +33 -33
  174. gaia/utils/file_watcher.py +675 -675
  175. gaia/utils/parsing.py +223 -223
  176. gaia/version.py +100 -100
  177. amd_gaia-0.15.0.dist-info/RECORD +0 -168
  178. gaia/agents/code/app.py +0 -266
  179. gaia/llm/llm_client.py +0 -723
  180. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/WHEEL +0 -0
  181. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/top_level.txt +0 -0
gaia/agents/code/agent.py CHANGED (the old and new contents are line-for-line identical in this view)
@@ -1,549 +1,549 @@
#!/usr/bin/env python
# Copyright(C) 2024-2025 Advanced Micro Devices, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Code Agent for GAIA.

This agent provides intelligent code operations and assistance, focusing on
comprehensive Python support with capabilities for code understanding, generation,
modification, and validation.

"""

import json
import logging
import os
import time
from pathlib import Path
from typing import Any, Callable, Dict, Optional

from gaia.agents.base.agent import Agent
from gaia.agents.base.api_agent import ApiAgent
from gaia.agents.base.console import AgentConsole, SilentConsole
from gaia.agents.base.tools import _TOOL_REGISTRY
from gaia.security import PathValidator

from .orchestration import (
    ExecutionResult,
    Orchestrator,
    UserContext,
)
from .system_prompt import get_system_prompt
from .tools import (
    CodeFormattingMixin,
    CodeToolsMixin,
    ErrorFixingMixin,
    ExternalToolsMixin,
    FileIOToolsMixin,
    ProjectManagementMixin,
    TestingMixin,
    TypeScriptToolsMixin,
    ValidationAndParsingMixin,
    ValidationToolsMixin,
    WebToolsMixin,
)

# Import CLI tools
from .tools.cli_tools import CLIToolsMixin

# Import Prisma tools
from .tools.prisma_tools import PrismaToolsMixin

# Import refactored modules
from .validators import (
    AntipatternChecker,
    ASTAnalyzer,
    RequirementsValidator,
    SyntaxValidator,
)

logger = logging.getLogger(__name__)


class CodeAgent(
    ApiAgent,  # API support for VSCode integration
    Agent,
    CodeToolsMixin,  # Code generation, analysis, helpers
    ValidationAndParsingMixin,  # Validation, AST parsing, error fixing helpers
    FileIOToolsMixin,  # File I/O operations
    CodeFormattingMixin,  # Code formatting (Black, etc.)
    ProjectManagementMixin,  # Project/workspace management
    TestingMixin,  # Testing tools
    ErrorFixingMixin,  # Error fixing tools
    TypeScriptToolsMixin,  # TypeScript runtime tools (npm, template fetching, validation)
    WebToolsMixin,  # Next.js full-stack web development tools (replaces frontend/backend)
    PrismaToolsMixin,  # Prisma database setup and management
    CLIToolsMixin,  # Universal CLI execution with process management
    ExternalToolsMixin,  # Context7 and Perplexity integration for documentation and web search
    ValidationToolsMixin,  # Validation and testing tools
):
    """
    Intelligent autonomous code agent for comprehensive Python development workflows.

    This agent autonomously handles complex coding tasks including:
    - Workflow planning from requirements
    - Code generation with best practices
    - Automatic linting and formatting
    - Error detection and correction
    - Code execution and verification

    Usage:
        agent = CodeAgent()
        result = agent.process_query("Create a calculator app with error handling")
        # Agent will plan, generate, lint, fix, test, and verify automatically
    """

    def __init__(self, language="python", project_type="script", **kwargs):
        """Initialize the Code agent.

        Args:
            language: Programming language ('python' or 'typescript', default: 'python')
            project_type: Project type ('frontend', 'backend', 'fullstack', or 'script', default: 'script')
            **kwargs: Agent initialization parameters:
                - max_steps: Maximum conversation steps (default: 100)
                - model_id: LLM model to use (default: Qwen3-Coder-30B-A3B-Instruct-GGUF)
                - silent_mode: Suppress console output (default: False)
                - debug: Enable debug logging (default: False)
                - show_prompts: Display prompts sent to LLM (default: False)
                - streaming: Enable real-time LLM response streaming (default: False)
        """
        # Store language and project type for prompt selection
        self.language = language
        self.project_type = project_type

        # Default to more steps for complex workflows
        if "max_steps" not in kwargs:
            kwargs["max_steps"] = 100  # Increased for complex project generation
        # Use the coding model for better code understanding
        if "model_id" not in kwargs:
            kwargs["model_id"] = "Qwen3-Coder-30B-A3B-Instruct-GGUF"
        # Disable streaming by default (shows duplicate output)
        # Users can enable with --streaming flag if desired
        if "streaming" not in kwargs:
            kwargs["streaming"] = False
        # Code agent needs more plan iterations for complex projects
        if "max_plan_iterations" not in kwargs:
            kwargs["max_plan_iterations"] = 100

        # Ensure .gaia cache directory exists for temporary files
        self.cache_dir = Path.home() / ".gaia" / "cache"
        self.cache_dir.mkdir(parents=True, exist_ok=True)

        # Security: Configure allowed paths for file operations
        self.allowed_paths = kwargs.pop("allowed_paths", None)
        self.path_validator = PathValidator(self.allowed_paths)

        # Workspace root for API mode (passed from VSCode)
        self.workspace_root = None

        # Progress callback for real-time updates
        self.progress_callback = None

        super().__init__(**kwargs)

        # Store the tools description for later prompt reconstruction
        # (base Agent's __init__ already appended tools to self.system_prompt)
        self.tools_description = self._format_tools_for_prompt()

        # Initialize validators and analyzers
        self.syntax_validator = SyntaxValidator()
        self.antipattern_checker = AntipatternChecker()
        self.ast_analyzer = ASTAnalyzer()
        self.requirements_validator = RequirementsValidator()

        # Log context size requirement if not using cloud LLMs
        if not kwargs.get("use_claude") and not kwargs.get("use_chatgpt"):
            logger.debug(
                "Code Agent requires large context size (32768 tokens). "
                "Ensure Lemonade server is started with: lemonade-server serve --ctx-size 32768"
            )

    def _get_system_prompt(self, _user_input: Optional[str] = None) -> str:
        """Generate the system prompt for the Code agent.

        Uses the language and project_type set during initialization to
        select the appropriate prompt (no runtime detection).

        Args:
            _user_input: Optional user query (not used for detection anymore)

        Returns:
            str: System prompt for code operations
        """
        return get_system_prompt(language=self.language, project_type=self.project_type)

    def _create_console(self):
        """Create console for Code agent output.

        Returns:
            AgentConsole or SilentConsole: Console instance
        """
        if self.silent_mode:
            return SilentConsole()
        return AgentConsole()

    def _register_tools(self) -> None:
        """Register Code-specific tools from mixins."""
        # Register all tools from consolidated mixins
        self.register_code_tools()  # CodeToolsMixin
        self.register_file_io_tools()  # FileIOToolsMixin
        self.register_code_formatting_tools()  # CodeFormattingMixin
        self.register_project_management_tools()  # ProjectManagementMixin
        self.register_testing_tools()  # TestingMixin
        self.register_error_fixing_tools()  # ErrorFixingMixin
        self.register_typescript_tools()  # TypeScriptToolsMixin
        self.register_web_tools()  # WebToolsMixin (Next.js unified approach)
        self.register_prisma_tools()  # PrismaToolsMixin (Prisma database management)
        self.register_cli_tools()  # CLIToolsMixin (Universal CLI execution)
        self.register_external_tools()  # ExternalToolsMixin (Context7 & Perplexity)
        self.register_validation_tools()  # ValidationToolsMixin (Testing and validation)

    def process_query(
        self, user_input: str, workspace_root=None, progress_callback=None, **kwargs
    ):  # pylint: disable=arguments-differ,unused-argument
        """Process a query using the orchestrator workflow.

        Args:
            user_input: The user's query
            workspace_root: Optional workspace directory for file operations (from VSCode)
            progress_callback: Optional callback function for progress updates
            **kwargs: Additional arguments:
                - step_through: Enable step-through debugging (pause after each step)

        Returns:
            Execution result summary from the orchestrator
        """
        # Extract trace options
        trace = kwargs.get("trace", False)
        trace_filename = kwargs.get("filename")

        # Extract step_through from kwargs
        step_through = kwargs.get("step_through", False)

        del kwargs  # Unused - accept for CLI compatibility
        # Store workspace root and change to it if provided
        if workspace_root:
            self.workspace_root = workspace_root
            self.path_validator.add_allowed_path(workspace_root)
            original_cwd = os.getcwd()
            os.chdir(workspace_root)
            logger.debug(f"Changed working directory to: {workspace_root}")

        # Store progress callback for tools to use
        if progress_callback:
            self.progress_callback = progress_callback

        # Update system prompt based on actual user input for language detection
        # Reconstruct full prompt with language-specific base + tools
        base_prompt = self._get_system_prompt(user_input)

        # AI-powered schema inference (Perplexity -> Local LLM -> fallback)
        # This dynamically determines what fields the app needs without hardcoding
        schema_context = ""
        inferred_entity = None
        inferred_fields = None
        try:
            from .schema_inference import format_schema_context, infer_schema

            # Use self.chat for local LLM fallback if Perplexity unavailable
            chat_sdk = getattr(self, "chat", None)
            schema_result = infer_schema(user_input, chat_sdk)

            if schema_result.get("entity"):
                schema_context = format_schema_context(schema_result)
                inferred_entity = schema_result["entity"]
                # Convert fields from list format [{"name": "x", "type": "y"}]
                # to dict format {"x": "y"} expected by tools
                raw_fields = schema_result.get("fields", [])
                if isinstance(raw_fields, list):
                    inferred_fields = {
                        f["name"]: f.get("type", "string")
                        for f in raw_fields
                        if isinstance(f, dict) and "name" in f
                    }
                else:
                    inferred_fields = raw_fields
                logger.debug(
                    f"Schema inferred: {inferred_entity} "
                    f"({len(inferred_fields)} fields) via {schema_result['source']}"
                )
        except Exception as e:
            logger.warning(f"Schema inference failed (continuing without): {e}")

        # Add current working directory context
        workspace_context = ""
        if workspace_root:
            workspace_context = (
                f"\n\nProject directory (dedicated): {os.getcwd()}\n"
                f"IMPORTANT: When creating new projects (e.g., npx create-next-app, cargo new, etc.), "
                f"use '.' as the project name to install directly in this directory, NOT in a subdirectory.\n"
            )
        else:
            workspace_context = f"\n\nCurrent working directory: {os.getcwd()}\n"

        self.system_prompt = (
            base_prompt
            + schema_context  # AI-inferred schema (if available)
            + workspace_context
            + f"\n\n==== AVAILABLE TOOLS ====\n{self.tools_description}\n\n"
        )

        try:
            # Orchestrator is the ONLY workflow path
            # Handles correct step ordering for all project types
            execution_result = self._process_with_orchestrator(
                user_input,
                workspace_root,
                entity_name=inferred_entity,
                schema_fields=inferred_fields,
                step_through=step_through,
            )

            # Write trace to file if requested
            if trace:
                try:
                    # Construct trace data
                    trace_data = {
                        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
                        "agent": "CodeAgent",
                        "query": user_input,
                        "workspace_root": workspace_root or os.getcwd(),
                        "result": {
                            "success": execution_result.success,
                            "summary": execution_result.summary,
                            "outputs": execution_result.outputs,
                            "errors": execution_result.errors,
                        },
                    }

                    if not trace_filename:
                        timestamp = time.strftime("%Y%m%d_%H%M%S")
                        trace_filename = f"agent_trace_{timestamp}.json"

                    # Write to file
                    with open(trace_filename, "w", encoding="utf-8") as f:
                        json.dump(trace_data, f, indent=2)

                    logger.info(f"Trace written to {trace_filename}")
                    if not self.silent_mode:
                        self.console.print(f"\nTrace written to {trace_filename}")

                except Exception as e:
                    logger.error(f"Failed to write trace file: {e}")

            # Return dict matching app.py's expected format
            project_dir = execution_result.outputs.get(
                "project_dir", workspace_root or os.getcwd()
            )
            return {
                "status": "success" if execution_result.success else "error",
                "result": execution_result.summary,
                "phases_completed": execution_result.phases_completed,
                "phases_failed": execution_result.phases_failed,
                "steps_succeeded": execution_result.steps_succeeded,
                "steps_failed": execution_result.steps_failed,
                "errors": execution_result.errors,
                "project_dir": project_dir,
            }
        finally:
            # Restore original working directory if we changed it
            if workspace_root:
                os.chdir(original_cwd)
                logger.info(f"Restored working directory to: {original_cwd}")

    def _create_tool_executor(self) -> Callable[[str, Dict[str, Any]], Any]:
        """Create a tool executor function that uses registered tools.

        Returns:
            Function that executes tools by name
        """

        def execute_tool(tool_name: str, tool_args: Dict[str, Any]) -> Any:
            """Execute a registered tool."""
            if tool_name not in _TOOL_REGISTRY:
                return {"success": False, "error": f"Unknown tool: {tool_name}"}

            tool_func = _TOOL_REGISTRY[tool_name]["function"]
            try:
                return tool_func(**tool_args)
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.exception(f"Tool execution failed: {tool_name}")
                return {"success": False, "error": str(e)}

        return execute_tool

    def _process_with_orchestrator(
        self,
        user_input: str,
        workspace_root: Optional[str] = None,
        entity_name: Optional[str] = None,
        schema_fields: Optional[Dict[str, str]] = None,
        step_through: bool = False,
    ) -> ExecutionResult:
        """Process request using the LLM-driven orchestrator.

        Args:
            user_input: User's request
            workspace_root: Optional workspace directory
            entity_name: Entity name from schema inference (e.g., "Todo")
            schema_fields: Field definitions from schema inference
            step_through: Enable step-through debugging

        Returns:
            ExecutionResult with workflow execution status

        Raises:
            ValueError: If no LLM client (chat) is available
        """
        tool_executor = self._create_tool_executor()

        # Create user context with inferred schema
        context = UserContext(
            user_request=user_input,
            project_dir=workspace_root or os.getcwd(),
            language=self.language,
            project_type=self.project_type,
            entity_name=entity_name,
            schema_fields=schema_fields,
        )

        # Create LLM fixer wrapper that adapts signature
        # ErrorHandler expects (error_text, code) -> Optional[fixed_code]
        # _fix_code_with_llm expects (code, file_path, error_msg) -> Optional[fixed_code]
        def llm_fixer(error_text: str, code: str) -> Optional[str]:
            """Wrapper to adapt _fix_code_with_llm signature for ErrorHandler."""
            return self._fix_code_with_llm(code, "file.ts", error_text)

        # Get LLM client for checklist generation (required)
        # The chat SDK has a send(message, timeout) method compatible with ChatSDK protocol
        llm_client = getattr(self, "chat", None)
        if llm_client is None:
            raise ValueError(
                "LLM client (chat) is required for orchestrator. "
                "Ensure the agent has a chat SDK configured."
            )

        orchestrator = Orchestrator(
            tool_executor=tool_executor,
            llm_client=llm_client,
            llm_fixer=llm_fixer,
            progress_callback=self._orchestrator_progress_callback,
            console=self.console,
        )

        logger.debug("Running LLM-driven orchestrator")
        return orchestrator.execute(context, step_through=step_through)

    def _orchestrator_progress_callback(
        self, phase: str, step: str, current: int, total: int
    ) -> None:
        """Handle progress updates from orchestrator."""
        if self.progress_callback:
            self.progress_callback(
                {
                    "type": "progress",
                    "phase": phase,
                    "step": step,
                    "current": current,
                    "total": total,
                }
            )
        # Also print to console if not silent
        # Skip "checklist" phase printing as ChecklistExecutor handles its own output
        if not self.silent_mode and hasattr(self, "console") and phase != "checklist":
            self.console.print_info(f"[{current}/{total}] {phase}: {step}")

    def display_result(
        self,
        title: str = "Result",
        result: Dict[str, Any] = None,
        print_result: bool = False,
    ) -> None:
        """Display orchestrator execution result with a nice summary.

        Args:
            title: Title for the result display
            result: Orchestrator result dictionary
            print_result: If True, also print raw JSON
        """
        if result is None:
            self.console.print_warning("No result available to display.")
            return

        # Print raw JSON if requested
        if print_result:
            self.console.pretty_print_json(result, title)
            return

        # Build a nice summary for orchestrator results
        status = result.get("status", "unknown")
        phases_completed = result.get("phases_completed", [])
        phases_failed = result.get("phases_failed", [])
        steps_succeeded = result.get("steps_succeeded", 0)
        steps_failed = result.get("steps_failed", 0)
        errors = result.get("errors", [])

        self.console.print("")  # Blank line before summary

        # Status banner
        if status == "success":
            self.console.print("=" * 60)
            self.console.print_success(" PROJECT GENERATION COMPLETE")
            self.console.print("=" * 60)
        else:
            self.console.print("=" * 60)
            self.console.print_warning(" PROJECT GENERATION FINISHED WITH ISSUES")
            self.console.print("=" * 60)

        self.console.print("")

        # Phase summary
        if phases_completed:
            self.console.print(f"Phases completed: {', '.join(phases_completed)}")
        if phases_failed:
            self.console.print(f"Phases failed: {', '.join(phases_failed)}")

        # Step summary
        total_steps = steps_succeeded + steps_failed
        self.console.print(f"Steps: {steps_succeeded}/{total_steps} succeeded")

        # Errors/warnings
        if errors:
            self.console.print("")
            self.console.print("Warnings/Errors:")
            for error in errors[:5]:  # Show first 5 errors
                self.console.print(f" - {error}")
            if len(errors) > 5:
                self.console.print(f" ... and {len(errors) - 5} more")

        self.console.print("")

        # Next steps
        if status == "success":
            project_dir = result.get("project_dir", os.getcwd())
            self.console.print("Next steps:")
            self.console.print(f" 1. cd {project_dir}")
            self.console.print(" 2. npm run dev")
            self.console.print(" 3. Open http://localhost:3000 in your browser")
        else:
            self.console.print("Next steps:")
            self.console.print(" 1. Review the errors above")
            self.console.print(" 2. Run the command again to retry failed steps")

        self.console.print("")
        self.console.print("=" * 60)


def main():
    """Main entry point for testing."""
    agent = CodeAgent()
    print("CodeAgent initialized successfully")
    print(f"Cache directory: {agent.cache_dir}")
    print(
        "Validators: syntax_validator, antipattern_checker, ast_analyzer, "
        "requirements_validator"
    )


if __name__ == "__main__":
    main()
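
For orientation, the signatures and docstrings in agent.py above suggest the agent is driven roughly as follows. This is a minimal sketch inferred only from this diff, not an official example: the query text and workspace path are hypothetical, and it assumes the package is installed and a local Lemonade server is running with the large context size the __init__ log message advises.

# Minimal usage sketch based solely on the CodeAgent signatures shown in this diff.
# Assumption: a Lemonade server started with `lemonade-server serve --ctx-size 32768`.
from gaia.agents.code.agent import CodeAgent

def on_progress(update):
    # process_query forwards dicts shaped like
    # {"type": "progress", "phase": ..., "step": ..., "current": ..., "total": ...}
    print(f"[{update['current']}/{update['total']}] {update['phase']}: {update['step']}")

agent = CodeAgent(
    language="typescript",      # 'python' (default) or 'typescript'
    project_type="fullstack",   # 'frontend', 'backend', 'fullstack', or 'script'
    silent_mode=False,
)

result = agent.process_query(
    "Create a todo app with a Prisma-backed API",  # hypothetical request
    workspace_root="/tmp/todo-app",                # hypothetical path; the agent chdir()s here for the run
    progress_callback=on_progress,
    trace=True,                                    # writes agent_trace_<timestamp>.json
)

print(result["status"], result.get("project_dir"))
agent.display_result(result=result)

The returned dict mirrors what display_result() consumes: status, result, phases_completed, phases_failed, steps_succeeded, steps_failed, errors, and project_dir.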