amd-gaia 0.15.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181) hide show
  1. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/METADATA +223 -223
  2. amd_gaia-0.15.1.dist-info/RECORD +178 -0
  3. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/entry_points.txt +1 -0
  4. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/licenses/LICENSE.md +20 -20
  5. gaia/__init__.py +29 -29
  6. gaia/agents/__init__.py +19 -19
  7. gaia/agents/base/__init__.py +9 -9
  8. gaia/agents/base/agent.py +2177 -2177
  9. gaia/agents/base/api_agent.py +120 -120
  10. gaia/agents/base/console.py +1841 -1841
  11. gaia/agents/base/errors.py +237 -237
  12. gaia/agents/base/mcp_agent.py +86 -86
  13. gaia/agents/base/tools.py +83 -83
  14. gaia/agents/blender/agent.py +556 -556
  15. gaia/agents/blender/agent_simple.py +133 -135
  16. gaia/agents/blender/app.py +211 -211
  17. gaia/agents/blender/app_simple.py +41 -41
  18. gaia/agents/blender/core/__init__.py +16 -16
  19. gaia/agents/blender/core/materials.py +506 -506
  20. gaia/agents/blender/core/objects.py +316 -316
  21. gaia/agents/blender/core/rendering.py +225 -225
  22. gaia/agents/blender/core/scene.py +220 -220
  23. gaia/agents/blender/core/view.py +146 -146
  24. gaia/agents/chat/__init__.py +9 -9
  25. gaia/agents/chat/agent.py +835 -835
  26. gaia/agents/chat/app.py +1058 -1058
  27. gaia/agents/chat/session.py +508 -508
  28. gaia/agents/chat/tools/__init__.py +15 -15
  29. gaia/agents/chat/tools/file_tools.py +96 -96
  30. gaia/agents/chat/tools/rag_tools.py +1729 -1729
  31. gaia/agents/chat/tools/shell_tools.py +436 -436
  32. gaia/agents/code/__init__.py +7 -7
  33. gaia/agents/code/agent.py +549 -549
  34. gaia/agents/code/cli.py +377 -0
  35. gaia/agents/code/models.py +135 -135
  36. gaia/agents/code/orchestration/__init__.py +24 -24
  37. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  38. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  39. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  40. gaia/agents/code/orchestration/factories/base.py +63 -63
  41. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  42. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  43. gaia/agents/code/orchestration/orchestrator.py +841 -841
  44. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  45. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  46. gaia/agents/code/orchestration/steps/base.py +188 -188
  47. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  48. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  49. gaia/agents/code/orchestration/steps/python.py +307 -307
  50. gaia/agents/code/orchestration/template_catalog.py +469 -469
  51. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  52. gaia/agents/code/orchestration/workflows/base.py +80 -80
  53. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  54. gaia/agents/code/orchestration/workflows/python.py +94 -94
  55. gaia/agents/code/prompts/__init__.py +11 -11
  56. gaia/agents/code/prompts/base_prompt.py +77 -77
  57. gaia/agents/code/prompts/code_patterns.py +2036 -2036
  58. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  59. gaia/agents/code/prompts/python_prompt.py +109 -109
  60. gaia/agents/code/schema_inference.py +365 -365
  61. gaia/agents/code/system_prompt.py +41 -41
  62. gaia/agents/code/tools/__init__.py +42 -42
  63. gaia/agents/code/tools/cli_tools.py +1138 -1138
  64. gaia/agents/code/tools/code_formatting.py +319 -319
  65. gaia/agents/code/tools/code_tools.py +769 -769
  66. gaia/agents/code/tools/error_fixing.py +1347 -1347
  67. gaia/agents/code/tools/external_tools.py +180 -180
  68. gaia/agents/code/tools/file_io.py +845 -845
  69. gaia/agents/code/tools/prisma_tools.py +190 -190
  70. gaia/agents/code/tools/project_management.py +1016 -1016
  71. gaia/agents/code/tools/testing.py +321 -321
  72. gaia/agents/code/tools/typescript_tools.py +122 -122
  73. gaia/agents/code/tools/validation_parsing.py +461 -461
  74. gaia/agents/code/tools/validation_tools.py +806 -806
  75. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  76. gaia/agents/code/validators/__init__.py +16 -16
  77. gaia/agents/code/validators/antipattern_checker.py +241 -241
  78. gaia/agents/code/validators/ast_analyzer.py +197 -197
  79. gaia/agents/code/validators/requirements_validator.py +145 -145
  80. gaia/agents/code/validators/syntax_validator.py +171 -171
  81. gaia/agents/docker/__init__.py +7 -7
  82. gaia/agents/docker/agent.py +642 -642
  83. gaia/agents/emr/__init__.py +8 -8
  84. gaia/agents/emr/agent.py +1506 -1506
  85. gaia/agents/emr/cli.py +1322 -1322
  86. gaia/agents/emr/constants.py +475 -475
  87. gaia/agents/emr/dashboard/__init__.py +4 -4
  88. gaia/agents/emr/dashboard/server.py +1974 -1974
  89. gaia/agents/jira/__init__.py +11 -11
  90. gaia/agents/jira/agent.py +894 -894
  91. gaia/agents/jira/jql_templates.py +299 -299
  92. gaia/agents/routing/__init__.py +7 -7
  93. gaia/agents/routing/agent.py +567 -570
  94. gaia/agents/routing/system_prompt.py +75 -75
  95. gaia/agents/summarize/__init__.py +11 -0
  96. gaia/agents/summarize/agent.py +885 -0
  97. gaia/agents/summarize/prompts.py +129 -0
  98. gaia/api/__init__.py +23 -23
  99. gaia/api/agent_registry.py +238 -238
  100. gaia/api/app.py +305 -305
  101. gaia/api/openai_server.py +575 -575
  102. gaia/api/schemas.py +186 -186
  103. gaia/api/sse_handler.py +373 -373
  104. gaia/apps/__init__.py +4 -4
  105. gaia/apps/llm/__init__.py +6 -6
  106. gaia/apps/llm/app.py +173 -169
  107. gaia/apps/summarize/app.py +116 -633
  108. gaia/apps/summarize/html_viewer.py +133 -133
  109. gaia/apps/summarize/pdf_formatter.py +284 -284
  110. gaia/audio/__init__.py +2 -2
  111. gaia/audio/audio_client.py +439 -439
  112. gaia/audio/audio_recorder.py +269 -269
  113. gaia/audio/kokoro_tts.py +599 -599
  114. gaia/audio/whisper_asr.py +432 -432
  115. gaia/chat/__init__.py +16 -16
  116. gaia/chat/app.py +430 -430
  117. gaia/chat/prompts.py +522 -522
  118. gaia/chat/sdk.py +1228 -1225
  119. gaia/cli.py +5481 -5632
  120. gaia/database/__init__.py +10 -10
  121. gaia/database/agent.py +176 -176
  122. gaia/database/mixin.py +290 -290
  123. gaia/database/testing.py +64 -64
  124. gaia/eval/batch_experiment.py +2332 -2332
  125. gaia/eval/claude.py +542 -542
  126. gaia/eval/config.py +37 -37
  127. gaia/eval/email_generator.py +512 -512
  128. gaia/eval/eval.py +3179 -3179
  129. gaia/eval/groundtruth.py +1130 -1130
  130. gaia/eval/transcript_generator.py +582 -582
  131. gaia/eval/webapp/README.md +167 -167
  132. gaia/eval/webapp/package-lock.json +875 -875
  133. gaia/eval/webapp/package.json +20 -20
  134. gaia/eval/webapp/public/app.js +3402 -3402
  135. gaia/eval/webapp/public/index.html +87 -87
  136. gaia/eval/webapp/public/styles.css +3661 -3661
  137. gaia/eval/webapp/server.js +415 -415
  138. gaia/eval/webapp/test-setup.js +72 -72
  139. gaia/llm/__init__.py +9 -2
  140. gaia/llm/base_client.py +60 -0
  141. gaia/llm/exceptions.py +12 -0
  142. gaia/llm/factory.py +70 -0
  143. gaia/llm/lemonade_client.py +3236 -3221
  144. gaia/llm/lemonade_manager.py +294 -294
  145. gaia/llm/providers/__init__.py +9 -0
  146. gaia/llm/providers/claude.py +108 -0
  147. gaia/llm/providers/lemonade.py +120 -0
  148. gaia/llm/providers/openai_provider.py +79 -0
  149. gaia/llm/vlm_client.py +382 -382
  150. gaia/logger.py +189 -189
  151. gaia/mcp/agent_mcp_server.py +245 -245
  152. gaia/mcp/blender_mcp_client.py +138 -138
  153. gaia/mcp/blender_mcp_server.py +648 -648
  154. gaia/mcp/context7_cache.py +332 -332
  155. gaia/mcp/external_services.py +518 -518
  156. gaia/mcp/mcp_bridge.py +811 -550
  157. gaia/mcp/servers/__init__.py +6 -6
  158. gaia/mcp/servers/docker_mcp.py +83 -83
  159. gaia/perf_analysis.py +361 -0
  160. gaia/rag/__init__.py +10 -10
  161. gaia/rag/app.py +293 -293
  162. gaia/rag/demo.py +304 -304
  163. gaia/rag/pdf_utils.py +235 -235
  164. gaia/rag/sdk.py +2194 -2194
  165. gaia/security.py +163 -163
  166. gaia/talk/app.py +289 -289
  167. gaia/talk/sdk.py +538 -538
  168. gaia/testing/__init__.py +87 -87
  169. gaia/testing/assertions.py +330 -330
  170. gaia/testing/fixtures.py +333 -333
  171. gaia/testing/mocks.py +493 -493
  172. gaia/util.py +46 -46
  173. gaia/utils/__init__.py +33 -33
  174. gaia/utils/file_watcher.py +675 -675
  175. gaia/utils/parsing.py +223 -223
  176. gaia/version.py +100 -100
  177. amd_gaia-0.15.0.dist-info/RECORD +0 -168
  178. gaia/agents/code/app.py +0 -266
  179. gaia/llm/llm_client.py +0 -723
  180. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/WHEEL +0 -0
  181. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/top_level.txt +0 -0
@@ -1,570 +1,567 @@
1
- # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
- # SPDX-License-Identifier: MIT
3
- """RoutingAgent - Intelligently routes requests and disambiguates parameters."""
4
-
5
- import json
6
- import os
7
- from typing import Any, Dict, List, Optional
8
-
9
- from gaia.agents.base.agent import Agent
10
- from gaia.llm.llm_client import LLMClient
11
- from gaia.logger import get_logger
12
-
13
- from .system_prompt import ROUTING_ANALYSIS_PROMPT
14
-
15
- logger = get_logger(__name__)
16
-
17
-
18
- class RoutingAgent:
19
- """
20
- Routes user requests to appropriate agents with intelligent disambiguation.
21
-
22
- Currently handles Code agent routing. Future: Jira, Docker, etc.
23
-
24
- Flow:
25
- 1. Analyze query with LLM to detect agent and parameters
26
- 2. If parameters unknown, ask user for clarification
27
- 3. Recursively re-analyze with user's response as added context
28
- 4. Once resolved, return configured agent ready to execute
29
- """
30
-
31
def __init__(
    self,
    api_mode: bool = False,
    output_handler=None,
    **agent_kwargs,
):
    """Initialize routing agent with LLM client.

    Args:
        api_mode: If True, skip interactive questions and use defaults/best-guess.
            If False (default), ask clarification questions via input().
        output_handler: Optional OutputHandler for streaming events (passed to
            created agents). If None (default), agents create their own AgentConsole.
        **agent_kwargs: Additional kwargs to pass to created agents.
    """
    self.api_mode = api_mode
    self.output_handler = output_handler

    # Resolve the Lemonade endpoint: explicit kwarg wins, then environment,
    # then the hard-coded localhost default.
    resolved_url = agent_kwargs.get("base_url")
    if resolved_url is None:
        resolved_url = os.getenv("LEMONADE_BASE_URL", "http://localhost:8000/api/v1")

    # LLM client used only for routing analysis; provider flags are pulled
    # straight out of the agent kwargs so callers configure one place.
    self.llm_client = LLMClient(
        use_claude=agent_kwargs.get("use_claude", False),
        use_openai=agent_kwargs.get("use_chatgpt", False),
        base_url=resolved_url,
    )

    # Keep the raw kwargs so created agents receive the same configuration.
    self.agent_kwargs = agent_kwargs

    # Model used for routing analysis (overridable via env var).
    self.routing_model = os.getenv(
        "AGENT_ROUTING_MODEL", "Qwen3-Coder-30B-A3B-Instruct-GGUF"
    )
74
-
75
def process_query(
    self,
    query: str,
    conversation_history: Optional[List[Dict[str, str]]] = None,
    execute: Optional[bool] = None,  # fixed: was `bool = None` (invalid implicit Optional)
    workspace_root: Optional[str] = None,
    **kwargs,
):
    """
    Process query with optional conversation history from disambiguation rounds.

    Args:
        query: Original user query.
        conversation_history: List of conversation turns
            [{"role": "user", "content": "..."}].
        execute: If True, execute the routed agent and return result.
            If False, return the agent instance (CLI behavior).
            If None (default), uses api_mode (True for API, False for CLI).
        workspace_root: Optional workspace directory for agent execution (API mode).
        **kwargs: Additional kwargs passed to agent.process_query() when execute=True.

    Returns:
        If execute=False: Configured agent instance ready to execute.
        If execute=True: Execution result from agent.process_query().

    Example (CLI mode - default):
        router = RoutingAgent()
        agent = router.process_query("Create Express API")
        result = agent.process_query("Create Express API")

    Example (API mode with execute):
        router = RoutingAgent(api_mode=True, output_handler=sse_handler)
        result = router.process_query("Create Express API")  # auto-executes
    """
    # Default execute based on api_mode: API mode auto-executes, CLI returns agent
    if execute is None:
        execute = self.api_mode

    if conversation_history is None:
        conversation_history = []

    # Add current query to conversation history if not already there
    if not conversation_history or conversation_history[-1].get("content") != query:
        conversation_history.append({"role": "user", "content": query})

    logger.debug(
        f"Routing analysis for: '{query}' (conversation turns: {len(conversation_history)})"
    )

    # Analyze with LLM using conversation history
    analysis = self._analyze_with_llm(conversation_history)

    logger.debug(f"Analysis result: {analysis}")

    # If language could not be determined, default to TypeScript/Next.js
    analysis = self._default_unknown_language_to_typescript(analysis)

    # Check if we have all required parameters
    if self._has_unknowns(analysis):
        if self.api_mode:
            # API mode: skip interactive questions, use defaults
            logger.info("API mode: using defaults for unknown parameters")
            agent = self._create_agent_with_defaults(analysis)
        else:
            # Interactive mode: ask user for clarification
            question = self._generate_clarification_question(analysis)
            print(f"\n{question}")
            user_response = input("> ").strip()

            if not user_response:
                logger.warning("Empty user response, using defaults")
                # Use defaults if user just hits enter
                agent = self._create_agent_with_defaults(analysis)
            else:
                # Add assistant question and user response to conversation history
                conversation_history.append(
                    {"role": "assistant", "content": question}
                )
                conversation_history.append(
                    {"role": "user", "content": user_response}
                )

                # Recursive call with enriched conversation history
                return self.process_query(
                    query,
                    conversation_history,
                    execute=execute,
                    workspace_root=workspace_root,
                    **kwargs,
                )
    else:
        # All parameters resolved, create agent
        agent = self._create_agent(analysis)

    # Execute if requested (API mode), otherwise return agent (CLI mode)
    if execute:
        return agent.process_query(query, workspace_root=workspace_root, **kwargs)
    return agent
172
-
173
def _analyze_with_llm(
    self, conversation_history: List[Dict[str, str]]
) -> Dict[str, Any]:
    """
    Analyze query with LLM to determine agent and parameters.

    Args:
        conversation_history: Full conversation including clarifications.

    Returns:
        Analysis dict with agent, parameters, confidence, reasoning.

    Raises:
        RuntimeError: If the LLM call itself fails (non-JSON errors fall back
            to a default analysis instead).
    """
    # Render the conversation as "Role: content" lines; other roles are dropped.
    role_labels = {"user": "User", "assistant": "Assistant"}
    rendered = [
        f"{role_labels[turn['role']]}: {turn['content']}"
        for turn in conversation_history
        if turn["role"] in role_labels
    ]
    full_context = "\n".join(rendered)

    # ROUTING_ANALYSIS_PROMPT embeds a literal 'User Request: "{query}"' marker;
    # everything after that marker is the instruction body reused here.
    prompt_tail = ROUTING_ANALYSIS_PROMPT.split('User Request: "{query}"')[1]
    analysis_prompt = (
        "Analyze this conversation and determine the configuration parameters.\n"
        "\n"
        "Conversation:\n"
        f"{full_context}\n"
        "\n"
        f"{prompt_tail}"
    )

    # Wrap in Qwen chat format
    prompt = (
        "<|im_start|>user\n" + analysis_prompt + "<|im_end|>\n<|im_start|>assistant\n"
    )

    try:
        raw = self.llm_client.generate(
            prompt=prompt,
            model=self.routing_model,
            max_tokens=500,
            stop=["<|im_end|>", "<|im_start|>"],
            stream=False,
        )

        response_text = raw.strip()

        # Strip a markdown code fence if the model wrapped its JSON in one.
        if "```json" in response_text:
            response_text = (
                response_text.split("```json")[1].split("```")[0].strip()
            )
        elif "```" in response_text:
            response_text = response_text.split("```")[1].split("```")[0].strip()

        return json.loads(response_text)

    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse LLM response as JSON: {e}")
        logger.error(f"Response was: {response_text}")
        # Fallback to defaults so routing can continue with disambiguation.
        return {
            "agent": "code",
            "parameters": {"language": "unknown", "project_type": "unknown"},
            "confidence": 0.0,
            "reasoning": "JSON parse error, using defaults",
        }
    except Exception as e:
        logger.error(f"Error analyzing query: {e}")
        raise RuntimeError(f"Failed to analyze query with Lemonade: {e}") from e
247
-
248
- def _fallback_keyword_detection(self, query: str) -> Dict[str, Any]:
249
- """
250
- Fallback keyword-based detection when LLM fails.
251
-
252
- Args:
253
- query: User query
254
-
255
- Returns:
256
- Analysis dict with detected language and project type
257
- """
258
- query_lower = query.lower()
259
-
260
- # TypeScript/Node.js indicators
261
- ts_keywords = [
262
- "nextjs",
263
- "next.js",
264
- "express",
265
- "nestjs",
266
- "koa",
267
- "fastify",
268
- "mongodb",
269
- "mongoose",
270
- "node.js",
271
- "nodejs",
272
- "react",
273
- "vue",
274
- "angular",
275
- "svelte",
276
- "vite",
277
- "typescript",
278
- ]
279
-
280
- # Python indicators
281
- py_keywords = ["django", "flask", "fastapi", "pandas", "numpy", "python"]
282
-
283
- # Detect language
284
- has_ts = any(kw in query_lower for kw in ts_keywords)
285
- has_py = any(kw in query_lower for kw in py_keywords)
286
-
287
- if has_ts:
288
- language = "typescript"
289
- reasoning = f"Detected TypeScript keywords: {[kw for kw in ts_keywords if kw in query_lower]}"
290
- elif has_py:
291
- language = "python"
292
- reasoning = f"Detected Python keywords: {[kw for kw in py_keywords if kw in query_lower]}"
293
- else:
294
- language = "unknown"
295
- reasoning = "No framework keywords detected"
296
-
297
- # Detect project type based on language
298
- if language == "typescript":
299
- # All TypeScript web apps use Next.js fullstack approach
300
- if any(kw in query_lower for kw in ["cli", "tool", "script", "utility"]):
301
- project_type = "script"
302
- else:
303
- # Default to fullstack for any web-related TypeScript project
304
- project_type = "fullstack"
305
- elif language == "python":
306
- # Python project types
307
- if any(
308
- kw in query_lower
309
- for kw in [
310
- "api",
311
- "rest",
312
- "backend",
313
- "server",
314
- "fastapi",
315
- "flask",
316
- "django",
317
- ]
318
- ):
319
- project_type = "api"
320
- elif any(kw in query_lower for kw in ["web", "website", "dashboard"]):
321
- project_type = "web"
322
- elif any(
323
- kw in query_lower
324
- for kw in ["cli", "tool", "script", "utility", "calculator"]
325
- ):
326
- project_type = "script"
327
- else:
328
- project_type = "unknown"
329
- else:
330
- # Unknown language - try to detect project type from keywords
331
- if any(
332
- kw in query_lower
333
- for kw in [
334
- "api",
335
- "rest",
336
- "backend",
337
- "web",
338
- "app",
339
- "dashboard",
340
- "frontend",
341
- ]
342
- ):
343
- project_type = "fullstack" # Assume web app
344
- elif any(kw in query_lower for kw in ["cli", "tool", "script", "utility"]):
345
- project_type = "script"
346
- else:
347
- project_type = "unknown"
348
-
349
- return {
350
- "agent": "code",
351
- "parameters": {"language": language, "project_type": project_type},
352
- "confidence": 0.8 if language != "unknown" else 0.3,
353
- "reasoning": f"Keyword detection: {reasoning}",
354
- }
355
-
356
- def _has_unknowns(self, analysis: Dict[str, Any]) -> bool:
357
- """
358
- Check if analysis has unknown parameters that need disambiguation.
359
-
360
- Args:
361
- analysis: Analysis result from LLM
362
-
363
- Returns:
364
- True if any required parameter is unknown or confidence is low
365
- """
366
- params = analysis.get("parameters", {})
367
- confidence = analysis.get("confidence", 0.0)
368
-
369
- # Check for explicit unknowns
370
- has_unknown_params = (
371
- params.get("language") == "unknown"
372
- or params.get("project_type") == "unknown"
373
- )
374
-
375
- # Check for low confidence (< 0.9 means LLM is guessing)
376
- low_confidence = confidence < 0.9
377
-
378
- return has_unknown_params or low_confidence
379
-
380
- def _generate_clarification_question(self, analysis: Dict[str, Any]) -> str:
381
- """
382
- Generate natural language clarification question based on unknowns.
383
-
384
- Args:
385
- analysis: Analysis result with unknowns
386
-
387
- Returns:
388
- Question string to ask user
389
- """
390
- params = analysis.get("parameters", {})
391
- language = params.get("language")
392
- project_type = params.get("project_type")
393
-
394
- if language == "unknown" and project_type == "unknown":
395
- return (
396
- "What kind of application would you like to build?\n"
397
- "(e.g., 'Next.js blog', 'Python CLI tool', 'Django API', 'React dashboard')"
398
- )
399
- elif language == "unknown":
400
- if project_type == "fullstack":
401
- return (
402
- "What language/framework would you like to use for your web application?\n"
403
- "(e.g., 'Next.js/TypeScript' for web apps, 'Django/Python' for APIs)"
404
- )
405
- elif project_type == "script":
406
- return (
407
- "What language would you like to use for your script?\n"
408
- "(e.g., 'Python', 'TypeScript/Node.js')"
409
- )
410
- else:
411
- return (
412
- "What language/framework would you like to use?\n"
413
- "(e.g., 'Next.js', 'Django', 'Python', 'TypeScript')"
414
- )
415
- elif project_type == "unknown":
416
- if language == "typescript":
417
- return (
418
- "What type of TypeScript project would you like to create?\n"
419
- "(e.g., 'web app' for Next.js full-stack, 'CLI tool' for Node.js script)"
420
- )
421
- else: # python
422
- return (
423
- "What type of Python project would you like to create?\n"
424
- "(e.g., 'REST API', 'web app', 'CLI tool', 'data analysis script')"
425
- )
426
-
427
- return "Please provide more details about your project."
428
-
429
- def _get_console(self):
430
- """Return the configured output handler or a default console."""
431
- if self.output_handler:
432
- return self.output_handler
433
-
434
- from gaia.agents.base.console import AgentConsole
435
-
436
- return AgentConsole()
437
-
438
- def _enforce_typescript_only(
439
- self, language: str, project_type: str, console
440
- ) -> tuple[str, str]:
441
- """Warn and normalize when routing to unsupported languages."""
442
- is_nextjs = language == "typescript" and project_type == "fullstack"
443
-
444
- if not is_nextjs:
445
- console.print_error(
446
- "Only TypeScript (Next.js) is currently supported. "
447
- "Please try a Next.js/TypeScript request."
448
- )
449
- raise SystemExit(1)
450
-
451
- return language, project_type
452
-
453
- def _default_unknown_language_to_typescript(
454
- self, analysis: Dict[str, Any]
455
- ) -> Dict[str, Any]:
456
- """Default unknown language/project type to TypeScript/Next.js."""
457
- params = analysis.get("parameters", {})
458
- language = params.get("language")
459
-
460
- if language != "unknown":
461
- return analysis
462
-
463
- console = self._get_console()
464
- console.print_info(
465
- "Defaulting to TypeScript (Next.js) because the language could not be determined."
466
- )
467
-
468
- params["language"] = "typescript"
469
- if params.get("project_type") == "unknown":
470
- params["project_type"] = "fullstack"
471
-
472
- analysis["parameters"] = params
473
- analysis["confidence"] = 1.0
474
- analysis["reasoning"] = (
475
- analysis.get("reasoning", "")
476
- + " Defaulted to TypeScript/Next.js due to unknown language."
477
- ).strip()
478
-
479
- return analysis
480
-
481
def _create_agent(self, analysis: Dict[str, Any]) -> Agent:
    """
    Create configured agent based on analysis.

    Args:
        analysis: Resolved analysis with all parameters.

    Returns:
        Configured agent instance.

    Raises:
        ValueError: If the analysis names an agent type other than "code".
    """
    agent_type = analysis.get("agent", "code")
    params = analysis.get("parameters", {})

    # Only the code agent is routable today; fail loudly on anything else.
    if agent_type != "code":
        raise ValueError(f"Unknown agent type: {agent_type}")

    from gaia.agents.code.agent import CodeAgent

    language = params.get("language", "python")
    project_type = params.get("project_type", "script")

    logger.debug(
        f"Creating CodeAgent with language={language}, project_type={project_type}"
    )

    # Use passed output_handler or create AgentConsole (CLI default)
    console = self._get_console()
    language, project_type = self._enforce_typescript_only(
        language, project_type, console
    )

    # Print agent selected message
    console.print_agent_selected("CodeAgent", language, project_type)

    # Build agent kwargs, including output_handler if provided
    init_kwargs = dict(self.agent_kwargs)
    if self.output_handler:
        init_kwargs["output_handler"] = self.output_handler

    # Merge routing params with any additional kwargs
    return CodeAgent(language=language, project_type=project_type, **init_kwargs)
524
-
525
def _create_agent_with_defaults(self, analysis: Dict[str, Any]) -> Agent:
    """
    Create agent with default values for unknown parameters.

    Args:
        analysis: Analysis that may have unknowns.

    Returns:
        Configured agent with defaults.
    """
    params = analysis.get("parameters", {})

    # Language: fall back to Python when undetermined.
    language = params.get("language")
    if language == "unknown":
        language = "python"
        logger.info("Defaulting to Python for unknown language")

    # Project type: default depends on the (possibly defaulted) language.
    project_type = params.get("project_type")
    if project_type == "unknown":
        if language == "typescript":
            project_type = "fullstack"
            logger.info("Defaulting to fullstack (Next.js) for TypeScript")
        else:
            project_type = "script"
            logger.info("Defaulting to script for Python")

    from gaia.agents.code.agent import CodeAgent

    console = self._get_console()
    language, project_type = self._enforce_typescript_only(
        language, project_type, console
    )

    # Build agent kwargs, including output_handler if provided
    init_kwargs = dict(self.agent_kwargs)
    if self.output_handler:
        init_kwargs["output_handler"] = self.output_handler

    return CodeAgent(language=language, project_type=project_type, **init_kwargs)
1
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
+ # SPDX-License-Identifier: MIT
3
+ """RoutingAgent - Intelligently routes requests and disambiguates parameters."""
4
+
5
+ import json
6
+ import os
7
+ from typing import Any, Dict, List, Optional
8
+
9
+ from gaia.agents.base.agent import Agent
10
+ from gaia.llm import create_client
11
+ from gaia.logger import get_logger
12
+
13
+ from .system_prompt import ROUTING_ANALYSIS_PROMPT
14
+
15
+ logger = get_logger(__name__)
16
+
17
+
18
+ class RoutingAgent:
19
+ """
20
+ Routes user requests to appropriate agents with intelligent disambiguation.
21
+
22
+ Currently handles Code agent routing. Future: Jira, Docker, etc.
23
+
24
+ Flow:
25
+ 1. Analyze query with LLM to detect agent and parameters
26
+ 2. If parameters unknown, ask user for clarification
27
+ 3. Recursively re-analyze with user's response as added context
28
+ 4. Once resolved, return configured agent ready to execute
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ api_mode: bool = False,
34
+ output_handler=None,
35
+ **agent_kwargs,
36
+ ):
37
+ """Initialize routing agent with LLM client.
38
+
39
+ Args:
40
+ api_mode: If True, skip interactive questions and use defaults/best-guess.
41
+ If False (default), ask clarification questions via input().
42
+ output_handler: Optional OutputHandler for streaming events (passed to created agents).
43
+ If None (default), agents create their own AgentConsole.
44
+ **agent_kwargs: Additional kwargs to pass to created agents
45
+ """
46
+ # API mode settings
47
+ self.api_mode = api_mode
48
+ self.output_handler = output_handler
49
+
50
+ # Initialize LLM client for language detection
51
+ # Extract LLM-specific params from agent_kwargs
52
+ use_claude = agent_kwargs.get("use_claude", False)
53
+ use_chatgpt = agent_kwargs.get("use_chatgpt", False)
54
+
55
+ # Determine base_url: CLI arg > Environment > LLMClient default
56
+ base_url = agent_kwargs.get("base_url")
57
+ if base_url is None:
58
+ # Read from environment if not provided
59
+ base_url = os.getenv("LEMONADE_BASE_URL", "http://localhost:8000/api/v1")
60
+
61
+ # Initialize LLM client - factory auto-detects provider from flags
62
+ self.llm_client = create_client(
63
+ use_claude=use_claude, use_openai=use_chatgpt, base_url=base_url
64
+ )
65
+ self.agent_kwargs = agent_kwargs # Store for passing to created agents
66
+
67
+ # Model to use for routing analysis (configurable via env var)
68
+ self.routing_model = os.getenv(
69
+ "AGENT_ROUTING_MODEL", "Qwen3-Coder-30B-A3B-Instruct-GGUF"
70
+ )
71
+
72
+ def process_query(
73
+ self,
74
+ query: str,
75
+ conversation_history: Optional[List[Dict[str, str]]] = None,
76
+ execute: bool = None,
77
+ workspace_root: Optional[str] = None,
78
+ **kwargs,
79
+ ):
80
+ """
81
+ Process query with optional conversation history from disambiguation rounds.
82
+
83
+ Args:
84
+ query: Original user query
85
+ conversation_history: List of conversation turns [{"role": "user", "content": "..."}]
86
+ execute: If True, execute the routed agent and return result.
87
+ If False, return the agent instance (CLI behavior).
88
+ If None (default), uses api_mode (True for API, False for CLI).
89
+ workspace_root: Optional workspace directory for agent execution (API mode).
90
+ **kwargs: Additional kwargs passed to agent.process_query() when execute=True.
91
+
92
+ Returns:
93
+ If execute=False: Configured agent instance ready to execute
94
+ If execute=True: Execution result from agent.process_query()
95
+
96
+ Example (CLI mode - default):
97
+ router = RoutingAgent()
98
+ agent = router.process_query("Create Express API")
99
+ result = agent.process_query("Create Express API")
100
+
101
+ Example (API mode with execute):
102
+ router = RoutingAgent(api_mode=True, output_handler=sse_handler)
103
+ result = router.process_query("Create Express API") # auto-executes
104
+ """
105
+ # Default execute based on api_mode: API mode auto-executes, CLI returns agent
106
+ if execute is None:
107
+ execute = self.api_mode
108
+
109
+ if conversation_history is None:
110
+ conversation_history = []
111
+
112
+ # Add current query to conversation history if not already there
113
+ if not conversation_history or conversation_history[-1].get("content") != query:
114
+ conversation_history.append({"role": "user", "content": query})
115
+
116
+ logger.debug(
117
+ f"Routing analysis for: '{query}' (conversation turns: {len(conversation_history)})"
118
+ )
119
+
120
+ # Analyze with LLM using conversation history
121
+ analysis = self._analyze_with_llm(conversation_history)
122
+
123
+ logger.debug(f"Analysis result: {analysis}")
124
+
125
+ # If language could not be determined, default to TypeScript/Next.js
126
+ analysis = self._default_unknown_language_to_typescript(analysis)
127
+
128
+ # Check if we have all required parameters
129
+ if self._has_unknowns(analysis):
130
+ if self.api_mode:
131
+ # API mode: skip interactive questions, use defaults
132
+ logger.info("API mode: using defaults for unknown parameters")
133
+ agent = self._create_agent_with_defaults(analysis)
134
+ else:
135
+ # Interactive mode: ask user for clarification
136
+ question = self._generate_clarification_question(analysis)
137
+ print(f"\n{question}")
138
+ user_response = input("> ").strip()
139
+
140
+ if not user_response:
141
+ logger.warning("Empty user response, using defaults")
142
+ # Use defaults if user just hits enter
143
+ agent = self._create_agent_with_defaults(analysis)
144
+ else:
145
+ # Add assistant question and user response to conversation history
146
+ conversation_history.append(
147
+ {"role": "assistant", "content": question}
148
+ )
149
+ conversation_history.append(
150
+ {"role": "user", "content": user_response}
151
+ )
152
+
153
+ # Recursive call with enriched conversation history
154
+ return self.process_query(
155
+ query,
156
+ conversation_history,
157
+ execute=execute,
158
+ workspace_root=workspace_root,
159
+ **kwargs,
160
+ )
161
+ else:
162
+ # All parameters resolved, create agent
163
+ agent = self._create_agent(analysis)
164
+
165
+ # Execute if requested (API mode), otherwise return agent (CLI mode)
166
+ if execute:
167
+ return agent.process_query(query, workspace_root=workspace_root, **kwargs)
168
+ return agent
169
+
170
+ def _analyze_with_llm(
171
+ self, conversation_history: List[Dict[str, str]]
172
+ ) -> Dict[str, Any]:
173
+ """
174
+ Analyze query with LLM to determine agent and parameters.
175
+
176
+ Args:
177
+ conversation_history: Full conversation including clarifications
178
+
179
+ Returns:
180
+ Analysis dict with agent, parameters, confidence, reasoning
181
+ """
182
+ # Build context from conversation history
183
+ context_parts = []
184
+ for turn in conversation_history:
185
+ role = turn["role"]
186
+ content = turn["content"]
187
+ if role == "user":
188
+ context_parts.append(f"User: {content}")
189
+ elif role == "assistant":
190
+ context_parts.append(f"Assistant: {content}")
191
+
192
+ full_context = "\n".join(context_parts)
193
+
194
+ # Format prompt with full conversation context
195
+ analysis_prompt = f"""Analyze this conversation and determine the configuration parameters.
196
+
197
+ Conversation:
198
+ {full_context}
199
+
200
+ {ROUTING_ANALYSIS_PROMPT.split('User Request: "{query}"')[1]}"""
201
+
202
+ # Wrap in Qwen chat format
203
+ prompt = (
204
+ f"<|im_start|>user\n{analysis_prompt}<|im_end|>\n<|im_start|>assistant\n"
205
+ )
206
+
207
+ try:
208
+ response = self.llm_client.generate(
209
+ prompt=prompt,
210
+ model=self.routing_model,
211
+ max_tokens=500,
212
+ stop=["<|im_end|>", "<|im_start|>"],
213
+ stream=False,
214
+ )
215
+
216
+ # Extract JSON from response
217
+ response_text = response.strip()
218
+
219
+ # Handle potential markdown code blocks
220
+ if "```json" in response_text:
221
+ response_text = (
222
+ response_text.split("```json")[1].split("```")[0].strip()
223
+ )
224
+ elif "```" in response_text:
225
+ response_text = response_text.split("```")[1].split("```")[0].strip()
226
+
227
+ # Remove any leading/trailing whitespace and parse
228
+ result = json.loads(response_text)
229
+ return result
230
+
231
+ except json.JSONDecodeError as e:
232
+ logger.error(f"Failed to parse LLM response as JSON: {e}")
233
+ logger.error(f"Response was: {response_text}")
234
+ # Fallback to defaults
235
+ return {
236
+ "agent": "code",
237
+ "parameters": {"language": "unknown", "project_type": "unknown"},
238
+ "confidence": 0.0,
239
+ "reasoning": "JSON parse error, using defaults",
240
+ }
241
+ except Exception as e:
242
+ logger.error(f"Error analyzing query: {e}")
243
+ raise RuntimeError(f"Failed to analyze query with Lemonade: {e}") from e
244
+
245
+ def _fallback_keyword_detection(self, query: str) -> Dict[str, Any]:
246
+ """
247
+ Fallback keyword-based detection when LLM fails.
248
+
249
+ Args:
250
+ query: User query
251
+
252
+ Returns:
253
+ Analysis dict with detected language and project type
254
+ """
255
+ query_lower = query.lower()
256
+
257
+ # TypeScript/Node.js indicators
258
+ ts_keywords = [
259
+ "nextjs",
260
+ "next.js",
261
+ "express",
262
+ "nestjs",
263
+ "koa",
264
+ "fastify",
265
+ "mongodb",
266
+ "mongoose",
267
+ "node.js",
268
+ "nodejs",
269
+ "react",
270
+ "vue",
271
+ "angular",
272
+ "svelte",
273
+ "vite",
274
+ "typescript",
275
+ ]
276
+
277
+ # Python indicators
278
+ py_keywords = ["django", "flask", "fastapi", "pandas", "numpy", "python"]
279
+
280
+ # Detect language
281
+ has_ts = any(kw in query_lower for kw in ts_keywords)
282
+ has_py = any(kw in query_lower for kw in py_keywords)
283
+
284
+ if has_ts:
285
+ language = "typescript"
286
+ reasoning = f"Detected TypeScript keywords: {[kw for kw in ts_keywords if kw in query_lower]}"
287
+ elif has_py:
288
+ language = "python"
289
+ reasoning = f"Detected Python keywords: {[kw for kw in py_keywords if kw in query_lower]}"
290
+ else:
291
+ language = "unknown"
292
+ reasoning = "No framework keywords detected"
293
+
294
+ # Detect project type based on language
295
+ if language == "typescript":
296
+ # All TypeScript web apps use Next.js fullstack approach
297
+ if any(kw in query_lower for kw in ["cli", "tool", "script", "utility"]):
298
+ project_type = "script"
299
+ else:
300
+ # Default to fullstack for any web-related TypeScript project
301
+ project_type = "fullstack"
302
+ elif language == "python":
303
+ # Python project types
304
+ if any(
305
+ kw in query_lower
306
+ for kw in [
307
+ "api",
308
+ "rest",
309
+ "backend",
310
+ "server",
311
+ "fastapi",
312
+ "flask",
313
+ "django",
314
+ ]
315
+ ):
316
+ project_type = "api"
317
+ elif any(kw in query_lower for kw in ["web", "website", "dashboard"]):
318
+ project_type = "web"
319
+ elif any(
320
+ kw in query_lower
321
+ for kw in ["cli", "tool", "script", "utility", "calculator"]
322
+ ):
323
+ project_type = "script"
324
+ else:
325
+ project_type = "unknown"
326
+ else:
327
+ # Unknown language - try to detect project type from keywords
328
+ if any(
329
+ kw in query_lower
330
+ for kw in [
331
+ "api",
332
+ "rest",
333
+ "backend",
334
+ "web",
335
+ "app",
336
+ "dashboard",
337
+ "frontend",
338
+ ]
339
+ ):
340
+ project_type = "fullstack" # Assume web app
341
+ elif any(kw in query_lower for kw in ["cli", "tool", "script", "utility"]):
342
+ project_type = "script"
343
+ else:
344
+ project_type = "unknown"
345
+
346
+ return {
347
+ "agent": "code",
348
+ "parameters": {"language": language, "project_type": project_type},
349
+ "confidence": 0.8 if language != "unknown" else 0.3,
350
+ "reasoning": f"Keyword detection: {reasoning}",
351
+ }
352
+
353
+ def _has_unknowns(self, analysis: Dict[str, Any]) -> bool:
354
+ """
355
+ Check if analysis has unknown parameters that need disambiguation.
356
+
357
+ Args:
358
+ analysis: Analysis result from LLM
359
+
360
+ Returns:
361
+ True if any required parameter is unknown or confidence is low
362
+ """
363
+ params = analysis.get("parameters", {})
364
+ confidence = analysis.get("confidence", 0.0)
365
+
366
+ # Check for explicit unknowns
367
+ has_unknown_params = (
368
+ params.get("language") == "unknown"
369
+ or params.get("project_type") == "unknown"
370
+ )
371
+
372
+ # Check for low confidence (< 0.9 means LLM is guessing)
373
+ low_confidence = confidence < 0.9
374
+
375
+ return has_unknown_params or low_confidence
376
+
377
+ def _generate_clarification_question(self, analysis: Dict[str, Any]) -> str:
378
+ """
379
+ Generate natural language clarification question based on unknowns.
380
+
381
+ Args:
382
+ analysis: Analysis result with unknowns
383
+
384
+ Returns:
385
+ Question string to ask user
386
+ """
387
+ params = analysis.get("parameters", {})
388
+ language = params.get("language")
389
+ project_type = params.get("project_type")
390
+
391
+ if language == "unknown" and project_type == "unknown":
392
+ return (
393
+ "What kind of application would you like to build?\n"
394
+ "(e.g., 'Next.js blog', 'Python CLI tool', 'Django API', 'React dashboard')"
395
+ )
396
+ elif language == "unknown":
397
+ if project_type == "fullstack":
398
+ return (
399
+ "What language/framework would you like to use for your web application?\n"
400
+ "(e.g., 'Next.js/TypeScript' for web apps, 'Django/Python' for APIs)"
401
+ )
402
+ elif project_type == "script":
403
+ return (
404
+ "What language would you like to use for your script?\n"
405
+ "(e.g., 'Python', 'TypeScript/Node.js')"
406
+ )
407
+ else:
408
+ return (
409
+ "What language/framework would you like to use?\n"
410
+ "(e.g., 'Next.js', 'Django', 'Python', 'TypeScript')"
411
+ )
412
+ elif project_type == "unknown":
413
+ if language == "typescript":
414
+ return (
415
+ "What type of TypeScript project would you like to create?\n"
416
+ "(e.g., 'web app' for Next.js full-stack, 'CLI tool' for Node.js script)"
417
+ )
418
+ else: # python
419
+ return (
420
+ "What type of Python project would you like to create?\n"
421
+ "(e.g., 'REST API', 'web app', 'CLI tool', 'data analysis script')"
422
+ )
423
+
424
+ return "Please provide more details about your project."
425
+
426
+ def _get_console(self):
427
+ """Return the configured output handler or a default console."""
428
+ if self.output_handler:
429
+ return self.output_handler
430
+
431
+ from gaia.agents.base.console import AgentConsole
432
+
433
+ return AgentConsole()
434
+
435
+ def _enforce_typescript_only(
436
+ self, language: str, project_type: str, console
437
+ ) -> tuple[str, str]:
438
+ """Warn and normalize when routing to unsupported languages."""
439
+ is_nextjs = language == "typescript" and project_type == "fullstack"
440
+
441
+ if not is_nextjs:
442
+ console.print_error(
443
+ "Only TypeScript (Next.js) is currently supported. "
444
+ "Please try a Next.js/TypeScript request."
445
+ )
446
+ raise SystemExit(1)
447
+
448
+ return language, project_type
449
+
450
+ def _default_unknown_language_to_typescript(
451
+ self, analysis: Dict[str, Any]
452
+ ) -> Dict[str, Any]:
453
+ """Default unknown language/project type to TypeScript/Next.js."""
454
+ params = analysis.get("parameters", {})
455
+ language = params.get("language")
456
+
457
+ if language != "unknown":
458
+ return analysis
459
+
460
+ console = self._get_console()
461
+ console.print_info(
462
+ "Defaulting to TypeScript (Next.js) because the language could not be determined."
463
+ )
464
+
465
+ params["language"] = "typescript"
466
+ if params.get("project_type") == "unknown":
467
+ params["project_type"] = "fullstack"
468
+
469
+ analysis["parameters"] = params
470
+ analysis["confidence"] = 1.0
471
+ analysis["reasoning"] = (
472
+ analysis.get("reasoning", "")
473
+ + " Defaulted to TypeScript/Next.js due to unknown language."
474
+ ).strip()
475
+
476
+ return analysis
477
+
478
+ def _create_agent(self, analysis: Dict[str, Any]) -> Agent:
479
+ """
480
+ Create configured agent based on analysis.
481
+
482
+ Args:
483
+ analysis: Resolved analysis with all parameters
484
+
485
+ Returns:
486
+ Configured agent instance
487
+ """
488
+ agent_type = analysis.get("agent", "code")
489
+ params = analysis.get("parameters", {})
490
+
491
+ if agent_type == "code":
492
+ from gaia.agents.code.agent import CodeAgent
493
+
494
+ language = params.get("language", "python")
495
+ project_type = params.get("project_type", "script")
496
+
497
+ logger.debug(
498
+ f"Creating CodeAgent with language={language}, project_type={project_type}"
499
+ )
500
+
501
+ # Use passed output_handler or create AgentConsole (CLI default)
502
+ console = self._get_console()
503
+ language, project_type = self._enforce_typescript_only(
504
+ language, project_type, console
505
+ )
506
+
507
+ # Print agent selected message
508
+ console.print_agent_selected("CodeAgent", language, project_type)
509
+
510
+ # Build agent kwargs, including output_handler if provided
511
+ agent_init_kwargs = dict(self.agent_kwargs)
512
+ if self.output_handler:
513
+ agent_init_kwargs["output_handler"] = self.output_handler
514
+
515
+ # Merge routing params with any additional kwargs
516
+ return CodeAgent(
517
+ language=language, project_type=project_type, **agent_init_kwargs
518
+ )
519
+ else:
520
+ raise ValueError(f"Unknown agent type: {agent_type}")
521
+
522
+ def _create_agent_with_defaults(self, analysis: Dict[str, Any]) -> Agent:
523
+ """
524
+ Create agent with default values for unknown parameters.
525
+
526
+ Args:
527
+ analysis: Analysis that may have unknowns
528
+
529
+ Returns:
530
+ Configured agent with defaults
531
+ """
532
+ params = analysis.get("parameters", {})
533
+
534
+ # Determine language with defaults
535
+ language = params.get("language")
536
+ if language == "unknown":
537
+ # Default to Python as the safest option
538
+ language = "python"
539
+ logger.info("Defaulting to Python for unknown language")
540
+
541
+ # Determine project type with smart defaults based on language
542
+ project_type = params.get("project_type")
543
+ if project_type == "unknown":
544
+ if language == "typescript":
545
+ # TypeScript defaults to fullstack (Next.js)
546
+ project_type = "fullstack"
547
+ logger.info("Defaulting to fullstack (Next.js) for TypeScript")
548
+ else:
549
+ # Python defaults to script
550
+ project_type = "script"
551
+ logger.info("Defaulting to script for Python")
552
+
553
+ from gaia.agents.code.agent import CodeAgent
554
+
555
+ console = self._get_console()
556
+ language, project_type = self._enforce_typescript_only(
557
+ language, project_type, console
558
+ )
559
+
560
+ # Build agent kwargs, including output_handler if provided
561
+ agent_init_kwargs = dict(self.agent_kwargs)
562
+ if self.output_handler:
563
+ agent_init_kwargs["output_handler"] = self.output_handler
564
+
565
+ return CodeAgent(
566
+ language=language, project_type=project_type, **agent_init_kwargs
567
+ )