amd-gaia 0.15.2-py3-none-any.whl → 0.15.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
{amd_gaia-0.15.2.dist-info → amd_gaia-0.15.3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: amd-gaia
-Version: 0.15.2
+Version: 0.15.3
 Summary: GAIA is a lightweight agent framework designed for the edge and AI PCs.
 Home-page: https://github.com/amd/gaia
 Author: AMD
@@ -24,6 +24,7 @@ Requires-Dist: accelerate
 Requires-Dist: python-dotenv
 Requires-Dist: aiohttp
 Requires-Dist: rich
+Requires-Dist: requests
 Requires-Dist: watchdog>=2.1.0
 Requires-Dist: pillow>=9.0.0
 Provides-Extra: api
{amd_gaia-0.15.2.dist-info → amd_gaia-0.15.3.dist-info}/RECORD CHANGED
@@ -1,16 +1,16 @@
-amd_gaia-0.15.2.dist-info/licenses/LICENSE.md,sha256=yIF_ozJFJ83xmRHBrFVeGoc12JVoCphi1cy07CsfroQ,1083
+amd_gaia-0.15.3.dist-info/licenses/LICENSE.md,sha256=yIF_ozJFJ83xmRHBrFVeGoc12JVoCphi1cy07CsfroQ,1083
 gaia/__init__.py,sha256=4SNDd1YBqgMNkH71EY3x81E_CSM4gc3Wl7GClCl1KTI,843
-gaia/cli.py,sha256=sjgL2Ch7Zs0gSclHCdDfuw-lFHRaIzI0-fQ-aWGVYKQ,220283
+gaia/cli.py,sha256=XVeH4gPwNKtZFiB1BRzXWMdPbBMX-I9SQExhHGVhmCQ,229473
 gaia/logger.py,sha256=lrE9P2uL97TO35rfRyNMOdjo_BcNlvlym2N18B4sT7Y,6560
 gaia/perf_analysis.py,sha256=RJjv8d52zkbZzPlv7lLLwtQS7qiQY7VX-g-W0skcdyU,11070
 gaia/security.py,sha256=O2XKjAEGkjleWN-MKQPXgk6C4k_n-K4X2OYAdm6YtDA,6816
 gaia/util.py,sha256=QYOjlm4dEJePwptjMJOkVGYXbw7wAee4o4P1KOUKicY,1569
-gaia/version.py,sha256=WRkdcNEUd-2Zi922mMofuQyPsGTDnxUCP8Q-YNQWf10,2840
+gaia/version.py,sha256=wXv7VMa67cVUCUNBTglC1chdHs0T6hx5h4ACCNNcSvo,2840
 gaia/agents/__init__.py,sha256=-xX8KhEOt3OglQnHJh_fh2WlRibvnNptOsRMzpkUb-c,471
 gaia/agents/base/__init__.py,sha256=rgZ1-KKULJpy337UWHaEyU-SPTmqbUHTjBuaiAI87I8,364
-gaia/agents/base/agent.py,sha256=q2UrK-Fa16EC5xASK-F2eJtsG5RJp2f8nbSZM3Avav0,94355
+gaia/agents/base/agent.py,sha256=_aEQXgMiEev2PbFKNM_JPprdfLJVHuN1arhUxL09tx0,104720
 gaia/agents/base/api_agent.py,sha256=vjFGufBikYcgigjWPxNP-IGJl3joyHsT43mr7uOXs4I,3906
-gaia/agents/base/console.py,sha256=Y-pTmzy7ap3EvoyjhVFfvb5xqAhrXeOaxPlTS_yOXEM,69801
+gaia/agents/base/console.py,sha256=efBfN9DTjVZkBzqOdAeuyxBXYArrEUyq-4ir70jRfxs,78183
 gaia/agents/base/errors.py,sha256=UQs9Zt-Rhl480d5h3qoAzC8UKZsdy-gQGzrDlE73DFA,7011
 gaia/agents/base/mcp_agent.py,sha256=j53OIpwvn-bGOnq5UxlctLcX65mjaxKPziveWRiGalw,2352
 gaia/agents/base/tools.py,sha256=zghYt8bVCiwEz-zcrsQpNom3e_JMoY4X1VgP_siYywU,2796
@@ -99,6 +99,8 @@ gaia/agents/routing/system_prompt.py,sha256=XOuQRYvsh4GDLwUcSoutf2Zz-YwTq3g33GVu
 gaia/agents/summarize/__init__.py,sha256=hZgWtAdKKiyHjc1sy0HwAiwJeQFyKd_Mo5cHC7Pzdrc,247
 gaia/agents/summarize/agent.py,sha256=xoe-6Bb2QeaDWA11GLkfLutaMYnx3ucDMpwJCxAFzdU,37679
 gaia/agents/summarize/prompts.py,sha256=Is-WrNnbl86acOCh_Y_eYhfY6PU68_MUhO1k0AUaEtY,6266
+gaia/agents/tools/__init__.py,sha256=Lmo8XNPbEBcBMudi1sWbPdRMkhl8huFZwefvseA3qNQ,302
+gaia/agents/tools/file_tools.py,sha256=vMEhwjb_c1u-SNOUK5Ac_lXTKd_Bo70Ff2UwGjXQriI,29118
 gaia/api/__init__.py,sha256=BGWiO7Hg75lgf5cjujA4QIl65L-gbpR7WYiRlQI7f6A,641
 gaia/api/agent_registry.py,sha256=tmYZOE7KTn1UdXtEGSTLtmBmMsQMVlhVgX-0m--UrHY,7997
 gaia/api/app.py,sha256=63xtdFjHxQczzl9D5f6zme9chORtj8wD3-Zdcbg3hE8,10081
@@ -140,18 +142,18 @@ gaia/eval/webapp/public/app.js,sha256=-qqTCqIXLJ2JKBMcXnNPM04XBD-ioX_4JzR50Gr3qu
 gaia/eval/webapp/public/index.html,sha256=qfo97RArd77bSMAWQrNnUVp7_To8e6XCXV3iHqNmw1E,4738
 gaia/eval/webapp/public/styles.css,sha256=toAtkMtfYB16ZApZXTQQ6i7IYC2S9CahUT6oj1a0B0Y,71381
 gaia/installer/__init__.py,sha256=NvQ-XNJsAnjwfyF2YwkerNcztTMDMxKXf_9NGoNYQdY,503
-gaia/installer/init_command.py,sha256=vx9PMct5Uxs_6PKvsN4GbsqE_AmPStJHenk72ka43VE,47854
-gaia/installer/lemonade_installer.py,sha256=DL8ziWnXubbj-5Tc7eBuEO_Sfa7t1Y7cLq0qUKqxXYg,21611
+gaia/installer/init_command.py,sha256=UDxHMR3wb6TQI-C_pPQHBFayPD4TomTV09l17igvSVc,63506
+gaia/installer/lemonade_installer.py,sha256=sxG1mMqTQYZs-9bMFPN4uwDZPMeGpvzo1W-PvgByj_U,24393
 gaia/llm/__init__.py,sha256=fdu07-f682NvxLR-wgwaY5s0L-ZEphXD0sfUp-LAQn8,355
 gaia/llm/base_client.py,sha256=g9JXXWslQLdwzeURnddmZzj7u27VMu2nILm5OSCA05Q,1701
 gaia/llm/exceptions.py,sha256=aQ7sk_KtwW5MlJP6HFFnnKDhevNfsNIvYOTF8UUJnaQ,413
 gaia/llm/factory.py,sha256=IkbCHCIbLADWuU1_HoooJTDhr0xu_mDQVzhxtc-S9tY,2508
-gaia/llm/lemonade_client.py,sha256=pv-736R1Qpodfa2XLLJQesV4I7WeEu69SYjrMke7P_w,126772
-gaia/llm/lemonade_manager.py,sha256=R93qqmsuYqSf4gLG0R1BTTkZxu9WCLmKCHLvzu1K_yE,11017
+gaia/llm/lemonade_client.py,sha256=Zm1O4iyruLv_sZ9lg1RU-K0c6No5hC1rWhc5lg7dIGg,132213
+gaia/llm/lemonade_manager.py,sha256=gAetsv8xafBZYsEoH6yS-zvieUj02dSdzzsnFta003I,13276
 gaia/llm/vlm_client.py,sha256=D4bV-XmO2-Lu34qFGqP5l0zN6p0x85to3aJRGQ4SA0A,13247
 gaia/llm/providers/__init__.py,sha256=h2qbDWhOfE9G6YHgLMYzTw5bv1L63cSGuwEzML6iJU8,329
 gaia/llm/providers/claude.py,sha256=r22rb8sZR-wt-LzNEDU49drftPOn5ZY1MrftO65_78M,3262
-gaia/llm/providers/lemonade.py,sha256=BbeSzNzqRnEiUn9P55TYUf9kuGN_NL1ib9XO9pPH5Sw,4004
+gaia/llm/providers/lemonade.py,sha256=iA9CaZo09uMMJgac8xFq1_oDEhf0YSiIy0QJl9TWFrI,4398
 gaia/llm/providers/openai_provider.py,sha256=eU1NRtSPCfW0wKiK2ePcWNczvwt1E4koD9aL_D6cwc0,2502
 gaia/mcp/agent_mcp_server.py,sha256=YU_BgRRK0VPaJJgO1CgFodFuYrO6XoRCo3cfltpTlQY,10152
 gaia/mcp/blender_mcp_client.py,sha256=M5aI5esMPFqVrxYE0m7_xPAI-y8eTMgorlcflfyMM44,4967
@@ -175,8 +177,8 @@ gaia/testing/mocks.py,sha256=ojqkCYXBEAiMqiv6icfCVKOL6fw0RIc-Cv67uq08jNI,13895
 gaia/utils/__init__.py,sha256=HnxwoKZvPoYWcSdf-iu5rYzwySkpuqSAwyFSFIHjBNI,733
 gaia/utils/file_watcher.py,sha256=ZTlPvAmrSUtm42he29YCHfy3YByg03C4tFTPWM4ZsEc,22388
 gaia/utils/parsing.py,sha256=PZ0aizm-pVox2pAqZA8Xc-k10c-bAXh71C5RkbyniyA,6695
-amd_gaia-0.15.2.dist-info/METADATA,sha256=N7qMt4xbXkMCfIXQ25E6KTELtE8Rwk7SDpJnk7TffMA,8463
-amd_gaia-0.15.2.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-amd_gaia-0.15.2.dist-info/entry_points.txt,sha256=nXsKQ1xgHlGqfZgBksSaEOL0TA8zV7lXJaxVSlgsygE,223
-amd_gaia-0.15.2.dist-info/top_level.txt,sha256=79yz2gLmqK4bFQkuN8_cnHVY-lobuVT0Ww4YoO6OFvU,5
-amd_gaia-0.15.2.dist-info/RECORD,,
+amd_gaia-0.15.3.dist-info/METADATA,sha256=acP0sK3j-2ediOZ1kgr-hPi3vR4ULZfIRqsGqgD0Eg4,8487
+amd_gaia-0.15.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+amd_gaia-0.15.3.dist-info/entry_points.txt,sha256=nXsKQ1xgHlGqfZgBksSaEOL0TA8zV7lXJaxVSlgsygE,223
+amd_gaia-0.15.3.dist-info/top_level.txt,sha256=79yz2gLmqK4bFQkuN8_cnHVY-lobuVT0Ww4YoO6OFvU,5
+amd_gaia-0.15.3.dist-info/RECORD,,
{amd_gaia-0.15.2.dist-info → amd_gaia-0.15.3.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.10.1)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
gaia/agents/base/agent.py CHANGED
@@ -68,7 +68,7 @@ class Agent(abc.ABC):
         claude_model: str = "claude-sonnet-4-20250514",
         base_url: Optional[str] = None,
         model_id: str = None,
-        max_steps: int = 5,
+        max_steps: int = 20,
         debug_prompts: bool = False,
         show_prompts: bool = False,
         output_dir: str = None,
@@ -78,6 +78,7 @@
         debug: bool = False,
         output_handler=None,
         max_plan_iterations: int = 3,
+        max_consecutive_repeats: int = 4,
         min_context_size: int = 32768,
         skip_lemonade: bool = False,
     ):
@@ -100,6 +101,7 @@
             debug: If True, enables debug output for troubleshooting (default: False)
             output_handler: Custom OutputHandler for displaying agent output (default: None, creates console based on silent_mode)
             max_plan_iterations: Maximum number of plan-execute-replan cycles (default: 3, 0 = unlimited)
+            max_consecutive_repeats: Maximum consecutive identical tool calls before stopping (default: 4)
             min_context_size: Minimum context size required for this agent (default: 32768).
             skip_lemonade: If True, skip Lemonade server initialization (default: False).
                 Use this when connecting to a different OpenAI-compatible backend.
@@ -120,6 +122,7 @@
         self.debug = debug
         self.last_result = None  # Store the most recent result
         self.max_plan_iterations = max_plan_iterations
+        self.max_consecutive_repeats = max_consecutive_repeats
         self._current_query: Optional[str] = (
             None  # Store current query for error context
         )
@@ -154,17 +157,17 @@ You must respond ONLY in valid JSON. No text before { or after }.
         self.console = self._create_console()
 
         # Initialize LLM client for local model
-        self.system_prompt = self._get_system_prompt()
+        # Note: System prompt will be composed after _register_tools()
+        # This allows mixins to be initialized first (in subclass __init__)
 
         # Register tools for this agent
         self._register_tools()
 
-        # Update system prompt with available tools and response format
-        tools_description = self._format_tools_for_prompt()
-        self.system_prompt += f"\n\n==== AVAILABLE TOOLS ====\n{tools_description}\n"
+        # Note: system_prompt is now a lazy @property that composes on first access
+        # Tool descriptions and response format are added in _compose_system_prompt()
 
-        # Add JSON response format instructions (shared across all agents)
-        self.system_prompt += """
+        # Store response format template for use in composition
+        self._response_format_template = """
 ==== RESPONSE FORMAT ====
 You must respond ONLY in valid JSON. No text before { or after }.
 
@@ -220,13 +223,127 @@ You must respond ONLY in valid JSON. No text before { or after }.
         if self.show_prompts:
             self.console.print_prompt(self.system_prompt, "Initial System Prompt")
 
-    @abc.abstractmethod
+    def _get_mixin_prompts(self) -> list[str]:
+        """
+        Auto-collect system prompt fragments from inherited mixins.
+
+        Checks for mixin methods following the pattern: get_*_system_prompt()
+        Override this method to modify, reorder, or filter mixin prompts.
+
+        Returns:
+            List of prompt fragments from mixins (empty list if no mixins provide prompts)
+
+        Example:
+            def _get_mixin_prompts(self) -> list[str]:
+                prompts = super()._get_mixin_prompts()
+                # Modify SD prompt
+                if prompts:
+                    prompts[0] = prompts[0].replace("whimsical", "serious")
+                return prompts
+        """
+        prompts = []
+
+        # Check for SD mixin prompts
+        if hasattr(self, "get_sd_system_prompt"):
+            fragment = self.get_sd_system_prompt()
+            if fragment:
+                prompts.append(fragment)
+
+        # Check for VLM mixin prompts
+        if hasattr(self, "get_vlm_system_prompt"):
+            fragment = self.get_vlm_system_prompt()
+            if fragment:
+                prompts.append(fragment)
+
+        # Add more mixin checks here as new prompt-providing mixins are created
+
+        return prompts
+
+    def _compose_system_prompt(self) -> str:
+        """
+        Compose final system prompt from mixin fragments + agent custom + tools + format.
+
+        Override this method for complete control over prompt composition order.
+
+        Returns:
+            Composed system prompt string
+
+        Example:
+            def _compose_system_prompt(self) -> str:
+                # Custom composition order
+                parts = [
+                    "Base instructions first",
+                    *self._get_mixin_prompts(),
+                    self._get_system_prompt(),
+                ]
+                return "\n\n".join(p for p in parts if p)
+        """
+        parts = []
+
+        # Add mixin prompts first
+        parts.extend(self._get_mixin_prompts())
+
+        # Add agent-specific prompt
+        custom = self._get_system_prompt()
+        if custom:
+            parts.append(custom)
+
+        # Add tool descriptions (if tools registered)
+        if hasattr(self, "_format_tools_for_prompt"):
+            tools_description = self._format_tools_for_prompt()
+            if tools_description:
+                parts.append(f"==== AVAILABLE TOOLS ====\n{tools_description}")
+
+        # Add response format (if template set)
+        if hasattr(self, "_response_format_template"):
+            parts.append(self._response_format_template)
+
+        return "\n\n".join(p for p in parts if p)
+
+    @property
+    def system_prompt(self) -> str:
+        """
+        Lazy-loaded system prompt composed from mixins + agent custom.
+
+        Computed on first access to allow mixins to initialize in subclass __init__.
+
+        To see the prompt for debugging:
+            print(agent.system_prompt)
+        """
+        if not hasattr(self, "_system_prompt_cache"):
+            self._system_prompt_cache = self._compose_system_prompt()
+        return self._system_prompt_cache
+
+    @system_prompt.setter
+    def system_prompt(self, value: str):
+        """Allow setting system prompt (used when appending tool descriptions)."""
+        self._system_prompt_cache = value
+
     def _get_system_prompt(self) -> str:
         """
-        Generate the system prompt for the agent.
-        Subclasses must implement this to provide domain-specific prompts.
+        Return agent-specific system prompt additions.
+
+        Default implementation returns empty string (use only mixin prompts).
+        Override this method to add custom instructions.
+
+        When using mixins that provide prompts (e.g., SDToolsMixin):
+        - Return "" to use only mixin prompts (default behavior)
+        - Return custom instructions to append to mixin prompts
+        - Override _compose_system_prompt() for full control over composition
+
+        Returns:
+            Agent-specific system prompt (empty string by default)
+
+        Example:
+            # Use only mixin prompts (default)
+            def _get_system_prompt(self) -> str:
+                return ""
+
+            # Add custom instructions
+            def _get_system_prompt(self) -> str:
+                return "Always save metadata to logs"
         """
-        raise NotImplementedError("Subclasses must implement _get_system_prompt")
+        return ""  # Default: use only mixin prompts
 
     def _create_console(self):
         """
@@ -747,6 +864,106 @@ You must respond ONLY in valid JSON. No text before { or after }.
             # Valid conversational response - wrap it in expected format
             return {"thought": "", "goal": "", "answer": response.strip()}
 
+    def _resolve_plan_parameters(
+        self, tool_args: Any, step_results: List[Dict[str, Any]], _depth: int = 0
+    ) -> Any:
+        """
+        Recursively resolve placeholder references in tool arguments from previous step results.
+
+        Supports dynamic parameter substitution in multi-step plans:
+        - $PREV.field - Get field from previous step result
+        - $STEP_0.field - Get field from specific step result (0-indexed)
+
+        Args:
+            tool_args: Tool arguments that may contain placeholders
+            step_results: List of results from previously executed steps
+            _depth: Internal recursion depth counter (max 50 levels)
+
+        Returns:
+            Tool arguments with placeholders resolved to actual values
+
+        Examples:
+            >>> step_results = [{"image_path": "/path/to/img.png", "status": "success"}]
+            >>> tool_args = {"image_path": "$PREV.image_path", "style": "dramatic"}
+            >>> resolved = agent._resolve_plan_parameters(tool_args, step_results)
+            >>> resolved
+            {"image_path": "/path/to/img.png", "style": "dramatic"}
+
+        Backward Compatibility:
+            - If no placeholders exist, returns original tool_args unchanged
+            - If placeholder references invalid step/field, returns placeholder string unchanged
+
+        Limitations:
+            - Field names cannot contain dots (e.g., $PREV.user.name not supported - use $PREV.user_name)
+            - Maximum nesting depth of 50 levels to prevent stack overflow
+            - No type checking - resolved values are used as-is (tools should validate inputs)
+        """
+        # Prevent stack overflow from deeply nested structures
+        MAX_DEPTH = 50
+        if _depth > MAX_DEPTH:
+            logger.warning(
+                f"Maximum recursion depth ({MAX_DEPTH}) exceeded in parameter resolution, returning unchanged"
+            )
+            return tool_args
+
+        # Handle dict: recursively resolve each value
+        if isinstance(tool_args, dict):
+            return {
+                k: self._resolve_plan_parameters(v, step_results, _depth + 1)
+                for k, v in tool_args.items()
+            }
+
+        # Handle list: recursively resolve each item
+        elif isinstance(tool_args, list):
+            return [
+                self._resolve_plan_parameters(item, step_results, _depth + 1)
+                for item in tool_args
+            ]
+
+        # Handle string: check for placeholder patterns
+        elif isinstance(tool_args, str):
+            # Handle $PREV.field - get field from previous step
+            if tool_args.startswith("$PREV.") and step_results:
+                field = tool_args[6:]  # Strip "$PREV."
+                prev_result = step_results[-1]
+                if isinstance(prev_result, dict) and field in prev_result:
+                    resolved = prev_result[field]
+                    logger.debug(
+                        f"Resolved {tool_args} -> {resolved} from previous step result"
+                    )
+                    return resolved
+                else:
+                    logger.warning(
+                        f"Could not resolve {tool_args}: field '{field}' not found in previous result"
+                    )
+                    return tool_args  # Return unchanged if field not found
+
+            # Handle $STEP_N.field - get field from specific step
+            match = re.match(r"\$STEP_(\d+)\.(.+)", tool_args)
+            if match and step_results:
+                step_idx = int(match.group(1))
+                field = match.group(2)
+                if 0 <= step_idx < len(step_results):
+                    step_result = step_results[step_idx]
+                    if isinstance(step_result, dict) and field in step_result:
+                        resolved = step_result[field]
+                        logger.debug(
+                            f"Resolved {tool_args} -> {resolved} from step {step_idx} result"
+                        )
+                        return resolved
+                    else:
+                        logger.warning(
+                            f"Could not resolve {tool_args}: field '{field}' not found in step {step_idx} result"
+                        )
+                else:
+                    logger.warning(
+                        f"Could not resolve {tool_args}: step {step_idx} out of range (0-{len(step_results)-1})"
+                    )
+                return tool_args  # Return unchanged if reference invalid
+
+        # For all other types (int, float, bool, None), return unchanged
+        return tool_args
+
     def _execute_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> Any:
         """
         Execute a tool by name with the provided arguments.
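
Note: to make the two placeholder forms concrete, here is a simplified, self-contained re-implementation of just the string cases (illustrative only; the package method above also recurses into dicts/lists, caps recursion depth, and logs failures):

    import re

    def resolve(value, step_results):
        """Resolve $PREV.field / $STEP_N.field against prior step results."""
        if isinstance(value, str):
            if value.startswith("$PREV.") and step_results:
                field = value[6:]  # text after "$PREV."
                prev = step_results[-1]
                if isinstance(prev, dict) and field in prev:
                    return prev[field]
                return value  # unresolved placeholders pass through unchanged
            m = re.match(r"\$STEP_(\d+)\.(.+)", value)
            if m and 0 <= int(m.group(1)) < len(step_results):
                step = step_results[int(m.group(1))]
                if isinstance(step, dict) and m.group(2) in step:
                    return step[m.group(2)]
        return value

    steps = [{"image_path": "/tmp/cat.png"}, {"caption": "a cat"}]
    print(resolve("$PREV.caption", steps))       # -> a cat
    print(resolve("$STEP_0.image_path", steps))  # -> /tmp/cat.png
    print(resolve("$PREV.missing", steps))       # -> $PREV.missing (unchanged)
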
@@ -1137,9 +1354,10 @@ You must respond ONLY in valid JSON. No text before { or after }.
         steps_taken = 0
         final_answer = None
         error_count = 0
-        last_tool_call = None  # Track the last tool call to prevent loops
+        tool_call_history = []  # Track recent tool calls to detect loops (last 5 calls)
         last_error = None  # Track the last error to handle it properly
-        previous_outputs = []  # Track previous tool outputs
+        previous_outputs = []  # Track previous tool outputs (truncated for context)
+        step_results = []  # Track full tool results for parameter substitution
 
         # Reset state management
         self.execution_state = self.STATE_PLANNING
@@ -1156,7 +1374,7 @@ You must respond ONLY in valid JSON. No text before { or after }.
         steps_limit = max_steps if max_steps is not None else self.max_steps
 
         # Print initial message with max steps info
-        self.console.print_processing_start(user_input, steps_limit)
+        self.console.print_processing_start(user_input, steps_limit, self.model_id)
         logger.debug(f"Using max_steps: {steps_limit}")
 
         prompt = f"User request: {user_input}\n\n"
@@ -1214,6 +1432,9 @@ You must respond ONLY in valid JSON. No text before { or after }.
                 tool_name = next_step["tool"]
                 tool_args = next_step["tool_args"]
 
+                # Resolve dynamic parameters from previous step results
+                tool_args = self._resolve_plan_parameters(tool_args, step_results)
+
                 # Create a parsed response structure as if it came from the LLM
                 parsed = {
                     "thought": f"Executing step {self.current_step + 1} of the plan",
@@ -1265,6 +1486,9 @@ You must respond ONLY in valid JSON. No text before { or after }.
                     }
                 )
 
+                # Store full result for parameter substitution in subsequent plan steps
+                step_results.append(tool_result)
+
                 # Share tool output with subsequent LLM calls
                 messages.append(
                     self._create_tool_message(tool_name, truncated_result)
@@ -1426,9 +1650,14 @@ You must respond ONLY in valid JSON. No text before { or after }.
                 )
 
                 # Create a specific error recovery prompt
+                last_tool = (
+                    tool_call_history[-1][0]
+                    if tool_call_history
+                    else "unknown tool"
+                )
                 prompt = (
                     "TOOL EXECUTION FAILED!\n\n"
-                    f"You were trying to execute: {last_tool_call[0] if last_tool_call else 'unknown tool'}\n"
+                    f"You were trying to execute: {last_tool}\n"
                     f"Error: {last_error}\n\n"
                     f"Original task: {user_input}\n\n"
                     f"Current plan step {self.current_step + 1}/{self.total_plan_steps} failed.\n"
@@ -1450,6 +1679,7 @@ You must respond ONLY in valid JSON. No text before { or after }.
                 self.current_plan = None
                 self.current_step = 0
                 self.total_plan_steps = 0
+                step_results.clear()  # Clear stale results from failed plan
 
             elif self.execution_state == self.STATE_COMPLETION:
                 self.console.print_state_info("COMPLETION: Finalizing response")
@@ -1789,8 +2019,15 @@ You must respond ONLY in valid JSON. No text before { or after }.
         for i, step in enumerate(parsed["plan"]):
             if not isinstance(step, dict):
                 invalid_steps.append((i, type(step).__name__, step))
-            elif "tool" not in step or "tool_args" not in step:
-                invalid_steps.append((i, "missing fields", step))
+            elif "tool" not in step:
+                invalid_steps.append((i, "missing tool field", step))
+            elif "tool_args" not in step:
+                # Auto-add empty tool_args for convenience
+                # LLMs sometimes omit this for tools with all optional parameters
+                step["tool_args"] = {}
+                logger.debug(
+                    f"Auto-added empty tool_args for step {i+1}: {step['tool']}"
+                )
 
         if invalid_steps:
             logger.error(f"Invalid plan steps found: {invalid_steps}")
@@ -1865,12 +2102,27 @@ You must respond ONLY in valid JSON. No text before { or after }.
             # Start progress indicator for tool execution
             self.console.start_progress(f"Executing {tool_name}")
 
-            # Check for repeated tool calls
-            if last_tool_call == (tool_name, str(tool_args)):
+            # Check for repeated tool calls (allow up to 3 identical calls)
+            current_call = (tool_name, str(tool_args))
+            tool_call_history.append(current_call)
+
+            # Keep only last 5 calls for loop detection
+            if len(tool_call_history) > 5:
+                tool_call_history.pop(0)
+
+            # Count consecutive identical calls
+            consecutive_count = 0
+            for call in reversed(tool_call_history):
+                if call == current_call:
+                    consecutive_count += 1
+                else:
+                    break
+
+            # Stop after max_consecutive_repeats identical calls
+            if consecutive_count >= self.max_consecutive_repeats:
                 # Stop progress indicator
                 self.console.stop_progress()
 
-                logger.warning(f"Detected repeated tool call: {tool_name}")
                 # Force a final answer if the same tool is called repeatedly
                 final_answer = (
                     f"Task completed with {tool_name}. No further action needed."
@@ -1906,9 +2158,6 @@ You must respond ONLY in valid JSON. No text before { or after }.
             # Share tool output with subsequent LLM calls
             messages.append(self._create_tool_message(tool_name, truncated_result))
 
-            # Update last tool call
-            last_tool_call = (tool_name, str(tool_args))
-
             # For single-step plans, we still need to let the LLM process the result
             # This is especially important for RAG queries where the LLM needs to
             # synthesize the retrieved information into a coherent answer