tunacode-cli 0.0.13__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


tunacode/cli/commands.py CHANGED
@@ -138,6 +138,73 @@ class DumpCommand(SimpleCommand):
         await ui.dump_messages(context.state_manager.session.messages)
 
 
+class ThoughtsCommand(SimpleCommand):
+    """Toggle display of agent thoughts."""
+
+    def __init__(self):
+        super().__init__(
+            CommandSpec(
+                name="thoughts",
+                aliases=["/thoughts"],
+                description="Show or hide agent thought messages",
+                category=CommandCategory.DEBUG,
+            )
+        )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        state = context.state_manager.session
+        if args:
+            arg = args[0].lower()
+            if arg in {"on", "1", "true"}:
+                state.show_thoughts = True
+            elif arg in {"off", "0", "false"}:
+                state.show_thoughts = False
+            else:
+                await ui.error("Usage: /thoughts [on|off]")
+                return
+        else:
+            state.show_thoughts = not state.show_thoughts
+        status = "ON" if state.show_thoughts else "OFF"
+        await ui.success(f"Thought display {status}")
+
+
+class IterationsCommand(SimpleCommand):
+    """Configure maximum agent iterations for ReAct reasoning."""
+
+    def __init__(self):
+        super().__init__(
+            CommandSpec(
+                name="iterations",
+                aliases=["/iterations"],
+                description="Set maximum agent iterations for complex reasoning",
+                category=CommandCategory.DEBUG,
+            )
+        )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        state = context.state_manager.session
+        if args:
+            try:
+                new_limit = int(args[0])
+                if new_limit < 1 or new_limit > 50:
+                    await ui.error("Iterations must be between 1 and 50")
+                    return
+
+                # Update the user config
+                if "settings" not in state.user_config:
+                    state.user_config["settings"] = {}
+                state.user_config["settings"]["max_iterations"] = new_limit
+
+                await ui.success(f"Maximum iterations set to {new_limit}")
+                await ui.muted("Higher values allow more complex reasoning but may be slower")
+            except ValueError:
+                await ui.error("Please provide a valid number")
+        else:
+            current = state.user_config.get("settings", {}).get("max_iterations", 15)
+            await ui.info(f"Current maximum iterations: {current}")
+            await ui.muted("Usage: /iterations <number> (1-50)")
+
+
 class ClearCommand(SimpleCommand):
     """Clear screen and message history."""
 
@@ -152,8 +219,127 @@ class ClearCommand(SimpleCommand):
         )
 
     async def execute(self, args: List[str], context: CommandContext) -> None:
+        # Patch any orphaned tool calls before clearing
+        from tunacode.core.agents.main import patch_tool_messages
+        patch_tool_messages("Conversation cleared", context.state_manager)
+
         await ui.clear()
         context.state_manager.session.messages = []
+        await ui.success("Message history cleared")
+
+
+class FixCommand(SimpleCommand):
+    """Fix orphaned tool calls that cause API errors."""
+
+    def __init__(self):
+        super().__init__(
+            CommandSpec(
+                name="fix",
+                aliases=["/fix"],
+                description="Fix orphaned tool calls causing API errors",
+                category=CommandCategory.DEBUG,
+            )
+        )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        from tunacode.core.agents.main import patch_tool_messages
+
+        # Count current messages
+        before_count = len(context.state_manager.session.messages)
+
+        # Patch orphaned tool calls
+        patch_tool_messages("Tool call resolved by /fix command", context.state_manager)
+
+        # Count after patching
+        after_count = len(context.state_manager.session.messages)
+        patched_count = after_count - before_count
+
+        if patched_count > 0:
+            await ui.success(f"Fixed {patched_count} orphaned tool call(s)")
+            await ui.muted("You can now continue the conversation normally")
+        else:
+            await ui.info("No orphaned tool calls found")
+
+
+class ParseToolsCommand(SimpleCommand):
+    """Parse and execute JSON tool calls from the last response."""
+
+    def __init__(self):
+        super().__init__(
+            CommandSpec(
+                name="parsetools",
+                aliases=["/parsetools"],
+                description="Parse JSON tool calls from last response when structured calling fails",
+                category=CommandCategory.DEBUG,
+            )
+        )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        from tunacode.core.agents.main import extract_and_execute_tool_calls
+
+        # Find the last model response in messages
+        messages = context.state_manager.session.messages
+        if not messages:
+            await ui.error("No message history found")
+            return
+
+        # Look for the most recent response with text content
+        found_content = False
+        for msg in reversed(messages):
+            if hasattr(msg, 'parts'):
+                for part in msg.parts:
+                    if hasattr(part, 'content') and isinstance(part.content, str):
+                        # Create tool callback
+                        from tunacode.cli.repl import _tool_handler
+                        def tool_callback_with_state(part, node):
+                            return _tool_handler(part, node, context.state_manager)
+
+                        try:
+                            await extract_and_execute_tool_calls(
+                                part.content,
+                                tool_callback_with_state,
+                                context.state_manager
+                            )
+                            await ui.success("JSON tool parsing completed")
+                            found_content = True
+                            return
+                        except Exception as e:
+                            await ui.error(f"Failed to parse tools: {str(e)}")
+                            return
+
+        if not found_content:
+            await ui.error("No parseable content found in recent messages")
+
+
+class RefreshConfigCommand(SimpleCommand):
+    """Refresh configuration from defaults."""
+
+    def __init__(self):
+        super().__init__(
+            CommandSpec(
+                name="refresh",
+                aliases=["/refresh"],
+                description="Refresh configuration from defaults (useful after updates)",
+                category=CommandCategory.SYSTEM,
+            )
+        )
+
+    async def execute(self, args: List[str], context: CommandContext) -> None:
+        from tunacode.configuration.defaults import DEFAULT_USER_CONFIG
+
+        # Update current session config with latest defaults
+        for key, value in DEFAULT_USER_CONFIG.items():
+            if key not in context.state_manager.session.user_config:
+                context.state_manager.session.user_config[key] = value
+            elif isinstance(value, dict):
+                # Merge dict values, preserving user overrides
+                for subkey, subvalue in value.items():
+                    if subkey not in context.state_manager.session.user_config[key]:
+                        context.state_manager.session.user_config[key][subkey] = subvalue
+
+        # Show updated max_iterations
+        max_iterations = context.state_manager.session.user_config.get("settings", {}).get("max_iterations", 20)
+        await ui.success(f"Configuration refreshed - max iterations: {max_iterations}")
 
 
 class TunaCodeCommand(SimpleCommand):
@@ -232,7 +418,6 @@ class HelpCommand(SimpleCommand):
         await ui.help(self._command_registry)
 
 
-
 class BranchCommand(SimpleCommand):
     """Create and switch to a new git branch."""
 
@@ -247,8 +432,8 @@ class BranchCommand(SimpleCommand):
         )
 
     async def execute(self, args: List[str], context: CommandContext) -> None:
-        import subprocess
         import os
+        import subprocess
 
         if not args:
             await ui.error("Usage: /branch <branch-name>")
@@ -332,14 +517,16 @@ class ModelCommand(SimpleCommand):
 
         # Get the model name from args
         model_name = args[0]
-
+
         # Check if provider prefix is present
         if ":" not in model_name:
             await ui.error("Model name must include provider prefix")
             await ui.muted("Format: provider:model-name")
-            await ui.muted("Examples: openai:gpt-4.1, anthropic:claude-3-opus, google-gla:gemini-2.0-flash")
+            await ui.muted(
+                "Examples: openai:gpt-4.1, anthropic:claude-3-opus, google-gla:gemini-2.0-flash"
+            )
             return None
-
+
         # No validation - user is responsible for correct model names
         await ui.warning("Model set without validation - verify the model name is correct")
 
@@ -416,8 +603,7 @@ class CommandRegistry:
         category_commands = self._categories[command.category]
         # Remove any existing instance of this command class
         self._categories[command.category] = [
-            cmd for cmd in category_commands
-            if cmd.__class__ != command.__class__
+            cmd for cmd in category_commands if cmd.__class__ != command.__class__
         ]
         # Add the new instance
         self._categories[command.category].append(command)
@@ -436,7 +622,12 @@ class CommandRegistry:
         command_classes = [
             YoloCommand,
             DumpCommand,
+            ThoughtsCommand,
+            IterationsCommand,
             ClearCommand,
+            FixCommand,
+            ParseToolsCommand,
+            RefreshConfigCommand,
             HelpCommand,
             BranchCommand,
             # TunaCodeCommand, # TODO: Temporarily disabled
@@ -459,7 +650,7 @@ class CommandRegistry:
         # Only update if callback has changed
         if self._factory.dependencies.process_request_callback == callback:
             return
-
+
         self._factory.update_dependencies(process_request_callback=callback)
 
         # Re-register CompactCommand with new dependency if already registered
@@ -494,10 +685,10 @@ class CommandRegistry:
         if command_name in self._commands:
             command = self._commands[command_name]
             return await command.execute(args, context)
-
+
         # Try partial matching
         matches = self.find_matching_commands(command_name)
-
+
         if not matches:
             raise ValidationError(f"Unknown command: {command_name}")
         elif len(matches) == 1:
@@ -513,10 +704,10 @@ class CommandRegistry:
     def find_matching_commands(self, partial_command: str) -> List[str]:
         """
         Find all commands that start with the given partial command.
-
+
         Args:
             partial_command: The partial command to match
-
+
         Returns:
             List of matching command names
         """
@@ -534,11 +725,11 @@ class CommandRegistry:
             return False
 
         command_name = parts[0].lower()
-
+
         # Check exact match first
         if command_name in self._commands:
             return True
-
+
         # Check partial match
         return len(self.find_matching_commands(command_name)) > 0
 
tunacode/cli/repl.py CHANGED
@@ -167,10 +167,14 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
             await ui.error(str(e))
             return
 
+        # Patch any orphaned tool calls from previous requests before proceeding
+        patch_tool_messages("Tool execution was interrupted", state_manager)
+
         # Create a partial function that includes state_manager
         def tool_callback_with_state(part, node):
             return _tool_handler(part, node, state_manager)
 
+        start_idx = len(state_manager.session.messages)
         res = await agent.process_request(
             state_manager.session.current_model,
             text,
@@ -178,7 +182,17 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
             tool_callback=tool_callback_with_state,
         )
         if output:
-            await ui.agent(res.result.output)
+            if state_manager.session.show_thoughts:
+                new_msgs = state_manager.session.messages[start_idx:]
+                for msg in new_msgs:
+                    if isinstance(msg, dict) and "thought" in msg:
+                        await ui.muted(f"THOUGHT: {msg['thought']}")
+            # Check if result exists and has output
+            if hasattr(res, 'result') and res.result is not None and hasattr(res.result, 'output'):
+                await ui.agent(res.result.output)
+            else:
+                # Fallback: show that the request was processed
+                await ui.muted("Request completed")
     except CancelledError:
         await ui.muted("Request cancelled")
     except UserAbortError:
@@ -188,6 +202,31 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
         await ui.muted(error_message)
         patch_tool_messages(error_message, state_manager)
     except Exception as e:
+        # Check if this might be a tool calling failure that we can recover from
+        error_str = str(e).lower()
+        if any(keyword in error_str for keyword in ['tool', 'function', 'call', 'schema']):
+            # Try to extract and execute tool calls from the last response
+            if state_manager.session.messages:
+                last_msg = state_manager.session.messages[-1]
+                if hasattr(last_msg, 'parts'):
+                    for part in last_msg.parts:
+                        if hasattr(part, 'content') and isinstance(part.content, str):
+                            from tunacode.core.agents.main import extract_and_execute_tool_calls
+                            try:
+                                # Create a partial function that includes state_manager
+                                def tool_callback_with_state(part, node):
+                                    return _tool_handler(part, node, state_manager)
+
+                                await extract_and_execute_tool_calls(
+                                    part.content,
+                                    tool_callback_with_state,
+                                    state_manager
+                                )
+                                await ui.warning("🔧 Recovered using JSON tool parsing")
+                                return  # Successfully recovered
+                            except Exception:
+                                pass  # Fallback failed, continue with normal error handling
+
         # Wrap unexpected exceptions in AgentError for better tracking
         agent_error = AgentError(f"Agent processing failed: {str(e)}")
         agent_error.__cause__ = e  # Preserve the original exception chain
@@ -210,7 +249,7 @@ async def repl(state_manager: StateManager):
     await ui.muted(f"• Model: {state_manager.session.current_model}")
     await ui.success("Ready to assist with your development")
     await ui.line()
-
+
     instance = agent.get_or_create_agent(state_manager.session.current_model, state_manager)
 
     async with instance.run_mcp_servers():
tunacode/configuration/defaults.py CHANGED
@@ -18,6 +18,7 @@ DEFAULT_USER_CONFIG: UserConfig = {
     },
     "settings": {
         "max_retries": 10,
+        "max_iterations": 20,
         "tool_ignore": [TOOL_READ_FILE],
         "guide_file": GUIDE_FILE_NAME,
     },
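
Note: the new "max_iterations" default above is read back elsewhere in this release through chained .get() lookups rather than direct indexing, so user configs written before 0.0.14 (which lack the key) keep working. A minimal sketch of that lookup pattern, assuming a session object like the one used throughout this diff:

    settings = state_manager.session.user_config.get("settings", {})   # empty dict if the section is missing
    max_iterations = settings.get("max_iterations", 20)                # falls back to the packaged default of 20

The /iterations command writes the key back into user_config["settings"], and /refresh merges any missing defaults in without overwriting user overrides.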
tunacode/constants.py CHANGED
@@ -7,7 +7,7 @@ Centralizes all magic strings, UI text, error messages, and application constant
 
 # Application info
 APP_NAME = "TunaCode"
-APP_VERSION = "0.0.13"
+APP_VERSION = "0.0.14"
 
 # File patterns
 GUIDE_FILE_PATTERN = "{name}.md"
tunacode/core/agents/main.py CHANGED
@@ -4,6 +4,8 @@ Main agent functionality and coordination for the TunaCode CLI.
 Handles agent creation, configuration, and request processing.
 """
 
+import json
+import re
 from datetime import datetime, timezone
 from typing import Optional
 
@@ -13,32 +15,125 @@ from pydantic_ai.messages import ModelRequest, ToolReturnPart
 from tunacode.core.state import StateManager
 from tunacode.services.mcp import get_mcp_servers
 from tunacode.tools.bash import bash
+from tunacode.tools.grep import grep
 from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
 from tunacode.tools.update_file import update_file
 from tunacode.tools.write_file import write_file
-from tunacode.types import (AgentRun, ErrorMessage, ModelName, PydanticAgent, ToolCallback,
-                            ToolCallId, ToolName)
+from tunacode.types import (
+    AgentRun,
+    ErrorMessage,
+    ModelName,
+    PydanticAgent,
+    ToolCallback,
+    ToolCallId,
+    ToolName,
+)
 
 
 async def _process_node(node, tool_callback: Optional[ToolCallback], state_manager: StateManager):
     if hasattr(node, "request"):
         state_manager.session.messages.append(node.request)
 
+    if hasattr(node, "thought") and node.thought:
+        state_manager.session.messages.append({"thought": node.thought})
+        # Display thought immediately if show_thoughts is enabled
+        if state_manager.session.show_thoughts:
+            from tunacode.ui import console as ui
+            await ui.muted(f"💭 THOUGHT: {node.thought}")
+
     if hasattr(node, "model_response"):
         state_manager.session.messages.append(node.model_response)
+
+        # Enhanced ReAct thought processing
+        if state_manager.session.show_thoughts:
+            from tunacode.ui import console as ui
+            import json
+            import re
+
+            for part in node.model_response.parts:
+                if hasattr(part, 'content') and isinstance(part.content, str):
+                    content = part.content.strip()
+
+                    # Pattern 1: Inline JSON thoughts {"thought": "..."}
+                    thought_pattern = r'\{"thought":\s*"([^"]+)"\}'
+                    matches = re.findall(thought_pattern, content)
+                    for thought in matches:
+                        await ui.muted(f"💭 REASONING: {thought}")
+
+                    # Pattern 2: Standalone thought JSON objects
+                    try:
+                        if content.startswith('{"thought"'):
+                            thought_obj = json.loads(content)
+                            if 'thought' in thought_obj:
+                                await ui.muted(f"💭 REASONING: {thought_obj['thought']}")
+                    except (json.JSONDecodeError, KeyError):
+                        pass
+
+                    # Pattern 3: Multi-line thoughts with context
+                    multiline_pattern = r'\{"thought":\s*"([^"]+(?:\\.[^"]*)*?)"\}'
+                    multiline_matches = re.findall(multiline_pattern, content, re.DOTALL)
+                    for thought in multiline_matches:
+                        if thought not in [m for m in matches]:  # Avoid duplicates
+                            # Clean up escaped characters
+                            cleaned_thought = thought.replace('\\"', '"').replace('\\n', ' ')
+                            await ui.muted(f"💭 REASONING: {cleaned_thought}")
+
+                    # Pattern 4: Text-based reasoning indicators
+                    reasoning_indicators = [
+                        (r'I need to (.+?)\.', 'PLANNING'),
+                        (r'Let me (.+?)\.', 'ACTION'),
+                        (r'The output shows (.+?)\.', 'OBSERVATION'),
+                        (r'Based on (.+?), I should (.+?)\.', 'DECISION')
+                    ]
+
+                    for pattern, label in reasoning_indicators:
+                        indicator_matches = re.findall(pattern, content, re.IGNORECASE)
+                        for match in indicator_matches:
+                            if isinstance(match, tuple):
+                                match_text = ' '.join(match)
+                            else:
+                                match_text = match
+                            await ui.muted(f"🎯 {label}: {match_text}")
+                            break  # Only show first match per pattern
+
+        # Check for tool calls and fallback to JSON parsing if needed
+        has_tool_calls = False
         for part in node.model_response.parts:
             if part.part_kind == "tool-call" and tool_callback:
+                has_tool_calls = True
                 await tool_callback(part, node)
+            elif part.part_kind == "tool-return":
+                obs_msg = f"OBSERVATION[{part.tool_name}]: {part.content[:2_000]}"
+                state_manager.session.messages.append(obs_msg)
+
+        # If no structured tool calls found, try parsing JSON from text content
+        if not has_tool_calls and tool_callback:
+            for part in node.model_response.parts:
+                if hasattr(part, 'content') and isinstance(part.content, str):
+                    await extract_and_execute_tool_calls(part.content, tool_callback, state_manager)
 
 
 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
     if model not in state_manager.session.agents:
         max_retries = state_manager.session.user_config["settings"]["max_retries"]
+
+        # Load system prompt
+        import os
+        from pathlib import Path
+        prompt_path = Path(__file__).parent.parent.parent / "prompts" / "system.txt"
+        try:
+            with open(prompt_path, "r", encoding="utf-8") as f:
+                system_prompt = f.read().strip()
+        except FileNotFoundError:
+            system_prompt = None
+
         state_manager.session.agents[model] = Agent(
             model=model,
+            system_prompt=system_prompt,
             tools=[
                 Tool(bash, max_retries=max_retries),
+                Tool(grep, max_retries=max_retries),
                 Tool(read_file, max_retries=max_retries),
                 Tool(run_command, max_retries=max_retries),
                 Tool(update_file, max_retries=max_retries),
@@ -107,6 +202,110 @@ def patch_tool_messages(
         )
 
 
+async def parse_json_tool_calls(text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+    """
+    Parse JSON tool calls from text when structured tool calling fails.
+    Fallback for when API providers don't support proper tool calling.
+    """
+    if not tool_callback:
+        return
+
+    # Pattern for JSON tool calls: {"tool": "tool_name", "args": {...}}
+    # Find potential JSON objects and parse them
+    potential_jsons = []
+    brace_count = 0
+    start_pos = -1
+
+    for i, char in enumerate(text):
+        if char == '{':
+            if brace_count == 0:
+                start_pos = i
+            brace_count += 1
+        elif char == '}':
+            brace_count -= 1
+            if brace_count == 0 and start_pos != -1:
+                potential_json = text[start_pos:i+1]
+                try:
+                    parsed = json.loads(potential_json)
+                    if isinstance(parsed, dict) and 'tool' in parsed and 'args' in parsed:
+                        potential_jsons.append((parsed['tool'], parsed['args']))
+                except json.JSONDecodeError:
+                    pass
+                start_pos = -1
+
+    matches = potential_jsons
+
+    for tool_name, args in matches:
+        try:
+            # Create a mock tool call object
+            class MockToolCall:
+                def __init__(self, tool_name: str, args: dict):
+                    self.tool_name = tool_name
+                    self.args = args
+                    self.tool_call_id = f"fallback_{datetime.now().timestamp()}"
+
+            class MockNode:
+                pass
+
+            # Execute the tool through the callback
+            mock_call = MockToolCall(tool_name, args)
+            mock_node = MockNode()
+
+            await tool_callback(mock_call, mock_node)
+
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.muted(f"🔧 FALLBACK: Executed {tool_name} via JSON parsing")
+
+        except Exception as e:
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.error(f"❌ Error executing fallback tool {tool_name}: {str(e)}")
+
+
+async def extract_and_execute_tool_calls(text: str, tool_callback: Optional[ToolCallback], state_manager: StateManager):
+    """
+    Extract tool calls from text content and execute them.
+    Supports multiple formats for maximum compatibility.
+    """
+    if not tool_callback:
+        return
+
+    # Format 1: {"tool": "name", "args": {...}}
+    await parse_json_tool_calls(text, tool_callback, state_manager)
+
+    # Format 2: Tool calls in code blocks
+    code_block_pattern = r'```json\s*(\{(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*"tool"(?:[^{}]|"[^"]*"|(?:\{[^}]*\}))*\})\s*```'
+    code_matches = re.findall(code_block_pattern, text, re.MULTILINE | re.DOTALL)
+
+    for match in code_matches:
+        try:
+            tool_data = json.loads(match)
+            if 'tool' in tool_data and 'args' in tool_data:
+                class MockToolCall:
+                    def __init__(self, tool_name: str, args: dict):
+                        self.tool_name = tool_name
+                        self.args = args
+                        self.tool_call_id = f"codeblock_{datetime.now().timestamp()}"
+
+                class MockNode:
+                    pass
+
+                mock_call = MockToolCall(tool_data['tool'], tool_data['args'])
+                mock_node = MockNode()
+
+                await tool_callback(mock_call, mock_node)
+
+                if state_manager.session.show_thoughts:
+                    from tunacode.ui import console as ui
+                    await ui.muted(f"🔧 FALLBACK: Executed {tool_data['tool']} from code block")
+
+        except (json.JSONDecodeError, KeyError, Exception) as e:
+            if state_manager.session.show_thoughts:
+                from tunacode.ui import console as ui
+                await ui.error(f"❌ Error parsing code block tool call: {str(e)}")
+
+
 async def process_request(
     model: ModelName,
     message: str,
@@ -115,7 +314,24 @@ async def process_request(
 ) -> AgentRun:
     agent = get_or_create_agent(model, state_manager)
     mh = state_manager.session.messages.copy()
+    # Get max iterations from config (default: 20)
+    max_iterations = state_manager.session.user_config.get("settings", {}).get("max_iterations", 20)
+
     async with agent.iter(message, message_history=mh) as agent_run:
+        i = 0
         async for node in agent_run:
             await _process_node(node, tool_callback, state_manager)
+            i += 1
+
+            # Display iteration progress if thoughts are enabled
+            if state_manager.session.show_thoughts and i > 1:
+                from tunacode.ui import console as ui
+                await ui.muted(f"🔄 Iteration {i}/{max_iterations}")
+
+            if i >= max_iterations:
+                if state_manager.session.show_thoughts:
+                    from tunacode.ui import console as ui
+                    await ui.warning(f"⚠️ Reached maximum iterations ({max_iterations})")
+                break
+
     return agent_run
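
For context on the fallback path added above: parse_json_tool_calls() scans free text for balanced brace spans and keeps only objects shaped like {"tool": ..., "args": {...}}, and extract_and_execute_tool_calls() additionally checks fenced ```json blocks. A minimal sketch of the kind of model output this is meant to recover; the tool arguments here are illustrative and not taken from the package:

    response_text = 'Let me check the file first. {"tool": "read_file", "args": {"file_path": "src/app.py"}}'
    # The brace scan would yield ("read_file", {"file_path": "src/app.py"}) and pass it to the
    # tool callback wrapped in a MockToolCall whose tool_call_id starts with "fallback_".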