tunacode-cli 0.0.55__py3-none-any.whl → 0.0.78.6__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of tunacode-cli might be problematic.

Files changed (114)
  1. tunacode/cli/commands/__init__.py +2 -2
  2. tunacode/cli/commands/implementations/__init__.py +2 -3
  3. tunacode/cli/commands/implementations/command_reload.py +48 -0
  4. tunacode/cli/commands/implementations/debug.py +2 -2
  5. tunacode/cli/commands/implementations/development.py +10 -8
  6. tunacode/cli/commands/implementations/model.py +357 -29
  7. tunacode/cli/commands/implementations/quickstart.py +43 -0
  8. tunacode/cli/commands/implementations/system.py +96 -3
  9. tunacode/cli/commands/implementations/template.py +0 -2
  10. tunacode/cli/commands/registry.py +139 -5
  11. tunacode/cli/commands/slash/__init__.py +32 -0
  12. tunacode/cli/commands/slash/command.py +157 -0
  13. tunacode/cli/commands/slash/loader.py +135 -0
  14. tunacode/cli/commands/slash/processor.py +294 -0
  15. tunacode/cli/commands/slash/types.py +93 -0
  16. tunacode/cli/commands/slash/validator.py +400 -0
  17. tunacode/cli/main.py +23 -2
  18. tunacode/cli/repl.py +217 -190
  19. tunacode/cli/repl_components/command_parser.py +38 -4
  20. tunacode/cli/repl_components/error_recovery.py +85 -4
  21. tunacode/cli/repl_components/output_display.py +12 -1
  22. tunacode/cli/repl_components/tool_executor.py +1 -1
  23. tunacode/configuration/defaults.py +12 -3
  24. tunacode/configuration/key_descriptions.py +284 -0
  25. tunacode/configuration/settings.py +0 -1
  26. tunacode/constants.py +12 -40
  27. tunacode/core/agents/__init__.py +43 -2
  28. tunacode/core/agents/agent_components/__init__.py +7 -0
  29. tunacode/core/agents/agent_components/agent_config.py +249 -55
  30. tunacode/core/agents/agent_components/agent_helpers.py +43 -13
  31. tunacode/core/agents/agent_components/node_processor.py +179 -139
  32. tunacode/core/agents/agent_components/response_state.py +123 -6
  33. tunacode/core/agents/agent_components/state_transition.py +116 -0
  34. tunacode/core/agents/agent_components/streaming.py +296 -0
  35. tunacode/core/agents/agent_components/task_completion.py +19 -6
  36. tunacode/core/agents/agent_components/tool_buffer.py +21 -1
  37. tunacode/core/agents/agent_components/tool_executor.py +10 -0
  38. tunacode/core/agents/main.py +522 -370
  39. tunacode/core/agents/main_legact.py +538 -0
  40. tunacode/core/agents/prompts.py +66 -0
  41. tunacode/core/agents/utils.py +29 -121
  42. tunacode/core/code_index.py +83 -29
  43. tunacode/core/setup/__init__.py +0 -2
  44. tunacode/core/setup/config_setup.py +110 -20
  45. tunacode/core/setup/config_wizard.py +230 -0
  46. tunacode/core/setup/coordinator.py +14 -5
  47. tunacode/core/state.py +16 -20
  48. tunacode/core/token_usage/usage_tracker.py +5 -3
  49. tunacode/core/tool_authorization.py +352 -0
  50. tunacode/core/tool_handler.py +67 -40
  51. tunacode/exceptions.py +119 -5
  52. tunacode/prompts/system.xml +751 -0
  53. tunacode/services/mcp.py +125 -7
  54. tunacode/setup.py +5 -25
  55. tunacode/tools/base.py +163 -0
  56. tunacode/tools/bash.py +110 -1
  57. tunacode/tools/glob.py +332 -34
  58. tunacode/tools/grep.py +179 -82
  59. tunacode/tools/grep_components/result_formatter.py +98 -4
  60. tunacode/tools/list_dir.py +132 -2
  61. tunacode/tools/prompts/bash_prompt.xml +72 -0
  62. tunacode/tools/prompts/glob_prompt.xml +45 -0
  63. tunacode/tools/prompts/grep_prompt.xml +98 -0
  64. tunacode/tools/prompts/list_dir_prompt.xml +31 -0
  65. tunacode/tools/prompts/react_prompt.xml +23 -0
  66. tunacode/tools/prompts/read_file_prompt.xml +54 -0
  67. tunacode/tools/prompts/run_command_prompt.xml +64 -0
  68. tunacode/tools/prompts/update_file_prompt.xml +53 -0
  69. tunacode/tools/prompts/write_file_prompt.xml +37 -0
  70. tunacode/tools/react.py +153 -0
  71. tunacode/tools/read_file.py +91 -0
  72. tunacode/tools/run_command.py +114 -0
  73. tunacode/tools/schema_assembler.py +167 -0
  74. tunacode/tools/update_file.py +94 -0
  75. tunacode/tools/write_file.py +86 -0
  76. tunacode/tools/xml_helper.py +83 -0
  77. tunacode/tutorial/__init__.py +9 -0
  78. tunacode/tutorial/content.py +98 -0
  79. tunacode/tutorial/manager.py +182 -0
  80. tunacode/tutorial/steps.py +124 -0
  81. tunacode/types.py +20 -27
  82. tunacode/ui/completers.py +434 -50
  83. tunacode/ui/config_dashboard.py +585 -0
  84. tunacode/ui/console.py +63 -11
  85. tunacode/ui/input.py +20 -3
  86. tunacode/ui/keybindings.py +7 -4
  87. tunacode/ui/model_selector.py +395 -0
  88. tunacode/ui/output.py +40 -19
  89. tunacode/ui/panels.py +212 -43
  90. tunacode/ui/path_heuristics.py +91 -0
  91. tunacode/ui/prompt_manager.py +5 -1
  92. tunacode/ui/tool_ui.py +33 -10
  93. tunacode/utils/api_key_validation.py +93 -0
  94. tunacode/utils/config_comparator.py +340 -0
  95. tunacode/utils/json_utils.py +206 -0
  96. tunacode/utils/message_utils.py +14 -4
  97. tunacode/utils/models_registry.py +593 -0
  98. tunacode/utils/ripgrep.py +332 -9
  99. tunacode/utils/text_utils.py +18 -1
  100. tunacode/utils/user_configuration.py +45 -0
  101. tunacode_cli-0.0.78.6.dist-info/METADATA +260 -0
  102. tunacode_cli-0.0.78.6.dist-info/RECORD +158 -0
  103. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/WHEEL +1 -2
  104. tunacode/cli/commands/implementations/todo.py +0 -217
  105. tunacode/context.py +0 -71
  106. tunacode/core/setup/git_safety_setup.py +0 -182
  107. tunacode/prompts/system.md +0 -731
  108. tunacode/tools/read_file_async_poc.py +0 -196
  109. tunacode/tools/todo.py +0 -349
  110. tunacode_cli-0.0.55.dist-info/METADATA +0 -322
  111. tunacode_cli-0.0.55.dist-info/RECORD +0 -126
  112. tunacode_cli-0.0.55.dist-info/top_level.txt +0 -1
  113. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/entry_points.txt +0 -0
  114. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/licenses/LICENSE +0 -0
tunacode/core/agents/agent_components/agent_config.py
@@ -1,25 +1,48 @@
 """Agent configuration and creation utilities."""
 
 from pathlib import Path
+from typing import Dict, Tuple
 
+from httpx import AsyncClient, HTTPStatusError
 from pydantic_ai import Agent
+from pydantic_ai.models.anthropic import AnthropicModel
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.anthropic import AnthropicProvider
+from pydantic_ai.providers.openai import OpenAIProvider
+from pydantic_ai.retries import AsyncTenacityTransport, RetryConfig, wait_retry_after
+from tenacity import retry_if_exception_type, stop_after_attempt
 
 from tunacode.core.logging.logger import get_logger
 from tunacode.core.state import StateManager
-from tunacode.services.mcp import get_mcp_servers
+from tunacode.services.mcp import get_mcp_servers, register_mcp_agent
 from tunacode.tools.bash import bash
 from tunacode.tools.glob import glob
 from tunacode.tools.grep import grep
 from tunacode.tools.list_dir import list_dir
 from tunacode.tools.read_file import read_file
 from tunacode.tools.run_command import run_command
-from tunacode.tools.todo import TodoTool
 from tunacode.tools.update_file import update_file
 from tunacode.tools.write_file import write_file
 from tunacode.types import ModelName, PydanticAgent
 
 logger = get_logger(__name__)
 
+# Module-level caches for system prompts
+_PROMPT_CACHE: Dict[str, Tuple[str, float]] = {}
+_TUNACODE_CACHE: Dict[str, Tuple[str, float]] = {}
+
+# Module-level cache for agents to persist across requests
+_AGENT_CACHE: Dict[ModelName, PydanticAgent] = {}
+_AGENT_CACHE_VERSION: Dict[ModelName, int] = {}
+
+
+def clear_all_caches():
+    """Clear all module-level caches. Useful for testing."""
+    _PROMPT_CACHE.clear()
+    _TUNACODE_CACHE.clear()
+    _AGENT_CACHE.clear()
+    _AGENT_CACHE_VERSION.clear()
+
 
 def get_agent_tool():
     """Lazy import for Agent and Tool to avoid circular imports."""
@@ -28,44 +51,176 @@ def get_agent_tool():
     return Agent, Tool
 
 
-def load_system_prompt(base_path: Path) -> str:
-    """Load the system prompt from file."""
-    prompt_path = base_path / "prompts" / "system.md"
+def _read_prompt_from_path(prompt_path: Path) -> str:
+    """Return prompt content from disk, leveraging the cache when possible."""
+    cache_key = str(prompt_path)
+
     try:
-        with open(prompt_path, "r", encoding="utf-8") as f:
-            return f.read().strip()
-    except FileNotFoundError:
-        # Fallback to system.txt if system.md not found
-        prompt_path = base_path / "prompts" / "system.txt"
-        try:
-            with open(prompt_path, "r", encoding="utf-8") as f:
-                return f.read().strip()
-        except FileNotFoundError:
-            # Use a default system prompt if neither file exists
-            return "You are a helpful AI assistant for software development tasks."
+        current_mtime = prompt_path.stat().st_mtime
+    except FileNotFoundError as error:
+        raise FileNotFoundError from error
+
+    if cache_key in _PROMPT_CACHE:
+        cached_content, cached_mtime = _PROMPT_CACHE[cache_key]
+        if current_mtime == cached_mtime:
+            return cached_content
+
+    try:
+        content = prompt_path.read_text(encoding="utf-8").strip()
+    except FileNotFoundError as error:
+        raise FileNotFoundError from error
+
+    _PROMPT_CACHE[cache_key] = (content, current_mtime)
+    return content
+
+
+def load_system_prompt(base_path: Path) -> str:
+    """Load the system prompt from system.xml file with caching.
+
+    Raises:
+        FileNotFoundError: If system.xml does not exist in the prompts directory.
+    """
+    prompts_dir = base_path / "prompts"
+    prompt_path = prompts_dir / "system.xml"
+
+    if not prompt_path.exists():
+        raise FileNotFoundError(
+            f"Required system prompt file not found: {prompt_path}. "
+            "The system.xml file must exist in the prompts directory."
+        )
+
+    return _read_prompt_from_path(prompt_path)
 
 
 def load_tunacode_context() -> str:
-    """Load TUNACODE.md context if it exists."""
+    """Load AGENTS.md context if it exists with caching."""
     try:
-        tunacode_path = Path.cwd() / "TUNACODE.md"
-        if tunacode_path.exists():
-            tunacode_content = tunacode_path.read_text(encoding="utf-8")
-            if tunacode_content.strip():
-                logger.info("📄 TUNACODE.md located: Loading context...")
-                return "\n\n# Project Context from TUNACODE.md\n" + tunacode_content
-            else:
-                logger.info("📄 TUNACODE.md not found: Using default context")
+        tunacode_path = Path.cwd() / "AGENTS.md"
+        cache_key = str(tunacode_path)
+
+        if not tunacode_path.exists():
+            logger.info("📄 AGENTS.md not found: Using default context")
+            return ""
+
+        # Check cache with file modification time
+        if cache_key in _TUNACODE_CACHE:
+            cached_content, cached_mtime = _TUNACODE_CACHE[cache_key]
+            current_mtime = tunacode_path.stat().st_mtime
+            if current_mtime == cached_mtime:
+                return cached_content
+
+        # Load from file and cache
+        tunacode_content = tunacode_path.read_text(encoding="utf-8")
+        if tunacode_content.strip():
+            logger.info("📄 AGENTS.md located: Loading context...")
+            result = "\n\n# Project Context from AGENTS.md\n" + tunacode_content
+            _TUNACODE_CACHE[cache_key] = (result, tunacode_path.stat().st_mtime)
+            return result
         else:
-            logger.info("📄 TUNACODE.md not found: Using default context")
+            logger.info("📄 AGENTS.md not found: Using default context")
+            _TUNACODE_CACHE[cache_key] = ("", tunacode_path.stat().st_mtime)
+            return ""
+
     except Exception as e:
-        logger.debug(f"Error loading TUNACODE.md: {e}")
-    return ""
+        logger.debug(f"Error loading AGENTS.md: {e}")
+        return ""
+
+
+def _create_model_with_retry(
+    model_string: str, http_client: AsyncClient, state_manager: StateManager
+):
+    """Create a model instance with retry-enabled HTTP client.
+
+    Parses model string in format 'provider:model_name' and creates
+    appropriate provider and model instances with the retry-enabled HTTP client.
+    """
+    # Extract environment config
+    env = state_manager.session.user_config.get("env", {})
+
+    # Provider configuration: API key names and base URLs
+    PROVIDER_CONFIG = {
+        "anthropic": {"api_key_name": "ANTHROPIC_API_KEY", "base_url": None},
+        "openai": {"api_key_name": "OPENAI_API_KEY", "base_url": None},
+        "openrouter": {
+            "api_key_name": "OPENROUTER_API_KEY",
+            "base_url": "https://openrouter.ai/api/v1",
+        },
+        "azure": {
+            "api_key_name": "AZURE_OPENAI_API_KEY",
+            "base_url": env.get("AZURE_OPENAI_ENDPOINT"),
+        },
+        "deepseek": {"api_key_name": "DEEPSEEK_API_KEY", "base_url": None},
+    }
+
+    # Parse model string
+    if ":" in model_string:
+        provider_name, model_name = model_string.split(":", 1)
+    else:
+        # Auto-detect provider from model name
+        model_name = model_string
+        if model_name.startswith("claude"):
+            provider_name = "anthropic"
+        elif model_name.startswith(("gpt", "o1", "o3")):
+            provider_name = "openai"
+        else:
+            # Default to treating as model string (pydantic-ai will auto-detect)
+            return model_string
+
+    # Create provider with api_key + base_url + http_client
+    if provider_name == "anthropic":
+        api_key = env.get("ANTHROPIC_API_KEY")
+        provider = AnthropicProvider(api_key=api_key, http_client=http_client)
+        return AnthropicModel(model_name, provider=provider)
+    elif provider_name in ("openai", "openrouter", "azure", "deepseek"):
+        # OpenAI-compatible providers all use OpenAIChatModel
+        config = PROVIDER_CONFIG.get(provider_name, {})
+        api_key = env.get(config.get("api_key_name"))
+        base_url = config.get("base_url")
+        provider = OpenAIProvider(api_key=api_key, base_url=base_url, http_client=http_client)
+        return OpenAIChatModel(model_name, provider=provider)
+    else:
+        # Unsupported provider, return string and let pydantic-ai handle it
+        # (won't have retry support but won't break)
+        logger.warning(
+            f"Provider '{provider_name}' not configured for HTTP retries. "
+            f"Falling back to default behavior."
+        )
+        return model_string
 
 
 def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
     """Get existing agent or create new one for the specified model."""
-    if model not in state_manager.session.agents:
+    import logging
+
+    logger = logging.getLogger(__name__)
+
+    # Check session-level cache first (for backward compatibility with tests)
+    if model in state_manager.session.agents:
+        logger.debug(f"Using session-cached agent for model {model}")
+        return state_manager.session.agents[model]
+
+    # Check module-level cache
+    if model in _AGENT_CACHE:
+        # Verify cache is still valid (check for config changes)
+        settings = state_manager.session.user_config.get("settings", {})
+        current_version = hash(
+            (
+                str(settings.get("max_retries", 3)),
+                str(settings.get("tool_strict_validation", False)),
+                str(state_manager.session.user_config.get("mcpServers", {})),
+            )
+        )
+        if _AGENT_CACHE_VERSION.get(model) == current_version:
+            logger.debug(f"Using module-cached agent for model {model}")
+            state_manager.session.agents[model] = _AGENT_CACHE[model]
+            return _AGENT_CACHE[model]
+        else:
+            logger.debug(f"Cache invalidated for model {model} due to config change")
+            del _AGENT_CACHE[model]
+            del _AGENT_CACHE_VERSION[model]
+
+    if model not in _AGENT_CACHE:
+        logger.debug(f"Creating new agent for model {model}")
         max_retries = state_manager.session.user_config.get("settings", {}).get("max_retries", 3)
 
         # Lazy import Agent and Tool
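
Note: _create_model_with_retry routes "provider:model_name" strings to provider-specific pydantic-ai model classes and otherwise falls back to returning the raw string (auto-detection, no retry transport). A small sketch of just the routing branch, assuming the same prefixes as above (route is a hypothetical helper, not package code):

    def route(model_string: str) -> str:
        # Mirrors the parsing logic of _create_model_with_retry, without provider construction.
        if ":" in model_string:
            provider_name, _ = model_string.split(":", 1)
            return provider_name
        if model_string.startswith("claude"):
            return "anthropic"
        if model_string.startswith(("gpt", "o1", "o3")):
            return "openai"
        return "passthrough"  # left to pydantic-ai auto-detection

    assert route("openrouter:openai/gpt-4o") == "openrouter"
    assert route("claude-3-5-sonnet-latest") == "anthropic"
    assert route("gemini-1.5-pro") == "passthrough"
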
@@ -75,35 +230,74 @@ def get_or_create_agent(model: ModelName, state_manager: StateManager) -> Pydant
         base_path = Path(__file__).parent.parent.parent.parent
         system_prompt = load_system_prompt(base_path)
 
-        # Load TUNACODE.md context
+        # Load AGENTS.md context
         system_prompt += load_tunacode_context()
 
-        # Initialize todo tool
-        todo_tool = TodoTool(state_manager=state_manager)
+        # Get tool strict validation setting from config (default to False for backward
+        # compatibility)
+        tool_strict_validation = state_manager.session.user_config.get("settings", {}).get(
+            "tool_strict_validation", False
+        )
+
+        # Create tool list
+        tools_list = [
+            Tool(bash, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(glob, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(grep, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(list_dir, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(read_file, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(run_command, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(update_file, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(write_file, max_retries=max_retries, strict=tool_strict_validation),
+        ]
 
-        # Add todo context if available
-        try:
-            current_todos = todo_tool.get_current_todos_sync()
-            if current_todos != "No todos found":
-                system_prompt += f'\n\n# Current Todo List\n\nYou have existing todos that need attention:\n\n{current_todos}\n\nRemember to check progress on these todos and update them as you work. Use todo("list") to see current status anytime.'
-        except Exception as e:
-            logger.warning(f"Warning: Failed to load todos: {e}")
+        logger.debug(f"Creating agent with {len(tools_list)} tools")
+
+        mcp_servers = get_mcp_servers(state_manager)
+
+        # Configure HTTP client with retry logic at transport layer
+        # This handles retries BEFORE node creation, avoiding pydantic-ai's
+        # single-stream-per-node constraint violations
+        # https://ai.pydantic.dev/api/retries/#pydantic_ai.retries.wait_retry_after
+        transport = AsyncTenacityTransport(
+            config=RetryConfig(
+                retry=retry_if_exception_type(HTTPStatusError),
+                wait=wait_retry_after(max_wait=60),
+                stop=stop_after_attempt(max_retries),
+                reraise=True,
+            ),
+            validate_response=lambda r: r.raise_for_status(),
+        )
+        http_client = AsyncClient(transport=transport)
 
-        # Create agent with all tools
-        state_manager.session.agents[model] = Agent(
-            model=model,
+        # Create model instance with retry-enabled HTTP client
+        model_instance = _create_model_with_retry(model, http_client, state_manager)
+
+        agent = Agent(
+            model=model_instance,
             system_prompt=system_prompt,
-            tools=[
-                Tool(bash, max_retries=max_retries),
-                Tool(glob, max_retries=max_retries),
-                Tool(grep, max_retries=max_retries),
-                Tool(list_dir, max_retries=max_retries),
-                Tool(read_file, max_retries=max_retries),
-                Tool(run_command, max_retries=max_retries),
-                Tool(todo_tool._execute, max_retries=max_retries),
-                Tool(update_file, max_retries=max_retries),
-                Tool(write_file, max_retries=max_retries),
-            ],
-            mcp_servers=get_mcp_servers(state_manager),
+            tools=tools_list,
+            mcp_servers=mcp_servers,
         )
-    return state_manager.session.agents[model]
+
+        # Register agent for MCP cleanup tracking
+        mcp_server_names = state_manager.session.user_config.get("mcpServers", {}).keys()
+        for server_name in mcp_server_names:
+            register_mcp_agent(server_name, agent)
+
+        # Store in both caches
+        _AGENT_CACHE[model] = agent
+        _AGENT_CACHE_VERSION[model] = hash(
+            (
+                str(state_manager.session.user_config.get("settings", {}).get("max_retries", 3)),
+                str(
+                    state_manager.session.user_config.get("settings", {}).get(
+                        "tool_strict_validation", False
+                    )
+                ),
+                str(state_manager.session.user_config.get("mcpServers", {})),
+            )
+        )
+        state_manager.session.agents[model] = agent
+
+    return _AGENT_CACHE[model]
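
Note: the agent cache version is a hash over the stringified settings that influence agent construction, so changing max_retries, tool_strict_validation, or the mcpServers map invalidates the cached agent on the next get_or_create_agent call. A sketch of that invalidation check (config_version is a hypothetical helper; the tuple contents mirror the diff):

    def config_version(user_config: dict) -> int:
        settings = user_config.get("settings", {})
        return hash(
            (
                str(settings.get("max_retries", 3)),
                str(settings.get("tool_strict_validation", False)),
                str(user_config.get("mcpServers", {})),
            )
        )

    old = config_version({"settings": {"max_retries": 3}})
    new = config_version({"settings": {"max_retries": 5}})
    assert old != new  # an agent cached under the old settings would be dropped and rebuilt
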
tunacode/core/agents/agent_components/agent_helpers.py
@@ -94,27 +94,29 @@ def create_empty_response_message(
     iteration: int,
     state_manager: StateManager,
 ) -> str:
-    """Create an aggressive message for handling empty responses."""
+    """Create a constructive message for handling empty responses."""
     tools_context = get_recent_tools_context(tool_calls)
 
-    content = f"""FAILURE DETECTED: You returned {("an " + empty_reason if empty_reason != "empty" else "an empty")} response.
-
-This is UNACCEPTABLE. You FAILED to produce output.
+    reason = empty_reason if empty_reason != "empty" else "empty"
+    content = f"""Response appears {reason} or incomplete. Let's troubleshoot and try again.
 
 Task: {message[:200]}...
 {tools_context}
-Current iteration: {iteration}
+Attempt: {iteration}
+
+Please take one of these specific actions:
 
-TRY AGAIN RIGHT NOW:
+1. **Search yielded no results?** → Try alternative search terms or broader patterns
+2. **Found what you need?** → Use TUNACODE DONE: to finalize
+3. **Encountering a blocker?** → Explain the specific issue preventing progress
+4. **Need more context?** → Use list_dir or expand your search scope
 
-1. If your search returned no results → Try a DIFFERENT search pattern
-2. If you found what you need Use TUNACODE_TASK_COMPLETE
-3. If you're stuck EXPLAIN SPECIFICALLY what's blocking you
-4. If you need to explore Use list_dir or broader searches
+**Expected in your response:**
+- Execute at least one tool OR provide substantial analysis
+- If stuck, clearly describe what you've tried and what's blocking you
+- Avoid empty responses - the system needs actionable output to proceed
 
-YOU MUST PRODUCE REAL OUTPUT IN THIS RESPONSE. NO EXCUSES.
-EXECUTE A TOOL OR PROVIDE SUBSTANTIAL CONTENT.
-DO NOT RETURN ANOTHER EMPTY RESPONSE."""
+Ready to continue with a complete response."""
 
     return content
 
@@ -200,6 +202,34 @@ def create_fallback_response(
     return fallback
 
 
+async def handle_empty_response(
+    message: str,
+    reason: str,
+    iter_index: int,
+    state: Any,
+) -> None:
+    """Handle empty responses by creating a synthetic user message with retry guidance."""
+    from tunacode.ui import console as ui
+
+    force_action_content = create_empty_response_message(
+        message,
+        reason,
+        getattr(state.sm.session, "tool_calls", []),
+        iter_index,
+        state.sm,
+    )
+    create_user_message(force_action_content, state.sm)
+
+    if state.show_thoughts:
+        await ui.warning("\nEMPTY RESPONSE FAILURE - AGGRESSIVE RETRY TRIGGERED")
+        await ui.muted(f" Reason: {reason}")
+        await ui.muted(
+            f" Recent tools: "
+            f"{get_recent_tools_context(getattr(state.sm.session, 'tool_calls', []))}"
+        )
+        await ui.muted(" Injecting retry guidance prompt")
+
+
 def format_fallback_output(fallback: FallbackResponse) -> str:
     """Format a fallback response into a comprehensive output string."""
     output_parts = [fallback.summary, ""]
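
Note: handle_empty_response only touches state.sm (a StateManager) and state.show_thoughts, so any object exposing those two attributes can be passed as state. A hypothetical illustration of that contract (RequestState is not part of the package):

    from dataclasses import dataclass

    @dataclass
    class RequestState:
        sm: object           # StateManager instance in the real code
        show_thoughts: bool  # gates the ui.warning / ui.muted diagnostics

    # await handle_empty_response(user_message, "empty", iter_index=2,
    #                             state=RequestState(sm=state_manager, show_thoughts=True))
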