tunacode-cli 0.0.76__py3-none-any.whl → 0.0.76.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. See the registry's advisory page for this release for more details.

@@ -122,7 +122,7 @@ class FixCommand(SimpleCommand):
122
122
  )
123
123
 
124
124
  async def execute(self, args: List[str], context: CommandContext) -> None:
125
- from tunacode.core.agents.main import patch_tool_messages
125
+ from tunacode.core.agents import patch_tool_messages
126
126
 
127
127
  # Count current messages
128
128
  before_count = len(context.state_manager.session.messages)
@@ -152,7 +152,7 @@ class ParseToolsCommand(SimpleCommand):
152
152
  )
153
153
 
154
154
  async def execute(self, args: List[str], context: CommandContext) -> None:
155
- from tunacode.core.agents.main import extract_and_execute_tool_calls
155
+ from tunacode.core.agents import extract_and_execute_tool_calls
156
156
 
157
157
  # Find the last model response in messages
158
158
  messages = context.state_manager.session.messages
@@ -40,7 +40,7 @@ class ClearCommand(SimpleCommand):
40
40
 
41
41
  async def execute(self, args: List[str], context: CommandContext) -> None:
42
42
  # Patch any orphaned tool calls before clearing
43
- from tunacode.core.agents.main import patch_tool_messages
43
+ from tunacode.core.agents import patch_tool_messages
44
44
 
45
45
  patch_tool_messages("Conversation cleared", context.state_manager)
46
46
 
tunacode/cli/main.py CHANGED
@@ -30,6 +30,9 @@ def main(
30
30
  wizard: bool = typer.Option(
31
31
  False, "--wizard", help="Run interactive setup wizard for guided configuration."
32
32
  ),
33
+ show_config: bool = typer.Option(
34
+ False, "--show-config", help="Show configuration dashboard and exit."
35
+ ),
33
36
  baseurl: str = typer.Option(
34
37
  None, "--baseurl", help="API base URL (e.g., https://openrouter.ai/api/v1)"
35
38
  ),
@@ -49,6 +52,13 @@ def main(
49
52
  await ui.version()
50
53
  return
51
54
 
55
+ if show_config:
56
+ from tunacode.ui.config_dashboard import show_config_dashboard
57
+
58
+ await ui.banner()
59
+ show_config_dashboard()
60
+ return
61
+
52
62
  await ui.banner()
53
63
 
54
64
  # Start update check in background
tunacode/cli/repl.py CHANGED
@@ -14,8 +14,8 @@ from pydantic_ai.exceptions import UnexpectedModelBehavior
14
14
 
15
15
  from tunacode.configuration.models import ModelRegistry
16
16
  from tunacode.constants import DEFAULT_CONTEXT_WINDOW
17
- from tunacode.core.agents import main as agent
18
- from tunacode.core.agents.main import patch_tool_messages
17
+ from tunacode.core import agents as agent
18
+ from tunacode.core.agents import patch_tool_messages
19
19
  from tunacode.core.token_usage.api_response_parser import ApiResponseParser
20
20
  from tunacode.core.token_usage.cost_calculator import CostCalculator
21
21
  from tunacode.core.token_usage.usage_tracker import UsageTracker
@@ -238,7 +238,7 @@ async def _handle_plan_approval(state_manager, original_request=None):
238
238
  action()
239
239
  if key == "a" and original_request:
240
240
  await ui.info("🚀 Executing implementation...")
241
- await process_request(
241
+ await execute_repl_request(
242
242
  _transform_to_implementation_request(original_request),
243
243
  state_manager,
244
244
  output=True,
@@ -259,16 +259,16 @@ _command_registry.register_all_default_commands()
259
259
 
260
260
  async def _handle_command(command: str, state_manager: StateManager) -> CommandResult:
261
261
  """Handles a command string using the command registry."""
262
- context = CommandContext(state_manager=state_manager, process_request=process_request)
262
+ context = CommandContext(state_manager=state_manager, process_request=execute_repl_request)
263
263
  try:
264
- _command_registry.set_process_request_callback(process_request)
264
+ _command_registry.set_process_request_callback(execute_repl_request)
265
265
  return await _command_registry.execute(command, context)
266
266
  except ValidationError as e:
267
267
  await ui.error(str(e))
268
268
  return None
269
269
 
270
270
 
271
- async def process_request(text: str, state_manager: StateManager, output: bool = True):
271
+ async def execute_repl_request(text: str, state_manager: StateManager, output: bool = True):
272
272
  """Process input using the agent, handling cancellation safely."""
273
273
  import uuid
274
274
 
@@ -320,7 +320,9 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
320
320
  if enable_streaming:
321
321
  await ui.spinner(False, state_manager.session.spinner, state_manager)
322
322
  state_manager.session.is_streaming_active = True
323
- streaming_panel = ui.StreamingAgentPanel()
323
+ streaming_panel = ui.StreamingAgentPanel(
324
+ debug=bool(state_manager.session.show_thoughts)
325
+ )
324
326
  await streaming_panel.start()
325
327
  state_manager.session.streaming_panel = streaming_panel
326
328
 
@@ -337,6 +339,20 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
337
339
  await streaming_panel.stop()
338
340
  state_manager.session.streaming_panel = None
339
341
  state_manager.session.is_streaming_active = False
342
+ # Emit source-side streaming diagnostics if thoughts are enabled
343
+ if state_manager.session.show_thoughts:
344
+ try:
345
+ raw = getattr(state_manager.session, "_debug_raw_stream_accum", "") or ""
346
+ events = getattr(state_manager.session, "_debug_events", []) or []
347
+ raw_first5 = repr(raw[:5])
348
+ await ui.muted(
349
+ f"[debug] raw_stream_first5={raw_first5} total_len={len(raw)}"
350
+ )
351
+ for line in events:
352
+ await ui.muted(line)
353
+ except Exception:
354
+ # Don't let diagnostics break normal flow
355
+ pass
340
356
  else:
341
357
  res = await agent.process_request(
342
358
  text,
@@ -396,6 +412,10 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
396
412
  )
397
413
 
398
414
 
415
+ # Backwards compatibility: exported name expected by external integrations/tests
416
+ process_request = execute_repl_request
417
+
418
+
399
419
  async def warm_code_index():
400
420
  """Pre-warm the code index in background for faster directory operations."""
401
421
  try:
@@ -517,7 +537,7 @@ async def repl(state_manager: StateManager):
517
537
  state_manager.session.operation_cancelled = False
518
538
 
519
539
  state_manager.session.current_task = get_app().create_background_task(
520
- process_request(line, state_manager)
540
+ execute_repl_request(line, state_manager)
521
541
  )
522
542
  await state_manager.session.current_task
523
543
 
@@ -6,6 +6,7 @@ Error recovery utilities for the REPL.
6
6
 
7
7
  import logging
8
8
 
9
+ import tunacode.core.agents as agent_api
9
10
  from tunacode.types import StateManager
10
11
  from tunacode.ui import console as ui
11
12
 
@@ -130,13 +131,12 @@ async def attempt_tool_recovery(e: Exception, state_manager: StateManager) -> bo
130
131
  )
131
132
 
132
133
  try:
133
- from tunacode.core.agents.main import extract_and_execute_tool_calls
134
134
 
135
135
  def tool_callback_with_state(tool_part, _node):
136
136
  return tool_handler(tool_part, state_manager)
137
137
 
138
138
  # This function now returns the number of tools found
139
- tools_found = await extract_and_execute_tool_calls(
139
+ tools_found = await agent_api.extract_and_execute_tool_calls(
140
140
  content_to_parse, tool_callback_with_state, state_manager
141
141
  )
142
142
 
@@ -9,7 +9,7 @@ from asyncio.exceptions import CancelledError
9
9
 
10
10
  from prompt_toolkit.application import run_in_terminal
11
11
 
12
- from tunacode.core.agents.main import patch_tool_messages
12
+ from tunacode.core.agents import patch_tool_messages
13
13
  from tunacode.core.tool_handler import ToolHandler
14
14
  from tunacode.exceptions import UserAbortError
15
15
  from tunacode.types import StateManager
@@ -5,7 +5,7 @@ Default configuration values for the TunaCode CLI.
5
5
  Provides sensible defaults for user configuration and environment variables.
6
6
  """
7
7
 
8
- from tunacode.constants import GUIDE_FILE_NAME, ToolName
8
+ from tunacode.constants import GUIDE_FILE_NAME
9
9
  from tunacode.types import UserConfig
10
10
 
11
11
  DEFAULT_USER_CONFIG: UserConfig = {
@@ -19,7 +19,7 @@ DEFAULT_USER_CONFIG: UserConfig = {
19
19
  "settings": {
20
20
  "max_retries": 10,
21
21
  "max_iterations": 40,
22
- "tool_ignore": [ToolName.READ_FILE],
22
+ "tool_ignore": [],
23
23
  "guide_file": GUIDE_FILE_NAME,
24
24
  "fallback_response": True,
25
25
  "fallback_verbosity": "normal", # Options: minimal, normal, detailed
@@ -0,0 +1,275 @@
1
+ """
2
+ Module: tunacode.configuration.key_descriptions
3
+
4
+ Educational descriptions and examples for configuration keys to help users
5
+ understand what each setting does and how to configure it properly.
6
+ """
7
+
8
+ from dataclasses import dataclass
9
+ from typing import Any, Dict, Optional
10
+
11
+
12
+ @dataclass
13
+ class KeyDescription:
14
+ """Description of a configuration key with examples and help text."""
15
+
16
+ name: str
17
+ description: str
18
+ example: Any
19
+ help_text: str
20
+ category: str
21
+ is_sensitive: bool = False
22
+ service_type: Optional[str] = None # For API keys: "openai", "anthropic", etc.
23
+
24
+
25
+ # Configuration key descriptions organized by category
26
+ CONFIG_KEY_DESCRIPTIONS: Dict[str, KeyDescription] = {
27
+ # Root level keys
28
+ "default_model": KeyDescription(
29
+ name="default_model",
30
+ description="Which AI model TunaCode uses by default",
31
+ example="openrouter:openai/gpt-4.1",
32
+ help_text="Format: provider:model-name. Examples: openai:gpt-4, anthropic:claude-3-sonnet, google:gemini-pro",
33
+ category="AI Models",
34
+ ),
35
+ "skip_git_safety": KeyDescription(
36
+ name="skip_git_safety",
37
+ description="Skip Git safety checks when making changes",
38
+ example=True,
39
+ help_text="When true, TunaCode won't create safety branches before making changes. Use with caution!",
40
+ category="Safety Settings",
41
+ ),
42
+ # Environment variables (API Keys)
43
+ "env.OPENAI_API_KEY": KeyDescription(
44
+ name="OPENAI_API_KEY",
45
+ description="Your OpenAI API key for GPT models",
46
+ example="sk-proj-abc123...",
47
+ help_text="Get this from https://platform.openai.com/api-keys. Required for OpenAI models like GPT-4.",
48
+ category="API Keys",
49
+ is_sensitive=True,
50
+ service_type="openai",
51
+ ),
52
+ "env.ANTHROPIC_API_KEY": KeyDescription(
53
+ name="ANTHROPIC_API_KEY",
54
+ description="Your Anthropic API key for Claude models",
55
+ example="sk-ant-api03-abc123...",
56
+ help_text="Get this from https://console.anthropic.com/. Required for Claude models.",
57
+ category="API Keys",
58
+ is_sensitive=True,
59
+ service_type="anthropic",
60
+ ),
61
+ "env.OPENROUTER_API_KEY": KeyDescription(
62
+ name="OPENROUTER_API_KEY",
63
+ description="Your OpenRouter API key for accessing multiple models",
64
+ example="sk-or-v1-abc123...",
65
+ help_text="Get this from https://openrouter.ai/keys. Gives access to many different AI models.",
66
+ category="API Keys",
67
+ is_sensitive=True,
68
+ service_type="openrouter",
69
+ ),
70
+ "env.GEMINI_API_KEY": KeyDescription(
71
+ name="GEMINI_API_KEY",
72
+ description="Your Google Gemini API key",
73
+ example="AIza123...",
74
+ help_text="Get this from Google AI Studio. Required for Gemini models.",
75
+ category="API Keys",
76
+ is_sensitive=True,
77
+ service_type="google",
78
+ ),
79
+ "env.OPENAI_BASE_URL": KeyDescription(
80
+ name="OPENAI_BASE_URL",
81
+ description="Custom API endpoint for OpenAI-compatible services",
82
+ example="https://api.cerebras.ai/v1",
83
+ help_text="Use this to connect to local models (LM Studio, Ollama) or alternative providers like Cerebras.",
84
+ category="API Configuration",
85
+ ),
86
+ # Settings
87
+ "settings.max_retries": KeyDescription(
88
+ name="max_retries",
89
+ description="How many times to retry failed API calls",
90
+ example=10,
91
+ help_text="Higher values = more resilient to temporary API issues, but slower when APIs are down.",
92
+ category="Behavior Settings",
93
+ ),
94
+ "settings.max_iterations": KeyDescription(
95
+ name="max_iterations",
96
+ description="Maximum conversation turns before stopping",
97
+ example=40,
98
+ help_text="Prevents infinite loops. TunaCode will stop after this many back-and-forth exchanges.",
99
+ category="Behavior Settings",
100
+ ),
101
+ "settings.tool_ignore": KeyDescription(
102
+ name="tool_ignore",
103
+ description="List of tools TunaCode should not use",
104
+ example=["read_file", "write_file"],
105
+ help_text="Useful for restricting what TunaCode can do. Empty list means all tools are available.",
106
+ category="Tool Configuration",
107
+ ),
108
+ "settings.guide_file": KeyDescription(
109
+ name="guide_file",
110
+ description="Name of your project guide file",
111
+ example="TUNACODE.md",
112
+ help_text="TunaCode looks for this file to understand your project. Usually TUNACODE.md or README.md.",
113
+ category="Project Settings",
114
+ ),
115
+ "settings.fallback_response": KeyDescription(
116
+ name="fallback_response",
117
+ description="Whether to provide a response when tools fail",
118
+ example=True,
119
+ help_text="When true, TunaCode will try to help even if some tools don't work properly.",
120
+ category="Behavior Settings",
121
+ ),
122
+ "settings.fallback_verbosity": KeyDescription(
123
+ name="fallback_verbosity",
124
+ description="How detailed fallback responses should be",
125
+ example="normal",
126
+ help_text="Options: minimal, normal, detailed. Controls how much TunaCode explains when things go wrong.",
127
+ category="Behavior Settings",
128
+ ),
129
+ "settings.context_window_size": KeyDescription(
130
+ name="context_window_size",
131
+ description="Maximum tokens TunaCode can use in one conversation",
132
+ example=200000,
133
+ help_text="Larger values = TunaCode remembers more context, but costs more. Adjust based on your model's limits.",
134
+ category="Performance Settings",
135
+ ),
136
+ "settings.enable_streaming": KeyDescription(
137
+ name="enable_streaming",
138
+ description="Show AI responses as they're generated",
139
+ example=True,
140
+ help_text="When true, you see responses appear word-by-word. When false, you wait for complete responses.",
141
+ category="User Experience",
142
+ ),
143
+ # Ripgrep settings
144
+ "settings.ripgrep.use_bundled": KeyDescription(
145
+ name="ripgrep.use_bundled",
146
+ description="Use TunaCode's built-in ripgrep instead of system version",
147
+ example=False,
148
+ help_text="Usually false is better - uses your system's ripgrep which may be newer/faster.",
149
+ category="Search Settings",
150
+ ),
151
+ "settings.ripgrep.timeout": KeyDescription(
152
+ name="ripgrep.timeout",
153
+ description="How long to wait for search results (seconds)",
154
+ example=10,
155
+ help_text="Prevents searches from hanging. Increase for very large codebases.",
156
+ category="Search Settings",
157
+ ),
158
+ "settings.ripgrep.max_buffer_size": KeyDescription(
159
+ name="ripgrep.max_buffer_size",
160
+ description="Maximum size of search results (bytes)",
161
+ example=1048576,
162
+ help_text="1MB by default. Prevents memory issues with huge search results.",
163
+ category="Search Settings",
164
+ ),
165
+ "settings.ripgrep.max_results": KeyDescription(
166
+ name="ripgrep.max_results",
167
+ description="Maximum number of search results to return",
168
+ example=100,
169
+ help_text="Prevents overwhelming output. Increase if you need more comprehensive search results.",
170
+ category="Search Settings",
171
+ ),
172
+ "settings.ripgrep.enable_metrics": KeyDescription(
173
+ name="ripgrep.enable_metrics",
174
+ description="Collect performance data about searches",
175
+ example=False,
176
+ help_text="Enable for debugging search performance. Usually not needed.",
177
+ category="Search Settings",
178
+ ),
179
+ "settings.ripgrep.debug": KeyDescription(
180
+ name="ripgrep.debug",
181
+ description="Show detailed search debugging information",
182
+ example=False,
183
+ help_text="Enable for troubleshooting search issues. Creates verbose output.",
184
+ category="Search Settings",
185
+ ),
186
+ # Tutorial/onboarding settings
187
+ "settings.enable_tutorial": KeyDescription(
188
+ name="enable_tutorial",
189
+ description="Show tutorial prompts for new users",
190
+ example=True,
191
+ help_text="Helps new users learn TunaCode. Disable once you're comfortable with the tool.",
192
+ category="User Experience",
193
+ ),
194
+ "settings.first_installation_date": KeyDescription(
195
+ name="first_installation_date",
196
+ description="When TunaCode was first installed",
197
+ example="2025-09-11T11:50:40.167105",
198
+ help_text="Automatically set. Used for tracking usage patterns and showing relevant tips.",
199
+ category="System Information",
200
+ ),
201
+ "settings.tutorial_declined": KeyDescription(
202
+ name="tutorial_declined",
203
+ description="Whether user declined the tutorial",
204
+ example=True,
205
+ help_text="Automatically set when you skip the tutorial. Prevents repeated tutorial prompts.",
206
+ category="User Experience",
207
+ ),
208
+ # MCP Servers
209
+ "mcpServers": KeyDescription(
210
+ name="mcpServers",
211
+ description="Model Context Protocol server configurations",
212
+ example={},
213
+ help_text="Advanced feature for connecting external tools and services. Usually empty for basic usage.",
214
+ category="Advanced Features",
215
+ ),
216
+ }
217
+
218
+
219
+ def get_key_description(key_path: str) -> Optional[KeyDescription]:
220
+ """Get description for a configuration key by its path."""
221
+ return CONFIG_KEY_DESCRIPTIONS.get(key_path)
222
+
223
+
224
+ def get_service_type_for_api_key(key_name: str) -> Optional[str]:
225
+ """Determine the service type for an API key."""
226
+ service_mapping = {
227
+ "OPENAI_API_KEY": "openai",
228
+ "ANTHROPIC_API_KEY": "anthropic",
229
+ "OPENROUTER_API_KEY": "openrouter",
230
+ "GEMINI_API_KEY": "google",
231
+ }
232
+ return service_mapping.get(key_name)
233
+
234
+
235
+ def get_categories() -> Dict[str, list[KeyDescription]]:
236
+ """Get all configuration keys organized by category."""
237
+ categories: Dict[str, list[KeyDescription]] = {}
238
+
239
+ for desc in CONFIG_KEY_DESCRIPTIONS.values():
240
+ if desc.category not in categories:
241
+ categories[desc.category] = []
242
+ categories[desc.category].append(desc)
243
+
244
+ return categories
245
+
246
+
247
+ def get_configuration_glossary() -> str:
248
+ """Generate a glossary of configuration terms for the help section."""
249
+ glossary = """
250
+ [bold]Configuration Key Glossary[/bold]
251
+
252
+ [cyan]What are configuration keys?[/cyan]
253
+ Configuration keys are setting names (like 'default_model', 'max_retries') that control how TunaCode behaves.
254
+ Think of them like preferences in any app - they let you customize TunaCode to work the way you want.
255
+
256
+ [cyan]Key Categories:[/cyan]
257
+ • [yellow]AI Models[/yellow]: Which AI to use (GPT-4, Claude, etc.)
258
+ • [yellow]API Keys[/yellow]: Your credentials for AI services
259
+ • [yellow]Behavior Settings[/yellow]: How TunaCode acts (retries, iterations, etc.)
260
+ • [yellow]Tool Configuration[/yellow]: Which tools TunaCode can use
261
+ • [yellow]Performance Settings[/yellow]: Memory and speed optimizations
262
+ • [yellow]User Experience[/yellow]: Interface and tutorial preferences
263
+
264
+ [cyan]Common Examples:[/cyan]
265
+ • default_model → Which AI model to use by default
266
+ • max_retries → How many times to retry failed requests
267
+ • OPENAI_API_KEY → Your OpenAI account credentials
268
+ • tool_ignore → List of tools TunaCode shouldn't use
269
+ • context_window_size → How much conversation history to remember
270
+
271
+ [cyan]Default vs Custom:[/cyan]
272
+ • 📋 Default: TunaCode's built-in settings (work for most people)
273
+ • 🔧 Custom: Settings you've changed to fit your needs
274
+ """
275
+ return glossary.strip()
tunacode/constants.py CHANGED
@@ -9,7 +9,7 @@ from enum import Enum
9
9
 
10
10
  # Application info
11
11
  APP_NAME = "TunaCode"
12
- APP_VERSION = "0.0.76"
12
+ APP_VERSION = "0.0.76.2"
13
13
 
14
14
 
15
15
  # File patterns
@@ -44,6 +44,7 @@ class ToolName(str, Enum):
44
44
  LIST_DIR = "list_dir"
45
45
  GLOB = "glob"
46
46
  TODO = "todo"
47
+ REACT = "react"
47
48
  EXIT_PLAN_MODE = "exit_plan_mode"
48
49
 
49
50
 
@@ -57,6 +58,7 @@ TOOL_GREP = ToolName.GREP
57
58
  TOOL_LIST_DIR = ToolName.LIST_DIR
58
59
  TOOL_GLOB = ToolName.GLOB
59
60
  TOOL_TODO = ToolName.TODO
61
+ TOOL_REACT = ToolName.REACT
60
62
  TOOL_EXIT_PLAN_MODE = ToolName.EXIT_PLAN_MODE
61
63
 
62
64
  # Tool categorization
@@ -65,6 +67,7 @@ READ_ONLY_TOOLS = [
65
67
  ToolName.GREP,
66
68
  ToolName.LIST_DIR,
67
69
  ToolName.GLOB,
70
+ ToolName.REACT,
68
71
  ToolName.EXIT_PLAN_MODE,
69
72
  ]
70
73
  WRITE_TOOLS = [ToolName.WRITE_FILE, ToolName.UPDATE_FILE]
@@ -1,8 +1,45 @@
1
- """Agent helper modules."""
1
+ """Public entry points for TunaCode agent orchestration."""
2
2
 
3
- from .main import get_or_create_agent, process_request
3
+ from . import main as main
4
+ from .agent_components import (
5
+ AgentRunWithState,
6
+ AgentRunWrapper,
7
+ ResponseState,
8
+ SimpleResult,
9
+ ToolBuffer,
10
+ _process_node,
11
+ check_task_completion,
12
+ execute_tools_parallel,
13
+ extract_and_execute_tool_calls,
14
+ get_model_messages,
15
+ get_or_create_agent,
16
+ parse_json_tool_calls,
17
+ patch_tool_messages,
18
+ )
19
+ from .main import (
20
+ check_query_satisfaction,
21
+ get_agent_tool,
22
+ get_mcp_servers,
23
+ process_request,
24
+ )
4
25
 
5
26
  __all__ = [
6
27
  "process_request",
7
28
  "get_or_create_agent",
29
+ "extract_and_execute_tool_calls",
30
+ "parse_json_tool_calls",
31
+ "get_model_messages",
32
+ "patch_tool_messages",
33
+ "_process_node",
34
+ "ResponseState",
35
+ "SimpleResult",
36
+ "AgentRunWrapper",
37
+ "AgentRunWithState",
38
+ "ToolBuffer",
39
+ "check_task_completion",
40
+ "execute_tools_parallel",
41
+ "get_mcp_servers",
42
+ "check_query_satisfaction",
43
+ "get_agent_tool",
44
+ "main",
8
45
  ]
@@ -1,5 +1,7 @@
1
1
  """Agent components package for modular agent functionality."""
2
2
 
3
+ from tunacode.ui.tool_descriptions import get_batch_description
4
+
3
5
  from .agent_config import get_or_create_agent
4
6
  from .agent_helpers import (
5
7
  create_empty_response_message,
@@ -17,6 +19,7 @@ from .message_handler import get_model_messages, patch_tool_messages
17
19
  from .node_processor import _process_node
18
20
  from .response_state import ResponseState
19
21
  from .result_wrapper import AgentRunWithState, AgentRunWrapper, SimpleResult
22
+ from .streaming import stream_model_request_node
20
23
  from .task_completion import check_task_completion
21
24
  from .tool_buffer import ToolBuffer
22
25
  from .tool_executor import execute_tools_parallel
@@ -44,4 +47,6 @@ __all__ = [
44
47
  "get_tool_description",
45
48
  "get_tool_summary",
46
49
  "get_user_prompt_part_class",
50
+ "stream_model_request_node",
51
+ "get_batch_description",
47
52
  ]
@@ -209,11 +209,32 @@ async def _process_node(
209
209
  # Stream content to callback if provided
210
210
  # Use this as fallback when true token streaming is not available
211
211
  if streaming_callback and not STREAMING_AVAILABLE:
212
+ # Basic diagnostics for first-chunk behavior in fallback streaming
213
+ first_emitted = False
214
+ raw_accum = ""
212
215
  for part in node.model_response.parts:
213
216
  if hasattr(part, "content") and isinstance(part.content, str):
214
- content = part.content.strip()
215
- if content and not content.startswith('{"thought"'):
216
- # Stream non-JSON content (actual response content)
217
+ content = part.content
218
+ # Only check for empty content and JSON thoughts, don't strip whitespace
219
+ # as it may remove important leading spaces/characters
220
+ if content and not content.lstrip().startswith('{"thought"'):
221
+ if not first_emitted:
222
+ try:
223
+ import time as _t
224
+
225
+ ts_ns = _t.perf_counter_ns()
226
+ except Exception:
227
+ ts_ns = 0
228
+ # We cannot guarantee session access here; log via logger only
229
+ logger.debug(
230
+ "[src-fallback] first_chunk ts_ns=%s chunk_repr=%r len=%d",
231
+ ts_ns,
232
+ content[:5],
233
+ len(content),
234
+ )
235
+ first_emitted = True
236
+ raw_accum += content
237
+ # Stream the original content without stripping
217
238
  if streaming_callback:
218
239
  await streaming_callback(content)
219
240