tunacode-cli 0.0.55__py3-none-any.whl → 0.0.78.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic; see the registry's advisory page for more details.
- tunacode/cli/commands/__init__.py +2 -2
- tunacode/cli/commands/implementations/__init__.py +2 -3
- tunacode/cli/commands/implementations/command_reload.py +48 -0
- tunacode/cli/commands/implementations/debug.py +2 -2
- tunacode/cli/commands/implementations/development.py +10 -8
- tunacode/cli/commands/implementations/model.py +357 -29
- tunacode/cli/commands/implementations/quickstart.py +43 -0
- tunacode/cli/commands/implementations/system.py +96 -3
- tunacode/cli/commands/implementations/template.py +0 -2
- tunacode/cli/commands/registry.py +139 -5
- tunacode/cli/commands/slash/__init__.py +32 -0
- tunacode/cli/commands/slash/command.py +157 -0
- tunacode/cli/commands/slash/loader.py +135 -0
- tunacode/cli/commands/slash/processor.py +294 -0
- tunacode/cli/commands/slash/types.py +93 -0
- tunacode/cli/commands/slash/validator.py +400 -0
- tunacode/cli/main.py +23 -2
- tunacode/cli/repl.py +217 -190
- tunacode/cli/repl_components/command_parser.py +38 -4
- tunacode/cli/repl_components/error_recovery.py +85 -4
- tunacode/cli/repl_components/output_display.py +12 -1
- tunacode/cli/repl_components/tool_executor.py +1 -1
- tunacode/configuration/defaults.py +12 -3
- tunacode/configuration/key_descriptions.py +284 -0
- tunacode/configuration/settings.py +0 -1
- tunacode/constants.py +12 -40
- tunacode/core/agents/__init__.py +43 -2
- tunacode/core/agents/agent_components/__init__.py +7 -0
- tunacode/core/agents/agent_components/agent_config.py +249 -55
- tunacode/core/agents/agent_components/agent_helpers.py +43 -13
- tunacode/core/agents/agent_components/node_processor.py +179 -139
- tunacode/core/agents/agent_components/response_state.py +123 -6
- tunacode/core/agents/agent_components/state_transition.py +116 -0
- tunacode/core/agents/agent_components/streaming.py +296 -0
- tunacode/core/agents/agent_components/task_completion.py +19 -6
- tunacode/core/agents/agent_components/tool_buffer.py +21 -1
- tunacode/core/agents/agent_components/tool_executor.py +10 -0
- tunacode/core/agents/main.py +522 -370
- tunacode/core/agents/main_legact.py +538 -0
- tunacode/core/agents/prompts.py +66 -0
- tunacode/core/agents/utils.py +29 -121
- tunacode/core/code_index.py +83 -29
- tunacode/core/setup/__init__.py +0 -2
- tunacode/core/setup/config_setup.py +110 -20
- tunacode/core/setup/config_wizard.py +230 -0
- tunacode/core/setup/coordinator.py +14 -5
- tunacode/core/state.py +16 -20
- tunacode/core/token_usage/usage_tracker.py +5 -3
- tunacode/core/tool_authorization.py +352 -0
- tunacode/core/tool_handler.py +67 -40
- tunacode/exceptions.py +119 -5
- tunacode/prompts/system.xml +751 -0
- tunacode/services/mcp.py +125 -7
- tunacode/setup.py +5 -25
- tunacode/tools/base.py +163 -0
- tunacode/tools/bash.py +110 -1
- tunacode/tools/glob.py +332 -34
- tunacode/tools/grep.py +179 -82
- tunacode/tools/grep_components/result_formatter.py +98 -4
- tunacode/tools/list_dir.py +132 -2
- tunacode/tools/prompts/bash_prompt.xml +72 -0
- tunacode/tools/prompts/glob_prompt.xml +45 -0
- tunacode/tools/prompts/grep_prompt.xml +98 -0
- tunacode/tools/prompts/list_dir_prompt.xml +31 -0
- tunacode/tools/prompts/react_prompt.xml +23 -0
- tunacode/tools/prompts/read_file_prompt.xml +54 -0
- tunacode/tools/prompts/run_command_prompt.xml +64 -0
- tunacode/tools/prompts/update_file_prompt.xml +53 -0
- tunacode/tools/prompts/write_file_prompt.xml +37 -0
- tunacode/tools/react.py +153 -0
- tunacode/tools/read_file.py +91 -0
- tunacode/tools/run_command.py +114 -0
- tunacode/tools/schema_assembler.py +167 -0
- tunacode/tools/update_file.py +94 -0
- tunacode/tools/write_file.py +86 -0
- tunacode/tools/xml_helper.py +83 -0
- tunacode/tutorial/__init__.py +9 -0
- tunacode/tutorial/content.py +98 -0
- tunacode/tutorial/manager.py +182 -0
- tunacode/tutorial/steps.py +124 -0
- tunacode/types.py +20 -27
- tunacode/ui/completers.py +434 -50
- tunacode/ui/config_dashboard.py +585 -0
- tunacode/ui/console.py +63 -11
- tunacode/ui/input.py +20 -3
- tunacode/ui/keybindings.py +7 -4
- tunacode/ui/model_selector.py +395 -0
- tunacode/ui/output.py +40 -19
- tunacode/ui/panels.py +212 -43
- tunacode/ui/path_heuristics.py +91 -0
- tunacode/ui/prompt_manager.py +5 -1
- tunacode/ui/tool_ui.py +33 -10
- tunacode/utils/api_key_validation.py +93 -0
- tunacode/utils/config_comparator.py +340 -0
- tunacode/utils/json_utils.py +206 -0
- tunacode/utils/message_utils.py +14 -4
- tunacode/utils/models_registry.py +593 -0
- tunacode/utils/ripgrep.py +332 -9
- tunacode/utils/text_utils.py +18 -1
- tunacode/utils/user_configuration.py +45 -0
- tunacode_cli-0.0.78.6.dist-info/METADATA +260 -0
- tunacode_cli-0.0.78.6.dist-info/RECORD +158 -0
- {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/WHEEL +1 -2
- tunacode/cli/commands/implementations/todo.py +0 -217
- tunacode/context.py +0 -71
- tunacode/core/setup/git_safety_setup.py +0 -182
- tunacode/prompts/system.md +0 -731
- tunacode/tools/read_file_async_poc.py +0 -196
- tunacode/tools/todo.py +0 -349
- tunacode_cli-0.0.55.dist-info/METADATA +0 -322
- tunacode_cli-0.0.55.dist-info/RECORD +0 -126
- tunacode_cli-0.0.55.dist-info/top_level.txt +0 -1
- {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
"""State transition management for agent response processing."""
|
|
2
|
+
|
|
3
|
+
import threading
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Dict, Optional, Set

from tunacode.types import AgentState
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
pass
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class InvalidStateTransitionError(Exception):
    """Raised when an invalid state transition is attempted.

    Attributes:
        from_state: State the machine was in when the transition was requested.
        to_state: State that was requested but is not reachable from from_state.
        message: Human-readable description of the rejected transition.
    """

    def __init__(self, from_state: Enum, to_state: Enum, message: Optional[str] = None):
        self.from_state = from_state
        self.to_state = to_state
        # Default message renders the enum *values* (e.g. "user_input → assistant").
        self.message = message or f"Invalid state transition: {from_state.value} → {to_state.value}"
        super().__init__(self.message)
|
|
23
|
+
|
|
24
|
+
@dataclass
class StateTransitionRules:
    """Transition table describing which state changes are legal.

    ``valid_transitions`` maps each state to the set of states it may move
    to in a single step; states absent from the mapping have no outgoing
    transitions.
    """

    valid_transitions: Dict[Enum, Set[Enum]]

    def get_valid_next_states(self, current_state: Enum) -> Set[Enum]:
        """Return every state reachable in one step from current_state."""
        return self.valid_transitions.get(current_state, set())

    def is_valid_transition(self, from_state: Enum, to_state: Enum) -> bool:
        """Return True when moving from from_state to to_state is allowed."""
        return to_state in self.get_valid_next_states(from_state)
|
+
|
|
40
|
+
class AgentStateMachine:
    """Thread-safe state machine for agent response processing.

    All state reads and writes are serialized through a reentrant lock so the
    machine can be shared between the streaming callback and the main agent
    loop.
    """

    def __init__(self, initial_state: "AgentState", rules: StateTransitionRules):
        """
        Initialize the state machine.

        Args:
            initial_state: The starting state
            rules: Transition rules defining valid state changes
        """
        self._state = initial_state
        self._rules = rules
        self._lock = threading.RLock()  # Reentrant lock for thread safety
        self._completion_detected = False

    @property
    def current_state(self) -> "AgentState":
        """Get the current state."""
        with self._lock:
            return self._state

    def transition_to(self, new_state: "AgentState") -> None:
        """
        Transition to a new state.

        Self-transitions are treated as no-ops and never raise, even though
        the rules table does not list any state as its own successor.

        Args:
            new_state: The state to transition to

        Raises:
            InvalidStateTransitionError: If the transition is not valid
        """
        with self._lock:
            # Handle self-transitions as no-ops. This check must run BEFORE
            # validation: AGENT_TRANSITION_RULES contains no self-transitions,
            # so validating first would incorrectly reject them (previously the
            # no-op branch below the validity check was unreachable).
            if self._state == new_state:
                return

            if not self._rules.is_valid_transition(self._state, new_state):
                raise InvalidStateTransitionError(
                    self._state,
                    new_state,
                    f"Invalid state transition: {self._state.value} → {new_state.value}",
                )

            self._state = new_state

    def can_transition_to(self, target_state: "AgentState") -> bool:
        """Check if a transition to the target state is allowed."""
        with self._lock:
            return self._rules.is_valid_transition(self._state, target_state)

    def set_completion_detected(self, detected: bool = True) -> None:
        """Mark that completion has been detected in the RESPONSE state."""
        with self._lock:
            self._completion_detected = detected

    def is_completed(self) -> bool:
        """Check if the task is completed (only valid in RESPONSE state)."""
        with self._lock:
            return self._state == AgentState.RESPONSE and self._completion_detected

    def reset(self, initial_state: Optional["AgentState"] = None) -> None:
        """Reset the state machine to initial state (USER_INPUT by default)."""
        with self._lock:
            self._state = initial_state or AgentState.USER_INPUT
            self._completion_detected = False
|
|
107
|
+
|
|
108
|
+
# Define the transition rules for the agent state machine.
#
# Flow: USER_INPUT -> ASSISTANT -> (optionally TOOL_EXECUTION ->) RESPONSE,
# with RESPONSE -> ASSISTANT allowing the agent to keep iterating. No state
# lists itself as a successor.
AGENT_TRANSITION_RULES = StateTransitionRules(
    valid_transitions={
        AgentState.USER_INPUT: {AgentState.ASSISTANT},
        AgentState.ASSISTANT: {AgentState.TOOL_EXECUTION, AgentState.RESPONSE},
        AgentState.TOOL_EXECUTION: {AgentState.RESPONSE},
        AgentState.RESPONSE: {AgentState.ASSISTANT},  # Can transition back to continue
    }
)
|
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
"""Streaming instrumentation and handling for agent model request nodes.
|
|
2
|
+
|
|
3
|
+
This module encapsulates verbose streaming + logging logic used during
|
|
4
|
+
token-level streaming from the LLM provider. It updates session debug fields
|
|
5
|
+
and streams deltas to the provided callback while being resilient to errors.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from typing import Awaitable, Callable, Optional
|
|
11
|
+
|
|
12
|
+
from pydantic_ai.messages import PartDeltaEvent, TextPartDelta
|
|
13
|
+
|
|
14
|
+
from tunacode.core.logging.logger import get_logger
|
|
15
|
+
from tunacode.core.state import StateManager
|
|
16
|
+
|
|
17
|
+
logger = get_logger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
async def stream_model_request_node(
    node,
    agent_run_ctx,
    state_manager: StateManager,
    streaming_callback: Optional[Callable[[str], Awaitable[None]]],
    request_id: str,
    iteration_index: int,
) -> None:
    """Stream token deltas for a model request node with detailed instrumentation.

    This function mirrors the prior inline logic in main.py but is extracted to
    keep main.py lean. On streaming failure, it degrades gracefully to allow
    non-streaming processing of the node.

    Args:
        node: Model-request node exposing ``stream(ctx)`` (and possibly the
            private ``_did_stream`` flag used for the degradation path).
        agent_run_ctx: Run context passed through to ``node.stream``.
        state_manager: Session holder; ``_debug_raw_stream_accum`` and
            ``_debug_events`` are (re)initialized here per node.
        streaming_callback: Awaitable consumer of text deltas; when None the
            function returns immediately without opening a stream.
        request_id: Identifier used only for log correlation.
        iteration_index: Agent-loop iteration, used only for log correlation.
    """
    if not streaming_callback:
        return

    # Gracefully handle streaming errors from LLM provider
    try:
        async with node.stream(agent_run_ctx) as request_stream:
            # Initialize per-node debug accumulators (reset on every node).
            state_manager.session._debug_raw_stream_accum = ""
            state_manager.session._debug_events = []
            first_delta_logged = False
            debug_event_count = 0
            first_delta_seen = False
            seeded_prefix_sent = False
            pre_first_delta_text: Optional[str] = None

            # Helper to extract text from a possible final-result object.
            # Tries plain strings, common text attributes, parts lists, then
            # recurses into nested result/response/final wrappers.
            def _extract_text(obj) -> Optional[str]:
                try:
                    if obj is None:
                        return None
                    if isinstance(obj, str):
                        return obj
                    # Common attributes that may hold text
                    for attr in ("output", "text", "content", "message"):
                        v = getattr(obj, attr, None)
                        if isinstance(v, str) and v:
                            return v
                    # Parts-based result
                    parts = getattr(obj, "parts", None)
                    if isinstance(parts, (list, tuple)) and parts:
                        texts: list[str] = []
                        for p in parts:
                            c = getattr(p, "content", None)
                            if isinstance(c, str) and c:
                                texts.append(c)
                        if texts:
                            return "".join(texts)
                    # Nested .result or .response
                    for attr in ("result", "response", "final"):
                        v = getattr(obj, attr, None)
                        t = _extract_text(v)
                        if t:
                            return t
                except Exception:
                    return None
                return None

            # Mark stream open (best effort; timing is debug-only)
            try:
                import time as _t

                state_manager.session._debug_events.append(
                    f"[src] stream_opened ts_ns={_t.perf_counter_ns()}"
                )
            except Exception:
                pass

            async for event in request_stream:
                debug_event_count += 1
                # Log first few raw event types for diagnosis
                if debug_event_count <= 5:
                    try:
                        etype = type(event).__name__
                        d = getattr(event, "delta", None)
                        dtype = type(d).__name__ if d is not None else None
                        c = getattr(d, "content_delta", None) if d is not None else None
                        clen = len(c) if isinstance(c, str) else None
                        cpreview = repr(c[:5]) if isinstance(c, str) else None
                        # Probe common fields on non-delta events to see if they contain text
                        r = getattr(event, "result", None)
                        rtype = type(r).__name__ if r is not None else None
                        rpreview = None
                        rplen = None
                        # Also inspect event.part if present (e.g., PartStartEvent)
                        p = getattr(event, "part", None)
                        ptype = type(p).__name__ if p is not None else None
                        pkind = getattr(p, "part_kind", None)
                        pcontent = getattr(p, "content", None)
                        ppreview = repr(pcontent[:20]) if isinstance(pcontent, str) else None
                        pplen = len(pcontent) if isinstance(pcontent, str) else None
                        try:
                            if isinstance(r, str):
                                rpreview = repr(r[:20])
                                rplen = len(r)
                            elif r is not None:
                                # Try a few common shapes: .output, .text, .parts
                                r_output = getattr(r, "output", None)
                                r_text = getattr(r, "text", None)
                                r_parts = getattr(r, "parts", None)
                                if isinstance(r_output, str):
                                    rpreview = repr(r_output[:20])
                                    rplen = len(r_output)
                                elif isinstance(r_text, str):
                                    rpreview = repr(r_text[:20])
                                    rplen = len(r_text)
                                elif isinstance(r_parts, (list, tuple)) and r_parts:
                                    # render a compact preview of first textual part
                                    for _rp in r_parts:
                                        rc = getattr(_rp, "content", None)
                                        if isinstance(rc, str) and rc:
                                            rpreview = repr(rc[:20])
                                            rplen = len(rc)
                                            break
                        except Exception:
                            pass
                        event_info = (
                            f"[src] event[{debug_event_count}] etype={etype} d={dtype} "
                            f"clen={clen} cprev={cpreview} rtype={rtype} "
                            f"rprev={rpreview} rlen={rplen} ptype={ptype} "
                            f"pkind={pkind} pprev={ppreview} plen={pplen}"
                        )
                        state_manager.session._debug_events.append(event_info)
                    except Exception:
                        pass

                # Attempt to capture pre-first-delta text from non-delta events
                if not first_delta_seen:
                    try:
                        # event might be a PartStartEvent with .part.content
                        if hasattr(event, "part") and hasattr(event.part, "content"):
                            pc = event.part.content
                            if isinstance(pc, str) and pc and not pc.lstrip().startswith("\n"):
                                # capture a short potential prefix (cap at 100 chars)
                                pre_first_delta_text = pc[:100] if len(pc) > 100 else pc
                    except Exception:
                        pass

                # Handle delta events
                if isinstance(event, PartDeltaEvent):
                    if isinstance(event.delta, TextPartDelta):
                        if event.delta.content_delta is not None and streaming_callback:
                            # Seed prefix logic before the first true delta
                            if not first_delta_seen:
                                first_delta_seen = True
                                try:
                                    delta_text = event.delta.content_delta or ""
                                    # Only seed when we have a short, safe candidate
                                    if (
                                        pre_first_delta_text
                                        and len(pre_first_delta_text) <= 100
                                        and not seeded_prefix_sent
                                    ):
                                        # If delta contains the candidate,
                                        # emit the prefix up to that point
                                        # NOTE(review): probe is a prefix of
                                        # pre_first_delta_text itself, so find()
                                        # below always returns 0 and the idx>0 /
                                        # idx==-1 branches appear unreachable —
                                        # confirm whether the intended operand
                                        # was delta_text.
                                        probe = pre_first_delta_text[:20]
                                        idx = pre_first_delta_text.find(probe)
                                        if idx > 0:
                                            prefix = pre_first_delta_text[:idx]
                                            if prefix:
                                                await streaming_callback(prefix)
                                                seeded_prefix_sent = True
                                                preview_msg = (
                                                    f"[src] seeded_prefix idx={idx} "
                                                    f"len={len(prefix)} preview={repr(prefix)}"
                                                )
                                                state_manager.session._debug_events.append(
                                                    preview_msg
                                                )
                                        elif idx == -1:
                                            # Delta text does not appear in pre-text;
                                            # emit the pre-text directly as a seed
                                            # Safe for short pre-text (e.g., first word)
                                            # to avoid duplication
                                            if pre_first_delta_text.strip():
                                                await streaming_callback(pre_first_delta_text)
                                                seeded_prefix_sent = True
                                                text_len = len(pre_first_delta_text)
                                                preview_repr = repr(pre_first_delta_text)
                                                direct_msg = (
                                                    f"[src] seeded_prefix_direct "
                                                    f"len={text_len} preview={preview_repr}"
                                                )
                                                state_manager.session._debug_events.append(
                                                    direct_msg
                                                )
                                        else:
                                            # idx == 0 means pre-text is already the
                                            # start of delta; skip
                                            skip_msg = (
                                                f"[src] seed_skip idx={idx} "
                                                f"delta_len={len(delta_text)}"
                                            )
                                            state_manager.session._debug_events.append(skip_msg)
                                except Exception:
                                    pass
                                finally:
                                    # Seeding is one-shot: discard the candidate
                                    # regardless of outcome.
                                    pre_first_delta_text = None

                            # Record first-delta instrumentation
                            if not first_delta_logged:
                                try:
                                    import time as _t

                                    ts_ns = _t.perf_counter_ns()
                                except Exception:
                                    ts_ns = 0
                                # Store debug event summary for later display
                                chunk_preview = repr(
                                    event.delta.content_delta[:5]
                                    if event.delta.content_delta
                                    else ""
                                )
                                chunk_len = len(event.delta.content_delta or "")
                                delta_msg = (
                                    f"[src] first_delta_received ts_ns={ts_ns} "
                                    f"chunk_repr={chunk_preview} len={chunk_len}"
                                )
                                state_manager.session._debug_events.append(delta_msg)
                                first_delta_logged = True

                            # Accumulate full raw stream for comparison and forward delta
                            delta_text = event.delta.content_delta or ""
                            state_manager.session._debug_raw_stream_accum += delta_text
                            await streaming_callback(delta_text)
                        else:
                            # Log empty or non-text deltas encountered
                            # NOTE(review): reconstructed from a flattened diff;
                            # this else is attached to the content_delta check —
                            # confirm it was not meant for the TextPartDelta
                            # isinstance above.
                            state_manager.session._debug_events.append(
                                "[src] empty_or_nontext_delta_skipped"
                            )
                else:
                    # Capture any final result text for diagnostics
                    try:
                        final_text = _extract_text(getattr(event, "result", None))
                        if final_text:
                            final_msg = (
                                f"[src] final_text_preview len={len(final_text)} "
                                f"preview={repr(final_text[:20])}"
                            )
                            state_manager.session._debug_events.append(final_msg)
                    except Exception:
                        pass
    except Exception as stream_err:
        # Log with context and optionally notify UI, then degrade gracefully
        logger.warning(
            "Streaming error req=%s iter=%s: %s",
            request_id,
            iteration_index,
            stream_err,
            exc_info=True,
        )

        # Reset node state to allow graceful degradation to non-streaming mode
        try:
            if hasattr(node, "_did_stream"):
                node._did_stream = False
                logger.debug(
                    "Reset node._did_stream after streaming error (req=%s iter=%s)",
                    request_id,
                    iteration_index,
                )
        except Exception as reset_err:
            logger.debug(
                "Failed to reset node._did_stream (req=%s iter=%s): %s",
                request_id,
                iteration_index,
                reset_err,
                exc_info=True,
            )

        if getattr(state_manager.session, "show_thoughts", False):
            from tunacode.ui import console as ui

            await ui.warning("Streaming failed; falling back to non-streaming mode")
|
|
@@ -1,7 +1,13 @@
|
|
|
1
1
|
"""Task completion detection utilities."""
|
|
2
2
|
|
|
3
|
+
import re
|
|
3
4
|
from typing import Tuple
|
|
4
5
|
|
|
6
|
+
# Markers an agent emits at the start of a line to signal task completion.
# Matched case-insensitively; the matched prefix (marker plus trailing
# whitespace/colon) is stripped from the returned content.
_COMPLETION_MARKERS = (
    re.compile(r"^\s*TUNACODE\s+DONE:\s*", re.IGNORECASE),
    re.compile(r"^\s*TUNACODE[_\s]+TASK_COMPLETE\s*:?[\s]*", re.IGNORECASE),
)
|
|
10
|
+
|
|
5
11
|
|
|
6
12
|
def check_task_completion(content: str) -> Tuple[bool, str]:
|
|
7
13
|
"""
|
|
@@ -18,11 +24,18 @@ def check_task_completion(content: str) -> Tuple[bool, str]:
|
|
|
18
24
|
if not content:
|
|
19
25
|
return False, content
|
|
20
26
|
|
|
21
|
-
lines = content.
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
+
lines = content.split("\n")
|
|
28
|
+
|
|
29
|
+
for idx, line in enumerate(lines):
|
|
30
|
+
for pattern in _COMPLETION_MARKERS:
|
|
31
|
+
match = pattern.match(line)
|
|
32
|
+
if match:
|
|
33
|
+
remainder = line[match.end() :].strip()
|
|
34
|
+
cleaned_lines = lines[:idx]
|
|
35
|
+
if remainder:
|
|
36
|
+
cleaned_lines.append(remainder)
|
|
37
|
+
cleaned_lines.extend(lines[idx + 1 :])
|
|
38
|
+
cleaned = "\n".join(cleaned_lines).strip()
|
|
39
|
+
return True, cleaned
|
|
27
40
|
|
|
28
41
|
return False, content
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"""Tool buffer for managing parallel execution of read-only tools."""
|
|
2
2
|
|
|
3
|
-
from typing import Any, List, Tuple
|
|
3
|
+
from typing import Any, Dict, List, Tuple
|
|
4
4
|
|
|
5
5
|
|
|
6
6
|
class ToolBuffer:
|
|
@@ -22,3 +22,23 @@ class ToolBuffer:
|
|
|
22
22
|
def has_tasks(self) -> bool:
|
|
23
23
|
"""Check if there are buffered tasks."""
|
|
24
24
|
return len(self.read_only_tasks) > 0
|
|
25
|
+
|
|
26
|
+
def size(self) -> int:
|
|
27
|
+
"""Return the number of buffered tasks."""
|
|
28
|
+
return len(self.read_only_tasks)
|
|
29
|
+
|
|
30
|
+
def peek(self) -> List[Tuple[Any, Any]]:
|
|
31
|
+
"""Return buffered tasks without clearing the buffer."""
|
|
32
|
+
return self.read_only_tasks.copy()
|
|
33
|
+
|
|
34
|
+
def count_by_type(self) -> Dict[str, int]:
|
|
35
|
+
"""Count buffered tools by type for metrics and debugging."""
|
|
36
|
+
counts: Dict[str, int] = {}
|
|
37
|
+
for part, _ in self.read_only_tasks:
|
|
38
|
+
tool_name = getattr(part, "tool_name", "unknown")
|
|
39
|
+
counts[tool_name] = counts.get(tool_name, 0) + 1
|
|
40
|
+
return counts
|
|
41
|
+
|
|
42
|
+
def clear(self) -> None:
|
|
43
|
+
"""Clear all buffered tasks without executing them."""
|
|
44
|
+
self.read_only_tasks.clear()
|
|
@@ -34,6 +34,16 @@ async def execute_tools_parallel(
|
|
|
34
34
|
except Exception as e:
|
|
35
35
|
logger.error(f"Error executing parallel tool: {e}", exc_info=True)
|
|
36
36
|
return e
|
|
37
|
+
finally:
|
|
38
|
+
# Tool execution completed - resource cleanup handled by BaseTool.execute()
|
|
39
|
+
# Each tool's cleanup() method is called automatically in its execute()
|
|
40
|
+
# finally block. This ensures resources (file handles, connections,
|
|
41
|
+
# processes) are freed regardless of success or failure.
|
|
42
|
+
tool_name = getattr(part, "tool_name", "<unknown>")
|
|
43
|
+
logger.debug(
|
|
44
|
+
"Parallel tool execution completed (success or failure): tool=%s",
|
|
45
|
+
tool_name,
|
|
46
|
+
)
|
|
37
47
|
|
|
38
48
|
# If we have more tools than max_parallel, execute in batches
|
|
39
49
|
if len(tool_calls) > max_parallel:
|