tunacode-cli 0.0.76__py3-none-any.whl → 0.0.76.2__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
Potentially problematic release: this version of tunacode-cli might be problematic.
- tunacode/cli/commands/implementations/debug.py +2 -2
- tunacode/cli/commands/implementations/system.py +1 -1
- tunacode/cli/main.py +10 -0
- tunacode/cli/repl.py +28 -8
- tunacode/cli/repl_components/error_recovery.py +2 -2
- tunacode/cli/repl_components/tool_executor.py +1 -1
- tunacode/configuration/defaults.py +2 -2
- tunacode/configuration/key_descriptions.py +275 -0
- tunacode/constants.py +4 -1
- tunacode/core/agents/__init__.py +39 -2
- tunacode/core/agents/agent_components/__init__.py +5 -0
- tunacode/core/agents/agent_components/node_processor.py +24 -3
- tunacode/core/agents/agent_components/streaming.py +268 -0
- tunacode/core/agents/agent_components/task_completion.py +15 -6
- tunacode/core/agents/main.py +531 -380
- tunacode/core/agents/utils.py +1 -129
- tunacode/core/setup/config_wizard.py +2 -1
- tunacode/core/state.py +15 -0
- tunacode/tools/prompts/react_prompt.xml +23 -0
- tunacode/tools/react.py +153 -0
- tunacode/ui/config_dashboard.py +567 -0
- tunacode/ui/panels.py +92 -9
- tunacode/utils/config_comparator.py +340 -0
- {tunacode_cli-0.0.76.dist-info → tunacode_cli-0.0.76.2.dist-info}/METADATA +63 -6
- {tunacode_cli-0.0.76.dist-info → tunacode_cli-0.0.76.2.dist-info}/RECORD +28 -22
- {tunacode_cli-0.0.76.dist-info → tunacode_cli-0.0.76.2.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.76.dist-info → tunacode_cli-0.0.76.2.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.76.dist-info → tunacode_cli-0.0.76.2.dist-info}/licenses/LICENSE +0 -0
tunacode/core/agents/agent_components/streaming.py
@@ -0,0 +1,268 @@
+"""Streaming instrumentation and handling for agent model request nodes.
+
+This module encapsulates verbose streaming + logging logic used during
+token-level streaming from the LLM provider. It updates session debug fields
+and streams deltas to the provided callback while being resilient to errors.
+"""
+
+from __future__ import annotations
+
+from typing import Awaitable, Callable, Optional
+
+from tunacode.core.logging.logger import get_logger
+from tunacode.core.state import StateManager
+
+# Import streaming types with fallback for older versions
+try:  # pragma: no cover - import guard for pydantic_ai streaming types
+    from pydantic_ai.messages import PartDeltaEvent, TextPartDelta  # type: ignore
+
+    STREAMING_AVAILABLE = True
+except Exception:  # pragma: no cover - fallback when streaming types unavailable
+    PartDeltaEvent = None  # type: ignore
+    TextPartDelta = None  # type: ignore
+    STREAMING_AVAILABLE = False
+
+
+logger = get_logger(__name__)
+
+
+async def stream_model_request_node(
+    node,
+    agent_run_ctx,
+    state_manager: StateManager,
+    streaming_callback: Optional[Callable[[str], Awaitable[None]]],
+    request_id: str,
+    iteration_index: int,
+) -> None:
+    """Stream token deltas for a model request node with detailed instrumentation.
+
+    This function mirrors the prior inline logic in main.py but is extracted to
+    keep main.py lean. It performs up to one retry on streaming failure and then
+    degrades to non-streaming for that node.
+    """
+    if not (STREAMING_AVAILABLE and streaming_callback):
+        return
+
+    # Gracefully handle streaming errors from LLM provider
+    for attempt in range(2):  # simple retry once, then degrade gracefully
+        try:
+            async with node.stream(agent_run_ctx) as request_stream:
+                # Initialize per-node debug accumulators
+                state_manager.session._debug_raw_stream_accum = ""
+                state_manager.session._debug_events = []
+                first_delta_logged = False
+                debug_event_count = 0
+                first_delta_seen = False
+                seeded_prefix_sent = False
+                pre_first_delta_text: Optional[str] = None
+
+                # Helper to extract text from a possible final-result object
+                def _extract_text(obj) -> Optional[str]:
+                    try:
+                        if obj is None:
+                            return None
+                        if isinstance(obj, str):
+                            return obj
+                        # Common attributes that may hold text
+                        for attr in ("output", "text", "content", "message"):
+                            v = getattr(obj, attr, None)
+                            if isinstance(v, str) and v:
+                                return v
+                        # Parts-based result
+                        parts = getattr(obj, "parts", None)
+                        if isinstance(parts, (list, tuple)) and parts:
+                            texts: list[str] = []
+                            for p in parts:
+                                c = getattr(p, "content", None)
+                                if isinstance(c, str) and c:
+                                    texts.append(c)
+                            if texts:
+                                return "".join(texts)
+                        # Nested .result or .response
+                        for attr in ("result", "response", "final"):
+                            v = getattr(obj, attr, None)
+                            t = _extract_text(v)
+                            if t:
+                                return t
+                    except Exception:
+                        return None
+                    return None
+
+                # Mark stream open
+                try:
+                    import time as _t
+
+                    state_manager.session._debug_events.append(
+                        f"[src] stream_opened ts_ns={_t.perf_counter_ns()}"
+                    )
+                except Exception:
+                    pass
+
+                async for event in request_stream:
+                    debug_event_count += 1
+                    # Log first few raw event types for diagnosis
+                    if debug_event_count <= 5:
+                        try:
+                            etype = type(event).__name__
+                            d = getattr(event, "delta", None)
+                            dtype = type(d).__name__ if d is not None else None
+                            c = getattr(d, "content_delta", None) if d is not None else None
+                            clen = len(c) if isinstance(c, str) else None
+                            cpreview = repr(c[:5]) if isinstance(c, str) else None
+                            # Probe common fields on non-delta events to see if they contain text
+                            r = getattr(event, "result", None)
+                            rtype = type(r).__name__ if r is not None else None
+                            rpreview = None
+                            rplen = None
+                            # Also inspect event.part if present (e.g., PartStartEvent)
+                            p = getattr(event, "part", None)
+                            ptype = type(p).__name__ if p is not None else None
+                            pkind = getattr(p, "part_kind", None)
+                            pcontent = getattr(p, "content", None)
+                            ppreview = repr(pcontent[:20]) if isinstance(pcontent, str) else None
+                            pplen = len(pcontent) if isinstance(pcontent, str) else None
+                            try:
+                                if isinstance(r, str):
+                                    rpreview = repr(r[:20])
+                                    rplen = len(r)
+                                elif r is not None:
+                                    # Try a few common shapes: .output, .text, .parts
+                                    r_output = getattr(r, "output", None)
+                                    r_text = getattr(r, "text", None)
+                                    r_parts = getattr(r, "parts", None)
+                                    if isinstance(r_output, str):
+                                        rpreview = repr(r_output[:20])
+                                        rplen = len(r_output)
+                                    elif isinstance(r_text, str):
+                                        rpreview = repr(r_text[:20])
+                                        rplen = len(r_text)
+                                    elif isinstance(r_parts, (list, tuple)) and r_parts:
+                                        # render a compact preview of first textual part
+                                        for _rp in r_parts:
+                                            rc = getattr(_rp, "content", None)
+                                            if isinstance(rc, str) and rc:
+                                                rpreview = repr(rc[:20])
+                                                rplen = len(rc)
+                                                break
+                            except Exception:
+                                pass
+                            state_manager.session._debug_events.append(
+                                f"[src] event[{debug_event_count}] etype={etype} d={dtype} clen={clen} cprev={cpreview} rtype={rtype} rprev={rpreview} rlen={rplen} ptype={ptype} pkind={pkind} pprev={ppreview} plen={pplen}"
+                            )
+                        except Exception:
+                            pass
+
+                    # Attempt to capture pre-first-delta text from non-delta events
+                    if not first_delta_seen:
+                        try:
+                            # event might be a PartStartEvent with .part.content
+                            if hasattr(event, "part") and hasattr(event.part, "content"):
+                                pc = event.part.content
+                                if isinstance(pc, str) and pc and not pc.lstrip().startswith("\n"):
+                                    # capture a short potential prefix
+                                    pre_first_delta_text = pc[:100] if len(pc) > 100 else pc
+                        except Exception:
+                            pass
+
+                    # Handle delta events
+                    if PartDeltaEvent and isinstance(event, PartDeltaEvent):
+                        if isinstance(event.delta, TextPartDelta):
+                            if event.delta.content_delta is not None and streaming_callback:
+                                # Seed prefix logic before the first true delta
+                                if not first_delta_seen:
+                                    first_delta_seen = True
+                                    try:
+                                        delta_text = event.delta.content_delta or ""
+                                        # Only seed when we have a short, safe candidate
+                                        if (
+                                            pre_first_delta_text
+                                            and len(pre_first_delta_text) <= 100
+                                            and not seeded_prefix_sent
+                                        ):
+                                            # If delta contains the candidate, emit the prefix up to that point
+                                            probe = pre_first_delta_text[:20]
+                                            idx = pre_first_delta_text.find(probe)
+                                            if idx > 0:
+                                                prefix = pre_first_delta_text[:idx]
+                                                if prefix:
+                                                    await streaming_callback(prefix)
+                                                    seeded_prefix_sent = True
+                                                    state_manager.session._debug_events.append(
+                                                        f"[src] seeded_prefix idx={idx} len={len(prefix)} preview={repr(prefix)}"
+                                                    )
+                                            elif idx == -1:
+                                                # Delta text does not appear in pre-text; emit the pre-text directly as a seed
+                                                # Safe for short pre-text (e.g., first word) to avoid duplication
+                                                if pre_first_delta_text.strip():
+                                                    await streaming_callback(pre_first_delta_text)
+                                                    seeded_prefix_sent = True
+                                                    state_manager.session._debug_events.append(
+                                                        f"[src] seeded_prefix_direct len={len(pre_first_delta_text)} preview={repr(pre_first_delta_text)}"
+                                                    )
+                                            else:
+                                                # idx == 0 means pre-text is already the start of delta; skip
+                                                state_manager.session._debug_events.append(
+                                                    f"[src] seed_skip idx={idx} delta_len={len(delta_text)}"
+                                                )
+                                    except Exception:
+                                        pass
+                                    finally:
+                                        pre_first_delta_text = None
+
+                                # Record first-delta instrumentation
+                                if not first_delta_logged:
+                                    try:
+                                        import time as _t
+
+                                        ts_ns = _t.perf_counter_ns()
+                                    except Exception:
+                                        ts_ns = 0
+                                    # Store debug event summary for later display
+                                    state_manager.session._debug_events.append(
+                                        f"[src] first_delta_received ts_ns={ts_ns} chunk_repr={repr(event.delta.content_delta[:5] if event.delta.content_delta else '')} len={len(event.delta.content_delta or '')}"
+                                    )
+                                    first_delta_logged = True
+
+                                # Accumulate full raw stream for comparison and forward delta
+                                delta_text = event.delta.content_delta or ""
+                                state_manager.session._debug_raw_stream_accum += delta_text
+                                await streaming_callback(delta_text)
+                            else:
+                                # Log empty or non-text deltas encountered
+                                state_manager.session._debug_events.append(
+                                    "[src] empty_or_nontext_delta_skipped"
+                                )
+                    else:
+                        # Capture any final result text for diagnostics
+                        try:
+                            final_text = _extract_text(getattr(event, "result", None))
+                            if final_text:
+                                state_manager.session._debug_events.append(
+                                    f"[src] final_text_preview len={len(final_text)} preview={repr(final_text[:20])}"
+                                )
+                        except Exception:
+                            pass
+            # Successful streaming; exit retry loop
+            break
+        except Exception as stream_err:
+            # Log with context and optionally notify UI, then retry once
+            logger.warning(
+                "Streaming error (attempt %s/2) req=%s iter=%s: %s",
+                attempt + 1,
+                request_id,
+                iteration_index,
+                stream_err,
+                exc_info=True,
+            )
+            if getattr(state_manager.session, "show_thoughts", False):
+                from tunacode.ui import console as ui
+
+                await ui.warning("Streaming failed; retrying once then falling back")
+
+            # On second failure, degrade gracefully (no streaming)
+            if attempt == 1:
+                if getattr(state_manager.session, "show_thoughts", False):
+                    from tunacode.ui import console as ui
+
+                    await ui.muted("Switching to non-streaming processing for this node")
+                break
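For reference, a minimal sketch of the callback contract that stream_model_request_node expects: the callback receives each text delta as a string and is awaited once per chunk. The buffer-based callback and the sample deltas below are illustrative assumptions, not part of the tunacode package; in the real agent loop the deltas come from node.stream(agent_run_ctx).

import asyncio


async def demo() -> None:
    chunks: list[str] = []

    async def streaming_callback(delta: str) -> None:
        # Conforms to Optional[Callable[[str], Awaitable[None]]] in the signature above.
        chunks.append(delta)

    # Stand-in for the deltas that node.stream(agent_run_ctx) would yield.
    for delta in ("Hello", ", ", "world"):
        await streaming_callback(delta)

    assert "".join(chunks) == "Hello, world"


asyncio.run(demo())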
tunacode/core/agents/agent_components/task_completion.py
@@ -3,6 +3,11 @@
 import re
 from typing import Tuple
 
+_COMPLETION_MARKERS = (
+    re.compile(r"^\s*TUNACODE\s+DONE:\s*", re.IGNORECASE),
+    re.compile(r"^\s*TUNACODE[_\s]+TASK_COMPLETE\s*:?[\s]*", re.IGNORECASE),
+)
+
 
 def check_task_completion(content: str) -> Tuple[bool, str]:
     """
@@ -21,12 +26,16 @@ def check_task_completion(content: str) -> Tuple[bool, str]:
 
     lines = content.split("\n")
 
-    # New marker: any line starting with "TUNACODE DONE:" (case-insensitive, allow leading whitespace)
-    done_pattern = re.compile(r"^\s*TUNACODE\s+DONE:\s*", re.IGNORECASE)
     for idx, line in enumerate(lines):
-
-
-
-
+        for pattern in _COMPLETION_MARKERS:
+            match = pattern.match(line)
+            if match:
+                remainder = line[match.end() :].strip()
+                cleaned_lines = lines[:idx]
+                if remainder:
+                    cleaned_lines.append(remainder)
+                cleaned_lines.extend(lines[idx + 1 :])
+                cleaned = "\n".join(cleaned_lines).strip()
+                return True, cleaned
 
     return False, content
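A small usage sketch of the marker handling added above, assuming tunacode-cli 0.0.76.2 is installed so the import path resolves; the sample text and expected output are illustrative, derived from the logic shown in this hunk.

# Based on the marker handling above, this is expected to return
# (True, "Refactored the parser.\nall tests pass").
from tunacode.core.agents.agent_components.task_completion import check_task_completion

sample = "Refactored the parser.\nTUNACODE DONE: all tests pass"
done, cleaned = check_task_completion(sample)
print(done)           # expected: True
print(repr(cleaned))  # expected: 'Refactored the parser.\nall tests pass'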