tunacode_cli-0.1.21-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tunacode-cli might be problematic.
- tunacode/__init__.py +0 -0
- tunacode/cli/textual_repl.tcss +283 -0
- tunacode/configuration/__init__.py +1 -0
- tunacode/configuration/defaults.py +45 -0
- tunacode/configuration/models.py +147 -0
- tunacode/configuration/models_registry.json +1 -0
- tunacode/configuration/pricing.py +74 -0
- tunacode/configuration/settings.py +35 -0
- tunacode/constants.py +227 -0
- tunacode/core/__init__.py +6 -0
- tunacode/core/agents/__init__.py +39 -0
- tunacode/core/agents/agent_components/__init__.py +48 -0
- tunacode/core/agents/agent_components/agent_config.py +441 -0
- tunacode/core/agents/agent_components/agent_helpers.py +290 -0
- tunacode/core/agents/agent_components/message_handler.py +99 -0
- tunacode/core/agents/agent_components/node_processor.py +477 -0
- tunacode/core/agents/agent_components/response_state.py +129 -0
- tunacode/core/agents/agent_components/result_wrapper.py +51 -0
- tunacode/core/agents/agent_components/state_transition.py +112 -0
- tunacode/core/agents/agent_components/streaming.py +271 -0
- tunacode/core/agents/agent_components/task_completion.py +40 -0
- tunacode/core/agents/agent_components/tool_buffer.py +44 -0
- tunacode/core/agents/agent_components/tool_executor.py +101 -0
- tunacode/core/agents/agent_components/truncation_checker.py +37 -0
- tunacode/core/agents/delegation_tools.py +109 -0
- tunacode/core/agents/main.py +545 -0
- tunacode/core/agents/prompts.py +66 -0
- tunacode/core/agents/research_agent.py +231 -0
- tunacode/core/compaction.py +218 -0
- tunacode/core/prompting/__init__.py +27 -0
- tunacode/core/prompting/loader.py +66 -0
- tunacode/core/prompting/prompting_engine.py +98 -0
- tunacode/core/prompting/sections.py +50 -0
- tunacode/core/prompting/templates.py +69 -0
- tunacode/core/state.py +409 -0
- tunacode/exceptions.py +313 -0
- tunacode/indexing/__init__.py +5 -0
- tunacode/indexing/code_index.py +432 -0
- tunacode/indexing/constants.py +86 -0
- tunacode/lsp/__init__.py +112 -0
- tunacode/lsp/client.py +351 -0
- tunacode/lsp/diagnostics.py +19 -0
- tunacode/lsp/servers.py +101 -0
- tunacode/prompts/default_prompt.md +952 -0
- tunacode/prompts/research/sections/agent_role.xml +5 -0
- tunacode/prompts/research/sections/constraints.xml +14 -0
- tunacode/prompts/research/sections/output_format.xml +57 -0
- tunacode/prompts/research/sections/tool_use.xml +23 -0
- tunacode/prompts/sections/advanced_patterns.xml +255 -0
- tunacode/prompts/sections/agent_role.xml +8 -0
- tunacode/prompts/sections/completion.xml +10 -0
- tunacode/prompts/sections/critical_rules.xml +37 -0
- tunacode/prompts/sections/examples.xml +220 -0
- tunacode/prompts/sections/output_style.xml +94 -0
- tunacode/prompts/sections/parallel_exec.xml +105 -0
- tunacode/prompts/sections/search_pattern.xml +100 -0
- tunacode/prompts/sections/system_info.xml +6 -0
- tunacode/prompts/sections/tool_use.xml +84 -0
- tunacode/prompts/sections/user_instructions.xml +3 -0
- tunacode/py.typed +0 -0
- tunacode/templates/__init__.py +5 -0
- tunacode/templates/loader.py +15 -0
- tunacode/tools/__init__.py +10 -0
- tunacode/tools/authorization/__init__.py +29 -0
- tunacode/tools/authorization/context.py +32 -0
- tunacode/tools/authorization/factory.py +20 -0
- tunacode/tools/authorization/handler.py +58 -0
- tunacode/tools/authorization/notifier.py +35 -0
- tunacode/tools/authorization/policy.py +19 -0
- tunacode/tools/authorization/requests.py +119 -0
- tunacode/tools/authorization/rules.py +72 -0
- tunacode/tools/bash.py +222 -0
- tunacode/tools/decorators.py +213 -0
- tunacode/tools/glob.py +353 -0
- tunacode/tools/grep.py +468 -0
- tunacode/tools/grep_components/__init__.py +9 -0
- tunacode/tools/grep_components/file_filter.py +93 -0
- tunacode/tools/grep_components/pattern_matcher.py +158 -0
- tunacode/tools/grep_components/result_formatter.py +87 -0
- tunacode/tools/grep_components/search_result.py +34 -0
- tunacode/tools/list_dir.py +205 -0
- tunacode/tools/prompts/bash_prompt.xml +10 -0
- tunacode/tools/prompts/glob_prompt.xml +7 -0
- tunacode/tools/prompts/grep_prompt.xml +10 -0
- tunacode/tools/prompts/list_dir_prompt.xml +7 -0
- tunacode/tools/prompts/read_file_prompt.xml +9 -0
- tunacode/tools/prompts/todoclear_prompt.xml +12 -0
- tunacode/tools/prompts/todoread_prompt.xml +16 -0
- tunacode/tools/prompts/todowrite_prompt.xml +28 -0
- tunacode/tools/prompts/update_file_prompt.xml +9 -0
- tunacode/tools/prompts/web_fetch_prompt.xml +11 -0
- tunacode/tools/prompts/write_file_prompt.xml +7 -0
- tunacode/tools/react.py +111 -0
- tunacode/tools/read_file.py +68 -0
- tunacode/tools/todo.py +222 -0
- tunacode/tools/update_file.py +62 -0
- tunacode/tools/utils/__init__.py +1 -0
- tunacode/tools/utils/ripgrep.py +311 -0
- tunacode/tools/utils/text_match.py +352 -0
- tunacode/tools/web_fetch.py +245 -0
- tunacode/tools/write_file.py +34 -0
- tunacode/tools/xml_helper.py +34 -0
- tunacode/types/__init__.py +166 -0
- tunacode/types/base.py +94 -0
- tunacode/types/callbacks.py +53 -0
- tunacode/types/dataclasses.py +121 -0
- tunacode/types/pydantic_ai.py +31 -0
- tunacode/types/state.py +122 -0
- tunacode/ui/__init__.py +6 -0
- tunacode/ui/app.py +542 -0
- tunacode/ui/commands/__init__.py +430 -0
- tunacode/ui/components/__init__.py +1 -0
- tunacode/ui/headless/__init__.py +5 -0
- tunacode/ui/headless/output.py +72 -0
- tunacode/ui/main.py +252 -0
- tunacode/ui/renderers/__init__.py +41 -0
- tunacode/ui/renderers/errors.py +197 -0
- tunacode/ui/renderers/panels.py +550 -0
- tunacode/ui/renderers/search.py +314 -0
- tunacode/ui/renderers/tools/__init__.py +21 -0
- tunacode/ui/renderers/tools/bash.py +247 -0
- tunacode/ui/renderers/tools/diagnostics.py +186 -0
- tunacode/ui/renderers/tools/glob.py +226 -0
- tunacode/ui/renderers/tools/grep.py +228 -0
- tunacode/ui/renderers/tools/list_dir.py +198 -0
- tunacode/ui/renderers/tools/read_file.py +226 -0
- tunacode/ui/renderers/tools/research.py +294 -0
- tunacode/ui/renderers/tools/update_file.py +237 -0
- tunacode/ui/renderers/tools/web_fetch.py +182 -0
- tunacode/ui/repl_support.py +226 -0
- tunacode/ui/screens/__init__.py +16 -0
- tunacode/ui/screens/model_picker.py +303 -0
- tunacode/ui/screens/session_picker.py +181 -0
- tunacode/ui/screens/setup.py +218 -0
- tunacode/ui/screens/theme_picker.py +90 -0
- tunacode/ui/screens/update_confirm.py +69 -0
- tunacode/ui/shell_runner.py +129 -0
- tunacode/ui/styles/layout.tcss +98 -0
- tunacode/ui/styles/modals.tcss +38 -0
- tunacode/ui/styles/panels.tcss +81 -0
- tunacode/ui/styles/theme-nextstep.tcss +303 -0
- tunacode/ui/styles/widgets.tcss +33 -0
- tunacode/ui/styles.py +18 -0
- tunacode/ui/widgets/__init__.py +23 -0
- tunacode/ui/widgets/command_autocomplete.py +62 -0
- tunacode/ui/widgets/editor.py +402 -0
- tunacode/ui/widgets/file_autocomplete.py +47 -0
- tunacode/ui/widgets/messages.py +46 -0
- tunacode/ui/widgets/resource_bar.py +182 -0
- tunacode/ui/widgets/status_bar.py +98 -0
- tunacode/utils/__init__.py +0 -0
- tunacode/utils/config/__init__.py +13 -0
- tunacode/utils/config/user_configuration.py +91 -0
- tunacode/utils/messaging/__init__.py +10 -0
- tunacode/utils/messaging/message_utils.py +34 -0
- tunacode/utils/messaging/token_counter.py +77 -0
- tunacode/utils/parsing/__init__.py +13 -0
- tunacode/utils/parsing/command_parser.py +55 -0
- tunacode/utils/parsing/json_utils.py +188 -0
- tunacode/utils/parsing/retry.py +146 -0
- tunacode/utils/parsing/tool_parser.py +267 -0
- tunacode/utils/security/__init__.py +15 -0
- tunacode/utils/security/command.py +106 -0
- tunacode/utils/system/__init__.py +25 -0
- tunacode/utils/system/gitignore.py +155 -0
- tunacode/utils/system/paths.py +190 -0
- tunacode/utils/ui/__init__.py +9 -0
- tunacode/utils/ui/file_filter.py +135 -0
- tunacode/utils/ui/helpers.py +24 -0
- tunacode_cli-0.1.21.dist-info/METADATA +170 -0
- tunacode_cli-0.1.21.dist-info/RECORD +174 -0
- tunacode_cli-0.1.21.dist-info/WHEEL +4 -0
- tunacode_cli-0.1.21.dist-info/entry_points.txt +2 -0
- tunacode_cli-0.1.21.dist-info/licenses/LICENSE +21 -0
tunacode/core/agents/agent_components/agent_config.py
@@ -0,0 +1,441 @@
+"""Agent configuration and creation utilities."""
+
+import asyncio
+import math
+from collections.abc import Awaitable, Callable
+from pathlib import Path
+from typing import Any
+
+from httpx import AsyncClient, HTTPStatusError, Request
+from pydantic_ai import Agent
+from pydantic_ai.models.anthropic import AnthropicModel
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.anthropic import AnthropicProvider
+from pydantic_ai.providers.openai import OpenAIProvider
+from pydantic_ai.retries import AsyncTenacityTransport, RetryConfig, wait_retry_after
+from tenacity import retry_if_exception_type, stop_after_attempt
+
+from tunacode.constants import ENV_OPENAI_BASE_URL, SETTINGS_BASE_URL, UI_THINKING_MESSAGE
+from tunacode.core.agents.delegation_tools import create_research_codebase_tool
+from tunacode.core.prompting import (
+    MAIN_TEMPLATE,
+    TEMPLATE_OVERRIDES,
+    SectionLoader,
+    SystemPromptSection,
+    compose_prompt,
+    resolve_prompt,
+)
+from tunacode.core.state import StateManager
+from tunacode.tools.bash import bash
+from tunacode.tools.glob import glob
+from tunacode.tools.grep import grep
+from tunacode.tools.list_dir import list_dir
+from tunacode.tools.read_file import read_file
+from tunacode.tools.todo import create_todoclear_tool, create_todoread_tool, create_todowrite_tool
+from tunacode.tools.update_file import update_file
+from tunacode.tools.web_fetch import web_fetch
+from tunacode.tools.write_file import write_file
+from tunacode.types import ModelName, PydanticAgent
+
+# Module-level caches for system prompts
+_PROMPT_CACHE: dict[str, tuple[str, float]] = {}
+_TUNACODE_CACHE: dict[str, tuple[str, float]] = {}
+
+# Module-level cache for agents to persist across requests
+_AGENT_CACHE: dict[ModelName, PydanticAgent] = {}
+_AGENT_CACHE_VERSION: dict[ModelName, int] = {}
+
+REQUEST_DELAY_MESSAGE_PREFIX = "Respecting request delay"
+
+
+def _format_request_delay_message(seconds_remaining: float) -> str:
+    safe_remaining = max(0.0, seconds_remaining)
+    return f"{REQUEST_DELAY_MESSAGE_PREFIX}: {safe_remaining:.1f}s remaining"
+
+
+async def _publish_delay_message(message: str, state_manager: StateManager) -> None:
+    """Best-effort spinner update; UI failures must not block requests."""
+    streaming_panel = getattr(state_manager.session, "streaming_panel", None)
+    try:
+        if streaming_panel:
+            if message == UI_THINKING_MESSAGE:
+                await streaming_panel.clear_status_message()
+            else:
+                await streaming_panel.set_status_message(message)
+    except Exception:
+        pass
+
+
+async def _sleep_with_countdown(
+    total_delay: float, countdown_steps: int, state_manager: StateManager
+) -> None:
+    """Sleep while surfacing a countdown via the spinner."""
+    delay_per_step = total_delay / countdown_steps
+    remaining = total_delay
+    await _publish_delay_message(_format_request_delay_message(remaining), state_manager)
+
+    for _ in range(countdown_steps):
+        await asyncio.sleep(delay_per_step)
+        remaining = max(0.0, remaining - delay_per_step)
+        await _publish_delay_message(_format_request_delay_message(remaining), state_manager)
+
+    await _publish_delay_message(UI_THINKING_MESSAGE, state_manager)
+
+
+def _coerce_request_delay(state_manager: StateManager) -> float:
+    """Return validated request_delay from config."""
+    settings = state_manager.session.user_config.get("settings", {})
+    request_delay_raw = settings.get("request_delay", 0.0)
+    request_delay = float(request_delay_raw)
+
+    if request_delay < 0.0 or request_delay > 60.0:
+        raise ValueError(f"request_delay must be between 0.0 and 60.0 seconds, got {request_delay}")
+
+    return request_delay
+
+
+def _coerce_global_request_timeout(state_manager: StateManager) -> float | None:
+    """Return validated global_request_timeout from config, or None if disabled."""
+    settings = state_manager.session.user_config.get("settings", {})
+    timeout_raw = settings.get("global_request_timeout", 90.0)
+    timeout = float(timeout_raw)
+
+    if timeout < 0.0:
+        raise ValueError(f"global_request_timeout must be >= 0.0 seconds, got {timeout}")
+
+    if timeout == 0.0:
+        return None
+
+    return timeout
+
+
+def _coerce_optional_str(value: Any, label: str) -> str | None:
+    if value is None:
+        return None
+
+    if not isinstance(value, str):
+        raise ValueError(f"{label} must be a string, got {type(value).__name__}")
+
+    normalized = value.strip()
+    if not normalized:
+        return None
+
+    return normalized
+
+
+def _resolve_base_url_override(
+    env_base_url: str | None,
+    settings_base_url: str | None,
+) -> str | None:
+    if env_base_url:
+        return env_base_url
+
+    if settings_base_url:
+        return settings_base_url
+
+    return None
+
+
+def _compute_agent_version(settings: dict[str, Any], request_delay: float) -> int:
+    """Compute a hash representing agent-defining configuration."""
+    return hash(
+        (
+            str(settings.get("max_retries", 3)),
+            str(settings.get("tool_strict_validation", False)),
+            str(request_delay),
+            str(settings.get("global_request_timeout", 90.0)),
+        )
+    )
+
+
+def _build_request_hooks(
+    request_delay: float, state_manager: StateManager
+) -> dict[str, list[Callable[[Request], Awaitable[None]]]]:
+    """Return httpx event hooks enforcing a fixed pre-request delay."""
+    if request_delay <= 0:
+        # Reason: avoid overhead when no throttling requested
+        return {}
+
+    countdown_steps = max(int(math.ceil(request_delay)), 1)
+
+    async def _delay_before_request(_: Request) -> None:
+        await _sleep_with_countdown(request_delay, countdown_steps, state_manager)
+
+    return {"request": [_delay_before_request]}
+
+
+def clear_all_caches():
+    """Clear all module-level caches. Useful for testing."""
+    _PROMPT_CACHE.clear()
+    _TUNACODE_CACHE.clear()
+    _AGENT_CACHE.clear()
+    _AGENT_CACHE_VERSION.clear()
+
+
+def get_agent_tool():
+    """Lazy import for Agent and Tool to avoid circular imports."""
+    from pydantic_ai import Tool
+
+    return Agent, Tool
+
+
+def _read_prompt_from_path(prompt_path: Path) -> str:
+    """Return prompt content from disk, leveraging the cache when possible."""
+    cache_key = str(prompt_path)
+
+    try:
+        current_mtime = prompt_path.stat().st_mtime
+    except FileNotFoundError as error:
+        raise FileNotFoundError from error
+
+    if cache_key in _PROMPT_CACHE:
+        cached_content, cached_mtime = _PROMPT_CACHE[cache_key]
+        if current_mtime == cached_mtime:
+            return cached_content
+
+    try:
+        content = prompt_path.read_text(encoding="utf-8").strip()
+    except FileNotFoundError as error:
+        raise FileNotFoundError from error
+
+    _PROMPT_CACHE[cache_key] = (content, current_mtime)
+    return content
+
+
+def load_system_prompt(base_path: Path, model: str | None = None) -> str:
+    """Load the system prompt with section-based composition.
+
+    Loads individual section files from prompts/sections/ and composes them
+    using the template system.
+
+    Args:
+        base_path: Base path to the tunacode package
+        model: Optional model name for template overrides
+
+    Raises:
+        FileNotFoundError: If prompts/sections/ does not exist.
+    """
+    prompts_dir = base_path / "prompts"
+    sections_dir = prompts_dir / "sections"
+
+    if not sections_dir.exists():
+        raise FileNotFoundError(
+            f"Required sections directory not found: {sections_dir}. "
+            "The prompts/sections/ directory must exist."
+        )
+
+    loader = SectionLoader(sections_dir)
+    sections = {s.value: loader.load_section(s) for s in SystemPromptSection}
+
+    # Get template (model-specific override or default)
+    template = TEMPLATE_OVERRIDES.get(model, MAIN_TEMPLATE) if model else MAIN_TEMPLATE
+
+    # Compose sections into template, then resolve dynamic placeholders
+    prompt = compose_prompt(template, sections)
+    return resolve_prompt(prompt)
+
+
+def load_tunacode_context() -> str:
+    """Load AGENTS.md context if it exists with caching."""
+    try:
+        tunacode_path = Path.cwd() / "AGENTS.md"
+        cache_key = str(tunacode_path)
+
+        if not tunacode_path.exists():
+            return ""
+
+        # Check cache with file modification time
+        if cache_key in _TUNACODE_CACHE:
+            cached_content, cached_mtime = _TUNACODE_CACHE[cache_key]
+            current_mtime = tunacode_path.stat().st_mtime
+            if current_mtime == cached_mtime:
+                return cached_content
+
+        # Load from file and cache
+        tunacode_content = tunacode_path.read_text(encoding="utf-8")
+        if tunacode_content.strip():
+            result = "\n\n# Project Context from AGENTS.md\n" + tunacode_content
+            _TUNACODE_CACHE[cache_key] = (result, tunacode_path.stat().st_mtime)
+            return result
+        else:
+            _TUNACODE_CACHE[cache_key] = ("", tunacode_path.stat().st_mtime)
+            return ""

+    except Exception:
+        return ""
+
+
+def _create_model_with_retry(
+    model_string: str, http_client: AsyncClient, state_manager: StateManager
+):
+    """Create a model instance with retry-enabled HTTP client.
+
+    Parses model string in format 'provider:model_name' and creates
+    appropriate provider and model instances with the retry-enabled HTTP client.
+    """
+    # Extract environment config
+    env = state_manager.session.user_config.get("env", {})
+
+    settings = state_manager.session.user_config.get("settings", {})
+    env_base_url_raw = env.get(ENV_OPENAI_BASE_URL)
+    settings_base_url_raw = settings.get(SETTINGS_BASE_URL)
+    env_base_url = _coerce_optional_str(env_base_url_raw, ENV_OPENAI_BASE_URL)
+    settings_base_url = _coerce_optional_str(settings_base_url_raw, SETTINGS_BASE_URL)
+    base_url_override = _resolve_base_url_override(env_base_url, settings_base_url)
+
+    # Provider configuration: API key names and base URLs
+    PROVIDER_CONFIG = {
+        "anthropic": {"api_key_name": "ANTHROPIC_API_KEY", "base_url": None},
+        "openai": {"api_key_name": "OPENAI_API_KEY", "base_url": None},
+        "openrouter": {
+            "api_key_name": "OPENROUTER_API_KEY",
+            "base_url": "https://openrouter.ai/api/v1",
+        },
+        "azure": {
+            "api_key_name": "AZURE_OPENAI_API_KEY",
+            "base_url": env.get("AZURE_OPENAI_ENDPOINT"),
+        },
+        "deepseek": {"api_key_name": "DEEPSEEK_API_KEY", "base_url": None},
+        "cerebras": {
+            "api_key_name": "CEREBRAS_API_KEY",
+            "base_url": "https://api.cerebras.ai/v1",
+        },
+    }
+
+    # Parse model string
+    if ":" in model_string:
+        provider_name, model_name = model_string.split(":", 1)
+    else:
+        # Auto-detect provider from model name
+        model_name = model_string
+        if model_name.startswith("claude"):
+            provider_name = "anthropic"
+        elif model_name.startswith(("gpt", "o1", "o3")):
+            provider_name = "openai"
+        else:
+            # Default to treating as model string (pydantic-ai will auto-detect)
+            return model_string
+
+    # Create provider with api_key + base_url + http_client
+    if provider_name == "anthropic":
+        api_key = env.get("ANTHROPIC_API_KEY")
+        provider = AnthropicProvider(api_key=api_key, http_client=http_client)
+        return AnthropicModel(model_name, provider=provider)
+    elif provider_name in ("openai", "openrouter", "azure", "deepseek", "cerebras"):
+        # OpenAI-compatible providers all use OpenAIChatModel
+        config = PROVIDER_CONFIG.get(provider_name, {})
+        api_key_name = config.get("api_key_name")
+        api_key = env.get(api_key_name) if api_key_name else None
+        base_url = config.get("base_url")
+        if base_url is None and provider_name != "azure":
+            base_url = base_url_override
+        provider = OpenAIProvider(api_key=api_key, base_url=base_url, http_client=http_client)
+        return OpenAIChatModel(model_name, provider=provider)
+    else:
+        # Unsupported provider, return string and let pydantic-ai handle it
+        # (won't have retry support but won't break)
+        return model_string
+
+
+def get_or_create_agent(model: ModelName, state_manager: StateManager) -> PydanticAgent:
+    """Get existing agent or create new one for the specified model."""
+    request_delay = _coerce_request_delay(state_manager)
+    settings = state_manager.session.user_config.get("settings", {})
+    agent_version = _compute_agent_version(settings, request_delay)
+
+    # Check session-level cache first (for backward compatibility with tests)
+    session_agent = state_manager.session.agents.get(model)
+    session_version = state_manager.session.agent_versions.get(model)
+    if session_agent and session_version == agent_version:
+        return session_agent
+    if session_agent and session_version != agent_version:
+        del state_manager.session.agents[model]
+        state_manager.session.agent_versions.pop(model, None)
+
+    # Check module-level cache
+    if model in _AGENT_CACHE:
+        # Verify cache is still valid (check for config changes)
+        cached_version = _AGENT_CACHE_VERSION.get(model)
+        if cached_version == agent_version:
+            state_manager.session.agents[model] = _AGENT_CACHE[model]
+            state_manager.session.agent_versions[model] = agent_version
+            return _AGENT_CACHE[model]
+        else:
+            del _AGENT_CACHE[model]
+            del _AGENT_CACHE_VERSION[model]
+
+    if model not in _AGENT_CACHE:
+        max_retries = settings.get("max_retries", 3)
+
+        # Lazy import Agent and Tool
+        Agent, Tool = get_agent_tool()
+
+        # Load system prompt (with optional model-specific template override)
+        base_path = Path(__file__).parent.parent.parent.parent
+        system_prompt = load_system_prompt(base_path, model=model)
+
+        # Load AGENTS.md context
+        system_prompt += load_tunacode_context()
+
+        # Get tool strict validation setting from config (default to False for backward
+        # compatibility)
+        tool_strict_validation = settings.get("tool_strict_validation", False)
+
+        # Create tool list
+        tools_list = [
+            Tool(bash, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(glob, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(grep, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(list_dir, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(read_file, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(update_file, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(web_fetch, max_retries=max_retries, strict=tool_strict_validation),
+            Tool(write_file, max_retries=max_retries, strict=tool_strict_validation),
+        ]
+
+        # Add delegation tool (multi-agent pattern)
+        research_codebase = create_research_codebase_tool(state_manager)
+        tools_list.append(
+            Tool(research_codebase, max_retries=max_retries, strict=tool_strict_validation)
+        )
+
+        # Add todo tools (task tracking)
+        todowrite = create_todowrite_tool(state_manager)
+        todoread = create_todoread_tool(state_manager)
+        todoclear = create_todoclear_tool(state_manager)
+        tools_list.append(Tool(todowrite, max_retries=max_retries, strict=tool_strict_validation))
+        tools_list.append(Tool(todoread, max_retries=max_retries, strict=tool_strict_validation))
+        tools_list.append(Tool(todoclear, max_retries=max_retries, strict=tool_strict_validation))
+
+        # Configure HTTP client with retry logic at transport layer
+        # This handles retries BEFORE node creation, avoiding pydantic-ai's
+        # single-stream-per-node constraint violations
+        # https://ai.pydantic.dev/api/retries/#pydantic_ai.retries.wait_retry_after
+        transport = AsyncTenacityTransport(
+            config=RetryConfig(
+                retry=retry_if_exception_type(HTTPStatusError),
+                wait=wait_retry_after(max_wait=60),
+                stop=stop_after_attempt(max_retries),
+                reraise=True,
+            ),
+            validate_response=lambda r: r.raise_for_status(),
+        )
+        event_hooks = _build_request_hooks(request_delay, state_manager)
+        http_client = AsyncClient(transport=transport, event_hooks=event_hooks)
+
+        # Create model instance with retry-enabled HTTP client
+        model_instance = _create_model_with_retry(model, http_client, state_manager)
+
+        agent = Agent(
+            model=model_instance,
+            system_prompt=system_prompt,
+            tools=tools_list,
+        )
+
+        # Store in both caches
+        _AGENT_CACHE[model] = agent
+        _AGENT_CACHE_VERSION[model] = agent_version
+        state_manager.session.agent_versions[model] = agent_version
+        state_manager.session.agents[model] = agent
+
+    return _AGENT_CACHE[model]
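
For orientation, the sketch below shows one way the module above might be driven. It is illustrative only: the StateManager construction and the exact shape of user_config are assumptions inferred from the settings keys read in agent_config.py, not code shipped in this package.

# Hypothetical usage sketch (not from the package): wiring a StateManager's
# user_config to the settings read by get_or_create_agent above.
from tunacode.core.agents.agent_components.agent_config import get_or_create_agent
from tunacode.core.state import StateManager

state_manager = StateManager()  # assumes a no-argument constructor
state_manager.session.user_config = {  # dict shape inferred from .get("env")/.get("settings") calls
    "env": {"OPENROUTER_API_KEY": "sk-or-..."},
    "settings": {
        "max_retries": 3,              # retry attempts for the tenacity transport
        "request_delay": 2.0,          # pre-request throttle, validated to 0.0-60.0 s
        "tool_strict_validation": False,
    },
}

# Model strings use the "provider:model_name" form parsed by _create_model_with_retry;
# repeated calls with unchanged settings return the cached agent.
agent = get_or_create_agent("openrouter:openai/gpt-4o-mini", state_manager)

Changing any of max_retries, tool_strict_validation, request_delay, or global_request_timeout alters the computed agent version, so the next call rebuilds the agent instead of reusing the cached one.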