tunacode-cli 0.0.40__py3-none-any.whl → 0.0.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tunacode/cli/commands/__init__.py +2 -0
- tunacode/cli/commands/implementations/__init__.py +3 -0
- tunacode/cli/commands/implementations/debug.py +1 -1
- tunacode/cli/commands/implementations/todo.py +217 -0
- tunacode/cli/commands/registry.py +2 -0
- tunacode/cli/main.py +12 -5
- tunacode/cli/repl.py +197 -132
- tunacode/configuration/defaults.py +1 -0
- tunacode/configuration/models.py +6 -0
- tunacode/constants.py +22 -3
- tunacode/context.py +7 -3
- tunacode/core/agents/main.py +52 -9
- tunacode/core/setup/config_setup.py +5 -0
- tunacode/core/state.py +50 -1
- tunacode/core/token_usage/api_response_parser.py +44 -0
- tunacode/core/token_usage/cost_calculator.py +58 -0
- tunacode/core/token_usage/usage_tracker.py +98 -0
- tunacode/prompts/system.md +69 -5
- tunacode/tools/todo.py +343 -0
- tunacode/types.py +20 -1
- tunacode/ui/input.py +1 -1
- tunacode/ui/output.py +36 -0
- tunacode/utils/message_utils.py +17 -0
- tunacode/utils/token_counter.py +78 -8
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/METADATA +2 -1
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/RECORD +30 -26
- tunacode/cli/textual_app.py +0 -420
- tunacode/cli/textual_bridge.py +0 -161
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/WHEEL +0 -0
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/entry_points.txt +0 -0
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/licenses/LICENSE +0 -0
- {tunacode_cli-0.0.40.dist-info → tunacode_cli-0.0.41.dist-info}/top_level.txt +0 -0
tunacode/tools/todo.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
"""Todo management tool for agent integration.
|
|
2
|
+
|
|
3
|
+
This tool allows the AI agent to manage todo items during task execution.
|
|
4
|
+
It provides functionality for creating, updating, and tracking tasks.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import uuid
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import List, Literal, Optional, Union
|
|
10
|
+
|
|
11
|
+
from pydantic_ai.exceptions import ModelRetry
|
|
12
|
+
|
|
13
|
+
from tunacode.constants import (
|
|
14
|
+
MAX_TODO_CONTENT_LENGTH,
|
|
15
|
+
MAX_TODOS_PER_SESSION,
|
|
16
|
+
TODO_PRIORITIES,
|
|
17
|
+
TODO_PRIORITY_MEDIUM,
|
|
18
|
+
TODO_STATUS_PENDING,
|
|
19
|
+
)
|
|
20
|
+
from tunacode.types import TodoItem, ToolResult, UILogger
|
|
21
|
+
|
|
22
|
+
from .base import BaseTool
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class TodoTool(BaseTool):
    """Tool for managing todo items from the AI agent.

    Wraps the session todo list held by ``state_manager`` and exposes
    add/update/complete/list/remove actions, raising ``ModelRetry`` with an
    actionable message whenever the agent supplies invalid parameters.
    """

    def __init__(self, state_manager, ui_logger: UILogger | None = None):
        """Initialize the todo tool.

        Args:
            state_manager: StateManager instance for accessing todos
            ui_logger: UI logger instance for displaying messages
        """
        super().__init__(ui_logger)
        self.state_manager = state_manager

    @property
    def tool_name(self) -> str:
        # Name under which the agent invokes this tool.
        return "todo"

    async def _execute(
        self,
        action: Literal["add", "add_multiple", "update", "complete", "list", "remove"],
        content: Optional[Union[str, List[str]]] = None,
        todo_id: Optional[str] = None,
        status: Optional[Literal["pending", "in_progress", "completed"]] = None,
        priority: Optional[Literal["high", "medium", "low"]] = None,
        todos: Optional[List[dict]] = None,
    ) -> ToolResult:
        """Execute todo management actions.

        Args:
            action: The action to perform (add, add_multiple, update, complete, list, remove)
            content: Content for new todos or updates (can be string or list for add_multiple)
            todo_id: ID of existing todo for updates/completion
            status: Status to set for updates
            priority: Priority to set for new/updated todos
            todos: List of todo dictionaries for add_multiple action (format: [{"content": "...", "priority": "..."}])

        Returns:
            str: Result message describing what was done

        Raises:
            ModelRetry: When invalid parameters are provided
        """
        if action == "add":
            return await self._add_todo(content, priority)
        elif action == "add_multiple":
            return await self._add_multiple_todos(content, todos, priority)
        elif action == "update":
            return await self._update_todo(todo_id, status, priority, content)
        elif action == "complete":
            return await self._complete_todo(todo_id)
        elif action == "list":
            return await self._list_todos()
        elif action == "remove":
            return await self._remove_todo(todo_id)
        else:
            raise ModelRetry(
                f"Invalid action '{action}'. Must be one of: add, add_multiple, update, complete, list, remove"
            )

    # ------------------------------------------------------------------
    # Shared helpers
    # ------------------------------------------------------------------

    def _find_todo(self, todo_id: str) -> Optional[TodoItem]:
        """Return the todo with the given ID, or None if it does not exist."""
        for todo in self.state_manager.session.todos:
            if todo.id == todo_id:
                return todo
        return None

    @staticmethod
    def _validate_content_length(content: str) -> None:
        """Raise ModelRetry when content exceeds the per-todo length limit."""
        if len(content) > MAX_TODO_CONTENT_LENGTH:
            raise ModelRetry(
                f"Todo content is too long. Maximum length is {MAX_TODO_CONTENT_LENGTH} characters"
            )

    @staticmethod
    def _validate_priority(priority: str) -> None:
        """Raise ModelRetry when priority is not one of the allowed values."""
        if priority not in TODO_PRIORITIES:
            raise ModelRetry(
                f"Invalid priority '{priority}'. Must be one of: {', '.join(TODO_PRIORITIES)}"
            )

    @staticmethod
    def _build_todo(content: str, priority: str) -> TodoItem:
        """Construct a new pending TodoItem with a unique ID."""
        # UUID suffix guarantees uniqueness even across rapid additions.
        return TodoItem(
            id=f"todo_{uuid.uuid4().hex[:8]}",
            content=content,
            status=TODO_STATUS_PENDING,
            priority=priority,
            created_at=datetime.now(),
        )

    @staticmethod
    def _format_todo_report(todos) -> str:
        """Render todos grouped by status (shared by async list and sync accessor)."""
        if not todos:
            return "No todos found"

        # Group by status for better organization
        pending = [t for t in todos if t.status == "pending"]
        in_progress = [t for t in todos if t.status == "in_progress"]
        completed = [t for t in todos if t.status == "completed"]

        lines = []

        if in_progress:
            lines.append("IN PROGRESS:")
            for todo in in_progress:
                lines.append(f"  {todo.id}: {todo.content} (priority: {todo.priority})")

        if pending:
            lines.append("\nPENDING:")
            for todo in pending:
                lines.append(f"  {todo.id}: {todo.content} (priority: {todo.priority})")

        if completed:
            lines.append("\nCOMPLETED:")
            for todo in completed:
                lines.append(f"  {todo.id}: {todo.content}")

        return "\n".join(lines)

    # ------------------------------------------------------------------
    # Actions
    # ------------------------------------------------------------------

    async def _add_todo(self, content: Optional[str], priority: Optional[str]) -> ToolResult:
        """Add a new todo item."""
        if not content:
            raise ModelRetry("Content is required when adding a todo")

        self._validate_content_length(content)

        # Check todo limit
        if len(self.state_manager.session.todos) >= MAX_TODOS_PER_SESSION:
            raise ModelRetry(
                f"Cannot add more todos. Maximum of {MAX_TODOS_PER_SESSION} todos allowed per session"
            )

        # Default priority if not specified
        todo_priority = priority or TODO_PRIORITY_MEDIUM
        self._validate_priority(todo_priority)

        new_todo = self._build_todo(content, todo_priority)
        self.state_manager.add_todo(new_todo)
        return f"Added todo {new_todo.id}: {content} (priority: {todo_priority})"

    async def _add_multiple_todos(
        self,
        content: Optional[Union[str, List[str]]],
        todos: Optional[List[dict]],
        priority: Optional[str],
    ) -> ToolResult:
        """Add multiple todo items at once."""

        # Handle different input formats
        todos_to_add = []

        if todos:
            # Structured format: [{"content": "...", "priority": "..."}, ...]
            for todo_data in todos:
                if not isinstance(todo_data, dict) or "content" not in todo_data:
                    raise ModelRetry("Each todo must be a dict with 'content' field")
                todo_content = todo_data["content"]
                todo_priority = todo_data.get("priority", priority or TODO_PRIORITY_MEDIUM)
                self._validate_priority(todo_priority)
                todos_to_add.append((todo_content, todo_priority))
        elif isinstance(content, list):
            # List of strings format: ["task1", "task2", ...]
            default_priority = priority or TODO_PRIORITY_MEDIUM
            self._validate_priority(default_priority)
            for task_content in content:
                if not isinstance(task_content, str):
                    raise ModelRetry("All content items must be strings")
                todos_to_add.append((task_content, default_priority))
        else:
            raise ModelRetry(
                "For add_multiple, provide either 'todos' list or 'content' as list of strings"
            )

        if not todos_to_add:
            raise ModelRetry("No todos to add")

        # Check todo limit before adding anything so the batch is all-or-nothing.
        current_count = len(self.state_manager.session.todos)
        if current_count + len(todos_to_add) > MAX_TODOS_PER_SESSION:
            available = MAX_TODOS_PER_SESSION - current_count
            raise ModelRetry(
                f"Cannot add {len(todos_to_add)} todos. Only {available} slots available (max {MAX_TODOS_PER_SESSION} per session)"
            )

        # Add all todos
        added_ids = []
        for task_content, task_priority in todos_to_add:
            # Validate content length
            if len(task_content) > MAX_TODO_CONTENT_LENGTH:
                raise ModelRetry(
                    f"Todo content is too long: '{task_content[:50]}...'. Maximum length is {MAX_TODO_CONTENT_LENGTH} characters"
                )

            new_todo = self._build_todo(task_content, task_priority)
            self.state_manager.add_todo(new_todo)
            added_ids.append(new_todo.id)

        count = len(added_ids)
        return f"Added {count} todos (IDs: {', '.join(added_ids)})"

    async def _update_todo(
        self,
        todo_id: Optional[str],
        status: Optional[str],
        priority: Optional[str],
        content: Optional[str],
    ) -> ToolResult:
        """Update an existing todo item."""
        if not todo_id:
            raise ModelRetry("Todo ID is required for updates")

        todo = self._find_todo(todo_id)
        if not todo:
            raise ModelRetry(f"Todo with ID '{todo_id}' not found")

        changes = []

        # Update status if provided
        if status:
            if status not in ["pending", "in_progress", "completed"]:
                raise ModelRetry(
                    f"Invalid status '{status}'. Must be pending, in_progress, or completed"
                )
            todo.status = status
            # Record completion time only on first transition to completed.
            if status == "completed" and not todo.completed_at:
                todo.completed_at = datetime.now()
            changes.append(f"status to {status}")

        # Update priority if provided (same allowed set as add).
        if priority:
            if priority not in TODO_PRIORITIES:
                raise ModelRetry(f"Invalid priority '{priority}'. Must be high, medium, or low")
            todo.priority = priority
            changes.append(f"priority to {priority}")

        # Update content if provided; enforce the same length limit as add.
        if content:
            self._validate_content_length(content)
            todo.content = content
            changes.append(f"content to '{content}'")

        if not changes:
            raise ModelRetry(
                "At least one of status, priority, or content must be provided for updates"
            )

        change_summary = ", ".join(changes)
        return f"Updated todo {todo_id}: {change_summary}"

    async def _complete_todo(self, todo_id: Optional[str]) -> ToolResult:
        """Mark a todo as completed."""
        if not todo_id:
            raise ModelRetry("Todo ID is required to mark as complete")

        todo = self._find_todo(todo_id)
        if todo is None:
            raise ModelRetry(f"Todo with ID '{todo_id}' not found")

        todo.status = "completed"
        todo.completed_at = datetime.now()
        return f"Marked todo {todo_id} as completed: {todo.content}"

    async def _list_todos(self) -> ToolResult:
        """List all current todos."""
        return self._format_todo_report(self.state_manager.session.todos)

    async def _remove_todo(self, todo_id: Optional[str]) -> ToolResult:
        """Remove a todo item."""
        if not todo_id:
            raise ModelRetry("Todo ID is required to remove a todo")

        # Look up the todo first so the response can echo its content.
        # Checking the object (not its content) avoids misreporting a todo
        # with empty content as missing.
        todo = self._find_todo(todo_id)
        if todo is None:
            raise ModelRetry(f"Todo with ID '{todo_id}' not found")

        self.state_manager.remove_todo(todo_id)
        return f"Removed todo {todo_id}: {todo.content}"

    def get_current_todos_sync(self) -> str:
        """Get current todos synchronously for system prompt inclusion."""
        return self._format_todo_report(self.state_manager.session.todos)
|
tunacode/types.py
CHANGED
|
@@ -6,8 +6,9 @@ used throughout the TunaCode codebase.
|
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
8
|
from dataclasses import dataclass, field
|
|
9
|
+
from datetime import datetime
|
|
9
10
|
from pathlib import Path
|
|
10
|
-
from typing import Any, Awaitable, Callable, Dict, List, Optional, Protocol, Tuple, Union
|
|
11
|
+
from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Protocol, Tuple, Union
|
|
11
12
|
|
|
12
13
|
# Try to import pydantic-ai types if available
|
|
13
14
|
try:
|
|
@@ -23,6 +24,18 @@ except ImportError:
|
|
|
23
24
|
ModelRequest = Any
|
|
24
25
|
ModelResponse = Any
|
|
25
26
|
|
|
27
|
+
|
|
28
|
+
@dataclass
class TodoItem:
    """A single todo entry tracked in the session state.

    Status and priority are restricted to the literal values consumed by
    the todo tool and UI.
    """

    # Unique identifier (the todo tool generates e.g. "todo_<8 hex chars>").
    id: str
    # Human-readable task description.
    content: str
    status: Literal["pending", "in_progress", "completed"]
    priority: Literal["high", "medium", "low"]
    # Timestamp assigned when the todo is created.
    created_at: datetime
    # Set when the todo transitions to "completed"; None until then.
    completed_at: Optional[datetime] = None
    # Free-form labels; per-instance empty list by default.
    tags: list[str] = field(default_factory=list)
|
|
37
|
+
|
|
38
|
+
|
|
26
39
|
# =============================================================================
|
|
27
40
|
# Core Types
|
|
28
41
|
# =============================================================================
|
|
@@ -287,3 +300,9 @@ class CostBreakdown:
|
|
|
287
300
|
cached_cost: float
|
|
288
301
|
output_cost: float
|
|
289
302
|
total_cost: float
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
class UsageTrackerProtocol(Protocol):
    """Protocol for a class that tracks and displays token usage and cost."""

    # Structural interface: implementations receive the raw model response
    # object and are responsible for both recording and presenting usage.
    async def track_and_display(self, response_obj: Any) -> None: ...
|
tunacode/ui/input.py
CHANGED
tunacode/ui/output.py
CHANGED
|
@@ -14,6 +14,7 @@ from tunacode.constants import (
|
|
|
14
14
|
)
|
|
15
15
|
from tunacode.core.state import StateManager
|
|
16
16
|
from tunacode.utils.file_utils import DotDict
|
|
17
|
+
from tunacode.utils.token_counter import format_token_count
|
|
17
18
|
|
|
18
19
|
from .constants import SPINNER_TYPE
|
|
19
20
|
from .decorators import create_sync_wrapper
|
|
@@ -129,5 +130,40 @@ async def spinner(show: bool = True, spinner_obj=None, state_manager: StateManag
|
|
|
129
130
|
return spinner_obj
|
|
130
131
|
|
|
131
132
|
|
|
133
|
+
def get_context_window_display(total_tokens: int, max_tokens: int) -> str:
    """
    Create a color-coded display for the context window status.

    Args:
        total_tokens: The current number of tokens in the context.
        max_tokens: The maximum number of tokens for the model.

    Returns:
        A formatted string for display, or "" when inputs are unusable.
    """
    # Coerce defensively: non-numeric inputs (e.g. mocks) produce no display.
    try:
        current = int(total_tokens)
        limit = int(max_tokens)
    except (TypeError, ValueError):
        return ""

    if limit == 0:
        return ""

    percentage = float(current) / float(limit) * 100

    # Color thresholds: green up to 50%, amber up to 80%, red beyond.
    if percentage > 80:
        color = "error"
    elif percentage > 50:
        color = "warning"
    else:
        color = "success"

    return (
        f"[b]Context:[/] [{colors[color]}]"
        f"{format_token_count(current)}/{format_token_count(limit)} "
        f"({int(percentage)}%)[/]"
    )
|
|
166
|
+
|
|
167
|
+
|
|
132
168
|
# Auto-generated sync version
|
|
133
169
|
sync_print = print.sync # type: ignore
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Utilities for processing message history."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def get_message_content(message) -> str:
    """Extract the textual content from a message object of any type.

    Handles plain strings, dicts carrying a "content" or "thought" key,
    objects exposing a ``content`` attribute, and part-based messages whose
    ``parts`` are flattened recursively.

    Args:
        message: A message in any of the supported shapes.

    Returns:
        The extracted text, or "" when no content can be found.
    """
    # NOTE: the original annotation was ``message: any`` — the builtin
    # ``any()`` function, not ``typing.Any``; the invalid annotation is
    # dropped since the function is intentionally duck-typed.
    if isinstance(message, str):
        return message
    if isinstance(message, dict):
        # "content" takes precedence over "thought" when both are present.
        if "content" in message:
            return message["content"]
        if "thought" in message:
            return message["thought"]
    if hasattr(message, "content"):
        return message.content
    if hasattr(message, "parts"):
        # Recursively join nested parts (e.g. pydantic-ai message parts).
        return " ".join(get_message_content(part) for part in message.parts)
    return ""
|
tunacode/utils/token_counter.py
CHANGED
|
@@ -1,23 +1,93 @@
|
|
|
1
|
-
"""
|
|
1
|
+
"""Token counting utility using tiktoken for accurate, offline token estimation."""
|
|
2
2
|
|
|
3
|
+
import logging
|
|
4
|
+
from functools import lru_cache
|
|
5
|
+
from typing import Optional
|
|
3
6
|
|
|
4
|
-
|
|
7
|
+
# Configure logging
|
|
8
|
+
logging.basicConfig(level=logging.INFO)
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
# Cache for tokenizer encodings
|
|
12
|
+
_encoding_cache = {}
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@lru_cache(maxsize=8)
def get_encoding(model_name: str):
    """Get the appropriate tiktoken encoding for a model.

    Args:
        model_name: The model name in format "provider:model"

    Returns:
        A tiktoken encoding instance, or None when tiktoken is unavailable
        or the encoding cannot be loaded.
    """
    try:
        import tiktoken
    except ImportError:
        logger.warning("tiktoken not available, falling back to character estimation")
        return None

    # All currently supported providers map to the same encoding: GPT-4,
    # GPT-3.5-turbo and newer OpenAI models use cl100k_base, Claude models
    # tokenize similarly, and unknown providers fall back to it as well.
    # (The original per-provider branching assigned this identical value on
    # every path, so it collapses to a single constant.)
    encoding_name = "cl100k_base"

    try:
        return tiktoken.get_encoding(encoding_name)
    except Exception as e:
        logger.error(f"Error loading tiktoken encoding '{encoding_name}': {e}")
        return None
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def estimate_tokens(text: str, model_name: Optional[str] = None) -> int:
    """
    Estimate token count using tiktoken for accurate results.

    Args:
        text: The text to count tokens for.
        model_name: Optional model name for model-specific tokenization.

    Returns:
        The estimated number of tokens.
    """
    if not text:
        return 0

    # Prefer exact counting via tiktoken when a model is specified.
    if model_name:
        encoding = get_encoding(model_name)
        if encoding:
            try:
                return len(encoding.encode(text))
            except Exception as exc:
                logger.error(f"Error counting tokens with tiktoken: {exc}")

    # Fallback: character-based estimate, roughly accurate for English text.
    return len(text) // 4
|
|
17
85
|
|
|
18
86
|
|
|
19
87
|
def format_token_count(count: int) -> str:
    """Format token count for display with full precision.

    Args:
        count: The number of tokens.

    Returns:
        The count with comma thousands separators when >= 1000,
        otherwise the plain decimal string.
    """
    # The original >=1_000_000 and >=1000 branches both returned the
    # identical f"{count:,}", so they collapse into one threshold check.
    if count >= 1000:
        return f"{count:,}"
    return str(count)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: tunacode-cli
|
|
3
|
-
Version: 0.0.40
|
|
3
|
+
Version: 0.0.41
|
|
4
4
|
Summary: Your agentic CLI developer.
|
|
5
5
|
Author-email: larock22 <noreply@github.com>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -24,6 +24,7 @@ Requires-Dist: prompt_toolkit==3.0.51
|
|
|
24
24
|
Requires-Dist: pydantic-ai[logfire]==0.2.6
|
|
25
25
|
Requires-Dist: pygments==2.19.1
|
|
26
26
|
Requires-Dist: rich==14.0.0
|
|
27
|
+
Requires-Dist: tiktoken>=0.5.2
|
|
27
28
|
Provides-Extra: dev
|
|
28
29
|
Requires-Dist: build; extra == "dev"
|
|
29
30
|
Requires-Dist: ruff; extra == "dev"
|