tunacode-cli 0.0.39__py3-none-any.whl → 0.0.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic; see the registry's advisory page for more details.

Files changed (33)
  1. tunacode/cli/commands/__init__.py +2 -0
  2. tunacode/cli/commands/implementations/__init__.py +3 -0
  3. tunacode/cli/commands/implementations/debug.py +1 -1
  4. tunacode/cli/commands/implementations/todo.py +217 -0
  5. tunacode/cli/commands/registry.py +2 -0
  6. tunacode/cli/main.py +12 -5
  7. tunacode/cli/repl.py +197 -132
  8. tunacode/configuration/defaults.py +1 -0
  9. tunacode/configuration/models.py +6 -0
  10. tunacode/constants.py +32 -3
  11. tunacode/context.py +7 -3
  12. tunacode/core/agents/main.py +52 -9
  13. tunacode/core/setup/config_setup.py +5 -0
  14. tunacode/core/state.py +50 -1
  15. tunacode/core/token_usage/api_response_parser.py +44 -0
  16. tunacode/core/token_usage/cost_calculator.py +58 -0
  17. tunacode/core/token_usage/usage_tracker.py +98 -0
  18. tunacode/prompts/system.md +69 -5
  19. tunacode/tools/todo.py +343 -0
  20. tunacode/types.py +20 -1
  21. tunacode/ui/input.py +1 -1
  22. tunacode/ui/output.py +36 -0
  23. tunacode/utils/message_utils.py +17 -0
  24. tunacode/utils/text_utils.py +131 -25
  25. tunacode/utils/token_counter.py +78 -8
  26. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/METADATA +3 -1
  27. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/RECORD +31 -27
  28. tunacode/cli/textual_app.py +0 -420
  29. tunacode/cli/textual_bridge.py +0 -161
  30. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/WHEEL +0 -0
  31. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/entry_points.txt +0 -0
  32. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/licenses/LICENSE +0 -0
  33. {tunacode_cli-0.0.39.dist-info → tunacode_cli-0.0.41.dist-info}/top_level.txt +0 -0
tunacode/cli/repl.py CHANGED
@@ -5,7 +5,12 @@ Interactive REPL (Read-Eval-Print Loop) implementation for TunaCode.
5
5
  Handles user input, command processing, and agent interaction in an interactive shell.
6
6
  """
7
7
 
8
+ # ============================================================================
9
+ # IMPORTS AND DEPENDENCIES
10
+ # ============================================================================
11
+
8
12
  import json
13
+ import logging
9
14
  import os
10
15
  import subprocess
11
16
  from asyncio.exceptions import CancelledError
@@ -15,21 +20,44 @@ from prompt_toolkit.application import run_in_terminal
15
20
  from prompt_toolkit.application.current import get_app
16
21
  from pydantic_ai.exceptions import UnexpectedModelBehavior
17
22
 
18
- from tunacode.configuration.settings import ApplicationSettings
23
+ from tunacode.constants import DEFAULT_CONTEXT_WINDOW
19
24
  from tunacode.core.agents import main as agent
20
25
  from tunacode.core.agents.main import patch_tool_messages
21
26
  from tunacode.core.tool_handler import ToolHandler
22
27
  from tunacode.exceptions import AgentError, UserAbortError, ValidationError
23
28
  from tunacode.ui import console as ui
29
+ from tunacode.ui.output import get_context_window_display
24
30
  from tunacode.ui.tool_ui import ToolUI
25
31
  from tunacode.utils.security import CommandSecurityError, safe_subprocess_run
26
32
 
27
33
  from ..types import CommandContext, CommandResult, StateManager, ToolArgs
28
34
  from .commands import CommandRegistry
29
35
 
30
- # Tool UI instance
36
+ # ============================================================================
37
+ # MODULE-LEVEL CONSTANTS AND CONFIGURATION
38
+ # ============================================================================
39
+
31
40
  _tool_ui = ToolUI()
32
41
 
42
+ MSG_OPERATION_ABORTED = "Operation aborted."
43
+ MSG_OPERATION_ABORTED_BY_USER = "Operation aborted by user."
44
+ MSG_TOOL_INTERRUPTED = "Tool execution was interrupted"
45
+ MSG_REQUEST_CANCELLED = "Request cancelled"
46
+ MSG_REQUEST_COMPLETED = "Request completed"
47
+ MSG_JSON_RECOVERY = "Recovered using JSON tool parsing"
48
+ MSG_SESSION_ENDED = "Session ended. Happy coding!"
49
+ MSG_AGENT_BUSY = "Agent is busy, press Ctrl+C to interrupt."
50
+ MSG_HIT_CTRL_C = "Hit Ctrl+C again to exit"
51
+ SHELL_ENV_VAR = "SHELL"
52
+ DEFAULT_SHELL = "bash"
53
+
54
+ # Configure logging
55
+ logger = logging.getLogger(__name__)
56
+
57
+ # ============================================================================
58
+ # UTILITY FUNCTIONS
59
+ # ============================================================================
60
+
33
61
 
34
62
  def _parse_args(args) -> ToolArgs:
35
63
  """
@@ -55,105 +83,63 @@ def _parse_args(args) -> ToolArgs:
55
83
  raise ValidationError(f"Invalid args type: {type(args)}")
56
84
 
57
85
 
58
- async def _tool_confirm(tool_call, node, state_manager: StateManager):
59
- """Confirm tool execution with separated business logic and UI."""
60
- # Create tool handler with state
61
- tool_handler = ToolHandler(state_manager)
62
- args = _parse_args(tool_call.args)
63
-
64
- # Check if confirmation is needed
65
- if not tool_handler.should_confirm(tool_call.tool_name):
66
- # Log MCP tools when skipping confirmation
67
- app_settings = ApplicationSettings()
68
- if tool_call.tool_name not in app_settings.internal_tools:
69
- title = _tool_ui._get_tool_title(tool_call.tool_name)
70
- await _tool_ui.log_mcp(title, args)
71
- return
72
-
73
- # Stop spinner during user interaction (only if not streaming)
74
- if not state_manager.session.is_streaming_active and state_manager.session.spinner:
75
- state_manager.session.spinner.stop()
76
-
77
- # Create confirmation request
78
- request = tool_handler.create_confirmation_request(tool_call.tool_name, args)
79
-
80
- # Show UI and get response
81
- response = await _tool_ui.show_confirmation(request, state_manager)
86
+ # ============================================================================
87
+ # TOOL EXECUTION AND CONFIRMATION HANDLERS
88
+ # ============================================================================
82
89
 
83
- # Process the response
84
- if not tool_handler.process_confirmation(response, tool_call.tool_name):
85
- raise UserAbortError("User aborted.")
86
90
 
87
- await ui.line() # Add line after user input
88
-
89
- # Restart spinner (only if not streaming)
90
- if not state_manager.session.is_streaming_active and state_manager.session.spinner:
91
- state_manager.session.spinner.start()
92
-
93
-
94
- async def _tool_handler(part, node, state_manager: StateManager):
91
+ async def _tool_handler(part, state_manager: StateManager):
95
92
  """Handle tool execution with separated business logic and UI."""
96
- # Create tool handler with state first to check if confirmation is needed
97
93
  tool_handler = ToolHandler(state_manager)
98
94
 
99
- # Only show tool info for tools that require confirmation
100
95
  if tool_handler.should_confirm(part.tool_name):
101
96
  await ui.info(f"Tool({part.tool_name})")
102
97
 
103
- # Stop spinner only if not streaming
104
98
  if not state_manager.session.is_streaming_active and state_manager.session.spinner:
105
99
  state_manager.session.spinner.stop()
106
100
 
107
- # Track if we need to stop/restart streaming panel
108
101
  streaming_panel = None
109
102
  if state_manager.session.is_streaming_active and hasattr(
110
103
  state_manager.session, "streaming_panel"
111
104
  ):
112
105
  streaming_panel = state_manager.session.streaming_panel
113
- # Stop the streaming panel to prevent UI interference during confirmation
114
106
  if streaming_panel and tool_handler.should_confirm(part.tool_name):
115
107
  await streaming_panel.stop()
116
108
 
117
109
  try:
118
110
  args = _parse_args(part.args)
119
111
 
120
- # Use a synchronous function in run_in_terminal to avoid async deadlocks
121
112
  def confirm_func():
122
- # Skip confirmation if not needed
123
113
  if not tool_handler.should_confirm(part.tool_name):
124
114
  return False
125
-
126
- # Create confirmation request
127
115
  request = tool_handler.create_confirmation_request(part.tool_name, args)
128
116
 
129
- # Show sync UI and get response
130
117
  response = _tool_ui.show_sync_confirmation(request)
131
118
 
132
- # Process the response
133
119
  if not tool_handler.process_confirmation(response, part.tool_name):
134
120
  return True # Abort
135
121
  return False # Continue
136
122
 
137
- # Run the confirmation in the terminal
138
123
  should_abort = await run_in_terminal(confirm_func)
139
124
 
140
125
  if should_abort:
141
126
  raise UserAbortError("User aborted.")
142
127
 
143
128
  except UserAbortError:
144
- patch_tool_messages("Operation aborted by user.", state_manager)
129
+ patch_tool_messages(MSG_OPERATION_ABORTED_BY_USER, state_manager)
145
130
  raise
146
131
  finally:
147
- # Restart streaming panel if it was stopped
148
132
  if streaming_panel and tool_handler.should_confirm(part.tool_name):
149
133
  await streaming_panel.start()
150
134
 
151
- # Restart spinner only if not streaming
152
135
  if not state_manager.session.is_streaming_active and state_manager.session.spinner:
153
136
  state_manager.session.spinner.start()
154
137
 
155
138
 
156
- # Initialize command registry
139
+ # ============================================================================
140
+ # COMMAND SYSTEM
141
+ # ============================================================================
142
+
157
143
  _command_registry = CommandRegistry()
158
144
  _command_registry.register_all_default_commands()
159
145
 
@@ -169,71 +155,138 @@ async def _handle_command(command: str, state_manager: StateManager) -> CommandR
169
155
  Returns:
170
156
  Command result (varies by command).
171
157
  """
172
- # Create command context
173
158
  context = CommandContext(state_manager=state_manager, process_request=process_request)
174
159
 
175
160
  try:
176
- # Set the process_request callback for commands that need it
177
161
  _command_registry.set_process_request_callback(process_request)
178
162
 
179
- # Execute the command
180
163
  return await _command_registry.execute(command, context)
181
164
  except ValidationError as e:
182
165
  await ui.error(str(e))
183
166
 
184
167
 
168
+ # ============================================================================
169
+ # ERROR RECOVERY
170
+ # ============================================================================
171
+
172
+
173
+ async def _attempt_tool_recovery(e: Exception, state_manager: StateManager) -> bool:
174
+ """
175
+ Attempt to recover from tool calling failures using guard clauses.
176
+
177
+ Returns:
178
+ bool: True if recovery was successful, False otherwise
179
+ """
180
+ error_str = str(e).lower()
181
+ tool_keywords = ["tool", "function", "call", "schema"]
182
+ if not any(keyword in error_str for keyword in tool_keywords):
183
+ return False
184
+
185
+ if not state_manager.session.messages:
186
+ return False
187
+
188
+ last_msg = state_manager.session.messages[-1]
189
+ if not hasattr(last_msg, "parts"):
190
+ return False
191
+
192
+ for part in last_msg.parts:
193
+ if not hasattr(part, "content") or not isinstance(part.content, str):
194
+ continue
195
+
196
+ try:
197
+ from tunacode.core.agents.main import extract_and_execute_tool_calls
198
+
199
+ def tool_callback_with_state(part, node):
200
+ return _tool_handler(part, state_manager)
201
+
202
+ await extract_and_execute_tool_calls(
203
+ part.content, tool_callback_with_state, state_manager
204
+ )
205
+
206
+ await ui.warning(f" {MSG_JSON_RECOVERY}")
207
+ return True
208
+
209
+ except Exception as e:
210
+ logger.debug(f"Failed to check triple quotes: {e}")
211
+ continue
212
+
213
+ return False
214
+
215
+
216
+ # ============================================================================
217
+ # AGENT OUTPUT DISPLAY
218
+ # ============================================================================
219
+
220
+
221
+ async def _display_agent_output(res, enable_streaming: bool) -> None:
222
+ """Display agent output using guard clauses to flatten nested conditionals."""
223
+ if enable_streaming:
224
+ return
225
+
226
+ if not hasattr(res, "result") or res.result is None or not hasattr(res.result, "output"):
227
+ await ui.muted(MSG_REQUEST_COMPLETED)
228
+ return
229
+
230
+ output = res.result.output
231
+
232
+ if not isinstance(output, str):
233
+ return
234
+
235
+ if output.strip().startswith('{"thought"'):
236
+ return
237
+
238
+ if '"tool_uses"' in output:
239
+ return
240
+
241
+ await ui.agent(output)
242
+
243
+
244
+ # ============================================================================
245
+ # MAIN AGENT REQUEST PROCESSING
246
+ # ============================================================================
247
+
248
+
185
249
  async def process_request(text: str, state_manager: StateManager, output: bool = True):
186
250
  """Process input using the agent, handling cancellation safely."""
251
+
187
252
  state_manager.session.spinner = await ui.spinner(
188
253
  True, state_manager.session.spinner, state_manager
189
254
  )
190
255
  try:
191
- # Patch any orphaned tool calls from previous requests before proceeding
192
- patch_tool_messages("Tool execution was interrupted", state_manager)
256
+ patch_tool_messages(MSG_TOOL_INTERRUPTED, state_manager)
193
257
 
194
- # Clear tracking for new request when thoughts are enabled
195
258
  if state_manager.session.show_thoughts:
196
259
  state_manager.session.tool_calls = []
197
- # Don't clear files_in_context - keep it cumulative for the session
198
260
  state_manager.session.iteration_count = 0
199
261
  state_manager.session.current_iteration = 0
200
262
 
201
- # Track message start for thoughts display
202
263
  start_idx = len(state_manager.session.messages)
203
264
 
204
- # Create a partial function that includes state_manager
205
265
  def tool_callback_with_state(part, node):
206
- return _tool_handler(part, node, state_manager)
266
+ return _tool_handler(part, state_manager)
207
267
 
208
- # Expand @file references before sending to the agent
209
268
  try:
210
269
  from tunacode.utils.text_utils import expand_file_refs
211
270
 
212
271
  text, referenced_files = expand_file_refs(text)
213
- # Track the referenced files
214
272
  for file_path in referenced_files:
215
273
  state_manager.session.files_in_context.add(file_path)
216
274
  except ValueError as e:
217
275
  await ui.error(str(e))
218
276
  return
219
277
 
220
- # Check if streaming is enabled (default: True for better UX)
221
278
  enable_streaming = state_manager.session.user_config.get("settings", {}).get(
222
279
  "enable_streaming", True
223
280
  )
224
281
 
225
282
  if enable_streaming:
226
- # Stop spinner before starting streaming display (Rich.Live conflict)
227
283
  await ui.spinner(False, state_manager.session.spinner, state_manager)
228
284
 
229
- # Mark that streaming is active to prevent spinner conflicts
230
285
  state_manager.session.is_streaming_active = True
231
286
 
232
- # Use streaming agent processing
233
287
  streaming_panel = ui.StreamingAgentPanel()
234
288
  await streaming_panel.start()
235
289
 
236
- # Store streaming panel reference in session for tool handler access
237
290
  state_manager.session.streaming_panel = streaming_panel
238
291
 
239
292
  try:
@@ -250,11 +303,8 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
250
303
  )
251
304
  finally:
252
305
  await streaming_panel.stop()
253
- # Clear streaming panel reference
254
306
  state_manager.session.streaming_panel = None
255
- # Mark streaming as inactive
256
307
  state_manager.session.is_streaming_active = False
257
- # Don't restart spinner - it will be stopped in the outer finally block anyway
258
308
  else:
259
309
  # Use normal agent processing
260
310
  res = await agent.process_request(
@@ -263,6 +313,7 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
263
313
  state_manager,
264
314
  tool_callback=tool_callback_with_state,
265
315
  )
316
+
266
317
  if output:
267
318
  if state_manager.session.show_thoughts:
268
319
  new_msgs = state_manager.session.messages[start_idx:]
@@ -271,62 +322,38 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
271
322
  await ui.muted(f"THOUGHT: {msg['thought']}")
272
323
 
273
324
  # Only display result if not streaming (streaming already showed content)
274
- if not enable_streaming:
275
- # Check if result exists and has output
276
- if (
277
- hasattr(res, "result")
278
- and res.result is not None
279
- and hasattr(res.result, "output")
280
- ):
281
- output = res.result.output
282
- # Filter out JSON responses with "thought" field
283
- if isinstance(output, str) and not (
284
- output.strip().startswith('{"thought"') or '"tool_uses"' in output
285
- ):
286
- await ui.agent(output)
287
- else:
288
- # Fallback: show that the request was processed
289
- await ui.muted("Request completed")
325
+ if enable_streaming:
326
+ pass # Guard: streaming already showed content
327
+ elif (
328
+ not hasattr(res, "result")
329
+ or res.result is None
330
+ or not hasattr(res.result, "output")
331
+ ):
332
+ # Fallback: show that the request was processed
333
+ await ui.muted(MSG_REQUEST_COMPLETED)
334
+ else:
335
+ # Use the dedicated function for displaying agent output
336
+ await _display_agent_output(res, enable_streaming)
290
337
 
291
338
  # Always show files in context after agent response
292
339
  if state_manager.session.files_in_context:
293
- # Extract just filenames from full paths for readability
294
340
  filenames = [Path(f).name for f in sorted(state_manager.session.files_in_context)]
295
341
  await ui.muted(f"\nFiles in context: {', '.join(filenames)}")
342
+
343
+ # --- ERROR HANDLING ---
296
344
  except CancelledError:
297
- await ui.muted("Request cancelled")
345
+ await ui.muted(MSG_REQUEST_CANCELLED)
298
346
  except UserAbortError:
299
- await ui.muted("Operation aborted.")
347
+ await ui.muted(MSG_OPERATION_ABORTED)
300
348
  except UnexpectedModelBehavior as e:
301
349
  error_message = str(e)
302
350
  await ui.muted(error_message)
303
351
  patch_tool_messages(error_message, state_manager)
304
352
  except Exception as e:
305
- # Check if this might be a tool calling failure that we can recover from
306
- error_str = str(e).lower()
307
- if any(keyword in error_str for keyword in ["tool", "function", "call", "schema"]):
308
- # Try to extract and execute tool calls from the last response
309
- if state_manager.session.messages:
310
- last_msg = state_manager.session.messages[-1]
311
- if hasattr(last_msg, "parts"):
312
- for part in last_msg.parts:
313
- if hasattr(part, "content") and isinstance(part.content, str):
314
- from tunacode.core.agents.main import extract_and_execute_tool_calls
315
-
316
- try:
317
- # Create a partial function that includes state_manager
318
- def tool_callback_with_state(part, node):
319
- return _tool_handler(part, node, state_manager)
320
-
321
- await extract_and_execute_tool_calls(
322
- part.content, tool_callback_with_state, state_manager
323
- )
324
- await ui.warning(" Recovered using JSON tool parsing")
325
- return # Successfully recovered
326
- except Exception:
327
- pass # Fallback failed, continue with normal error handling
353
+ # Try tool recovery for tool-related errors
354
+ if await _attempt_tool_recovery(e, state_manager):
355
+ return # Successfully recovered
328
356
 
329
- # Wrap unexpected exceptions in AgentError for better tracking
330
357
  agent_error = AgentError(f"Agent processing failed: {str(e)}")
331
358
  agent_error.__cause__ = e # Preserve the original exception chain
332
359
  await ui.error(str(e))
@@ -334,20 +361,32 @@ async def process_request(text: str, state_manager: StateManager, output: bool =
334
361
  await ui.spinner(False, state_manager.session.spinner, state_manager)
335
362
  state_manager.session.current_task = None
336
363
 
337
- # Force refresh of the multiline input prompt to restore placeholder
338
364
  if "multiline" in state_manager.session.input_sessions:
339
365
  await run_in_terminal(
340
366
  lambda: state_manager.session.input_sessions["multiline"].app.invalidate()
341
367
  )
342
368
 
343
369
 
370
+ # ============================================================================
371
+ # MAIN REPL LOOP
372
+ # ============================================================================
373
+
374
+
344
375
  async def repl(state_manager: StateManager):
376
+ """Main REPL loop that handles user interaction and input processing."""
345
377
  action = None
346
378
  ctrl_c_pressed = False
347
379
 
348
- # Professional startup information
349
- await ui.muted(f"• Model: {state_manager.session.current_model}")
350
- await ui.success("Ready to assist with your development")
380
+ model_name = state_manager.session.current_model
381
+ max_tokens = (
382
+ state_manager.session.user_config.get("context_window_size") or DEFAULT_CONTEXT_WINDOW
383
+ )
384
+ state_manager.session.max_tokens = max_tokens
385
+
386
+ state_manager.session.update_token_count()
387
+ context_display = get_context_window_display(state_manager.session.total_tokens, max_tokens)
388
+ await ui.muted(f"• Model: {model_name} • {context_display}")
389
+ await ui.success("Ready to assist")
351
390
  await ui.line()
352
391
 
353
392
  instance = agent.get_or_create_agent(state_manager.session.current_model, state_manager)
@@ -360,7 +399,7 @@ async def repl(state_manager: StateManager):
360
399
  if ctrl_c_pressed:
361
400
  break
362
401
  ctrl_c_pressed = True
363
- await ui.warning("Hit Ctrl+C again to exit")
402
+ await ui.warning(MSG_HIT_CTRL_C)
364
403
  continue
365
404
 
366
405
  if not line:
@@ -380,16 +419,12 @@ async def repl(state_manager: StateManager):
380
419
  if line.startswith("!"):
381
420
  command = line[1:].strip()
382
421
 
383
- # Show tool-style header for bash commands
384
422
  cmd_display = command if command else "Interactive shell"
385
423
  await ui.panel("Tool(bash)", f"Command: {cmd_display}", border_style="yellow")
386
424
 
387
425
  def run_shell():
388
426
  try:
389
427
  if command:
390
- # Use secure subprocess execution for shell commands
391
- # Note: User shell commands are inherently risky but this is by design
392
- # We validate but allow shell features since it's explicit user intent
393
428
  try:
394
429
  result = safe_subprocess_run(
395
430
  command,
@@ -403,7 +438,7 @@ async def repl(state_manager: StateManager):
403
438
  print(f"\nSecurity validation failed: {str(e)}")
404
439
  print("If you need to run this command, please ensure it's safe.")
405
440
  else:
406
- shell = os.environ.get("SHELL", "bash")
441
+ shell = os.environ.get(SHELL_ENV_VAR, DEFAULT_SHELL)
407
442
  subprocess.run(shell) # Interactive shell is safe
408
443
  except Exception as e:
409
444
  print(f"\nShell command failed: {str(e)}")
@@ -412,16 +447,46 @@ async def repl(state_manager: StateManager):
412
447
  await ui.line()
413
448
  continue
414
449
 
415
- # Check if another task is already running
450
+ # --- AGENT REQUEST PROCESSING ---
416
451
  if state_manager.session.current_task and not state_manager.session.current_task.done():
417
- await ui.muted("Agent is busy, press Ctrl+C to interrupt.")
452
+ await ui.muted(MSG_AGENT_BUSY)
418
453
  continue
419
454
 
420
455
  state_manager.session.current_task = get_app().create_background_task(
421
456
  process_request(line, state_manager)
422
457
  )
458
+ await state_manager.session.current_task
423
459
 
424
- if action == "restart":
425
- await repl(state_manager)
426
- else:
427
- await ui.info("Session ended. Happy coding!")
460
+ state_manager.session.update_token_count()
461
+ context_display = get_context_window_display(
462
+ state_manager.session.total_tokens, state_manager.session.max_tokens
463
+ )
464
+ await ui.muted(f"• Model: {state_manager.session.current_model} • {context_display}")
465
+
466
+ if action == "restart":
467
+ await repl(state_manager)
468
+ else:
469
+ # Show session cost summary if available
470
+ session_total = state_manager.session.session_total_usage
471
+ if session_total:
472
+ try:
473
+ prompt = int(session_total.get("prompt_tokens", 0) or 0)
474
+ completion = int(session_total.get("completion_tokens", 0) or 0)
475
+ total_tokens = prompt + completion
476
+ total_cost = float(session_total.get("cost", 0) or 0)
477
+
478
+ # Only show summary if we have actual token usage
479
+ if total_tokens > 0 or total_cost > 0:
480
+ summary = (
481
+ f"\n[bold cyan]TunaCode Session Summary[/bold cyan]\n"
482
+ f" - Total Tokens: {total_tokens:,}\n"
483
+ f" - Prompt Tokens: {prompt:,}\n"
484
+ f" - Completion Tokens: {completion:,}\n"
485
+ f" - [bold green]Total Session Cost: ${total_cost:.4f}[/bold green]"
486
+ )
487
+ ui.console.print(summary)
488
+ except (TypeError, ValueError) as e:
489
+ # Skip displaying summary if values can't be converted to numbers
490
+ logger.debug(f"Failed to display token usage summary: {e}")
491
+
492
+ await ui.info(MSG_SESSION_ENDED)
@@ -23,6 +23,7 @@ DEFAULT_USER_CONFIG: UserConfig = {
23
23
  "guide_file": GUIDE_FILE_NAME,
24
24
  "fallback_response": True,
25
25
  "fallback_verbosity": "normal", # Options: minimal, normal, detailed
26
+ "context_window_size": 200000,
26
27
  },
27
28
  "mcpServers": {},
28
29
  }
@@ -24,6 +24,9 @@ class ModelRegistry:
24
24
  "anthropic:claude-3-7-sonnet-latest": ModelConfig(
25
25
  pricing=ModelPricing(input=3.00, cached_input=1.50, output=15.00)
26
26
  ),
27
+ "google/gemini-2.5-flash-lite-preview-06-17": ModelConfig(
28
+ pricing=ModelPricing(input=0.10, cached_input=0.025, output=0.40)
29
+ ),
27
30
  "google-gla:gemini-2.0-flash": ModelConfig(
28
31
  pricing=ModelPricing(input=0.10, cached_input=0.025, output=0.40)
29
32
  ),
@@ -75,6 +78,9 @@ class ModelRegistry:
75
78
  "openrouter:openai/gpt-4.1-nano": ModelConfig(
76
79
  pricing=ModelPricing(input=0.10, cached_input=0.05, output=0.40)
77
80
  ),
81
+ "openrouter:google/gemini-2.5-flash-lite-preview-06-17": ModelConfig(
82
+ pricing=ModelPricing(input=0.10, cached_input=0.025, output=0.40)
83
+ ),
78
84
  }
79
85
 
80
86
  def get_model(self, name: ModelName) -> ModelConfig:
tunacode/constants.py CHANGED
@@ -7,7 +7,7 @@ Centralizes all magic strings, UI text, error messages, and application constant
7
7
 
8
8
  # Application info
9
9
  APP_NAME = "TunaCode"
10
- APP_VERSION = "0.0.39"
10
+ APP_VERSION = "0.0.41"
11
11
 
12
12
  # File patterns
13
13
  GUIDE_FILE_PATTERN = "{name}.md"
@@ -18,6 +18,10 @@ CONFIG_FILE_NAME = "tunacode.json"
18
18
  # Default limits
19
19
  MAX_FILE_SIZE = 100 * 1024 # 100KB
20
20
  MAX_COMMAND_OUTPUT = 5000 # 5000 chars
21
+ MAX_FILES_IN_DIR = 50
22
+ MAX_TOTAL_DIR_SIZE = 2 * 1024 * 1024 # 2 MB
23
+ DEFAULT_CONTEXT_WINDOW = 200000 # 200k tokens
24
+
21
25
 
22
26
  # Command output processing
23
27
  COMMAND_OUTPUT_THRESHOLD = 3500 # Length threshold for truncation
@@ -33,6 +37,7 @@ TOOL_BASH = "bash"
33
37
  TOOL_GREP = "grep"
34
38
  TOOL_LIST_DIR = "list_dir"
35
39
  TOOL_GLOB = "glob"
40
+ TOOL_TODO = "todo"
36
41
 
37
42
  # Tool categorization
38
43
  READ_ONLY_TOOLS = [TOOL_READ_FILE, TOOL_GREP, TOOL_LIST_DIR, TOOL_GLOB]
@@ -89,8 +94,8 @@ UI_COLORS = {
89
94
  }
90
95
 
91
96
  # UI text and formatting
92
- UI_PROMPT_PREFIX = " "
93
- UI_THINKING_MESSAGE = "[bold #00d7ff]Thinking...[/bold #00d7ff]"
97
+ UI_PROMPT_PREFIX = "> "
98
+ UI_THINKING_MESSAGE = "[bold #00d7ff]Thinking...[/bold #00d7ff]"
94
99
  UI_DARKGREY_OPEN = "<darkgrey>"
95
100
  UI_DARKGREY_CLOSE = "</darkgrey>"
96
101
  UI_BOLD_OPEN = "<bold>"
@@ -115,6 +120,13 @@ ERROR_COMMAND_NOT_FOUND = "Error: Command not found or failed to execute:"
115
120
  ERROR_COMMAND_EXECUTION = (
116
121
  "Error: Command not found or failed to execute: {command}. Details: {error}"
117
122
  )
123
+ # Directory expansion errors
124
+ ERROR_DIR_TOO_LARGE = (
125
+ "Error: Directory '{path}' expansion aborted. Total size exceeds {limit_mb:.1f} MB limit."
126
+ )
127
+ ERROR_DIR_TOO_MANY_FILES = (
128
+ "Error: Directory '{path}' expansion aborted. Exceeds limit of {limit} files."
129
+ )
118
130
 
119
131
  # Command output messages
120
132
  CMD_OUTPUT_NO_OUTPUT = "No output."
@@ -128,3 +140,20 @@ MSG_UPDATE_AVAILABLE = "Update available: v{latest_version}"
128
140
  MSG_UPDATE_INSTRUCTION = "Exit, and run: [bold]pip install --upgrade tunacode-cli"
129
141
  MSG_VERSION_DISPLAY = "TunaCode CLI {version}"
130
142
  MSG_FILE_SIZE_LIMIT = " Please specify a smaller file or use other tools to process it."
143
+
144
+ # Todo-related constants
145
+ TODO_STATUS_PENDING = "pending"
146
+ TODO_STATUS_IN_PROGRESS = "in_progress"
147
+ TODO_STATUS_COMPLETED = "completed"
148
+ TODO_STATUSES = [TODO_STATUS_PENDING, TODO_STATUS_IN_PROGRESS, TODO_STATUS_COMPLETED]
149
+
150
+ TODO_PRIORITY_HIGH = "high"
151
+ TODO_PRIORITY_MEDIUM = "medium"
152
+ TODO_PRIORITY_LOW = "low"
153
+ TODO_PRIORITIES = [TODO_PRIORITY_HIGH, TODO_PRIORITY_MEDIUM, TODO_PRIORITY_LOW]
154
+
155
+ # Maximum number of todos allowed per session
156
+ MAX_TODOS_PER_SESSION = 100
157
+
158
+ # Maximum length for todo content
159
+ MAX_TODO_CONTENT_LENGTH = 500