tunacode-cli 0.0.41__py3-none-any.whl → 0.0.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -0,0 +1,487 @@
+ """Module: tunacode.core.recursive.hierarchy
+
+ Hierarchical task management system for maintaining parent-child relationships and execution state.
+ """
+
+ import logging
+ from collections import defaultdict, deque
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional, Set
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class TaskExecutionContext:
+     """Context information for task execution."""
+
+     task_id: str
+     parent_id: Optional[str] = None
+     depth: int = 0
+     inherited_context: Dict[str, Any] = field(default_factory=dict)
+     local_context: Dict[str, Any] = field(default_factory=dict)
+     started_at: Optional[datetime] = None
+     completed_at: Optional[datetime] = None
+
+     def get_full_context(self) -> Dict[str, Any]:
+         """Get merged context (inherited + local)."""
+         return {**self.inherited_context, **self.local_context}
+
+
+ @dataclass
+ class TaskRelationship:
+     """Represents a relationship between tasks."""
+
+     parent_id: str
+     child_id: str
+     relationship_type: str = "subtask"  # subtask, dependency, etc.
+     metadata: Dict[str, Any] = field(default_factory=dict)
+
+
+ class TaskHierarchy:
+     """Manages hierarchical task relationships and execution state."""
+
+     def __init__(self):
+         """Initialize the task hierarchy manager."""
+         # Core data structures
+         self._tasks: Dict[str, Dict[str, Any]] = {}
+         self._parent_to_children: Dict[str, List[str]] = defaultdict(list)
+         self._child_to_parent: Dict[str, str] = {}
+         self._task_dependencies: Dict[str, Set[str]] = defaultdict(set)
+         self._reverse_dependencies: Dict[str, Set[str]] = defaultdict(set)
+         self._execution_contexts: Dict[str, TaskExecutionContext] = {}
+         self._execution_order: List[str] = []
+         self._completed_tasks: Set[str] = set()
+         self._failed_tasks: Set[str] = set()
+
+     def add_task(
+         self,
+         task_id: str,
+         task_data: Dict[str, Any],
+         parent_id: Optional[str] = None,
+         dependencies: Optional[List[str]] = None,
+     ) -> bool:
+         """Add a task to the hierarchy.
+
+         Args:
+             task_id: Unique identifier for the task
+             task_data: Task information (title, description, etc.)
+             parent_id: Optional parent task ID
+             dependencies: Optional list of task IDs this task depends on
+
+         Returns:
+             True if task was added successfully, False otherwise
+         """
+         if task_id in self._tasks:
+             logger.warning(f"Task {task_id} already exists")
+             return False
+
+         # Store task data
+         self._tasks[task_id] = {
+             "id": task_id,
+             "parent_id": parent_id,
+             "created_at": datetime.now(),
+             **task_data,
+         }
+
+         # Set up parent-child relationships
+         if parent_id:
+             if parent_id not in self._tasks:
+                 logger.error(f"Parent task {parent_id} does not exist")
+                 return False
+
+             self._parent_to_children[parent_id].append(task_id)
+             self._child_to_parent[task_id] = parent_id
+
+         # Set up dependencies
+         if dependencies:
+             for dep_id in dependencies:
+                 if dep_id not in self._tasks:
+                     logger.warning(f"Dependency {dep_id} does not exist yet")
+                 self.add_dependency(task_id, dep_id)
+
+         logger.debug(f"Added task {task_id} to hierarchy")
+         return True
+
+     def add_dependency(self, task_id: str, depends_on: str) -> bool:
+         """Add a dependency relationship between tasks.
+
+         Args:
+             task_id: Task that has the dependency
+             depends_on: Task that must complete first
+
+         Returns:
+             True if dependency was added, False if it would create a cycle
+         """
+         # Check if adding this would create a cycle
+         if self._would_create_cycle(task_id, depends_on):
+             logger.error(f"Adding dependency {task_id} -> {depends_on} would create a cycle")
+             return False
+
+         self._task_dependencies[task_id].add(depends_on)
+         self._reverse_dependencies[depends_on].add(task_id)
+         return True
+
+     def remove_dependency(self, task_id: str, depends_on: str) -> bool:
+         """Remove a dependency relationship.
+
+         Args:
+             task_id: Task that has the dependency
+             depends_on: Task to remove from dependencies
+
+         Returns:
+             True if dependency was removed
+         """
+         if depends_on in self._task_dependencies.get(task_id, set()):
+             self._task_dependencies[task_id].remove(depends_on)
+             self._reverse_dependencies[depends_on].discard(task_id)
+             return True
+         return False
+
+     def get_task(self, task_id: str) -> Optional[Dict[str, Any]]:
+         """Get task information.
+
+         Args:
+             task_id: Task identifier
+
+         Returns:
+             Task data or None if not found
+         """
+         return self._tasks.get(task_id)
+
+     def get_children(self, parent_id: str) -> List[str]:
+         """Get all direct children of a task.
+
+         Args:
+             parent_id: Parent task ID
+
+         Returns:
+             List of child task IDs
+         """
+         return self._parent_to_children.get(parent_id, []).copy()
+
+     def get_parent(self, task_id: str) -> Optional[str]:
+         """Get the parent of a task.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             Parent task ID or None
+         """
+         return self._child_to_parent.get(task_id)
+
+     def get_ancestors(self, task_id: str) -> List[str]:
+         """Get all ancestors of a task (parent, grandparent, etc.).
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             List of ancestor IDs from immediate parent to root
+         """
+         ancestors = []
+         current = self._child_to_parent.get(task_id)
+
+         while current:
+             ancestors.append(current)
+             current = self._child_to_parent.get(current)
+
+         return ancestors
+
+     def get_depth(self, task_id: str) -> int:
+         """Get the depth of a task in the hierarchy.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             Depth (0 for root tasks)
+         """
+         return len(self.get_ancestors(task_id))
+
+     def get_dependencies(self, task_id: str) -> Set[str]:
+         """Get tasks that must complete before this task.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             Set of dependency task IDs
+         """
+         return self._task_dependencies.get(task_id, set()).copy()
+
+     def get_dependents(self, task_id: str) -> Set[str]:
+         """Get tasks that depend on this task.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             Set of dependent task IDs
+         """
+         return self._reverse_dependencies.get(task_id, set()).copy()
+
+     def can_execute(self, task_id: str) -> bool:
+         """Check if a task can be executed (all dependencies met).
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             True if task can be executed
+         """
+         if task_id not in self._tasks:
+             return False
+
+         # Check if all dependencies are completed
+         dependencies = self._task_dependencies.get(task_id, set())
+         return all(dep in self._completed_tasks for dep in dependencies)
+
+     def get_executable_tasks(self) -> List[str]:
+         """Get all tasks that can currently be executed.
+
+         Returns:
+             List of task IDs that have all dependencies met
+         """
+         executable = []
+
+         for task_id in self._tasks:
+             if (
+                 task_id not in self._completed_tasks
+                 and task_id not in self._failed_tasks
+                 and self.can_execute(task_id)
+             ):
+                 executable.append(task_id)
+
+         return executable
+
+     def mark_completed(self, task_id: str, result: Any = None) -> None:
+         """Mark a task as completed.
+
+         Args:
+             task_id: Task ID
+             result: Optional result data
+         """
+         if task_id in self._tasks:
+             self._completed_tasks.add(task_id)
+             self._tasks[task_id]["status"] = "completed"
+             self._tasks[task_id]["result"] = result
+             self._tasks[task_id]["completed_at"] = datetime.now()
+
+             # Update execution context if exists
+             if task_id in self._execution_contexts:
+                 self._execution_contexts[task_id].completed_at = datetime.now()
+
+     def mark_failed(self, task_id: str, error: str) -> None:
+         """Mark a task as failed.
+
+         Args:
+             task_id: Task ID
+             error: Error message
+         """
+         if task_id in self._tasks:
+             self._failed_tasks.add(task_id)
+             self._tasks[task_id]["status"] = "failed"
+             self._tasks[task_id]["error"] = error
+             self._tasks[task_id]["failed_at"] = datetime.now()
+
+     def create_execution_context(
+         self, task_id: str, parent_context: Optional[Dict[str, Any]] = None
+     ) -> TaskExecutionContext:
+         """Create an execution context for a task.
+
+         Args:
+             task_id: Task ID
+             parent_context: Optional parent context to inherit
+
+         Returns:
+             New execution context
+         """
+         parent_id = self._child_to_parent.get(task_id)
+         depth = self.get_depth(task_id)
+
+         context = TaskExecutionContext(
+             task_id=task_id,
+             parent_id=parent_id,
+             depth=depth,
+             inherited_context=parent_context or {},
+             started_at=datetime.now(),
+         )
+
+         self._execution_contexts[task_id] = context
+         self._execution_order.append(task_id)
+
+         return context
+
+     def get_execution_context(self, task_id: str) -> Optional[TaskExecutionContext]:
+         """Get the execution context for a task.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             Execution context or None
+         """
+         return self._execution_contexts.get(task_id)
+
+     def propagate_context(
+         self, from_task: str, to_task: str, context_update: Dict[str, Any]
+     ) -> None:
+         """Propagate context from one task to another.
+
+         Args:
+             from_task: Source task ID
+             to_task: Target task ID
+             context_update: Context to propagate
+         """
+         if to_task in self._execution_contexts:
+             self._execution_contexts[to_task].inherited_context.update(context_update)
+
+     def aggregate_child_results(self, parent_id: str) -> Dict[str, Any]:
+         """Aggregate results from all children of a task.
+
+         Args:
+             parent_id: Parent task ID
+
+         Returns:
+             Aggregated results dictionary
+         """
+         children = self._parent_to_children.get(parent_id, [])
+
+         results = {"completed": [], "failed": [], "pending": [], "aggregated_data": {}}
+
+         for child_id in children:
+             child_task = self._tasks.get(child_id, {})
+             status = child_task.get("status", "pending")
+
+             if status == "completed":
+                 results["completed"].append({"id": child_id, "result": child_task.get("result")})
+             elif status == "failed":
+                 results["failed"].append({"id": child_id, "error": child_task.get("error")})
+             else:
+                 results["pending"].append(child_id)
+
+         return results
+
+     def get_execution_path(self, task_id: str) -> List[str]:
+         """Get the full execution path from root to this task.
+
+         Args:
+             task_id: Task ID
+
+         Returns:
+             List of task IDs from root to this task
+         """
+         ancestors = self.get_ancestors(task_id)
+         ancestors.reverse()  # Root to task order
+         ancestors.append(task_id)
+         return ancestors
+
+     def _would_create_cycle(self, task_id: str, depends_on: str) -> bool:
+         """Check if adding a dependency would create a cycle.
+
+         Args:
+             task_id: Task that would have the dependency
+             depends_on: Task it would depend on
+
+         Returns:
+             True if this would create a cycle
+         """
+         # BFS along existing dependency edges from depends_on, looking for task_id
+         visited = set()
+         queue = deque([depends_on])
+
+         while queue:
+             current = queue.popleft()
+             if current == task_id:
+                 return True
+
+             if current in visited:
+                 continue
+
+             visited.add(current)
+
+             # Follow the tasks that current depends on
+             for dep in self._task_dependencies.get(current, set()):
+                 if dep not in visited:
+                     queue.append(dep)
+
+         return False
+
+     def get_topological_order(self) -> List[str]:
+         """Get a valid execution order respecting all dependencies.
+
+         Returns:
+             List of task IDs in valid execution order
+         """
+         # Kahn's algorithm for topological sort
+         in_degree = {}
+         for task_id in self._tasks:
+             in_degree[task_id] = len(self._task_dependencies.get(task_id, set()))
+
+         queue = deque([task_id for task_id, degree in in_degree.items() if degree == 0])
+         order = []
+
+         while queue:
+             current = queue.popleft()
+             order.append(current)
+
+             # Reduce in-degree for all dependents
+             for dependent in self._reverse_dependencies.get(current, []):
+                 in_degree[dependent] -= 1
+                 if in_degree[dependent] == 0:
+                     queue.append(dependent)
+
+         # If we couldn't process all tasks, there's a cycle
+         if len(order) != len(self._tasks):
+             logger.error("Dependency cycle detected")
+             # Return partial order
+             remaining = [t for t in self._tasks if t not in order]
+             order.extend(remaining)
+
+         return order
+
+     def visualize_hierarchy(self, show_dependencies: bool = True) -> str:
+         """Generate a text visualization of the task hierarchy.
+
+         Args:
+             show_dependencies: Whether to show dependency relationships
+
+         Returns:
+             String representation of the hierarchy
+         """
+         lines = []
+
+         # Find root tasks (no parent)
+         roots = [task_id for task_id in self._tasks if task_id not in self._child_to_parent]
+
+         def build_tree(task_id: str, prefix: str = "", is_last: bool = True):
+             task = self._tasks[task_id]
+             status = task.get("status", "pending")
+
+             # Build current line
+             connector = "└── " if is_last else "├── "
+             status_icon = "✓" if status == "completed" else "✗" if status == "failed" else "○"
+             line = f"{prefix}{connector}[{status_icon}] {task_id}: {task.get('title', 'Untitled')[:50]}"
+
+             # Add dependencies if requested
+             if show_dependencies:
+                 deps = self._task_dependencies.get(task_id, set())
+                 if deps:
+                     line += f" (deps: {', '.join(deps)})"
+
+             lines.append(line)
+
+             # Process children
+             children = self._parent_to_children.get(task_id, [])
+             for i, child_id in enumerate(children):
+                 extension = "    " if is_last else "│   "
+                 build_tree(child_id, prefix + extension, i == len(children) - 1)
+
+         # Build tree for each root
+         for i, root_id in enumerate(roots):
+             build_tree(root_id, "", i == len(roots) - 1)
+
+         return "\n".join(lines) if lines else "Empty hierarchy"
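For orientation, here is a minimal usage sketch of the `TaskHierarchy` API added above (the import path follows the module docstring; task IDs, titles, and results are illustrative only):

```python
from tunacode.core.recursive.hierarchy import TaskHierarchy

hierarchy = TaskHierarchy()
hierarchy.add_task("root", {"title": "Implement auth"})
hierarchy.add_task("model", {"title": "Design user model"}, parent_id="root")
hierarchy.add_task("endpoints", {"title": "Create endpoints"}, parent_id="root", dependencies=["model"])

# Only tasks whose dependencies are all completed are executable.
print(hierarchy.get_executable_tasks())    # ["root", "model"]

hierarchy.mark_completed("model", result={"files": ["models/user.py"]})
print(hierarchy.can_execute("endpoints"))  # True, its only dependency is done

# Kahn's algorithm yields a dependency-respecting order; the tree view is for debugging.
print(hierarchy.get_topological_order())
print(hierarchy.visualize_hierarchy())
```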
tunacode/core/state.py CHANGED
@@ -68,6 +68,13 @@ class SessionState:
              "cost": 0.0,
          }
      )
+     # Recursive execution tracking
+     current_recursion_depth: int = 0
+     max_recursion_depth: int = 5
+     parent_task_id: Optional[str] = None
+     task_hierarchy: dict[str, Any] = field(default_factory=dict)
+     iteration_budgets: dict[str, int] = field(default_factory=dict)
+     recursive_context_stack: list[dict[str, Any]] = field(default_factory=list)

      def update_token_count(self):
          """Calculates the total token count from messages and files in context."""
@@ -98,6 +105,40 @@ class StateManager:
                  todo.completed_at = datetime.now()
                  break

+     def push_recursive_context(self, context: dict[str, Any]) -> None:
+         """Push a new context onto the recursive execution stack."""
+         self._session.recursive_context_stack.append(context)
+         self._session.current_recursion_depth += 1
+
+     def pop_recursive_context(self) -> Optional[dict[str, Any]]:
+         """Pop the current context from the recursive execution stack."""
+         if self._session.recursive_context_stack:
+             self._session.current_recursion_depth = max(
+                 0, self._session.current_recursion_depth - 1
+             )
+             return self._session.recursive_context_stack.pop()
+         return None
+
+     def set_task_iteration_budget(self, task_id: str, budget: int) -> None:
+         """Set the iteration budget for a specific task."""
+         self._session.iteration_budgets[task_id] = budget
+
+     def get_task_iteration_budget(self, task_id: str) -> int:
+         """Get the iteration budget for a specific task."""
+         return self._session.iteration_budgets.get(task_id, 10)  # Default to 10
+
+     def can_recurse_deeper(self) -> bool:
+         """Check if we can recurse deeper without exceeding limits."""
+         return self._session.current_recursion_depth < self._session.max_recursion_depth
+
+     def reset_recursive_state(self) -> None:
+         """Reset all recursive execution state."""
+         self._session.current_recursion_depth = 0
+         self._session.parent_task_id = None
+         self._session.task_hierarchy.clear()
+         self._session.iteration_budgets.clear()
+         self._session.recursive_context_stack.clear()
+
      def remove_todo(self, todo_id: str) -> None:
          self._session.todos = [todo for todo in self._session.todos if todo.id != todo_id]

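The new `StateManager` helpers above are thin wrappers around the `SessionState` fields; a sketch of how a caller might guard recursion depth with them, assuming an existing `state_manager` instance and a hypothetical subtask loop:

```python
def descend_into_subtask(state_manager, task_id: str, context: dict) -> None:
    """Hypothetical caller showing the intended push/pop discipline."""
    if not state_manager.can_recurse_deeper():
        return  # would exceed max_recursion_depth (default 5)

    state_manager.push_recursive_context({"task_id": task_id, **context})
    try:
        budget = state_manager.get_task_iteration_budget(task_id)  # defaults to 10
        for _ in range(budget):
            ...  # one iteration of the subtask would run here
    finally:
        state_manager.pop_recursive_context()
```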
tunacode/exceptions.py CHANGED
@@ -114,3 +114,26 @@ class TooBroadPatternError(ToolExecutionError):
              f"Pattern '{pattern}' is too broad - no matches found within {timeout_seconds}s. "
              "Please use a more specific pattern.",
          )
+
+
+ class ToolBatchingJSONError(TunaCodeError):
+     """Raised when JSON parsing fails during tool batching after all retries are exhausted."""
+
+     def __init__(
+         self,
+         json_content: str,
+         retry_count: int,
+         original_error: OriginalError = None,
+     ):
+         self.json_content = json_content
+         self.retry_count = retry_count
+         self.original_error = original_error
+
+         # Truncate JSON content for display if too long
+         display_content = json_content[:100] + "..." if len(json_content) > 100 else json_content
+
+         super().__init__(
+             f"The model is having issues with tool batching. "
+             f"JSON parsing failed after {retry_count} retries. "
+             f"Invalid JSON: {display_content}"
+         )
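A sketch of how the new exception might be raised; the retry loop is illustrative, and only the constructor signature `(json_content, retry_count, original_error)` comes from the diff:

```python
import json

from tunacode.exceptions import ToolBatchingJSONError


def parse_tool_batch(raw: str, max_retries: int = 3) -> list:
    last_error = None
    for _ in range(max_retries):
        try:
            return json.loads(raw)
        except json.JSONDecodeError as exc:
            last_error = exc  # a real agent would re-prompt the model before retrying
    raise ToolBatchingJSONError(raw, max_retries, original_error=last_error)
```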
@@ -0,0 +1,45 @@
+ # DSPy Task Planning Prompt
+
+ This prompt is automatically generated and optimized by DSPy for TunaCode's complex task planning.
+
+ ## Format
+
+ Given the fields `complex_request`, produce the fields `subtasks_with_tools`, `total_tool_calls`, `requires_todo`, `parallelization_opportunities`.
+
+ ---
+
+ Follow the following format.
+
+ Complex Request: A complex task that needs breakdown
+ Reasoning: Let's think step by step in order to ${produce the answer}. We need to break this down into manageable subtasks...
+ Subtasks With Tools: JSON array of {task, tools, priority} objects
+ Total Tool Calls: Estimated total number of tool calls
+ Requires Todo: Whether todo tool should be used
+ Parallelization Opportunities: Number of parallel execution opportunities
+
+ ---
+
+ ## Learned Example (Selected by DSPy)
+
+ ### Example: Authentication System Implementation
+ Complex Request: Implement user authentication system with JWT tokens
+ Reasoning: Let's think step by step. This is a complex implementation task that requires multiple components. First, I need to analyze the current application structure to understand how to integrate authentication. Then I'll design the user model and database schema. Next, I'll create the authentication endpoints for registration and login. I'll need to implement JWT token generation and validation. Finally, I'll add tests to ensure everything works correctly. Several of these tasks can be done in parallel once the initial analysis is complete.
+ Subtasks With Tools: [{"task": "Analyze current app structure", "tools": ["list_dir", "grep", "read_file"], "priority": "high"}, {"task": "Design user model", "tools": ["write_file"], "priority": "high"}, {"task": "Create auth endpoints", "tools": ["write_file", "update_file"], "priority": "high"}, {"task": "Add JWT tokens", "tools": ["write_file", "grep"], "priority": "high"}, {"task": "Write tests", "tools": ["write_file", "run_command"], "priority": "medium"}]
+ Total Tool Calls: 15
+ Requires Todo: true
+ Parallelization Opportunities: 3
+
+ ---
+
+ ## Key Patterns for Complex Tasks
+
+ 1. **Break Down First**: Start with analysis/exploration before implementation
+ 2. **Priority Levels**: High for core functionality, medium for tests/docs, low for nice-to-haves
+ 3. **Tool Grouping**: Group related tools together for each subtask
+ 4. **Todo Usage**: Use todo tool for tasks with 5+ subtasks
+ 5. **Parallelization**: Identify independent subtasks that can run concurrently
+
+ ---
+
+ Complex Request: ${complex_request}
+ Reasoning: Let's think step by step...
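The `Subtasks With Tools` field is specified above as a JSON array of `{task, tools, priority}` objects; a small sketch of consuming that shape (the parsing code is hypothetical, the field names come from the format above):

```python
import json

plan_output = '[{"task": "Analyze current app structure", "tools": ["list_dir", "grep"], "priority": "high"}]'

subtasks = json.loads(plan_output)
high_priority = [s["task"] for s in subtasks if s["priority"] == "high"]
estimated_calls = sum(len(s["tools"]) for s in subtasks)
use_todo = len(subtasks) >= 5  # "Todo Usage" pattern: todo tool for 5+ subtasks
```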
@@ -0,0 +1,58 @@
+ # DSPy Tool Selection Prompt
+
+ This prompt is automatically generated and optimized by DSPy for TunaCode's tool selection.
+
+ ## Format
+
+ Given the fields `user_request`, `current_directory`, produce the fields `tools_json`, `requires_confirmation`, `reasoning`.
+
+ ---
+
+ Follow the following format.
+
+ User Request: The user's request or task
+ Current Directory: Current working directory context
+ Reasoning: Let's think step by step in order to ${produce the answer}. We ...
+ Tools Json: JSON array of tool calls with batch grouping, e.g. [[tool1, tool2, tool3], [tool4]]
+ Requires Confirmation: Whether any tools require user confirmation
+
+ ---
+
+ ## Learned Examples (Automatically Selected by DSPy)
+
+ ### Example 1: Searching for Implementation
+ User Request: Show me the authentication system implementation
+ Current Directory: .
+ Reasoning: Let's think step by step. To show the authentication implementation, I need to search for auth-related files across the codebase. I'll use grep to find files containing 'auth', list the auth directory if it exists, and use glob to find all auth-related Python files. These are all read-only operations that can be executed in parallel.
+ Tools Json: [["grep(\"auth\", \"src/\")", "list_dir(\"src/auth/\")", "glob(\"**/*auth*.py\")"]]
+ Requires Confirmation: false
+
+ ### Example 2: Reading Multiple Files (Optimal Batching)
+ User Request: Read all config files and the main module
+ Current Directory: .
+ Reasoning: Let's think step by step. I need to read multiple specific files. All of these are read operations that can be batched together for parallel execution. I'll batch them in a group of 4 for optimal performance.
+ Tools Json: [["read_file(\"config.json\")", "read_file(\"settings.py\")", "read_file(\".env\")", "read_file(\"main.py\")"]]
+ Requires Confirmation: false
+
+ ### Example 3: Search, Read, then Modify Pattern
+ User Request: Find the bug in validation and fix it
+ Current Directory: .
+ Reasoning: Let's think step by step. First, I need to search for validation-related code and errors. I'll use grep to search for error patterns and validation code, and list the validators directory. These search operations can be parallelized. After finding the issue, I'll need to read the specific file and then update it to fix the bug.
+ Tools Json: [["grep(\"error\", \"logs/\")", "grep(\"validation\", \"src/\")", "list_dir(\"src/validators/\")"], ["read_file(\"src/validators/user.py\")"], ["update_file(\"src/validators/user.py\", \"old\", \"new\")"]]
+ Requires Confirmation: true
+
+ ---
+
+ ## Key Patterns Learned by DSPy
+
+ 1. **3-4 Tool Batching**: Optimal batch size for parallel read-only operations
+ 2. **Read-Only Parallelization**: grep, list_dir, glob, read_file can run in parallel
+ 3. **Sequential Writes**: write_file, update_file, run_command, bash must run sequentially
+ 4. **Confirmation Required**: Any write/execute operation needs confirmation
+ 5. **Search → Read → Modify**: Common pattern for debugging and fixes
+
+ ---
+
+ User Request: ${user_request}
+ Current Directory: ${current_directory}
+ Reasoning: Let's think step by step...
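The `Tools Json` shape above nests tool calls into groups, where calls inside a group may run in parallel and the groups themselves run in order; a sketch of an executor that respects that convention (the `run_tool` callable is hypothetical):

```python
import asyncio
from typing import Awaitable, Callable, List


async def run_batches(batches: List[List[str]], run_tool: Callable[[str], Awaitable[object]]) -> list:
    results = []
    for group in batches:
        # read-only calls within a group execute concurrently;
        # groups run sequentially, so writes come after the reads they depend on
        results.extend(await asyncio.gather(*(run_tool(call) for call in group)))
    return results
```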
tunacode/ui/console.py CHANGED
@@ -43,7 +43,7 @@ from .prompt_manager import PromptConfig, PromptManager
  from .validators import ModelValidator

  # Create console object for backward compatibility
- console = RichConsole()
+ console = RichConsole(force_terminal=True, legacy_windows=False)

  # Create key bindings object for backward compatibility
  kb = create_key_bindings()
tunacode/ui/output.py CHANGED
@@ -19,7 +19,8 @@ from tunacode.utils.token_counter import format_token_count
  from .constants import SPINNER_TYPE
  from .decorators import create_sync_wrapper

- console = Console()
+ # Create console with explicit settings to ensure ANSI codes work properly
+ console = Console(force_terminal=True, legacy_windows=False)
  colors = DotDict(UI_COLORS)

  BANNER = """[bold cyan]