tunacode-cli 0.0.41__py3-none-any.whl → 0.0.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of tunacode-cli might be problematic.

@@ -1,13 +1,20 @@
 import asyncio
 import importlib
 import json
+import logging
 import os
 import re
 from collections.abc import Iterator
 from datetime import datetime, timezone
 from typing import Any

-from tunacode.constants import READ_ONLY_TOOLS
+from tunacode.constants import (
+    JSON_PARSE_BASE_DELAY,
+    JSON_PARSE_MAX_DELAY,
+    JSON_PARSE_MAX_RETRIES,
+    READ_ONLY_TOOLS,
+)
+from tunacode.exceptions import ToolBatchingJSONError
 from tunacode.types import (
     ErrorMessage,
     StateManager,
@@ -16,6 +23,9 @@ from tunacode.types import (
     ToolName,
 )
 from tunacode.ui import console as ui
+from tunacode.utils.retry import retry_json_parse_async
+
+logger = logging.getLogger(__name__)


 # Lazy import for Agent and Tool
@@ -167,11 +177,28 @@ async def parse_json_tool_calls(
         if brace_count == 0 and start_pos != -1:
             potential_json = text[start_pos : i + 1]
             try:
-                parsed = json.loads(potential_json)
+                # Use retry logic for JSON parsing
+                parsed = await retry_json_parse_async(
+                    potential_json,
+                    max_retries=JSON_PARSE_MAX_RETRIES,
+                    base_delay=JSON_PARSE_BASE_DELAY,
+                    max_delay=JSON_PARSE_MAX_DELAY,
+                )
                 if isinstance(parsed, dict) and "tool" in parsed and "args" in parsed:
                     potential_jsons.append((parsed["tool"], parsed["args"]))
-            except json.JSONDecodeError:
-                pass
+            except json.JSONDecodeError as e:
+                # After all retries failed
+                logger.error(f"JSON parsing failed after {JSON_PARSE_MAX_RETRIES} retries: {e}")
+                if state_manager.session.show_thoughts:
+                    await ui.error(
+                        f"Failed to parse tool JSON after {JSON_PARSE_MAX_RETRIES} retries"
+                    )
+                # Raise custom exception for better error handling
+                raise ToolBatchingJSONError(
+                    json_content=potential_json,
+                    retry_count=JSON_PARSE_MAX_RETRIES,
+                    original_error=e,
+                ) from e
             start_pos = -1

     matches = potential_jsons
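
Note on the retry helper: retry_json_parse_async is imported from tunacode.utils.retry, but its implementation is not part of this diff. Both call sites assume it re-raises json.JSONDecodeError once its retries are exhausted. A minimal sketch consistent with the call signature (the default values below are illustrative; the real ones come from the JSON_PARSE_* constants) might look like:

```python
import asyncio
import json
from typing import Any


async def retry_json_parse_async(
    json_string: str,
    max_retries: int = 3,
    base_delay: float = 0.1,
    max_delay: float = 2.0,
) -> Any:
    """Parse JSON, retrying with exponential backoff between attempts."""
    for attempt in range(max_retries):
        try:
            return json.loads(json_string)
        except json.JSONDecodeError:
            if attempt == max_retries - 1:
                raise  # callers below catch this after the final attempt
            # Exponential backoff, capped at max_delay
            await asyncio.sleep(min(base_delay * (2**attempt), max_delay))
```

Since json.loads is deterministic for a fixed input, the retries only change the outcome if the string is being filled in concurrently; otherwise they just delay the final JSONDecodeError that the handlers below convert into ToolBatchingJSONError.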
@@ -220,7 +247,13 @@ async def extract_and_execute_tool_calls(

     for match in code_matches:
         try:
-            tool_data = json.loads(match)
+            # Use retry logic for JSON parsing in code blocks
+            tool_data = await retry_json_parse_async(
+                match,
+                max_retries=JSON_PARSE_MAX_RETRIES,
+                base_delay=JSON_PARSE_BASE_DELAY,
+                max_delay=JSON_PARSE_MAX_DELAY,
+            )
             if "tool" in tool_data and "args" in tool_data:

                 class MockToolCall:
@@ -240,7 +273,22 @@ async def extract_and_execute_tool_calls(
                 if state_manager.session.show_thoughts:
                     await ui.muted(f"FALLBACK: Executed {tool_data['tool']} from code block")

-        except (json.JSONDecodeError, KeyError, Exception) as e:
+        except json.JSONDecodeError as e:
+            # After all retries failed
+            logger.error(
+                f"Code block JSON parsing failed after {JSON_PARSE_MAX_RETRIES} retries: {e}"
+            )
+            if state_manager.session.show_thoughts:
+                await ui.error(
+                    f"Failed to parse code block tool JSON after {JSON_PARSE_MAX_RETRIES} retries"
+                )
+            # Raise custom exception for better error handling
+            raise ToolBatchingJSONError(
+                json_content=match,
+                retry_count=JSON_PARSE_MAX_RETRIES,
+                original_error=e,
+            ) from e
+        except (KeyError, Exception) as e:
            if state_manager.session.show_thoughts:
                await ui.error(f"Error parsing code block tool call: {e!s}")
 
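Similarly, ToolBatchingJSONError is imported from tunacode.exceptions but not defined in this diff. Given the keyword arguments used at both raise sites, a compatible definition could look like this sketch (the message format is an assumption; only the three attribute names are taken from the call sites):

```python
import json


class ToolBatchingJSONError(Exception):
    """Raised when tool-call JSON still fails to parse after all retries."""

    def __init__(
        self,
        json_content: str,
        retry_count: int,
        original_error: json.JSONDecodeError,
    ) -> None:
        # Keep the offending payload and cause around for debugging
        self.json_content = json_content
        self.retry_count = retry_count
        self.original_error = original_error
        super().__init__(
            f"Failed to parse tool JSON after {retry_count} retries: {original_error}"
        )
```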
New file (path per its module docstring): tunacode/core/recursive/__init__.py

@@ -0,0 +1,18 @@
+"""Module: tunacode.core.recursive
+
+Recursive task execution system for complex task decomposition and execution.
+"""
+
+from .aggregator import ResultAggregator
+from .budget import BudgetManager
+from .decomposer import TaskDecomposer
+from .executor import RecursiveTaskExecutor
+from .hierarchy import TaskHierarchy
+
+__all__ = [
+    "RecursiveTaskExecutor",
+    "TaskDecomposer",
+    "TaskHierarchy",
+    "BudgetManager",
+    "ResultAggregator",
+]
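
The package __init__ simply re-exports the five public classes, so downstream code can import them from the package root, for example:

```python
from tunacode.core.recursive import BudgetManager, RecursiveTaskExecutor
```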
New file (path per its module docstring): tunacode/core/recursive/aggregator.py

@@ -0,0 +1,467 @@
+"""Module: tunacode.core.recursive.aggregator
+
+Result aggregation and context management for recursive task execution.
+"""
+
+import json
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime
+from enum import Enum
+from typing import Any, Dict, List, Optional, Set
+
+from tunacode.core.state import StateManager
+
+logger = logging.getLogger(__name__)
+
+
+class AggregationStrategy(Enum):
+    """Strategies for aggregating results."""
+
+    CONCATENATE = "concatenate"  # Simple concatenation
+    STRUCTURED = "structured"  # Preserve task structure
+    SUMMARY = "summary"  # Generate summary
+    INTELLIGENT = "intelligent"  # Use agent to merge
+
+
+class ConflictResolution(Enum):
+    """Strategies for resolving conflicts in results."""
+
+    LATEST = "latest"  # Use most recent result
+    PRIORITY = "priority"  # Use highest priority result
+    MERGE = "merge"  # Attempt to merge
+    AGENT = "agent"  # Use agent to resolve
+
+
+@dataclass
+class TaskResult:
+    """Result from a single task execution."""
+
+    task_id: str
+    task_title: str
+    result_data: Any
+    status: str  # completed, failed, partial
+    timestamp: datetime = field(default_factory=datetime.now)
+    error: Optional[str] = None
+    metadata: Dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class AggregatedResult:
+    """Aggregated result from multiple tasks."""
+
+    primary_result: Any
+    task_results: List[TaskResult]
+    strategy_used: AggregationStrategy
+    conflicts_resolved: int = 0
+    partial_failures: List[str] = field(default_factory=list)
+    metadata: Dict[str, Any] = field(default_factory=dict)
+    aggregation_time: datetime = field(default_factory=datetime.now)
+
+
+@dataclass
+class ExecutionContext:
+    """Context information during execution."""
+
+    task_id: str
+    parent_context: Dict[str, Any]
+    local_context: Dict[str, Any]
+    files_accessed: Set[str] = field(default_factory=set)
+    tools_used: List[str] = field(default_factory=list)
+    key_findings: List[str] = field(default_factory=list)
+
+
+class ResultAggregator:
+    """Aggregates results from distributed subtask executions."""
+
+    def __init__(self, state_manager: StateManager):
+        """Initialize the ResultAggregator.
+
+        Args:
+            state_manager: StateManager for accessing agents
+        """
+        self.state_manager = state_manager
+        self._context_cache: Dict[str, ExecutionContext] = {}
+
+    async def aggregate_results(
+        self,
+        task_results: List[TaskResult],
+        parent_task: Optional[Dict[str, Any]] = None,
+        strategy: AggregationStrategy = AggregationStrategy.INTELLIGENT,
+    ) -> AggregatedResult:
+        """Aggregate results from multiple subtasks.
+
+        Args:
+            task_results: List of results from subtasks
+            parent_task: Optional parent task information
+            strategy: Aggregation strategy to use
+
+        Returns:
+            AggregatedResult with merged data
+        """
+        if not task_results:
+            return AggregatedResult(
+                primary_result="No results to aggregate", task_results=[], strategy_used=strategy
+            )
+
+        # Separate successful and failed results
+        successful_results = [r for r in task_results if r.status == "completed"]
+        failed_results = [r for r in task_results if r.status == "failed"]
+        partial_results = [r for r in task_results if r.status == "partial"]
+
+        # Handle complete failure
+        if not successful_results and not partial_results:
+            return AggregatedResult(
+                primary_result="All subtasks failed",
+                task_results=task_results,
+                strategy_used=strategy,
+                partial_failures=[r.task_id for r in failed_results],
+            )
+
+        # Apply aggregation strategy
+        if strategy == AggregationStrategy.CONCATENATE:
+            result = await self._aggregate_concatenate(successful_results + partial_results)
+        elif strategy == AggregationStrategy.STRUCTURED:
+            result = await self._aggregate_structured(
+                successful_results + partial_results, parent_task
+            )
+        elif strategy == AggregationStrategy.SUMMARY:
+            result = await self._aggregate_summary(
+                successful_results + partial_results, parent_task
+            )
+        elif strategy == AggregationStrategy.INTELLIGENT:
+            result = await self._aggregate_intelligent(
+                successful_results + partial_results, parent_task
+            )
+        else:
+            result = await self._aggregate_concatenate(successful_results + partial_results)
+
+        return AggregatedResult(
+            primary_result=result,
+            task_results=task_results,
+            strategy_used=strategy,
+            partial_failures=[r.task_id for r in failed_results],
+        )
+
+    async def _aggregate_concatenate(self, results: List[TaskResult]) -> str:
+        """Simple concatenation of results.
+
+        Args:
+            results: List of successful results
+
+        Returns:
+            Concatenated string result
+        """
+        parts = []
+        for i, result in enumerate(results):
+            parts.append(f"=== Task {i + 1}: {result.task_title} ===")
+            parts.append(str(result.result_data))
+            parts.append("")
+
+        return "\n".join(parts)
+
+    async def _aggregate_structured(
+        self, results: List[TaskResult], parent_task: Optional[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Create structured aggregation preserving task relationships.
+
+        Args:
+            results: List of successful results
+            parent_task: Parent task information
+
+        Returns:
+            Structured dictionary result
+        """
+        structured_result = {
+            "parent_task": parent_task.get("title") if parent_task else "Unknown",
+            "completed_subtasks": len([r for r in results if r.status == "completed"]),
+            "partial_subtasks": len([r for r in results if r.status == "partial"]),
+            "subtask_results": [],
+        }
+
+        for result in results:
+            structured_result["subtask_results"].append(
+                {
+                    "task_id": result.task_id,
+                    "title": result.task_title,
+                    "status": result.status,
+                    "result": result.result_data,
+                    "timestamp": result.timestamp.isoformat(),
+                }
+            )
+
+        return structured_result
+
+    async def _aggregate_summary(
+        self, results: List[TaskResult], parent_task: Optional[Dict[str, Any]]
+    ) -> str:
+        """Generate a summary of results.
+
+        Args:
+            results: List of successful results
+            parent_task: Parent task information
+
+        Returns:
+            Summary string
+        """
+        summary_parts = []
+
+        if parent_task:
+            summary_parts.append(f"Summary for: {parent_task.get('title', 'Unknown Task')}")
+            summary_parts.append("")
+
+        summary_parts.append(f"Completed {len(results)} subtasks:")
+        summary_parts.append("")
+
+        for i, result in enumerate(results):
+            # Extract key information from result
+            result_str = str(result.result_data)
+            preview = result_str[:200] + "..." if len(result_str) > 200 else result_str
+
+            summary_parts.append(f"{i + 1}. {result.task_title}")
+            summary_parts.append(f"   Status: {result.status}")
+            summary_parts.append(f"   Result: {preview}")
+            summary_parts.append("")
+
+        return "\n".join(summary_parts)
+
+    async def _aggregate_intelligent(
+        self, results: List[TaskResult], parent_task: Optional[Dict[str, Any]]
+    ) -> Any:
+        """Use agent to intelligently merge results.
+
+        Args:
+            results: List of successful results
+            parent_task: Parent task information
+
+        Returns:
+            Intelligently merged result
+        """
+        agent = self.state_manager.session.agents.get("main")
+        if not agent:
+            # Fallback to summary
+            return await self._aggregate_summary(results, parent_task)
+
+        # Prepare context for agent
+        context = {
+            "parent_task": parent_task.get("description") if parent_task else "Unknown",
+            "subtask_count": len(results),
+            "subtask_results": [],
+        }
+
+        for result in results:
+            context["subtask_results"].append(
+                {
+                    "title": result.task_title,
+                    "result": str(result.result_data)[:500],  # Limit size
+                }
+            )
+
+        merge_prompt = f"""Intelligently merge and synthesize these subtask results into a coherent response.
+
+Parent Task: {context["parent_task"]}
+
+Subtask Results:
+{json.dumps(context["subtask_results"], indent=2)}
+
+Instructions:
+1. Identify the key achievements from each subtask
+2. Synthesize findings into a unified response
+3. Highlight any important patterns or insights
+4. Present the result in a clear, actionable format
+
+Provide a comprehensive but concise synthesis that addresses the original task."""
+
+        try:
+            result = await agent.run(merge_prompt)
+            return result
+        except Exception as e:
+            logger.error(f"Agent-based aggregation failed: {str(e)}")
+            # Fallback to summary
+            return await self._aggregate_summary(results, parent_task)
+
+    def create_context(
+        self, task_id: str, parent_context: Optional[Dict[str, Any]] = None
+    ) -> ExecutionContext:
+        """Create an execution context for a task.
+
+        Args:
+            task_id: Task identifier
+            parent_context: Optional parent context to inherit
+
+        Returns:
+            New execution context
+        """
+        context = ExecutionContext(
+            task_id=task_id, parent_context=parent_context or {}, local_context={}
+        )
+
+        self._context_cache[task_id] = context
+        return context
+
+    def get_context(self, task_id: str) -> Optional[ExecutionContext]:
+        """Get execution context for a task.
+
+        Args:
+            task_id: Task identifier
+
+        Returns:
+            Execution context or None
+        """
+        return self._context_cache.get(task_id)
+
+    def update_context(
+        self,
+        task_id: str,
+        updates: Dict[str, Any],
+        files: Optional[Set[str]] = None,
+        tools: Optional[List[str]] = None,
+        findings: Optional[List[str]] = None,
+    ) -> None:
+        """Update execution context for a task.
+
+        Args:
+            task_id: Task identifier
+            updates: Context updates
+            files: Files accessed
+            tools: Tools used
+            findings: Key findings
+        """
+        context = self._context_cache.get(task_id)
+        if not context:
+            logger.warning(f"No context found for task {task_id}")
+            return
+
+        context.local_context.update(updates)
+
+        if files:
+            context.files_accessed.update(files)
+        if tools:
+            context.tools_used.extend(tools)
+        if findings:
+            context.key_findings.extend(findings)
+
+    def synthesize_contexts(self, task_ids: List[str]) -> Dict[str, Any]:
+        """Synthesize contexts from multiple tasks.
+
+        Args:
+            task_ids: List of task IDs
+
+        Returns:
+            Synthesized context dictionary
+        """
+        synthesized = {
+            "files_accessed": set(),
+            "tools_used": [],
+            "key_findings": [],
+            "combined_context": {},
+        }
+
+        for task_id in task_ids:
+            context = self._context_cache.get(task_id)
+            if context:
+                synthesized["files_accessed"].update(context.files_accessed)
+                synthesized["tools_used"].extend(context.tools_used)
+                synthesized["key_findings"].extend(context.key_findings)
+                synthesized["combined_context"].update(context.local_context)
+
+        # Convert set to list for JSON serialization
+        synthesized["files_accessed"] = list(synthesized["files_accessed"])
+
+        # Remove duplicates from tools while preserving order
+        seen = set()
+        unique_tools = []
+        for tool in synthesized["tools_used"]:
+            if tool not in seen:
+                seen.add(tool)
+                unique_tools.append(tool)
+        synthesized["tools_used"] = unique_tools
+
+        return synthesized
+
+    async def resolve_conflicts(
+        self,
+        conflicting_results: List[TaskResult],
+        strategy: ConflictResolution = ConflictResolution.AGENT,
+    ) -> TaskResult:
+        """Resolve conflicts between contradictory results.
+
+        Args:
+            conflicting_results: List of conflicting results
+            strategy: Resolution strategy
+
+        Returns:
+            Single resolved result
+        """
+        if not conflicting_results:
+            raise ValueError("No results to resolve conflicts for")
+
+        if len(conflicting_results) == 1:
+            return conflicting_results[0]
+
+        if strategy == ConflictResolution.LATEST:
+            # Return most recent result
+            return max(conflicting_results, key=lambda r: r.timestamp)
+
+        elif strategy == ConflictResolution.PRIORITY:
+            # Use metadata priority if available
+            def get_priority(result: TaskResult) -> int:
+                return result.metadata.get("priority", 0)
+
+            return max(conflicting_results, key=get_priority)
+
+        elif strategy == ConflictResolution.MERGE:
+            # Simple merge attempt
+            merged_data = {}
+            for result in conflicting_results:
+                if isinstance(result.result_data, dict):
+                    merged_data.update(result.result_data)
+                else:
+                    # Can't merge non-dict results
+                    return conflicting_results[-1]
+
+            return TaskResult(
+                task_id="merged",
+                task_title="Merged Result",
+                result_data=merged_data,
+                status="completed",
+            )
+
+        elif strategy == ConflictResolution.AGENT:
+            # Use agent to resolve
+            agent = self.state_manager.session.agents.get("main")
+            if not agent:
+                # Fallback to latest
+                return max(conflicting_results, key=lambda r: r.timestamp)
+
+            conflict_data = []
+            for result in conflicting_results:
+                conflict_data.append(
+                    {
+                        "task": result.task_title,
+                        "result": str(result.result_data)[:300],
+                        "timestamp": result.timestamp.isoformat(),
+                    }
+                )
+
+            resolution_prompt = f"""Resolve conflicts between these task results:
+
+{json.dumps(conflict_data, indent=2)}
+
+Analyze the differences and provide a single, coherent result that best represents the correct outcome."""
+
+            try:
+                resolved = await agent.run(resolution_prompt)
+                return TaskResult(
+                    task_id="resolved",
+                    task_title="Conflict Resolved",
+                    result_data=resolved,
+                    status="completed",
+                )
+            except Exception as e:
+                logger.error(f"Agent conflict resolution failed: {str(e)}")
+                # Fallback to latest
+                return max(conflicting_results, key=lambda r: r.timestamp)
+
+        # Default to latest
+        return max(conflicting_results, key=lambda r: r.timestamp)
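
For orientation, here is a hedged usage sketch of the new ResultAggregator. The StateManager setup is assumed to exist elsewhere, and the demo function and sample TaskResult values are invented for illustration:

```python
from tunacode.core.recursive.aggregator import (
    AggregationStrategy,
    ResultAggregator,
    TaskResult,
)
from tunacode.core.state import StateManager


async def demo(state_manager: StateManager) -> None:
    # Hypothetical subtask results; only t1 and t2 feed the aggregation
    aggregator = ResultAggregator(state_manager)
    results = [
        TaskResult("t1", "Scan repository", {"files": 42}, status="completed"),
        TaskResult("t2", "Summarize findings", "3 issues found", status="completed"),
        TaskResult("t3", "Apply fixes", None, status="failed", error="timeout"),
    ]

    # STRUCTURED returns a plain dict and avoids the agent round-trip
    # that the default INTELLIGENT strategy performs
    aggregated = await aggregator.aggregate_results(
        results,
        parent_task={"title": "Audit codebase"},
        strategy=AggregationStrategy.STRUCTURED,
    )
    print(aggregated.primary_result["completed_subtasks"])  # -> 2
    print(aggregated.partial_failures)  # -> ["t3"]

# Run with asyncio.run(demo(state_manager)) once a StateManager is configured.
```

Note that with AGENT-based strategies (AggregationStrategy.INTELLIGENT, ConflictResolution.AGENT) the class degrades gracefully: if no "main" agent is registered on the session, it falls back to the summary or latest-timestamp behavior shown above.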