tunacode-cli 0.0.48__py3-none-any.whl → 0.0.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. Click here for more details.

Files changed (45) hide show
  1. api/auth.py +13 -0
  2. api/users.py +8 -0
  3. tunacode/__init__.py +4 -0
  4. tunacode/cli/main.py +4 -0
  5. tunacode/cli/repl.py +39 -6
  6. tunacode/configuration/defaults.py +0 -1
  7. tunacode/constants.py +7 -1
  8. tunacode/core/agents/main.py +268 -245
  9. tunacode/core/agents/utils.py +54 -6
  10. tunacode/core/logging/__init__.py +29 -0
  11. tunacode/core/logging/config.py +57 -0
  12. tunacode/core/logging/formatters.py +48 -0
  13. tunacode/core/logging/handlers.py +83 -0
  14. tunacode/core/logging/logger.py +8 -0
  15. tunacode/core/recursive/__init__.py +18 -0
  16. tunacode/core/recursive/aggregator.py +467 -0
  17. tunacode/core/recursive/budget.py +414 -0
  18. tunacode/core/recursive/decomposer.py +398 -0
  19. tunacode/core/recursive/executor.py +470 -0
  20. tunacode/core/recursive/hierarchy.py +488 -0
  21. tunacode/core/state.py +45 -0
  22. tunacode/exceptions.py +23 -0
  23. tunacode/tools/base.py +7 -1
  24. tunacode/types.py +5 -1
  25. tunacode/ui/completers.py +2 -2
  26. tunacode/ui/console.py +30 -9
  27. tunacode/ui/input.py +2 -1
  28. tunacode/ui/keybindings.py +58 -1
  29. tunacode/ui/logging_compat.py +44 -0
  30. tunacode/ui/output.py +7 -6
  31. tunacode/ui/panels.py +30 -5
  32. tunacode/ui/recursive_progress.py +380 -0
  33. tunacode/utils/retry.py +163 -0
  34. tunacode/utils/security.py +3 -2
  35. tunacode/utils/token_counter.py +1 -2
  36. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/METADATA +2 -2
  37. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/RECORD +41 -29
  38. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/top_level.txt +1 -0
  39. tunacode/core/agents/dspy_integration.py +0 -223
  40. tunacode/core/agents/dspy_tunacode.py +0 -458
  41. tunacode/prompts/dspy_task_planning.md +0 -45
  42. tunacode/prompts/dspy_tool_selection.md +0 -58
  43. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/WHEEL +0 -0
  44. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/entry_points.txt +0 -0
  45. {tunacode_cli-0.0.48.dist-info → tunacode_cli-0.0.50.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,398 @@
1
+ """Module: tunacode.core.recursive.decomposer
2
+
3
+ Task decomposition engine that uses the main agent for intelligent task analysis and breakdown.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+ from typing import Any, Dict, List, Optional
9
+
10
+ from pydantic import BaseModel, Field
11
+
12
+ from tunacode.core.state import StateManager
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
class SubtaskDefinition(BaseModel):
    """Definition of a single subtask produced by task decomposition.

    Field constraints are enforced by pydantic at construction time, so an
    instance that exists is always within the documented ranges.
    """

    # Short human-readable name for the subtask.
    title: str
    # Detailed description of what the subtask accomplishes.
    description: str
    # Indices (into the sibling subtask list) that must complete first.
    dependencies: List[int] = Field(default_factory=list)
    # Relative difficulty in [0.0, 1.0] (0.0 = trivial, 1.0 = very complex).
    estimated_complexity: float = Field(ge=0.0, le=1.0)
    # Rough number of agent iterations expected; must be at least 1.
    estimated_iterations: int = Field(ge=1)
25
+
26
+
27
class DecompositionResult(BaseModel):
    """Result of task decomposition analysis.

    Field constraints are enforced by pydantic at construction time.
    """

    # Whether the analyzed task should be split into subtasks.
    should_decompose: bool
    # Explanation of the decision, suitable for logging/display.
    reasoning: str
    # Proposed subtasks; expected to be empty when should_decompose is False.
    subtasks: List[SubtaskDefinition] = Field(default_factory=list)
    # Overall task difficulty in [0.0, 1.0].
    total_complexity: float = Field(ge=0.0, le=1.0)
    # Confidence in this analysis, in [0.0, 1.0].
    confidence: float = Field(ge=0.0, le=1.0)
35
+
36
+
37
class TaskDecomposer:
    """Intelligent task decomposition driven by the main agent.

    Falls back to simple heuristics when no agent is available.
    """

    def __init__(self, state_manager: StateManager):
        """Initialize the TaskDecomposer.

        Args:
            state_manager: StateManager instance for accessing the main agent
        """
        # Memoizes results keyed by task text plus subtask bounds.
        self._decomposition_cache: Dict[str, DecompositionResult] = {}
        self.state_manager = state_manager
48
+
49
    async def analyze_and_decompose(
        self,
        task_description: str,
        context: Optional[Dict[str, Any]] = None,
        max_subtasks: int = 5,
        min_subtasks: int = 2,
    ) -> DecompositionResult:
        """Analyze a task and decompose it if necessary.

        Asks the main agent for a JSON decomposition plan; results are
        memoized, and any failure (no agent, agent error, unparsable
        response) degrades to `_heuristic_decomposition`.

        Args:
            task_description: Description of the task to analyze
            context: Optional context about the task
            max_subtasks: Maximum number of subtasks to generate
            min_subtasks: Minimum number of subtasks to generate

        Returns:
            DecompositionResult with analysis and potential subtasks
        """
        # Check cache first.  NOTE(review): the key omits `context`, so the
        # same task with different contexts reuses one cached result —
        # confirm that is intended.
        cache_key = f"{task_description}:{max_subtasks}:{min_subtasks}"
        if cache_key in self._decomposition_cache:
            logger.debug("Using cached decomposition result")
            return self._decomposition_cache[cache_key]

        # Get the main agent; without one we can only use heuristics.
        agent = self.state_manager.session.agents.get("main")
        if not agent:
            logger.debug("Main agent not available, using heuristic decomposition")
            return self._heuristic_decomposition(task_description)

        # Build context-aware prompt (context is embedded as pretty JSON).
        context_str = ""
        if context:
            context_str = f"\n\nAdditional context:\n{json.dumps(context, indent=2)}"

        # Doubled braces ({{ }}) render as literal braces in the f-string,
        # giving the agent a concrete JSON template to fill in.
        decomposition_prompt = f"""Analyze this task and determine if it should be broken down into subtasks.

Task: {task_description}{context_str}

Consider:
1. Is this task complex enough to benefit from decomposition?
2. Can it be logically broken into {min_subtasks}-{max_subtasks} independent subtasks?
3. Would decomposition improve clarity and execution?

Provide a detailed analysis in the following JSON format:
{{
    "should_decompose": true/false,
    "reasoning": "Detailed explanation of your decision",
    "total_complexity": 0.0-1.0,
    "confidence": 0.0-1.0,
    "subtasks": [
        {{
            "title": "Short descriptive title",
            "description": "Detailed description of what this subtask accomplishes",
            "dependencies": [indices of other subtasks this depends on],
            "estimated_complexity": 0.0-1.0,
            "estimated_iterations": 1-20
        }}
    ]
}}

Guidelines for subtasks:
- Each subtask should be self-contained and testable
- Dependencies should form a valid DAG (no cycles)
- Complexity estimates: 0.0=trivial, 0.5=moderate, 1.0=very complex
- Iteration estimates: rough number of agent turns needed

Only include subtasks if should_decompose is true."""

        try:
            # Run the agent
            result = await agent.run(decomposition_prompt)

            # Parse the response into a structured result.
            decomposition = self._parse_agent_response(result, task_description)

            # Validate (dependency sanity) and cache before returning.
            decomposition = self._validate_decomposition(decomposition)
            self._decomposition_cache[cache_key] = decomposition

            return decomposition

        except Exception as e:
            # Broad catch is deliberate: any agent/parse failure falls back
            # to the heuristic path rather than propagating.
            logger.error(f"Error in task decomposition: {str(e)}")
            return self._heuristic_decomposition(task_description)
134
+
135
+ def _parse_agent_response(self, response: Any, original_task: str) -> DecompositionResult:
136
+ """Parse the agent's response into a DecompositionResult.
137
+
138
+ Args:
139
+ response: Raw response from the agent
140
+ original_task: Original task description for fallback
141
+
142
+ Returns:
143
+ Parsed DecompositionResult
144
+ """
145
+ try:
146
+ # Extract JSON from response
147
+ response_text = str(response)
148
+
149
+ # Find JSON in response (handle markdown code blocks)
150
+ import re
151
+
152
+ json_match = re.search(r"```json\s*(.*?)\s*```", response_text, re.DOTALL)
153
+ if json_match:
154
+ json_text = json_match.group(1)
155
+ else:
156
+ # Try to find raw JSON
157
+ json_match = re.search(r"\{.*\}", response_text, re.DOTALL)
158
+ if json_match:
159
+ json_text = json_match.group(0)
160
+ else:
161
+ raise ValueError("No JSON found in response")
162
+
163
+ # Parse JSON
164
+ data = json.loads(json_text)
165
+
166
+ # Convert to DecompositionResult
167
+ subtasks = []
168
+ for subtask_data in data.get("subtasks", []):
169
+ subtasks.append(
170
+ SubtaskDefinition(
171
+ title=subtask_data.get("title", "Untitled"),
172
+ description=subtask_data.get("description", ""),
173
+ dependencies=subtask_data.get("dependencies", []),
174
+ estimated_complexity=float(subtask_data.get("estimated_complexity", 0.5)),
175
+ estimated_iterations=int(subtask_data.get("estimated_iterations", 5)),
176
+ )
177
+ )
178
+
179
+ return DecompositionResult(
180
+ should_decompose=bool(data.get("should_decompose", False)),
181
+ reasoning=data.get("reasoning", "No reasoning provided"),
182
+ subtasks=subtasks,
183
+ total_complexity=float(data.get("total_complexity", 0.5)),
184
+ confidence=float(data.get("confidence", 0.7)),
185
+ )
186
+
187
+ except Exception as e:
188
+ logger.error(f"Error parsing agent response: {str(e)}")
189
+ # Fallback to simple decomposition
190
+ return DecompositionResult(
191
+ should_decompose=True,
192
+ reasoning="Failed to parse agent response, using default decomposition",
193
+ subtasks=[
194
+ SubtaskDefinition(
195
+ title=f"Analyze requirements for: {original_task[:50]}",
196
+ description="Analyze and understand the requirements",
197
+ estimated_complexity=0.3,
198
+ estimated_iterations=3,
199
+ ),
200
+ SubtaskDefinition(
201
+ title=f"Implement: {original_task[:50]}",
202
+ description="Implement the core functionality",
203
+ dependencies=[0],
204
+ estimated_complexity=0.7,
205
+ estimated_iterations=10,
206
+ ),
207
+ SubtaskDefinition(
208
+ title=f"Test and validate: {original_task[:50]}",
209
+ description="Test and validate the implementation",
210
+ dependencies=[1],
211
+ estimated_complexity=0.4,
212
+ estimated_iterations=5,
213
+ ),
214
+ ],
215
+ total_complexity=0.7,
216
+ confidence=0.3,
217
+ )
218
+
219
+ def _heuristic_decomposition(self, task_description: str) -> DecompositionResult:
220
+ """Simple heuristic decomposition when agent is not available.
221
+
222
+ Args:
223
+ task_description: Task to decompose
224
+
225
+ Returns:
226
+ Simple DecompositionResult based on heuristics
227
+ """
228
+ # Simple heuristics
229
+ word_count = len(task_description.split())
230
+ has_multiple_verbs = (
231
+ sum(
232
+ 1
233
+ for word in ["and", "then", "also", "with", "plus"]
234
+ if word in task_description.lower()
235
+ )
236
+ >= 2
237
+ )
238
+
239
+ complexity = min(1.0, (word_count / 30) * 0.5 + (0.3 if has_multiple_verbs else 0))
240
+ should_decompose = complexity >= 0.6 and word_count > 20
241
+
242
+ if should_decompose:
243
+ return DecompositionResult(
244
+ should_decompose=True,
245
+ reasoning="Task appears complex based on length and structure",
246
+ subtasks=[
247
+ SubtaskDefinition(
248
+ title="Prepare and analyze",
249
+ description=f"Prepare and analyze requirements for: {task_description[:100]}",
250
+ estimated_complexity=0.3,
251
+ estimated_iterations=3,
252
+ ),
253
+ SubtaskDefinition(
254
+ title="Core implementation",
255
+ description=f"Implement main functionality for: {task_description[:100]}",
256
+ dependencies=[0],
257
+ estimated_complexity=0.6,
258
+ estimated_iterations=8,
259
+ ),
260
+ SubtaskDefinition(
261
+ title="Finalize and verify",
262
+ description=f"Complete and verify: {task_description[:100]}",
263
+ dependencies=[1],
264
+ estimated_complexity=0.3,
265
+ estimated_iterations=4,
266
+ ),
267
+ ],
268
+ total_complexity=complexity,
269
+ confidence=0.4,
270
+ )
271
+ else:
272
+ return DecompositionResult(
273
+ should_decompose=False,
274
+ reasoning="Task is simple enough to execute directly",
275
+ subtasks=[],
276
+ total_complexity=complexity,
277
+ confidence=0.6,
278
+ )
279
+
280
+ def _validate_decomposition(self, decomposition: DecompositionResult) -> DecompositionResult:
281
+ """Validate and fix common issues in decomposition.
282
+
283
+ Args:
284
+ decomposition: Decomposition to validate
285
+
286
+ Returns:
287
+ Validated and potentially fixed decomposition
288
+ """
289
+ if not decomposition.should_decompose:
290
+ # Clear subtasks if we shouldn't decompose
291
+ decomposition.subtasks = []
292
+ return decomposition
293
+
294
+ # Validate dependencies form a DAG
295
+ if decomposition.subtasks:
296
+ n = len(decomposition.subtasks)
297
+
298
+ # Fix out-of-range dependencies
299
+ for i, subtask in enumerate(decomposition.subtasks):
300
+ subtask.dependencies = [
301
+ dep for dep in subtask.dependencies if 0 <= dep < n and dep != i
302
+ ]
303
+
304
+ # Check for cycles using DFS
305
+ def has_cycle(adj_list: Dict[int, List[int]]) -> bool:
306
+ visited = [False] * n
307
+ rec_stack = [False] * n
308
+
309
+ def dfs(node: int) -> bool:
310
+ visited[node] = True
311
+ rec_stack[node] = True
312
+
313
+ for neighbor in adj_list.get(node, []):
314
+ if not visited[neighbor]:
315
+ if dfs(neighbor):
316
+ return True
317
+ elif rec_stack[neighbor]:
318
+ return True
319
+
320
+ rec_stack[node] = False
321
+ return False
322
+
323
+ for i in range(n):
324
+ if not visited[i]:
325
+ if dfs(i):
326
+ return True
327
+ return False
328
+
329
+ # Build adjacency list
330
+ adj_list = {}
331
+ for i, subtask in enumerate(decomposition.subtasks):
332
+ for dep in subtask.dependencies:
333
+ if dep not in adj_list:
334
+ adj_list[dep] = []
335
+ adj_list[dep].append(i)
336
+
337
+ # Remove cycles if found
338
+ if has_cycle(adj_list):
339
+ logger.warning("Cycle detected in dependencies, removing all dependencies")
340
+ for subtask in decomposition.subtasks:
341
+ subtask.dependencies = []
342
+ decomposition.reasoning += " (Note: Circular dependencies were removed)"
343
+
344
+ return decomposition
345
+
346
+ def get_subtask_order(self, subtasks: List[SubtaskDefinition]) -> List[int]:
347
+ """Get the execution order for subtasks based on dependencies.
348
+
349
+ Args:
350
+ subtasks: List of subtasks with dependencies
351
+
352
+ Returns:
353
+ List of indices in execution order
354
+ """
355
+ if not subtasks:
356
+ return []
357
+
358
+ n = len(subtasks)
359
+ in_degree = [0] * n
360
+ adj_list = {i: [] for i in range(n)}
361
+
362
+ # Build graph
363
+ for i, subtask in enumerate(subtasks):
364
+ for dep in subtask.dependencies:
365
+ adj_list[dep].append(i)
366
+ in_degree[i] += 1
367
+
368
+ # Topological sort using Kahn's algorithm
369
+ queue = [i for i in range(n) if in_degree[i] == 0]
370
+ order = []
371
+
372
+ while queue:
373
+ node = queue.pop(0)
374
+ order.append(node)
375
+
376
+ for neighbor in adj_list[node]:
377
+ in_degree[neighbor] -= 1
378
+ if in_degree[neighbor] == 0:
379
+ queue.append(neighbor)
380
+
381
+ # If we couldn't process all nodes, there's a cycle (shouldn't happen after validation)
382
+ if len(order) != n:
383
+ logger.error("Dependency cycle detected during ordering")
384
+ # Return simple linear order as fallback
385
+ return list(range(n))
386
+
387
+ return order
388
+
389
+ def estimate_total_iterations(self, subtasks: List[SubtaskDefinition]) -> int:
390
+ """Estimate total iterations needed for all subtasks.
391
+
392
+ Args:
393
+ subtasks: List of subtasks
394
+
395
+ Returns:
396
+ Total estimated iterations
397
+ """
398
+ return sum(subtask.estimated_iterations for subtask in subtasks)