kubrick-cli 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kubrick_cli/__init__.py +47 -0
- kubrick_cli/agent_loop.py +274 -0
- kubrick_cli/classifier.py +194 -0
- kubrick_cli/config.py +247 -0
- kubrick_cli/display.py +154 -0
- kubrick_cli/execution_strategy.py +195 -0
- kubrick_cli/main.py +806 -0
- kubrick_cli/planning.py +319 -0
- kubrick_cli/progress.py +162 -0
- kubrick_cli/providers/__init__.py +6 -0
- kubrick_cli/providers/anthropic_provider.py +209 -0
- kubrick_cli/providers/base.py +136 -0
- kubrick_cli/providers/factory.py +161 -0
- kubrick_cli/providers/openai_provider.py +181 -0
- kubrick_cli/providers/triton_provider.py +96 -0
- kubrick_cli/safety.py +204 -0
- kubrick_cli/scheduler.py +183 -0
- kubrick_cli/setup_wizard.py +161 -0
- kubrick_cli/tools.py +400 -0
- kubrick_cli/triton_client.py +177 -0
- kubrick_cli-0.1.4.dist-info/METADATA +137 -0
- kubrick_cli-0.1.4.dist-info/RECORD +26 -0
- kubrick_cli-0.1.4.dist-info/WHEEL +5 -0
- kubrick_cli-0.1.4.dist-info/entry_points.txt +2 -0
- kubrick_cli-0.1.4.dist-info/licenses/LICENSE +21 -0
- kubrick_cli-0.1.4.dist-info/top_level.txt +1 -0
kubrick_cli/__init__.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Kubrick CLI - AI-assisted coding tool with agentic capabilities."""
|
|
2
|
+
|
|
3
|
+
__version__ = "0.2.0"
|
|
4
|
+
|
|
5
|
+
from .agent_loop import AgentLoop, CompletionDetector
|
|
6
|
+
from .classifier import TaskClassification, TaskClassifier
|
|
7
|
+
from .config import KubrickConfig
|
|
8
|
+
from .display import DisplayManager
|
|
9
|
+
from .execution_strategy import ExecutionConfig, ExecutionStrategy
|
|
10
|
+
from .main import KubrickCLI
|
|
11
|
+
from .planning import PlanningPhase
|
|
12
|
+
from .progress import ProgressTracker
|
|
13
|
+
from .providers.base import ProviderAdapter
|
|
14
|
+
from .providers.factory import ProviderFactory
|
|
15
|
+
from .providers.triton_provider import TritonProvider
|
|
16
|
+
from .providers.openai_provider import OpenAIProvider
|
|
17
|
+
from .providers.anthropic_provider import AnthropicProvider
|
|
18
|
+
from .safety import SafetyConfig, SafetyManager
|
|
19
|
+
from .scheduler import ToolScheduler
|
|
20
|
+
from .setup_wizard import SetupWizard
|
|
21
|
+
from .tools import ToolExecutor
|
|
22
|
+
from .triton_client import TritonLLMClient
|
|
23
|
+
|
|
24
|
+
__all__ = [
|
|
25
|
+
"AgentLoop",
|
|
26
|
+
"AnthropicProvider",
|
|
27
|
+
"CompletionDetector",
|
|
28
|
+
"DisplayManager",
|
|
29
|
+
"ExecutionConfig",
|
|
30
|
+
"ExecutionStrategy",
|
|
31
|
+
"KubrickCLI",
|
|
32
|
+
"KubrickConfig",
|
|
33
|
+
"OpenAIProvider",
|
|
34
|
+
"PlanningPhase",
|
|
35
|
+
"ProgressTracker",
|
|
36
|
+
"ProviderAdapter",
|
|
37
|
+
"ProviderFactory",
|
|
38
|
+
"SafetyConfig",
|
|
39
|
+
"SafetyManager",
|
|
40
|
+
"SetupWizard",
|
|
41
|
+
"TaskClassification",
|
|
42
|
+
"TaskClassifier",
|
|
43
|
+
"ToolExecutor",
|
|
44
|
+
"ToolScheduler",
|
|
45
|
+
"TritonLLMClient",
|
|
46
|
+
"TritonProvider",
|
|
47
|
+
]
|
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
"""Multi-step agentic execution loop with completion detection."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Dict, List, Tuple
|
|
5
|
+
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
|
|
8
|
+
console = Console()
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
from .display import DisplayManager
|
|
12
|
+
except ImportError:
|
|
13
|
+
DisplayManager = None
|
|
14
|
+
|
|
15
|
+
try:
|
|
16
|
+
from .scheduler import ToolScheduler
|
|
17
|
+
except ImportError:
|
|
18
|
+
ToolScheduler = None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class CompletionDetector:
    """Detects when an agent has completed its task."""

    COMPLETION_MARKERS = [
        "TASK_COMPLETE",
        "PLAN_COMPLETE",
        "[COMPLETE]",
        "[DONE]",
    ]

    @staticmethod
    def is_complete(
        response_text: str,
        has_tool_calls: bool,
        iteration: int,
        max_iterations: int,
    ) -> Tuple[bool, str]:
        """
        Determine if the agent has completed its task.

        Args:
            response_text: The agent's response text
            has_tool_calls: Whether the response contains tool calls
            iteration: Current iteration number
            max_iterations: Maximum allowed iterations

        Returns:
            Tuple of (is_complete, reason)
        """
        # Explicit completion markers take precedence over every heuristic.
        marker_hit = next(
            (m for m in CompletionDetector.COMPLETION_MARKERS if m in response_text),
            None,
        )
        if marker_hit is not None:
            return True, f"explicit_marker:{marker_hit}"

        # Hard stop once the iteration budget is exhausted.
        if iteration >= max_iterations:
            return True, "max_iterations_reached"

        # With no tools requested, a conclusive-sounding reply means we're done.
        if not has_tool_calls and CompletionDetector._looks_conclusive(response_text):
            return True, "conclusive_response"

        return False, "continuing"

    @staticmethod
    def _looks_conclusive(text: str) -> bool:
        """
        Check if text looks like a conclusive response.

        This is a heuristic to detect when the agent is done without
        explicitly saying so.
        """
        lowered = text.lower()

        conclusive_patterns = (
            r"\b(done|completed|finished|ready)\b",
            r"\b(successfully|all set|good to go)\b",
            r"\bhere(?:'s| is) (?:the |a )?(?:summary|result)",
            r"\b(?:task|work|changes) (?:is |are )?(?:complete|done|finished)",
            r"\blet me know if you need",
            r"\bthat(?:'s| should do it)",
            r"\beverything(?:'s| is) (?:set|ready|done)",
        )

        # Any single pattern match is enough to call the reply conclusive.
        return any(re.search(p, lowered) for p in conclusive_patterns)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class AgentLoop:
    """
    Multi-step agentic execution loop.

    Runs iteratively until the task is complete, the wall-clock timeout
    elapses, or max iterations are reached.
    """

    def __init__(
        self,
        llm_client,
        tool_executor,
        max_iterations: int = 15,
        max_tools_per_turn: int = 5,
        timeout_seconds: int = 600,
        stream_options: Dict = None,
        display_manager=None,
        tool_scheduler=None,
    ):
        """
        Initialize the agent loop.

        Args:
            llm_client: LLM client instance (e.g., TritonLLMClient)
            tool_executor: Tool executor instance
            max_iterations: Maximum number of iterations
            max_tools_per_turn: Maximum tools per turn
            timeout_seconds: Total wall-clock budget for run(), in seconds
            stream_options: Optional streaming options
            display_manager: Optional DisplayManager instance for natural language display
            tool_scheduler: Optional ToolScheduler for parallel execution
        """
        self.llm_client = llm_client
        self.tool_executor = tool_executor
        self.max_iterations = max_iterations
        self.max_tools_per_turn = max_tools_per_turn
        self.timeout_seconds = timeout_seconds
        # `or {}` keeps a None default from leaking into generate_streaming.
        self.stream_options = stream_options or {}
        self.display_manager = display_manager
        self.tool_scheduler = tool_scheduler

    def run(
        self,
        messages: List[Dict],
        tool_parser,
        display_callback=None,
    ) -> Dict:
        """
        Run the agentic loop.

        Args:
            messages: Conversation messages (will be modified in-place)
            tool_parser: Function to parse tool calls from text
            display_callback: Optional callback for displaying streaming response

        Returns:
            Dict with execution results and metadata (success, completion_reason
            or error, iterations, tool_calls)
        """
        import time  # local import: keeps the module's import surface unchanged

        iteration = 0
        total_tool_calls = 0
        # FIX: timeout_seconds was accepted and documented but never enforced.
        # Use a monotonic deadline so wall-clock adjustments can't skew it.
        deadline = time.monotonic() + self.timeout_seconds

        while iteration < self.max_iterations:
            # Enforce the overall time budget before starting another turn.
            if time.monotonic() >= deadline:
                console.print(
                    f"\n[yellow]⚠ Timeout ({self.timeout_seconds}s) reached[/yellow]"
                )
                return {
                    "success": True,
                    "completion_reason": "timeout",
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            iteration += 1

            console.print(
                f"\n[dim]→ Agent iteration {iteration}/{self.max_iterations}[/dim]"
            )

            console.print("[bold cyan]Assistant:[/bold cyan]")
            chunks = []

            try:
                # Stream tokens to the console while accumulating the full reply.
                for chunk in self.llm_client.generate_streaming(
                    messages, stream_options=self.stream_options
                ):
                    console.print(chunk, end="")
                    chunks.append(chunk)

                console.print("\n")

            except Exception as e:
                # A generation failure ends the whole loop, not just this turn.
                console.print(f"\n[red]Error during LLM generation: {e}[/red]")
                return {
                    "success": False,
                    "error": str(e),
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            response_text = "".join(chunks)

            messages.append({"role": "assistant", "content": response_text})

            if display_callback:
                display_callback(response_text)

            tool_calls = tool_parser(response_text)

            is_complete, reason = CompletionDetector.is_complete(
                response_text=response_text,
                has_tool_calls=len(tool_calls) > 0,
                iteration=iteration,
                max_iterations=self.max_iterations,
            )

            if is_complete:
                console.print(f"\n[green]✓ Task complete ({reason})[/green]")
                return {
                    "success": True,
                    "completion_reason": reason,
                    "iterations": iteration,
                    "tool_calls": total_tool_calls,
                }

            if tool_calls:
                # Cap runaway tool fan-out to the configured per-turn limit.
                if len(tool_calls) > self.max_tools_per_turn:
                    console.print(
                        f"[yellow]⚠ Too many tool calls ({len(tool_calls)}), "
                        f"limiting to {self.max_tools_per_turn}[/yellow]"
                    )
                    tool_calls = tool_calls[: self.max_tools_per_turn]

                console.print(
                    f"\n[yellow]Executing {len(tool_calls)} tool(s)...[/yellow]\n"
                )

                # The scheduler only pays off when there is more than one call.
                if self.tool_scheduler and len(tool_calls) > 1:
                    execution_results = self.tool_scheduler.execute_tools(tool_calls)
                else:
                    execution_results = [
                        self.tool_executor.execute(tool_name, params)
                        for tool_name, params in tool_calls
                    ]

                tool_results = []
                for (tool_name, parameters), result in zip(
                    tool_calls, execution_results
                ):
                    if self.display_manager:
                        self.display_manager.display_tool_call(tool_name, parameters)
                        self.display_manager.display_tool_result(
                            tool_name, result, result["success"]
                        )
                    else:
                        console.print(f"[cyan]→ Called {tool_name}[/cyan]")
                        if result["success"]:
                            console.print(f"[green]✓ {tool_name} succeeded[/green]")
                        else:
                            console.print(
                                f"[red]✗ {tool_name} failed: {result['error']}[/red]"
                            )

                    if result["success"]:
                        tool_results.append(
                            f"Tool: {tool_name}\nResult: {result['result']}"
                        )
                    else:
                        tool_results.append(
                            f"Tool: {tool_name}\nError: {result['error']}"
                        )

                    total_tool_calls += 1

                # Feed tool output back to the model as a synthetic user turn.
                tool_results_text = "\n\n".join(tool_results)
                messages.append(
                    {
                        "role": "user",
                        "content": f"Tool execution results:\n\n{tool_results_text}",
                    }
                )

                continue

            console.print(
                "[yellow]⚠ No tool calls and task not marked complete. Continuing...[/yellow]"
            )

        console.print(
            f"\n[yellow]⚠ Max iterations ({self.max_iterations}) reached[/yellow]"
        )
        return {
            "success": True,
            "completion_reason": "max_iterations",
            "iterations": iteration,
            "tool_calls": total_tool_calls,
        }
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"""Task complexity classification for intelligent routing."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import Dict, List
|
|
6
|
+
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
console = Console()
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class TaskClassification:
    """Result of task classification.

    Produced by TaskClassifier.classify; the `complexity` tier drives the
    caller's choice of execution strategy.
    """

    # Tier name; TaskClassifier upper-cases it ("CONVERSATIONAL", "SIMPLE", "COMPLEX").
    complexity: str  # CONVERSATIONAL, SIMPLE, COMPLEX
    # Short human-readable justification reported by the classifier.
    reasoning: str
    # Classifier's rough estimate of how many tool invocations the task needs.
    estimated_tool_calls: int
    # Whether any tool calls are expected at all (False for pure conversation).
    requires_tools: bool
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class TaskClassifier:
    """
    Classifies tasks into three tiers to determine execution strategy.

    CONVERSATIONAL:
        - Greetings, questions, general chat
        - No tool calls needed
        - Single-turn response sufficient

    SIMPLE:
        - Single file operations
        - Clear, specific scope
        - Estimated 1-5 tool calls
        - Low iteration count (3-5)

    COMPLEX:
        - Multi-file operations
        - Architectural changes
        - Uncertain scope
        - Estimated >5 tool calls
        - Full iteration count (15)
    """

    # The only tier names downstream code understands; anything else from the
    # LLM is coerced to the safe middle tier.
    _VALID_COMPLEXITIES = frozenset({"CONVERSATIONAL", "SIMPLE", "COMPLEX"})

    def __init__(self, llm_client):
        """
        Initialize task classifier.

        Args:
            llm_client: LLM client instance for classification
        """
        self.llm_client = llm_client

    def classify(
        self, user_message: str, conversation_history: List[Dict] = None
    ) -> TaskClassification:
        """
        Classify a task into CONVERSATIONAL, SIMPLE, or COMPLEX.

        Args:
            user_message: The user's task request
            conversation_history: Optional conversation context
                (NOTE(review): currently accepted but not folded into the
                classification prompt — kept for interface compatibility)

        Returns:
            TaskClassification object with detailed classification; falls back
            to a SIMPLE classification when the LLM reply cannot be parsed.
        """
        classification_messages = [
            {
                "role": "system",
                "content": """You are a task complexity classifier for a coding assistant.

Your job is to classify tasks into three tiers: CONVERSATIONAL, SIMPLE, or COMPLEX.

# Classification Criteria

**CONVERSATIONAL:**
- Greetings, questions, general chat
- No file operations or code changes needed
- No tools required
- Estimated 0 tool calls
- Examples:
  - "Hi", "Hello", "How are you?"
  - "What can you do?"
  - "Explain what async/await means"
  - "Tell me about this project"

**SIMPLE:**
- Single file operations (read, write, edit one file)
- Clear, specific scope with well-defined requirements
- Estimated 1-5 tool calls
- Examples:
  - "Read config.py"
  - "Create a hello world script"
  - "Fix the typo in line 42 of main.py"
  - "List all Python files"

**COMPLEX:**
- Multi-file operations affecting multiple files
- Architectural changes or refactoring
- Uncertain scope requiring exploration
- Estimated >5 tool calls
- Examples:
  - "Add logging to all Python files"
  - "Refactor the authentication system"
  - "Implement a new feature"
  - "Debug why the tests are failing"

# Response Format

Respond with ONLY a JSON object:
```json
{
  "complexity": "CONVERSATIONAL",
  "reasoning": "Brief explanation why",
  "estimated_tool_calls": 0,
  "requires_tools": false
}
```

OR

```json
{
  "complexity": "SIMPLE",
  "reasoning": "Brief explanation why",
  "estimated_tool_calls": 2,
  "requires_tools": true
}
```

OR

```json
{
  "complexity": "COMPLEX",
  "reasoning": "Brief explanation why",
  "estimated_tool_calls": 10,
  "requires_tools": true
}
```

Respond with ONLY the JSON object, no other text.""",
            },
            {"role": "user", "content": user_message},
        ]

        try:
            response = self.llm_client.generate(
                classification_messages, stream_options={}
            )

            # Extract the outermost {...} span; models often wrap the JSON
            # in code fences or explanatory text despite instructions.
            json_start = response.find("{")
            json_end = response.rfind("}") + 1

            if json_start >= 0 and json_end > json_start:
                json_str = response[json_start:json_end]
                result = json.loads(json_str)

                complexity = str(result.get("complexity", "SIMPLE")).upper()
                # FIX: reject hallucinated tier names instead of passing an
                # arbitrary string downstream.
                if complexity not in self._VALID_COMPLEXITIES:
                    console.print(
                        f"[yellow]⚠ Unknown complexity {complexity!r}, defaulting to SIMPLE[/yellow]"
                    )
                    complexity = "SIMPLE"
                reasoning = result.get("reasoning", "No reasoning provided")
                # FIX: coerce defensively so one malformed field doesn't
                # discard an otherwise-valid classification via the broad
                # except below.
                try:
                    estimated_tool_calls = int(result.get("estimated_tool_calls", 0))
                except (TypeError, ValueError):
                    estimated_tool_calls = 0
                requires_tools = bool(result.get("requires_tools", False))

                console.print(
                    f"[dim]→ Task classified as {complexity}: {reasoning}[/dim]"
                )

                return TaskClassification(
                    complexity=complexity,
                    reasoning=reasoning,
                    estimated_tool_calls=estimated_tool_calls,
                    requires_tools=requires_tools,
                )

            console.print(
                "[yellow]⚠ Classification parsing failed, defaulting to SIMPLE[/yellow]"
            )
            return TaskClassification(
                complexity="SIMPLE",
                reasoning="Parsing failed",
                estimated_tool_calls=3,
                requires_tools=True,
            )

        except Exception as e:
            # Best-effort fallback: classification must never crash the CLI.
            console.print(
                f"[yellow]⚠ Classification error ({e}), defaulting to SIMPLE[/yellow]"
            )
            return TaskClassification(
                complexity="SIMPLE",
                reasoning=f"Error: {e}",
                estimated_tool_calls=3,
                requires_tools=True,
            )
|