kubrick-cli 0.1.4 (py3-none-any.whl)

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
kubrick_cli/config.py ADDED
@@ -0,0 +1,247 @@
+ """Configuration management for Kubrick CLI."""
+
+ import json
+ from pathlib import Path
+ from typing import Any, Dict, Optional
+
+
+ class KubrickConfig:
+     """Manages Kubrick configuration and data directories."""
+
+     def __init__(self, skip_wizard: bool = False):
+         """
+         Initialize config manager and ensure directories exist.
+
+         Args:
+             skip_wizard: Skip setup wizard even if config doesn't exist (for testing)
+         """
+         # Use ~/.kubrick in all environments (Docker and non-Docker)
+         # In Docker, this resolves to /home/kubrick/.kubrick
+         # Outside Docker, this resolves to the current user's home directory
+         self.kubrick_dir = Path.home() / ".kubrick"
+         self.config_file = self.kubrick_dir / "config.json"
+         self.conversations_dir = self.kubrick_dir / "conversations"
+
+         self._ensure_directories()
+
+         self.config = self._load_config(skip_wizard=skip_wizard)
+
+     def _ensure_directories(self):
+         """Create necessary directories if they don't exist."""
+         self.kubrick_dir.mkdir(exist_ok=True)
+         self.conversations_dir.mkdir(exist_ok=True)
+
+     def _load_config(self, skip_wizard: bool = False) -> Dict[str, Any]:
+         """
+         Load configuration from file or create default.
+
+         Args:
+             skip_wizard: Skip setup wizard even if config doesn't exist
+
+         Returns:
+             Configuration dictionary
+         """
+         if self.config_file.exists():
+             try:
+                 with open(self.config_file, "r") as f:
+                     loaded_config = json.load(f)
+
+                 default_config = self._get_default_config()
+                 default_config.update(loaded_config)
+                 return default_config
+
+             except (json.JSONDecodeError, IOError):
+                 return self._get_default_config()
+         else:
+             if not skip_wizard:
+                 from .setup_wizard import SetupWizard
+
+                 wizard_config = SetupWizard.run()
+
+                 config = self._get_default_config()
+                 config.update(wizard_config)
+
+                 self._save_config(config)
+                 return config
+             else:
+                 config = self._get_default_config()
+                 self._save_config(config)
+                 return config
+
+     def _get_default_config(self) -> Dict[str, Any]:
+         """Get default configuration values."""
+         return {
+             # Provider settings
+             "provider": "triton",
+             # Triton settings
+             "triton_url": "localhost:8000",
+             "triton_model": "llm_decoupled",
+             # OpenAI settings
+             "openai_api_key": None,
+             "openai_model": "gpt-4",
+             # Anthropic settings
+             "anthropic_api_key": None,
+             "anthropic_model": "claude-sonnet-4-5-20250929",
+             # Legacy compatibility
+             "model_name": "llm_decoupled",
+             "use_openai": False,
+             "default_working_dir": None,
+             # Agent loop settings
+             "max_iterations": 15,
+             "max_tools_per_turn": 5,
+             "total_timeout_seconds": 600,
+             "enable_parallel_tools": True,
+             "max_parallel_workers": 3,
+             # Safety settings
+             "require_dangerous_command_confirmation": True,
+             "tool_timeout_seconds": 30,
+             "max_file_size_mb": 10,
+             # Display settings
+             "display_mode": "natural",
+             "show_tool_results": True,
+             "show_progress": True,
+             # Task classification settings
+             "enable_task_classification": True,
+             "enable_planning_phase": True,
+             # Conversation settings
+             "auto_save_conversations": True,
+             "max_conversations": 100,
+         }
+
+     def _save_config(self, config: Dict[str, Any]):
+         """Save configuration to file."""
+         with open(self.config_file, "w") as f:
+             json.dump(config, f, indent=2)
+
+     def get(self, key: str, default: Any = None) -> Any:
+         """Get a configuration value."""
+         return self.config.get(key, default)
+
+     def set(self, key: str, value: Any):
+         """Set a configuration value and save."""
+         self.config[key] = value
+         self._save_config(self.config)
+
+     def get_all(self) -> Dict[str, Any]:
+         """Get all configuration values."""
+         return self.config.copy()
+
+     def save_conversation(
+         self, conversation_id: str, messages: list, metadata: Optional[Dict] = None
+     ):
+         """
+         Save a conversation to disk.
+
+         Args:
+             conversation_id: Unique identifier for the conversation (e.g., timestamp)
+             messages: List of message dictionaries
+             metadata: Optional metadata (working_dir, triton_url, etc.)
+         """
+         conversation_file = self.conversations_dir / f"{conversation_id}.json"
+
+         data = {
+             "id": conversation_id,
+             "messages": messages,
+             "metadata": metadata or {},
+         }
+
+         with open(conversation_file, "w") as f:
+             json.dump(data, f, indent=2)
+
+         self._cleanup_old_conversations()
+
+     def load_conversation(self, conversation_id: str) -> Optional[Dict]:
+         """
+         Load a conversation from disk.
+
+         Supports two modes:
+         1. By ID: Loads from ~/.kubrick/conversations/<id>.json
+         2. By path: Loads from absolute or relative file path
+
+         Args:
+             conversation_id: Conversation ID or file path
+
+         Returns:
+             Dictionary with 'id', 'messages', and 'metadata', or None if not found
+         """
+         if (
+             "/" in conversation_id
+             or "\\" in conversation_id
+             or conversation_id.endswith(".json")
+         ):
+             conversation_file = Path(conversation_id).expanduser().resolve()
+         else:
+             conversation_file = self.conversations_dir / f"{conversation_id}.json"
+
+         if not conversation_file.exists():
+             return None
+
+         try:
+             with open(conversation_file, "r") as f:
+                 return json.load(f)
+         except (json.JSONDecodeError, IOError):
+             return None
+
+     def list_conversations(self, limit: Optional[int] = None) -> list:
+         """
+         List all saved conversations.
+
+         Args:
+             limit: Optional limit on number of conversations to return
+
+         Returns:
+             List of conversation metadata sorted by modification time (newest first)
+         """
+         conversations = []
+
+         for conv_file in self.conversations_dir.glob("*.json"):
+             try:
+                 with open(conv_file, "r") as f:
+                     data = json.load(f)
+
+                 conversations.append(
+                     {
+                         "id": data.get("id", conv_file.stem),
+                         "metadata": data.get("metadata", {}),
+                         "message_count": len(data.get("messages", [])),
+                         "modified": conv_file.stat().st_mtime,
+                     }
+                 )
+             except (json.JSONDecodeError, IOError):
+                 continue
+
+         conversations.sort(key=lambda x: x["modified"], reverse=True)
+
+         if limit is not None:
+             conversations = conversations[:limit]
+
+         return conversations
+
+     def delete_conversation(self, conversation_id: str) -> bool:
+         """
+         Delete a conversation.
+
+         Args:
+             conversation_id: Unique identifier for the conversation
+
+         Returns:
+             True if deleted, False if not found
+         """
+         conversation_file = self.conversations_dir / f"{conversation_id}.json"
+
+         if conversation_file.exists():
+             conversation_file.unlink()
+             return True
+         return False
+
+     def _cleanup_old_conversations(self):
+         """Remove oldest conversations if we exceed max_conversations."""
+         max_conversations = self.config.get("max_conversations", 100)
+
+         conversations = list(self.conversations_dir.glob("*.json"))
+
+         if len(conversations) > max_conversations:
+             conversations.sort(key=lambda x: x.stat().st_mtime)
+
+             for conv_file in conversations[: len(conversations) - max_conversations]:
+                 conv_file.unlink()
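
For orientation, a minimal usage sketch of KubrickConfig as defined above. It assumes the package is installed and importable; skip_wizard=True bypasses the interactive setup wizard, and the conversation ID is a made-up example value.

from kubrick_cli.config import KubrickConfig

# Creates ~/.kubrick and ~/.kubrick/conversations on first use and
# writes the defaults to ~/.kubrick/config.json (wizard skipped).
config = KubrickConfig(skip_wizard=True)

print(config.get("provider"))      # "triton" by default
config.set("max_iterations", 10)   # updates the value and saves immediately

# Save, list, and re-load a conversation (the ID here is hypothetical).
config.save_conversation(
    "2024-01-01T12-00-00",
    messages=[{"role": "user", "content": "hello"}],
    metadata={"working_dir": "/tmp"},
)
print(config.list_conversations(limit=5))
print(config.load_conversation("2024-01-01T12-00-00"))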
kubrick_cli/display.py ADDED
@@ -0,0 +1,154 @@
+ """Display manager for natural language tool output."""
+
+ import json
+ from typing import Any, Dict
+
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.syntax import Syntax
+
+ console = Console()
+
+
+ class DisplayManager:
+     """
+     Manages display of tool calls and results in natural language.
+
+     Supports multiple display modes:
+     - natural: Human-friendly descriptions (default)
+     - json: Raw JSON panels (old behavior)
+     - verbose: Both natural + JSON
+     """
+
+     def __init__(self, config: Dict):
+         """
+         Initialize display manager.
+
+         Args:
+             config: Configuration dict with display settings
+         """
+         self.display_mode = config.get("display_mode", "natural")
+         self.show_tool_results = config.get("show_tool_results", True)
+
+     def display_tool_call(self, tool_name: str, parameters: Dict[str, Any]):
+         """
+         Display a tool call in the configured mode.
+
+         Args:
+             tool_name: Name of the tool being called
+             parameters: Tool parameters
+         """
+         if self.display_mode == "natural":
+             self._display_natural_tool_call(tool_name, parameters)
+         elif self.display_mode == "json":
+             self._display_json_tool_call(tool_name, parameters)
+         elif self.display_mode == "verbose":
+             self._display_natural_tool_call(tool_name, parameters)
+             self._display_json_tool_call(tool_name, parameters)
+
+     def display_tool_result(
+         self, tool_name: str, result: Dict[str, Any], success: bool
+     ):
+         """
+         Display a tool execution result.
+
+         Args:
+             tool_name: Name of the tool that was executed
+             result: Result dictionary
+             success: Whether the execution succeeded
+         """
+         if not self.show_tool_results:
+             return
+
+         if self.display_mode == "natural":
+             self._display_natural_result(tool_name, result, success)
+         elif self.display_mode in ("json", "verbose"):
+             self._display_json_result(tool_name, result, success)
+
+     def _display_natural_tool_call(self, tool_name: str, parameters: Dict[str, Any]):
+         """Display tool call in natural language."""
+         description = self._get_natural_description(tool_name, parameters)
+         console.print(f"[cyan]→ {description}[/cyan]")
+
+     def _display_json_tool_call(self, tool_name: str, parameters: Dict[str, Any]):
+         """Display tool call as JSON panel."""
+         tool_data = {"tool": tool_name, "parameters": parameters}
+         json_str = json.dumps(tool_data, indent=2)
+         syntax = Syntax(json_str, "json", theme="monokai", line_numbers=False)
+         console.print(Panel(syntax, title="Tool Call", border_style="cyan"))
+
+     def _display_natural_result(
+         self, tool_name: str, result: Dict[str, Any], success: bool
+     ):
+         """Display result in natural language."""
+         if success:
+             result_text = result.get("result", "")
+
+             if len(result_text) > 200:
+                 preview = result_text[:200] + "..."
+                 console.print(f"[green]✓ {tool_name} succeeded[/green]")
+                 console.print(f"[dim]{preview}[/dim]")
+             else:
+                 console.print(f"[green]✓ {tool_name} succeeded[/green]")
+         else:
+             error = result.get("error", "Unknown error")
+             console.print(f"[red]✗ {tool_name} failed: {error}[/red]")
+
+     def _display_json_result(
+         self, tool_name: str, result: Dict[str, Any], success: bool
+     ):
+         """Display result as JSON panel."""
+         json_str = json.dumps(result, indent=2)
+         syntax = Syntax(json_str, "json", theme="monokai", line_numbers=False)
+         border_style = "green" if success else "red"
+         title = f"Result: {tool_name}"
+         console.print(Panel(syntax, title=title, border_style=border_style))
+
+     def _get_natural_description(
+         self, tool_name: str, parameters: Dict[str, Any]
+     ) -> str:
+         """
+         Generate natural language description for a tool call.
+
+         Args:
+             tool_name: Name of the tool
+             parameters: Tool parameters
+
+         Returns:
+             Human-friendly description
+         """
+         if tool_name == "read_file":
+             file_path = parameters.get("file_path", "unknown")
+             return f"Reading {file_path}"
+
+         elif tool_name == "write_file":
+             file_path = parameters.get("file_path", "unknown")
+             content_length = len(parameters.get("content", ""))
+             return f"Writing {content_length} characters to {file_path}"
+
+         elif tool_name == "edit_file":
+             file_path = parameters.get("file_path", "unknown")
+             return f"Editing {file_path}"
+
+         elif tool_name == "list_files":
+             pattern = parameters.get("pattern", "*")
+             directory = parameters.get("directory", ".")
+             return f"Listing files matching '{pattern}' in {directory}"
+
+         elif tool_name == "search_files":
+             pattern = parameters.get("pattern", "")
+             file_pattern = parameters.get("file_pattern", "**/*")
+             return f"Searching for '{pattern}' in files matching '{file_pattern}'"
+
+         elif tool_name == "run_bash":
+             command = parameters.get("command", "")
+             if len(command) > 60:
+                 command = command[:60] + "..."
+             return f"Running bash command: {command}"
+
+         elif tool_name == "create_directory":
+             path = parameters.get("path", "unknown")
+             return f"Creating directory {path}"
+
+         else:
+             return f"Calling {tool_name}"
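
A short sketch of the display modes in action, using only the methods shown above; the tool names and parameters mirror the cases handled by _get_natural_description, and the output comments paraphrase what the code prints.

from kubrick_cli.display import DisplayManager

dm = DisplayManager({"display_mode": "natural", "show_tool_results": True})

dm.display_tool_call("read_file", {"file_path": "src/main.py"})
# prints: → Reading src/main.py

dm.display_tool_result("read_file", {"result": "print('hi')"}, success=True)
# prints: ✓ read_file succeeded

dm.display_tool_result("run_bash", {"error": "command not found"}, success=False)
# prints: ✗ run_bash failed: command not found

Switching display_mode to "json" (or "verbose") routes the same calls through the rich Panel/Syntax rendering instead.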
@@ -0,0 +1,195 @@
+ """Execution strategy configuration based on task complexity."""
+
+ from dataclasses import dataclass
+ from typing import Dict, Optional
+
+ from .classifier import TaskClassification
+
+
+ @dataclass
+ class ExecutionConfig:
+     """Configuration for task execution."""
+
+     mode: str  # "conversational", "agentic_simple", "agentic_complex"
+     max_iterations: int
+     use_agent_loop: bool
+     use_planning: bool
+     model_tier: str  # "fast", "balanced", "best"
+     temperature: float
+     max_tokens: Optional[int]
+     hyperparameters: Dict
+
+
+ class ExecutionStrategy:
+     """
+     Determines execution strategy based on task classification.
+
+     Optimizes for:
+     - Cost (use smaller models for simple tasks)
+     - Speed (fewer iterations for simple tasks)
+     - Quality (best models for complex tasks)
+     """
+
+     MODEL_TIERS = {
+         "openai": {
+             "fast": "gpt-3.5-turbo",
+             "balanced": "gpt-4",
+             "best": "gpt-4",
+         },
+         "anthropic": {
+             "fast": "claude-haiku-4-5-20251001",
+             "balanced": "claude-sonnet-4-5-20250929",
+             "best": "claude-opus-4-1-20250805",
+         },
+         "triton": {
+             "fast": None,
+             "balanced": None,
+             "best": None,
+         },
+     }
+
+     @staticmethod
+     def get_execution_config(
+         classification: Optional[TaskClassification],
+         provider_name: str,
+         default_model: str,
+     ) -> ExecutionConfig:
+         """
+         Get execution configuration based on task classification.
+
+         Args:
+             classification: Task classification result (None for fallback)
+             provider_name: Provider name (triton, openai, anthropic)
+             default_model: Default model name from config
+
+         Returns:
+             ExecutionConfig with optimized settings
+         """
+         # Handle None classification (fallback to simple)
+         if classification is None:
+             return ExecutionStrategy._simple_config(provider_name, default_model)
+
+         complexity = classification.complexity
+
+         if complexity == "CONVERSATIONAL":
+             return ExecutionStrategy._conversational_config(
+                 provider_name, default_model
+             )
+         elif complexity == "SIMPLE":
+             return ExecutionStrategy._simple_config(provider_name, default_model)
+         elif complexity == "COMPLEX":
+             return ExecutionStrategy._complex_config(provider_name, default_model)
+         else:
+             # Fallback to simple
+             return ExecutionStrategy._simple_config(provider_name, default_model)
+
+     @staticmethod
+     def _conversational_config(
+         provider_name: str, default_model: str
+     ) -> ExecutionConfig:
+         """
+         Configuration for conversational tasks.
+
+         - Single-turn response
+         - No agent loop
+         - Fast/cheap model
+         - Higher temperature for creativity
+         """
+         # Get fast model for provider
+         model_tier = ExecutionStrategy.MODEL_TIERS.get(
+             provider_name, ExecutionStrategy.MODEL_TIERS["triton"]
+         )
+         fast_model = model_tier.get("fast") or default_model
+
+         return ExecutionConfig(
+             mode="conversational",
+             max_iterations=1,
+             use_agent_loop=False,
+             use_planning=False,
+             model_tier="fast",
+             temperature=0.7,  # Higher for natural conversation
+             max_tokens=1000,  # Shorter responses
+             hyperparameters={
+                 "model": fast_model,
+                 "temperature": 0.7,
+                 "max_tokens": 1000,
+             },
+         )
+
+     @staticmethod
+     def _simple_config(provider_name: str, default_model: str) -> ExecutionConfig:
+         """
+         Configuration for simple tasks.
+
+         - Agent loop with low iterations
+         - No planning phase
+         - Balanced model
+         - Medium temperature
+         """
+         model_tier = ExecutionStrategy.MODEL_TIERS.get(
+             provider_name, ExecutionStrategy.MODEL_TIERS["triton"]
+         )
+         balanced_model = model_tier.get("balanced") or default_model
+
+         return ExecutionConfig(
+             mode="agentic_simple",
+             max_iterations=5,  # Lower for simple tasks
+             use_agent_loop=True,
+             use_planning=False,  # Skip planning for simple tasks
+             model_tier="balanced",
+             temperature=0.4,  # Lower for accuracy
+             max_tokens=2000,
+             hyperparameters={
+                 "model": balanced_model,
+                 "temperature": 0.4,
+                 "max_tokens": 2000,
+             },
+         )
+
+     @staticmethod
+     def _complex_config(provider_name: str, default_model: str) -> ExecutionConfig:
+         """
+         Configuration for complex tasks.
+
+         - Full agent loop
+         - Planning phase available
+         - Best model
+         - Low temperature for consistency
+         """
+         model_tier = ExecutionStrategy.MODEL_TIERS.get(
+             provider_name, ExecutionStrategy.MODEL_TIERS["triton"]
+         )
+         best_model = model_tier.get("best") or default_model
+
+         return ExecutionConfig(
+             mode="agentic_complex",
+             max_iterations=15,  # Full iterations
+             use_agent_loop=True,
+             use_planning=True,  # Enable planning for complex tasks
+             model_tier="best",
+             temperature=0.3,  # Lower for consistency
+             max_tokens=4000,
+             hyperparameters={
+                 "model": best_model,
+                 "temperature": 0.3,
+                 "max_tokens": 4000,
+             },
+         )
+
+     @staticmethod
+     def get_model_for_tier(provider_name: str, tier: str, default_model: str) -> str:
+         """
+         Get model name for a specific tier.
+
+         Args:
+             provider_name: Provider name
+             tier: Model tier (fast, balanced, best)
+             default_model: Fallback model
+
+         Returns:
+             Model name
+         """
+         model_tier = ExecutionStrategy.MODEL_TIERS.get(
+             provider_name, ExecutionStrategy.MODEL_TIERS["triton"]
+         )
+         return model_tier.get(tier) or default_model
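
A sketch of how a caller might obtain an ExecutionConfig from the strategy. The file path for this hunk is missing from the diff, so the module path in the import is an assumption; passing classification=None exercises the documented fallback rather than a real TaskClassification, whose constructor is not shown here.

# Assumed module path; the filename for this hunk is absent from the diff.
from kubrick_cli.strategy import ExecutionStrategy

# With no classification available, the strategy falls back to the
# "agentic_simple" configuration using the provider's balanced model.
cfg = ExecutionStrategy.get_execution_config(
    classification=None,
    provider_name="anthropic",
    default_model="claude-sonnet-4-5-20250929",
)
print(cfg.mode)             # "agentic_simple"
print(cfg.hyperparameters)  # {"model": "claude-sonnet-4-5-20250929", "temperature": 0.4, "max_tokens": 2000}

# Tier lookup falls back to the default when a tier maps to None (triton).
print(ExecutionStrategy.get_model_for_tier("triton", "best", "llm_decoupled"))  # "llm_decoupled"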