kubrick-cli 0.1.4 (kubrick_cli-0.1.4-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kubrick_cli/main.py ADDED
@@ -0,0 +1,806 @@
+ #!/usr/bin/env python3
+ """Main CLI entry point for Kubrick."""
+
+ import argparse
+ import json
+ import re
+ from datetime import datetime
+
+ from rich.console import Console
+ from rich.markdown import Markdown
+ from rich.panel import Panel
+ from rich.prompt import Prompt
+ from rich.table import Table
+
+ from .agent_loop import AgentLoop
+ from .classifier import TaskClassifier
+ from .config import KubrickConfig
+ from .display import DisplayManager
+ from .execution_strategy import ExecutionStrategy
+ from .planning import PlanningPhase
+ from .providers.factory import ProviderFactory
+ from .safety import SafetyConfig, SafetyManager
+ from .scheduler import ToolScheduler
+ from .tools import ToolExecutor, get_tools_prompt
+
+ console = Console()
+
+
+ class KubrickCLI:
+     """Main CLI application."""
+
+     def __init__(
+         self,
+         config: KubrickConfig,
+         working_dir: str = None,
+         conversation_id: str = None,
+         provider_override: str = None,
+     ):
+         """
+         Initialize Kubrick CLI.
+
+         Args:
+             config: KubrickConfig instance
+             working_dir: Working directory for file operations (overrides config)
+             conversation_id: Load existing conversation by ID
+             provider_override: Override configured provider (for testing)
+         """
+         self.config = config
+
+         if provider_override:
+             config.set("provider", provider_override)
+
+         try:
+             self.provider = ProviderFactory.create_provider(config.get_all())
+             console.print(
+                 f"[dim]→ Using {self.provider.provider_name} provider "
+                 f"with model {self.provider.model_name}[/dim]"
+             )
+         except ValueError as e:
+             console.print(f"[red]Error: {e}[/red]")
+             console.print(
+                 "[yellow]Please run the setup wizard or check your configuration.[/yellow]"
+             )
+             raise
+
+         self.client = self.provider
+
+         self.safety_manager = SafetyManager(SafetyConfig.from_config(config.get_all()))
+
+         self.tool_executor = ToolExecutor(
+             working_dir=working_dir, safety_manager=self.safety_manager
+         )
+
+         self.display_manager = DisplayManager(config.get_all())
+
+         enable_parallel = config.get("enable_parallel_tools", True)
+         max_workers = config.get("max_parallel_workers", 3)
+         self.tool_scheduler = ToolScheduler(
+             tool_executor=self.tool_executor,
+             max_workers=max_workers,
+             enable_parallel=enable_parallel,
+         )
+
+         max_iterations = config.get("max_iterations", 15)
+         max_tools_per_turn = config.get("max_tools_per_turn", 5)
+         timeout_seconds = config.get("total_timeout_seconds", 600)
+
+         self.agent_loop = AgentLoop(
+             llm_client=self.provider,
+             tool_executor=self.tool_executor,
+             max_iterations=max_iterations,
+             max_tools_per_turn=max_tools_per_turn,
+             timeout_seconds=timeout_seconds,
+             stream_options={},
+             display_manager=self.display_manager,
+             tool_scheduler=self.tool_scheduler,
+         )
+
+         self.classifier = TaskClassifier(self.provider)
+         self.planning_phase = PlanningPhase(
+             llm_client=self.provider,
+             tool_executor=self.tool_executor,
+             agent_loop=self.agent_loop,
+         )
+
+         self.interrupt_count = 0
+
+         self.last_listed_conversations = []
+
+         self.conversation_id = conversation_id or datetime.now().strftime(
+             "%Y%m%d_%H%M%S"
+         )
+
+         if conversation_id:
+             loaded = self._load_conversation(conversation_id)
+             if loaded:
+                 self.messages = loaded
+             else:
+                 console.print(
+                     f"[yellow]Conversation {conversation_id} not found, "
+                     "starting new conversation[/yellow]"
+                 )
+                 self.messages = self._get_initial_messages()
+         else:
+             self.messages = self._get_initial_messages()
+
+     def _get_initial_messages(self) -> list:
+         """Get initial system prompt messages."""
+         return [
+             {
+                 "role": "system",
+                 "content": f"""You are Kubrick, an AI coding assistant with agentic \
+ capabilities and file system access.
+
+ Current working directory: {self.tool_executor.working_dir}
+
+ # Agentic Behavior
+
+ You can ITERATE through multiple tool calls until a task is complete. \
+ You are not limited to a single response.
+
+ ## Process
+
+ 1. Call tools to gather information or make changes
+ 2. Analyze the results
+ 3. Continue calling tools as needed
+ 4. Signal completion when done
+
+ # Completion Signal
+
+ When you've completed the task, say "TASK_COMPLETE" followed by a summary of what you accomplished.
+
+ # Tool Call Format (EXACT SYNTAX REQUIRED)
+
+ ```tool_call
+ {{
+ "tool": "tool_name",
+ "parameters": {{
+ "param": "value"
+ }}
+ }}
+ ```
+
+ # Available Tools
+
+ {get_tools_prompt()}
+
+ # How to Explore Directories
+
+ To explore the full codebase structure:
+ - Use `list_files` with pattern `**/*.py` to list all Python files recursively
+ - Use `list_files` with pattern `**/*` to see ALL files and directories
+ - Use `run_bash` with `find . -type f` to list all files
+ - Use `run_bash` with `tree` or `ls -R` to see directory structure
+
+ Example - list all Python files:
+ ```tool_call
+ {{
+ "tool": "list_files",
+ "parameters": {{
+ "pattern": "**/*.py"
+ }}
+ }}
+ ```
+
+ # Important Rules
+
+ 1. **ITERATE**: Call tools immediately when needed, then analyze results and continue iterating
+ 2. **MULTIPLE TOOLS**: You can call multiple tools per response
+ 3. **READ BEFORE EDIT**: Always read a file before editing it
+ 4. **EXPLORE THOROUGHLY**: Use `**/*` patterns to see all files in subdirectories
+ 5. **SIGNAL COMPLETION**: Say "TASK_COMPLETE" when the task is done
+ 6. **USE TOOLS IMMEDIATELY**: Don't ask permission - just call the tool
+
+ # Examples
+
+ User: "Read the config.pbtxt file"
+ Assistant: I'll read that file for you.
+
+ ```tool_call
+ {{
+ "tool": "read_file",
+ "parameters": {{
+ "file_path": "config.pbtxt"
+ }}
+ }}
+ ```
+
+ User: "Create a new Python script called test.py with a hello world function"
+ Assistant: I'll create that file for you.
+
+ ```tool_call
+ {{
+ "tool": "write_file",
+ "parameters": {{
+ "file_path": "test.py",
+ "content": "def hello_world():\\n \
+ print('Hello, World!')\\n\\nif __name__ == '__main__':\\n hello_world()\\n"
+ }}
+ }}
+ ```
+
+ User: "Read main.py and add logging to all functions"
+ Assistant: I'll first read the file to understand its structure.
+
+ ```tool_call
+ {{
+ "tool": "read_file",
+ "parameters": {{
+ "file_path": "main.py"
+ }}
+ }}
+ ```
+
+ [After seeing the results, I'll add logging and continue iterating until done, \
+ then say TASK_COMPLETE]
+ """,
+             }
+         ]
+
+     def _load_conversation(self, conversation_id: str) -> list:
+         """Load a conversation by ID."""
+         data = self.config.load_conversation(conversation_id)
+         if data:
+             return data.get("messages", [])
+         return None
+
+     def _save_conversation(self):
+         """Save current conversation to disk."""
+         if self.config.get("auto_save_conversations", True):
+             metadata = {
+                 "working_dir": str(self.tool_executor.working_dir),
+                 "provider": self.provider.provider_name,
+                 "model_name": self.provider.model_name,
+                 "saved_at": datetime.now().isoformat(),
+             }
+             self.config.save_conversation(self.conversation_id, self.messages, metadata)
+
+     def parse_tool_calls(self, text: str) -> list:
+         """
+         Parse tool calls from LLM response.
+
+         Args:
+             text: Response text from LLM
+
+         Returns:
+             List of (tool_name, parameters) tuples
+         """
+         tool_calls = []
+
+         pattern = r"```tool_call\s*\n(.*?)\n```"
+         matches = re.findall(pattern, text, re.DOTALL)
+
+         for match in matches:
+             try:
+                 tool_data = json.loads(match.strip())
+                 tool_name = tool_data.get("tool")
+                 parameters = tool_data.get("parameters", {})
+                 if tool_name:
+                     tool_calls.append((tool_name, parameters))
+             except json.JSONDecodeError as e:
+                 console.print(f"[red]Failed to parse tool call: {e}[/red]")
+                 continue
+
+         if not tool_calls:
+             json_pattern = (
+                 r'\{\s*"tool"\s*:\s*"([^"]+)"\s*,\s*"parameters"\s*:\s*\{[^}]*\}\s*\}'
+             )
+             json_matches = re.findall(json_pattern, text, re.DOTALL)
+
+             if json_matches:
+                 console.print(
+                     "[yellow]⚠ Warning: Detected tool call without proper markdown fence. "
+                     "Parsing anyway, but please use ```tool_call format.[/yellow]"
+                 )
+
+                 full_json_pattern = (
+                     r'(\{\s*"tool"\s*:\s*"[^"]+"\s*,\s*"parameters"\s*:\s*\{.*?\}\s*\})'
+                 )
+                 for match in re.finditer(full_json_pattern, text, re.DOTALL):
+                     try:
+                         tool_data = json.loads(match.group(1))
+                         tool_name = tool_data.get("tool")
+                         parameters = tool_data.get("parameters", {})
+                         if tool_name:
+                             tool_calls.append((tool_name, parameters))
+                     except json.JSONDecodeError:
+                         continue
+
+         return tool_calls
+
+     def display_streaming_response(self, chunks: list) -> str:
+         """
+         Display streaming response and return full text.
+
+         Args:
+             chunks: List of text chunks
+
+         Returns:
+             Full response text
+         """
+         full_text = "".join(chunks)
+
+         parts = re.split(r"(```tool_call.*?```)", full_text, flags=re.DOTALL)
+
+         for part in parts:
+             if part.startswith("```tool_call"):
+                 console.print(Panel(part, title="Tool Call", border_style="cyan"))
+             elif part.strip():
+                 console.print(Markdown(part))
+
+         return full_text
+
+     def run_conversation_turn(self, user_message: str) -> None:
+         """
+         Run one turn of the conversation with optimized execution strategy.
+
+         Args:
+             user_message: User's message
+         """
+         self.messages.append({"role": "user", "content": user_message})
+
+         try:
+             classification = None
+             if self.config.get("enable_task_classification", True):
+                 classification = self.classifier.classify(user_message, self.messages)
+
+                 exec_config = ExecutionStrategy.get_execution_config(
+                     classification,
+                     self.provider.provider_name,
+                     self.provider.model_name,
+                 )
+
+                 console.print(
+                     f"[dim]→ Execution mode: {exec_config.mode} "
+                     f"(model: {exec_config.model_tier}, "
+                     f"max_iter: {exec_config.max_iterations})[/dim]"
+                 )
+
+                 current_model = self.provider.model_name
+                 target_model = exec_config.hyperparameters.get("model")
+
+                 if target_model and target_model != current_model:
+                     try:
+                         self.provider.set_model(target_model)
+                         console.print(
+                             f"[dim]→ Switched model: {current_model} → {target_model}[/dim]"
+                         )
+                     except Exception:
+                         pass
+
+             else:
+                 exec_config = ExecutionStrategy.get_execution_config(
+                     None,
+                     self.provider.provider_name,
+                     self.provider.model_name,
+                 )
+
+             if exec_config.mode == "conversational":
+                 self._run_conversational_turn(exec_config)
+
+             else:
+                 self._run_agentic_turn(classification, exec_config, user_message)
+
+         except KeyboardInterrupt:
+             console.print("\n[yellow]Interrupted[/yellow]")
+         except Exception as e:
+             import traceback
+
+             console.print(f"\n[red]Error: {e}[/red]")
+             console.print(f"[dim]{traceback.format_exc()}[/dim]")
+
+     def _run_conversational_turn(self, exec_config):
+         """
+         Handle conversational turn (no tools, single response).
+
+         Args:
+             exec_config: Execution configuration
+         """
+         console.print("[bold cyan]Assistant:[/bold cyan]")
+         chunks = []
+
+         stream_options = exec_config.hyperparameters.copy()
+
+         for chunk in self.provider.generate_streaming(
+             self.messages, stream_options=stream_options
+         ):
+             console.print(chunk, end="")
+             chunks.append(chunk)
+
+         console.print("\n")
+
+         response_text = "".join(chunks)
+         self.messages.append({"role": "assistant", "content": response_text})
+
+     def _run_agentic_turn(self, classification, exec_config, user_message):
+         """
+         Handle agentic turn (with tools and iterations).
+
+         Args:
+             classification: Task classification
+             exec_config: Execution configuration
+             user_message: Original user message
+         """
+         if (
+             exec_config.use_planning
+             and classification
+             and classification.complexity == "COMPLEX"
+             and self.config.get("enable_planning_phase", True)
+         ):
+             response = Prompt.ask(
+                 "[bold yellow]This looks complex. Create a plan first?[/bold yellow]",
+                 choices=["yes", "no"],
+                 default="yes",
+             )
+
+             if response == "yes":
+                 plan = self.planning_phase.execute_planning(user_message, self.messages)
+
+                 approval = self.planning_phase.get_user_approval(plan)
+
+                 if not approval.get("approved", False):
+                     console.print("[yellow]Task cancelled by user[/yellow]")
+                     return
+
+                 if "modifications" in approval:
+                     self.messages.append(
+                         {
+                             "role": "user",
+                             "content": f"Plan modifications: {approval['modifications']}",
+                         }
+                     )
+
+         original_max_iterations = self.agent_loop.max_iterations
+         self.agent_loop.max_iterations = exec_config.max_iterations
+
+         self.agent_loop.stream_options = exec_config.hyperparameters
+
+         result = self.agent_loop.run(
+             messages=self.messages,
+             tool_parser=self.parse_tool_calls,
+             display_callback=None,
+         )
+
+         self.agent_loop.max_iterations = original_max_iterations
+
+         if result["success"]:
+             console.print(
+                 f"\n[dim]Completed in {result['iterations']} iteration(s) "
+                 f"with {result['tool_calls']} tool call(s)[/dim]"
+             )
+         else:
+             console.print(f"\n[red]Error: {result.get('error')}[/red]")
+
+     def run(self):
+         """Run the interactive CLI."""
+         console.print(
+             Panel.fit(
+                 "[bold cyan]Kubrick CLI[/bold cyan]\n"
+                 f"Working directory: {self.tool_executor.working_dir}\n"
+                 f"Conversation ID: {self.conversation_id}\n"
+                 "Type your questions or commands. Type 'exit' or 'quit' to exit.\n"
+                 "Type '/help' to see all available in-session commands.",
+                 border_style="cyan",
+             )
+         )
+
+         if not self.provider.is_healthy():
+             console.print(
+                 f"[red]Warning: Cannot connect to {self.provider.provider_name} provider[/red]"
+             )
+             console.print(
+                 "[yellow]Please check your provider configuration and connectivity.[/yellow]"
+             )
+
+         while True:
+             try:
+                 user_input = Prompt.ask("\n[bold green]You[/bold green]")
+
+                 self.interrupt_count = 0
+
+                 if not user_input.strip():
+                     continue
+
+                 if user_input.lower() in ["exit", "quit", "q"]:
+                     self._save_conversation()
+                     console.print(
+                         f"[cyan]Conversation saved as {self.conversation_id}[/cyan]"
+                     )
+                     console.print("[cyan]Goodbye![/cyan]")
+                     break
+
+                 if user_input.startswith("/"):
+                     self._handle_command(user_input)
+                     continue
+
+                 self.run_conversation_turn(user_input)
+
+                 self._save_conversation()
+
+             except KeyboardInterrupt:
+                 self.interrupt_count += 1
+
+                 if self.interrupt_count == 1:
+                     console.print(
+                         "\n[yellow]^C (Press again to start new conversation, "
+                         "once more to exit)[/yellow]"
+                     )
+                     continue
+
+                 elif self.interrupt_count == 2:
+                     console.print("\n[yellow]Starting new conversation...[/yellow]")
+                     self._save_conversation()
+
+                     self.conversation_id = datetime.now().strftime("%Y%m%d_%H%M%S")
+                     self.messages = self._get_initial_messages()
+
+                     console.print(
+                         f"[cyan]New conversation ID: {self.conversation_id}[/cyan]"
+                     )
+                     console.print("[dim]Press Ctrl+C once more to exit[/dim]")
+                     continue
+
+                 else:
+                     self._save_conversation()
+                     console.print(
+                         f"\n[cyan]Conversation saved as {self.conversation_id}[/cyan]"
+                     )
+                     console.print("[cyan]Goodbye![/cyan]")
+                     break
+
+             except Exception as e:
+                 console.print(f"[red]Error: {e}[/red]")
+
+     def _handle_command(self, command: str):
+         """Handle special CLI commands."""
+         parts = command.strip().split()
+         cmd = parts[0].lower()
+
+         if cmd == "/save":
+             self._save_conversation()
+             console.print(
+                 f"[green]Conversation saved as {self.conversation_id}[/green]"
+             )
+
+         elif cmd == "/list":
+             limit = int(parts[1]) if len(parts) > 1 else 20
+             conversations = self.config.list_conversations(limit=limit)
+
+             if not conversations:
+                 console.print("[yellow]No saved conversations found[/yellow]")
+                 return
+
+             self.last_listed_conversations = conversations
+
+             table = Table(title="Saved Conversations")
+             table.add_column("#", style="magenta", justify="right")
+             table.add_column("ID", style="cyan")
+             table.add_column("Messages", justify="right", style="green")
+             table.add_column("Working Dir", style="dim")
+             table.add_column("Modified", style="yellow")
+
+             for idx, conv in enumerate(conversations, start=1):
+                 conv_id = conv["id"]
+                 msg_count = str(conv["message_count"])
+                 working_dir = conv["metadata"].get("working_dir", "N/A")
+                 modified = datetime.fromtimestamp(conv["modified"]).strftime(
+                     "%Y-%m-%d %H:%M"
+                 )
+
+                 table.add_row(str(idx), conv_id, msg_count, working_dir, modified)
+
+             console.print(table)
+             console.print(
+                 "[dim]Use '/load <#>' to load a conversation by number (e.g., /load 1)[/dim]"
+             )
+
+         elif cmd == "/config":
+             if len(parts) == 1:
+                 config_data = self.config.get_all()
+                 table = Table(title="Current Configuration")
+                 table.add_column("Setting", style="cyan")
+                 table.add_column("Value", style="green")
+
+                 for key, value in config_data.items():
+                     table.add_row(key, str(value))
+
+                 console.print(table)
+
+             elif len(parts) == 3:
+                 key = parts[1]
+                 value = parts[2]
+
+                 try:
+                     value = json.loads(value)
+                 except json.JSONDecodeError:
+                     pass
+
+                 self.config.set(key, value)
+                 console.print(f"[green]Set {key} = {value}[/green]")
+
+             else:
+                 console.print("[yellow]Usage: /config [key value][/yellow]")
+
+         elif cmd == "/delete":
+             if len(parts) < 2:
+                 console.print("[yellow]Usage: /delete <conversation_id>[/yellow]")
+                 return
+
+             conv_id = parts[1]
+             if self.config.delete_conversation(conv_id):
+                 console.print(f"[green]Deleted conversation {conv_id}[/green]")
+             else:
+                 console.print(f"[yellow]Conversation {conv_id} not found[/yellow]")
+
+         elif cmd == "/load":
+             if len(parts) < 2:
+                 console.print("[yellow]Usage: /load <#|conversation_id|path>[/yellow]")
+                 console.print(
+                     "[dim]Tip: Use '/list' to see available conversations[/dim]"
+                 )
+                 return
+
+             identifier = parts[1]
+
+             if identifier.isdigit():
+                 idx = int(identifier) - 1
+                 if not self.last_listed_conversations:
+                     console.print(
+                         "[yellow]No conversations listed. Use '/list' first.[/yellow]"
+                     )
+                     return
+                 if idx < 0 or idx >= len(self.last_listed_conversations):
+                     console.print(
+                         "[yellow]Invalid number. "
+                         f"Choose 1-{len(self.last_listed_conversations)}[/yellow]"
+                     )
+                     return
+
+                 conversation_id = self.last_listed_conversations[idx]["id"]
+             else:
+                 conversation_id = identifier
+
+             conversation = self.config.load_conversation(conversation_id)
+             if not conversation:
+                 console.print(
+                     f"[yellow]Failed to load conversation: {conversation_id}[/yellow]"
+                 )
+                 return
+
+             self.messages = conversation["messages"]
+             self.conversation_id = conversation["id"]
+
+             msg_count = len(self.messages)
+             console.print(
+                 f"[green]Loaded conversation {conversation['id']} ({msg_count} messages)[/green]"
+             )
+
+         elif cmd == "/debug":
+             table = Table(title="Debug Information")
+             table.add_column("Item", style="cyan")
+             table.add_column("Value", style="green")
+
+             table.add_row("Conversation ID", self.conversation_id)
+             table.add_row("Messages Count", str(len(self.messages)))
+             table.add_row("Working Dir", str(self.tool_executor.working_dir))
+             table.add_row("Provider", self.provider.provider_name)
+             table.add_row("Model Name", self.provider.model_name)
+
+             console.print(table)
+
+             if len(parts) > 1 and parts[1] == "prompt":
+                 console.print("\n[bold cyan]System Prompt:[/bold cyan]")
+                 system_msg = next(
+                     (m for m in self.messages if m["role"] == "system"), None
+                 )
+                 if system_msg:
+                     console.print(Panel(system_msg["content"], border_style="dim"))
+                 else:
+                     console.print("[yellow]No system message found[/yellow]")
+
+         elif cmd == "/help":
+             console.print(
+                 """
+ [bold cyan]In-Session Commands:[/bold cyan]
+
+ [green]/save[/green] - Save the current conversation
+ [green]/list [N][/green] - List saved conversations (default: 20)
+ [green]/load <#|ID>[/green] - Load a conversation by number or ID
+ [green]/config[/green] - Show current configuration
+ [green]/config KEY VALUE[/green] - Set a configuration value
+ [green]/delete ID[/green] - Delete a conversation
+ [green]/debug[/green] - Show debug information
+ [green]/debug prompt[/green] - Show the system prompt
+ [green]/help[/green] - Show this help message
+ [green]exit[/green] or [green]quit[/green] - Save conversation and exit
+
+ [bold cyan]Tips:[/bold cyan]
+ • Use [cyan]/list[/cyan] to see numbered conversations, \
+ then [cyan]/load 1[/cyan] to load by number
+ • You can also use [cyan]--load[/cyan] when starting: \
+ [dim]kubrick --load 20240118_143022[/dim]
+ """
+             )
+
+         else:
+             console.print(f"[yellow]Unknown command: {cmd}[/yellow]")
+             console.print(
+                 "[dim]Available commands: "
+                 "/save, /list, /load, /config, /delete, /debug, /help, exit, quit[/dim]"
+             )
+
+
+ def main():
+     """Main entry point."""
+     config = KubrickConfig()
+
+     parser = argparse.ArgumentParser(
+         description="Kubrick - AI-assisted coding CLI with agentic capabilities",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ In-Session Commands:
+ /save Save the current conversation
+ /list [N] List saved conversations (default: 20)
+ /load <#|ID> Load a conversation by number or ID
+ /config Show current configuration
+ /config KEY VALUE Set a configuration value
+ /delete ID Delete a conversation
+ /debug Show debug information
+ /debug prompt Show the system prompt
+ /help Show all available commands
+ exit or quit Save conversation and exit
+
+ Examples:
+ kubrick # Start new conversation
+ kubrick --load 20240118_143022 # Load by conversation ID
+ kubrick --load /path/to/conversation.json # Load from file path
+ kubrick --working-dir /path/to/project # Set working directory
+ kubrick --provider openai # Override provider for this session
+ """,
+     )
+
+     parser.add_argument(
+         "--working-dir",
+         default=None,
+         help="Working directory for file operations (default: current directory)",
+     )
+
+     parser.add_argument(
+         "--load",
+         "--load-conversation",
+         dest="conversation_id",
+         default=None,
+         help=(
+             "Load a previous conversation by ID or file path "
+             "(e.g., 20240118_143022 or /path/to/conversation.json)"
+         ),
+     )
+
+     parser.add_argument(
+         "--provider",
+         default=None,
+         choices=["triton", "openai", "anthropic"],
+         help="Override configured provider for this session",
+     )
+
+     args = parser.parse_args()
+
+     try:
+         cli = KubrickCLI(
+             config=config,
+             working_dir=args.working_dir,
+             conversation_id=args.conversation_id,
+             provider_override=args.provider,
+         )
+
+         cli.run()
+     except Exception as e:
+         console.print(f"[red]Failed to start Kubrick: {e}[/red]")
+         import sys
+
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     main()
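
For readers reviewing the release, the snippet below is a minimal, standalone sketch (not part of the package) of the fenced `tool_call` protocol that `KubrickCLI.parse_tool_calls` extracts: a ```tool_call fence wrapping a JSON object with `tool` and `parameters` keys. It reuses the same regex and JSON decoding shown in the diff above; the sample model output is hypothetical.

```python
import json
import re

# Hypothetical model output in the fenced tool_call format required by the
# system prompt above (the fence is built from a variable to keep this block tidy).
fence = "```"
response_text = (
    "I'll read that file for you.\n\n"
    f"{fence}tool_call\n"
    '{"tool": "read_file", "parameters": {"file_path": "config.pbtxt"}}\n'
    f"{fence}\n"
)

# Same extraction approach as KubrickCLI.parse_tool_calls: find fenced blocks,
# decode each one as JSON, and keep the (tool, parameters) pairs.
pattern = r"```tool_call\s*\n(.*?)\n```"
for block in re.findall(pattern, response_text, re.DOTALL):
    data = json.loads(block.strip())
    print(data.get("tool"), data.get("parameters", {}))
# Expected output: read_file {'file_path': 'config.pbtxt'}
```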