hanzo 0.3.25__tar.gz → 0.3.26__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {hanzo-0.3.25 → hanzo-0.3.26}/PKG-INFO +1 -1
- {hanzo-0.3.25 → hanzo-0.3.26}/pyproject.toml +1 -1
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/__init__.py +1 -1
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/interactive/enhanced_repl.py +208 -33
- hanzo-0.3.26/src/hanzo/interactive/model_selector.py +166 -0
- hanzo-0.3.26/src/hanzo/tools/__init__.py +5 -0
- hanzo-0.3.26/src/hanzo/tools/detector.py +291 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/.gitignore +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/README.md +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/__main__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/base_agent.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/batch_orchestrator.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/cli.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/__init__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/agent.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/auth.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/auth_broken.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/chat.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/config.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/mcp.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/miner.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/network.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/node.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/repl.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/router.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/commands/tools.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/dev.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/fallback_handler.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/interactive/__init__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/interactive/dashboard.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/interactive/repl.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/mcp_server.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/memory_manager.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/model_registry.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/orchestrator_config.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/rate_limiter.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/repl.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/router/__init__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/streaming.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/ui/__init__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/ui/inline_startup.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/ui/startup.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/utils/__init__.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/utils/config.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/utils/net_check.py +0 -0
- {hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/utils/output.py +0 -0
{hanzo-0.3.25 → hanzo-0.3.26}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.25
+Version: 0.3.26
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.25 → hanzo-0.3.26}/src/hanzo/interactive/enhanced_repl.py
@@ -20,6 +20,18 @@ from prompt_toolkit.completion import WordCompleter
 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
 from prompt_toolkit.formatted_text import HTML
 
+try:
+    from ..tools.detector import ToolDetector, AITool
+except ImportError:
+    ToolDetector = None
+    AITool = None
+
+try:
+    from .model_selector import QuickModelSelector, BackgroundTaskManager
+except ImportError:
+    QuickModelSelector = None
+    BackgroundTaskManager = None
+
 
 class EnhancedHanzoREPL:
     """Enhanced REPL with model selection and authentication."""
@@ -57,6 +69,17 @@ class EnhancedHanzoREPL:
         "local:mistral": "Local Mistral",
         "local:phi-2": "Local Phi-2",
     }
+
+    def get_all_models(self):
+        """Get all available models including detected tools."""
+        models = dict(self.MODELS)
+
+        # Add detected tools as models
+        if self.detected_tools:
+            for tool in self.detected_tools:
+                models[f"tool:{tool.name}"] = f"{tool.display_name} (Tool)"
+
+        return models
 
     def __init__(self, console: Optional[Console] = None):
         self.console = console or Console()
@@ -68,8 +91,30 @@ class EnhancedHanzoREPL:
         self.config = self.load_config()
         self.auth = self.load_auth()
 
-        #
-        self.
+        # Initialize tool detector
+        self.tool_detector = ToolDetector(console) if ToolDetector else None
+        self.detected_tools = []
+        self.current_tool = None
+
+        # Initialize background task manager
+        self.task_manager = BackgroundTaskManager(console) if BackgroundTaskManager else None
+
+        # Detect available tools and set default
+        if self.tool_detector:
+            self.detected_tools = self.tool_detector.detect_all()
+            default_tool = self.tool_detector.get_default_tool()
+
+            # If Claude Code is available, use it as default
+            if default_tool:
+                self.current_model = f"tool:{default_tool.name}"
+                self.current_tool = default_tool
+                self.console.print(f"[green]✓ Detected {default_tool.display_name} as default AI assistant[/green]")
+            else:
+                # Fallback to regular models
+                self.current_model = self.config.get("default_model", "gpt-3.5-turbo")
+        else:
+            # No tool detector, use regular models
+            self.current_model = self.config.get("default_model", "gpt-3.5-turbo")
 
         # Setup session
         self.session = PromptSession(
@@ -86,9 +131,14 @@ class EnhancedHanzoREPL:
             "status": self.show_status,
             "model": self.change_model,
             "models": self.list_models,
+            "tools": self.list_tools,
+            "agents": self.list_tools,  # Alias for tools
             "login": self.login,
             "logout": self.logout,
             "config": self.show_config,
+            "tasks": self.show_tasks,
+            "kill": self.kill_task,
+            "quick": self.quick_model_select,
         }
 
         self.running = False
@@ -144,8 +194,17 @@ class EnhancedHanzoREPL:
 
     def get_model_info(self):
         """Get current model info string."""
-        # Determine provider from model name
         model = self.current_model
+
+        # Check if using a tool
+        if model.startswith("tool:"):
+            if self.current_tool:
+                return f"[dim cyan]agent: {self.current_tool.display_name}[/dim cyan]"
+            else:
+                tool_name = model.replace("tool:", "")
+                return f"[dim cyan]agent: {tool_name}[/dim cyan]"
+
+        # Determine provider from model name
         if model.startswith("gpt"):
             provider = "openai"
         elif model.startswith("claude"):
@@ -161,10 +220,7 @@ class EnhancedHanzoREPL:
         else:
             provider = "unknown"
 
-
-        auth_status = "🔓" if self.is_authenticated() else "🔒"
-
-        return f"[dim]model: {provider}/{model} {auth_status}[/dim]"
+        return f"[dim]model: {provider}/{model}[/dim]"
 
     async def run(self):
         """Run the enhanced REPL."""
@@ -190,7 +246,8 @@ class EnhancedHanzoREPL:
                 # Get input with simple prompt
                 command = await self.session.prompt_async(
                     self.get_prompt(),
-                    completer=completer
+                    completer=completer,
+                    vi_mode=True  # Enable vi mode for better navigation
                 )
 
                 if not command.strip():
@@ -305,11 +362,11 @@ class EnhancedHanzoREPL:
             self.console.print(f"\n[dim]Last login: {self.auth['last_login']}[/dim]")
 
     async def change_model(self, args: str = ""):
-        """Change the current model."""
+        """Change the current model or tool."""
        if not args:
             # Show model selection menu
             await self.list_models("")
-            self.console.print("\n[cyan]Enter model name or number:[/cyan]")
+            self.console.print("\n[cyan]Enter model/tool name or number:[/cyan]")
 
             # Get selection
             try:
@@ -317,41 +374,87 @@ class EnhancedHanzoREPL:
 
                 # Handle numeric selection
                 if selection.isdigit():
-
-
-                    if
-
+                    num = int(selection)
+
+                    # Check if it's a tool selection
+                    if self.detected_tools and num <= len(self.detected_tools):
+                        tool = self.detected_tools[num - 1]
+                        args = f"tool:{tool.name}"
                     else:
-
-
+                        # It's a model selection
+                        model_idx = num - len(self.detected_tools) - 1 if self.detected_tools else num - 1
+                        models_list = list(self.MODELS.keys())
+                        if 0 <= model_idx < len(models_list):
+                            args = models_list[model_idx]
+                        else:
+                            self.console.print("[red]Invalid selection[/red]")
+                            return
                 else:
                     args = selection
             except (KeyboardInterrupt, EOFError):
                 return
 
-        #
-        if args
-
-
-
-
-
-
-
-
+        # Check if it's a tool
+        if args.startswith("tool:") or args in [t.name for t in self.detected_tools] if self.detected_tools else False:
+            # Handle tool selection
+            tool_name = args.replace("tool:", "") if args.startswith("tool:") else args
+
+            # Find the tool
+            tool = None
+            for t in self.detected_tools:
+                if t.name == tool_name or t.display_name.lower() == tool_name.lower():
+                    tool = t
+                    break
+
+            if tool:
+                self.current_model = f"tool:{tool.name}"
+                self.current_tool = tool
+                self.config["default_model"] = self.current_model
+                self.save_config()
+                self.console.print(f"[green]✅ Switched to {tool.display_name}[/green]")
+            else:
+                self.console.print(f"[red]Tool not found: {tool_name}[/red]")
+                self.console.print("[dim]Use /tools to see available tools[/dim]")
+
+        # Regular model
+        elif args in self.MODELS or args.startswith("local:"):
+            self.current_model = args
+            self.current_tool = None
+            self.config["default_model"] = args
+            self.save_config()
+
+            model_name = self.MODELS.get(args, args)
+            self.console.print(f"[green]✅ Switched to {model_name}[/green]")
 
-
-
+        else:
+            self.console.print(f"[red]Unknown model or tool: {args}[/red]")
+            self.console.print("[dim]Use /models or /tools to see available options[/dim]")
 
+    async def list_tools(self, args: str = ""):
+        """List available AI tools."""
+        if self.tool_detector:
+            self.tool_detector.show_available_tools()
+        else:
+            self.console.print("[yellow]Tool detection not available[/yellow]")
+
     async def list_models(self, args: str = ""):
         """List available models."""
-
+        # Show tools first if available
+        if self.detected_tools:
+            self.console.print("[bold cyan]AI Coding Assistants (Detected):[/bold cyan]")
+            for i, tool in enumerate(self.detected_tools, 1):
+                marker = "→" if self.current_model == f"tool:{tool.name}" else " "
+                self.console.print(f"  {marker} {i}. {tool.display_name} ({tool.provider})")
+            self.console.print()
+
+        table = Table(title="Language Models", box=box.ROUNDED)
         table.add_column("#", style="dim")
         table.add_column("Model ID", style="cyan")
         table.add_column("Name", style="white")
         table.add_column("Provider", style="yellow")
 
-
+        start_idx = len(self.detected_tools) + 1 if self.detected_tools else 1
+        for i, (model_id, model_name) in enumerate(self.MODELS.items(), start_idx):
             # Extract provider
             if model_id.startswith("gpt"):
                 provider = "OpenAI"
@@ -449,6 +552,10 @@ class EnhancedHanzoREPL:
 ## Slash Commands:
 - `/model [name]` - Change AI model (or `/m`)
 - `/models` - List available models
+- `/tools` - List available AI tools
+- `/quick` - Quick model selector (arrow keys)
+- `/tasks` - Show background tasks
+- `/kill [id]` - Kill background task
 - `/status` - Show system status (or `/s`)
 - `/login` - Login to Hanzo Cloud
 - `/logout` - Logout from Hanzo
@@ -457,6 +564,11 @@ class EnhancedHanzoREPL:
 - `/clear` - Clear screen (or `/c`)
 - `/quit` - Exit REPL (or `/q`)
 
+## Quick Model Selection:
+- Press ↓ arrow key for quick model selector
+- Use ↑/↓ to navigate, Enter to select
+- Esc to cancel
+
 ## Model Selection:
 - Use `/model gpt-4` to switch to GPT-4
 - Use `/model 3` to select model by number
@@ -508,6 +620,69 @@ class EnhancedHanzoREPL:
             self.console.print(f"[red]Error executing command: {e}[/red]")
 
     async def chat_with_ai(self, message: str):
-        """Chat with AI using current model."""
-        #
-
+        """Chat with AI using current model or tool."""
+        # Check if using a tool
+        if self.current_model.startswith("tool:") and self.current_tool:
+            # Use the detected tool directly
+            self.console.print(f"[dim]Using {self.current_tool.display_name}...[/dim]")
+
+            success, output = self.tool_detector.execute_with_tool(self.current_tool, message)
+
+            if success:
+                self.console.print(output)
+            else:
+                # Fallback to regular model
+                self.console.print(f"[yellow]{self.current_tool.display_name} failed, trying cloud model...[/yellow]")
+                await self.execute_command("ask", f"--cloud --model gpt-3.5-turbo {message}")
+        else:
+            # Use regular model through hanzo ask
+            await self.execute_command("ask", f"--cloud --model {self.current_model} {message}")
+
+    async def quick_model_select(self, args: str = ""):
+        """Quick model selector with arrow keys."""
+        if not QuickModelSelector:
+            self.console.print("[yellow]Quick selector not available[/yellow]")
+            return
+
+        # Prepare tools and models
+        tools = [(f"tool:{t.name}", t.display_name) for t in self.detected_tools] if self.detected_tools else []
+        models = list(self.MODELS.items())
+
+        selector = QuickModelSelector(models, tools, self.current_model)
+        selected = await selector.run()
+
+        if selected:
+            # Change to selected model
+            await self.change_model(selected)
+
+    async def show_tasks(self, args: str = ""):
+        """Show background tasks."""
+        if self.task_manager:
+            self.task_manager.list_tasks()
+        else:
+            self.console.print("[yellow]Task manager not available[/yellow]")
+
+    async def kill_task(self, args: str = ""):
+        """Kill a background task."""
+        if not self.task_manager:
+            self.console.print("[yellow]Task manager not available[/yellow]")
+            return
+
+        if args:
+            if args.lower() == "all":
+                self.task_manager.kill_all()
+            else:
+                self.task_manager.kill_task(args)
+        else:
+            # Show tasks and prompt for selection
+            self.task_manager.list_tasks()
+            self.console.print("\n[cyan]Enter task ID to kill (or 'all' for all tasks):[/cyan]")
+            try:
+                task_id = await self.session.prompt_async("> ")
+                if task_id:
+                    if task_id.lower() == "all":
+                        self.task_manager.kill_all()
+                    else:
+                        self.task_manager.kill_task(task_id)
+            except (KeyboardInterrupt, EOFError):
+                pass
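For reference, the REPL now exposes each detected assistant as a pseudo-model ID with a `tool:` prefix alongside the entries in `MODELS`. A minimal standalone sketch of that merging rule, mirroring `get_all_models()` above (the stub class below is illustrative only, not part of the package):

```python
from dataclasses import dataclass


@dataclass
class StubTool:
    """Illustrative stand-in for detector.AITool."""
    name: str
    display_name: str


MODELS = {"gpt-4": "GPT-4", "claude-3-opus": "Claude 3 Opus"}
detected_tools = [StubTool("claude-code", "Claude Code")]


def get_all_models(models, tools):
    """Same merging rule as EnhancedHanzoREPL.get_all_models()."""
    merged = dict(models)
    for tool in tools:
        merged[f"tool:{tool.name}"] = f"{tool.display_name} (Tool)"
    return merged


print(get_all_models(MODELS, detected_tools))
# {'gpt-4': 'GPT-4', 'claude-3-opus': 'Claude 3 Opus', 'tool:claude-code': 'Claude Code (Tool)'}
```

A selection such as `/model tool:claude-code` (or the corresponding menu number) then sets `current_tool`, while plain IDs like `gpt-4` keep the old behaviour.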
hanzo-0.3.26/src/hanzo/interactive/model_selector.py (new file)
@@ -0,0 +1,166 @@
+"""Quick model selector with arrow key navigation."""
+
+from typing import List, Optional, Tuple
+from prompt_toolkit import Application
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.layout.containers import HSplit, Window
+from prompt_toolkit.layout.controls import FormattedTextControl
+from prompt_toolkit.layout.layout import Layout
+from prompt_toolkit.widgets import Label
+from rich.console import Console
+
+
+class QuickModelSelector:
+    """Quick model selector with arrow navigation."""
+
+    def __init__(self, models: List[Tuple[str, str]], tools: List[Tuple[str, str]], current: str):
+        self.models = models
+        self.tools = tools
+        self.current = current
+        self.all_items = tools + models  # Tools first, then models
+        self.selected_index = 0
+
+        # Find current selection
+        for i, (item_id, _) in enumerate(self.all_items):
+            if item_id == current:
+                self.selected_index = i
+                break
+
+    def get_display_lines(self) -> List[str]:
+        """Get display lines for the selector."""
+        lines = []
+
+        if self.tools:
+            lines.append("AI Coding Assistants:")
+            for i, (tool_id, tool_name) in enumerate(self.tools):
+                marker = "→ " if i == self.selected_index else "  "
+                lines.append(f"{marker}{tool_name}")
+
+        if self.models:
+            if self.tools:
+                lines.append("")  # Empty line
+            lines.append("Language Models:")
+
+            tool_count = len(self.tools)
+            for i, (model_id, model_name) in enumerate(self.models):
+                actual_idx = tool_count + i
+                marker = "→ " if actual_idx == self.selected_index else "  "
+                lines.append(f"{marker}{model_name}")
+
+        return lines
+
+    def move_up(self):
+        """Move selection up."""
+        if self.selected_index > 0:
+            self.selected_index -= 1
+
+    def move_down(self):
+        """Move selection down."""
+        if self.selected_index < len(self.all_items) - 1:
+            self.selected_index += 1
+
+    def get_selected(self) -> Tuple[str, str]:
+        """Get the selected item."""
+        if 0 <= self.selected_index < len(self.all_items):
+            return self.all_items[self.selected_index]
+        return None, None
+
+    async def run(self) -> Optional[str]:
+        """Run the selector and return selected model/tool ID."""
+        kb = KeyBindings()
+
+        @kb.add('up')
+        def _(event):
+            self.move_up()
+            event.app.invalidate()
+
+        @kb.add('down')
+        def _(event):
+            self.move_down()
+            event.app.invalidate()
+
+        @kb.add('enter')
+        def _(event):
+            event.app.exit(result=self.get_selected()[0])
+
+        @kb.add('c-c')
+        @kb.add('escape')
+        def _(event):
+            event.app.exit(result=None)
+
+        def get_text():
+            lines = self.get_display_lines()
+            lines.append("")
+            lines.append("↑/↓: Navigate  Enter: Select  Esc: Cancel")
+            return "\n".join(lines)
+
+        layout = Layout(
+            Window(
+                FormattedTextControl(get_text),
+                wrap_lines=False
+            )
+        )
+
+        app = Application(
+            layout=layout,
+            key_bindings=kb,
+            full_screen=False,
+            mouse_support=True
+        )
+
+        return await app.run_async()
+
+
+class BackgroundTaskManager:
+    """Manage background tasks."""
+
+    def __init__(self, console: Optional[Console] = None):
+        self.console = console or Console()
+        self.tasks = {}  # task_id -> process
+        self.next_id = 1
+
+    def add_task(self, name: str, process):
+        """Add a background task."""
+        task_id = f"task_{self.next_id}"
+        self.next_id += 1
+        self.tasks[task_id] = {
+            "name": name,
+            "process": process,
+            "started": True
+        }
+        return task_id
+
+    def list_tasks(self):
+        """List all background tasks."""
+        if not self.tasks:
+            self.console.print("[dim]No background tasks running[/dim]")
+            return
+
+        self.console.print("[bold]Background Tasks:[/bold]")
+        for task_id, task in self.tasks.items():
+            status = "🟢 Running" if task["process"].poll() is None else "🔴 Stopped"
+            self.console.print(f"  {task_id}: {task['name']} - {status}")
+
+    def kill_task(self, task_id: str):
+        """Kill a background task."""
+        if task_id in self.tasks:
+            task = self.tasks[task_id]
+            if task["process"].poll() is None:
+                task["process"].terminate()
+                self.console.print(f"[yellow]Terminated {task_id}: {task['name']}[/yellow]")
+            else:
+                self.console.print(f"[dim]Task {task_id} already stopped[/dim]")
+            del self.tasks[task_id]
+        else:
+            self.console.print(f"[red]Task {task_id} not found[/red]")
+
+    def kill_all(self):
+        """Kill all background tasks."""
+        if not self.tasks:
+            self.console.print("[dim]No tasks to kill[/dim]")
+            return
+
+        for task_id in list(self.tasks.keys()):
+            self.kill_task(task_id)
+
+        self.console.print("[green]All tasks terminated[/green]")
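A rough usage sketch of the two new classes, assuming the import path shown in the file list. `QuickModelSelector.run()` drives a small prompt_toolkit application and therefore needs an interactive terminal; `BackgroundTaskManager` works with any object that has `poll()` and `terminate()`, such as `subprocess.Popen`:

```python
import asyncio
import subprocess

# Import path per this release's file list; adjust if the package layout differs.
from hanzo.interactive.model_selector import QuickModelSelector, BackgroundTaskManager


async def pick_model():
    models = [("gpt-4", "GPT-4"), ("claude-3-opus", "Claude 3 Opus")]
    tools = [("tool:claude-code", "Claude Code")]
    selector = QuickModelSelector(models, tools, current="gpt-4")
    # Returns the selected model/tool ID, or None on Esc / Ctrl-C.
    return await selector.run()


def run_background_task():
    manager = BackgroundTaskManager()
    proc = subprocess.Popen(["sleep", "30"])        # any long-running process (POSIX example)
    task_id = manager.add_task("sleep demo", proc)
    manager.list_tasks()                            # e.g. "task_1: sleep demo - 🟢 Running"
    manager.kill_task(task_id)                      # terminates the process and removes the entry


if __name__ == "__main__":
    run_background_task()
    print(asyncio.run(pick_model()))
```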
hanzo-0.3.26/src/hanzo/tools/detector.py (new file)
@@ -0,0 +1,291 @@
+"""Detect available AI coding tools and assistants."""
+
+import os
+import shutil
+import subprocess
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+from dataclasses import dataclass
+
+from rich.console import Console
+from rich.table import Table
+from rich import box
+
+
+@dataclass
+class AITool:
+    """Represents an AI coding tool."""
+    name: str
+    command: str
+    display_name: str
+    provider: str
+    priority: int  # Lower is higher priority
+    check_command: Optional[str] = None
+    env_var: Optional[str] = None
+    api_endpoint: Optional[str] = None
+    detected: bool = False
+    version: Optional[str] = None
+    path: Optional[str] = None
+
+
+class ToolDetector:
+    """Detect and manage available AI coding tools."""
+
+    # Define available tools with priority order
+    TOOLS = [
+        AITool(
+            name="claude-code",
+            command="claude",
+            display_name="Claude Code",
+            provider="anthropic",
+            priority=1,
+            check_command="claude --version",
+            env_var="ANTHROPIC_API_KEY"
+        ),
+        AITool(
+            name="hanzo-dev",
+            command="hanzo dev",
+            display_name="Hanzo Dev (Native)",
+            provider="hanzo",
+            priority=2,
+            check_command="hanzo --version",
+            env_var="HANZO_API_KEY"
+        ),
+        AITool(
+            name="openai-codex",
+            command="openai",
+            display_name="OpenAI Codex",
+            provider="openai",
+            priority=3,
+            check_command="openai --version",
+            env_var="OPENAI_API_KEY"
+        ),
+        AITool(
+            name="gemini-cli",
+            command="gemini",
+            display_name="Gemini CLI",
+            provider="google",
+            priority=4,
+            check_command="gemini --version",
+            env_var="GEMINI_API_KEY"
+        ),
+        AITool(
+            name="grok-cli",
+            command="grok",
+            display_name="Grok CLI",
+            provider="xai",
+            priority=5,
+            check_command="grok --version",
+            env_var="GROK_API_KEY"
+        ),
+        AITool(
+            name="openhands",
+            command="openhands",
+            display_name="OpenHands CLI",
+            provider="openhands",
+            priority=6,
+            check_command="openhands --version",
+            env_var=None
+        ),
+        AITool(
+            name="cursor",
+            command="cursor",
+            display_name="Cursor AI",
+            provider="cursor",
+            priority=7,
+            check_command="cursor --version",
+            env_var=None
+        ),
+        AITool(
+            name="codeium",
+            command="codeium",
+            display_name="Codeium",
+            provider="codeium",
+            priority=8,
+            check_command="codeium --version",
+            env_var="CODEIUM_API_KEY"
+        ),
+        AITool(
+            name="aider",
+            command="aider",
+            display_name="Aider",
+            provider="aider",
+            priority=9,
+            check_command="aider --version",
+            env_var=None
+        ),
+        AITool(
+            name="continue",
+            command="continue",
+            display_name="Continue Dev",
+            provider="continue",
+            priority=10,
+            check_command="continue --version",
+            env_var=None
+        )
+    ]
+
+    def __init__(self, console: Optional[Console] = None):
+        self.console = console or Console()
+        self.detected_tools: List[AITool] = []
+
+    def detect_all(self) -> List[AITool]:
+        """Detect all available AI tools."""
+        self.detected_tools = []
+
+        for tool in self.TOOLS:
+            if self.detect_tool(tool):
+                self.detected_tools.append(tool)
+
+        # Sort by priority
+        self.detected_tools.sort(key=lambda t: t.priority)
+        return self.detected_tools
+
+    def detect_tool(self, tool: AITool) -> bool:
+        """Detect if a specific tool is available."""
+        # Check if command exists
+        tool.path = shutil.which(tool.command.split()[0])
+        if tool.path:
+            tool.detected = True
+
+            # Try to get version
+            if tool.check_command:
+                try:
+                    result = subprocess.run(
+                        tool.check_command.split(),
+                        capture_output=True,
+                        text=True,
+                        timeout=2
+                    )
+                    if result.returncode == 0:
+                        tool.version = result.stdout.strip().split()[-1]
+                except:
+                    pass
+
+            return True
+
+        # Check environment variable as fallback
+        if tool.env_var and os.getenv(tool.env_var):
+            tool.detected = True
+            return True
+
+        return False
+
+    def get_default_tool(self) -> Optional[AITool]:
+        """Get the default tool based on priority and availability."""
+        if not self.detected_tools:
+            self.detect_all()
+
+        if self.detected_tools:
+            return self.detected_tools[0]
+        return None
+
+    def get_tool_by_name(self, name: str) -> Optional[AITool]:
+        """Get a specific tool by name."""
+        for tool in self.TOOLS:
+            if tool.name == name or tool.display_name.lower() == name.lower():
+                if self.detect_tool(tool):
+                    return tool
+        return None
+
+    def show_available_tools(self):
+        """Display available tools in a table."""
+        self.detect_all()
+
+        table = Table(title="Available AI Coding Tools", box=box.ROUNDED)
+        table.add_column("#", style="dim")
+        table.add_column("Tool", style="cyan")
+        table.add_column("Provider", style="yellow")
+        table.add_column("Status", style="green")
+        table.add_column("Version", style="blue")
+        table.add_column("Priority", style="magenta")
+
+        for i, tool in enumerate(self.TOOLS, 1):
+            status = "✅ Available" if tool.detected else "❌ Not Found"
+            version = tool.version or "Unknown" if tool.detected else "-"
+
+            # Highlight the default tool
+            if tool.detected and tool == self.detected_tools[0] if self.detected_tools else False:
+                table.add_row(
+                    str(i),
+                    f"[bold green]→ {tool.display_name}[/bold green]",
+                    tool.provider,
+                    status,
+                    version,
+                    str(tool.priority)
+                )
+            else:
+                table.add_row(
+                    str(i),
+                    tool.display_name,
+                    tool.provider,
+                    status,
+                    version,
+                    str(tool.priority)
+                )
+
+        self.console.print(table)
+
+        if self.detected_tools:
+            default = self.detected_tools[0]
+            self.console.print(f"\n[green]Default tool: {default.display_name}[/green]")
+        else:
+            self.console.print("\n[yellow]No AI coding tools detected.[/yellow]")
+            self.console.print("[dim]Install Claude Code, OpenAI CLI, or other tools to enable AI features.[/dim]")
+
+    def get_tool_command(self, tool: AITool, prompt: str) -> List[str]:
+        """Get the command to execute for a tool with a prompt."""
+        if tool.name == "claude-code":
+            return ["claude", prompt]
+        elif tool.name == "hanzo-dev":
+            return ["hanzo", "dev", "--prompt", prompt]
+        elif tool.name == "openai-codex":
+            return ["openai", "api", "completions.create", "-m", "code-davinci-002", "-p", prompt]
+        elif tool.name == "gemini-cli":
+            return ["gemini", "generate", "--prompt", prompt]
+        elif tool.name == "grok-cli":
+            return ["grok", "complete", prompt]
+        elif tool.name == "openhands":
+            return ["openhands", "run", prompt]
+        elif tool.name == "cursor":
+            return ["cursor", "--prompt", prompt]
+        elif tool.name == "aider":
+            return ["aider", "--message", prompt]
+        else:
+            return [tool.command, prompt]
+
+    def execute_with_tool(self, tool: AITool, prompt: str) -> Tuple[bool, str]:
+        """Execute a prompt with a specific tool."""
+        try:
+            command = self.get_tool_command(tool, prompt)
+            result = subprocess.run(
+                command,
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+
+            if result.returncode == 0:
+                return True, result.stdout
+            else:
+                return False, result.stderr or "Command failed"
+        except subprocess.TimeoutExpired:
+            return False, "Command timed out"
+        except Exception as e:
+            return False, str(e)
+
+    def execute_with_fallback(self, prompt: str) -> Tuple[bool, str, AITool]:
+        """Execute with fallback through available tools."""
+        if not self.detected_tools:
+            self.detect_all()
+
+        for tool in self.detected_tools:
+            self.console.print(f"[dim]Trying {tool.display_name}...[/dim]")
+            success, output = self.execute_with_tool(tool, prompt)
+
+            if success:
+                return True, output, tool
+            else:
+                self.console.print(f"[yellow]{tool.display_name} failed: {output}[/yellow]")
+
+        return False, "No available tools could handle the request", None
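For reference, a minimal usage sketch of the new detector module (names and import path taken from the diff above). Detection only probes `PATH` via `shutil.which` and checks the listed environment variables; execution shells out to the underlying CLI with a 30-second timeout:

```python
from hanzo.tools.detector import ToolDetector   # path per this release's file list

detector = ToolDetector()
tools = detector.detect_all()          # AITool entries found on PATH or via env vars, sorted by priority
detector.show_available_tools()        # rich table of all known tools, default highlighted

default = detector.get_default_tool()  # highest-priority detected tool, or None
if default:
    ok, output = detector.execute_with_tool(default, "explain this repository")
    print(output if ok else f"{default.display_name} failed: {output}")
else:
    # Try each detected tool in priority order until one succeeds.
    ok, output, used = detector.execute_with_fallback("explain this repository")
    print(output if ok else output)
```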