hanzo 0.3.24__py3-none-any.whl → 0.3.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hanzo might be problematic. Click here for more details.
- hanzo/__init__.py +2 -2
- hanzo/cli.py +13 -5
- hanzo/commands/auth.py +206 -266
- hanzo/commands/auth_broken.py +377 -0
- hanzo/commands/chat.py +3 -0
- hanzo/interactive/enhanced_repl.py +688 -0
- hanzo/interactive/model_selector.py +166 -0
- hanzo/interactive/repl.py +2 -2
- hanzo/tools/__init__.py +5 -0
- hanzo/tools/detector.py +291 -0
- hanzo/ui/__init__.py +13 -0
- hanzo/ui/inline_startup.py +136 -0
- hanzo/ui/startup.py +350 -0
- {hanzo-0.3.24.dist-info → hanzo-0.3.26.dist-info}/METADATA +1 -1
- {hanzo-0.3.24.dist-info → hanzo-0.3.26.dist-info}/RECORD +17 -9
- {hanzo-0.3.24.dist-info → hanzo-0.3.26.dist-info}/WHEEL +0 -0
- {hanzo-0.3.24.dist-info → hanzo-0.3.26.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,688 @@
|
|
|
1
|
+
"""Enhanced REPL with model selection and authentication."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import json
|
|
5
|
+
import httpx
|
|
6
|
+
import asyncio
|
|
7
|
+
from typing import Optional, Dict, Any
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
|
|
11
|
+
from rich.console import Console
|
|
12
|
+
from rich.markdown import Markdown
|
|
13
|
+
from rich.panel import Panel
|
|
14
|
+
from rich.table import Table
|
|
15
|
+
from rich.text import Text
|
|
16
|
+
from rich import box
|
|
17
|
+
from prompt_toolkit import PromptSession
|
|
18
|
+
from prompt_toolkit.history import FileHistory
|
|
19
|
+
from prompt_toolkit.completion import WordCompleter
|
|
20
|
+
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
|
21
|
+
from prompt_toolkit.formatted_text import HTML
|
|
22
|
+
|
|
23
|
+
try:
|
|
24
|
+
from ..tools.detector import ToolDetector, AITool
|
|
25
|
+
except ImportError:
|
|
26
|
+
ToolDetector = None
|
|
27
|
+
AITool = None
|
|
28
|
+
|
|
29
|
+
try:
|
|
30
|
+
from .model_selector import QuickModelSelector, BackgroundTaskManager
|
|
31
|
+
except ImportError:
|
|
32
|
+
QuickModelSelector = None
|
|
33
|
+
BackgroundTaskManager = None
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class EnhancedHanzoREPL:
    """Enhanced REPL with model selection and authentication.

    Combines cloud LLM access, locally detected AI coding tools, and a
    background task manager behind a prompt_toolkit-driven input loop.
    Configuration and credentials are persisted as JSON files under
    ``~/.hanzo`` (``config.json`` and ``auth.json``).
    """

    # Built-in model catalog: model id -> human-readable display name.
    MODELS = {
        # OpenAI
        "gpt-4": "OpenAI GPT-4",
        "gpt-4-turbo": "OpenAI GPT-4 Turbo",
        "gpt-3.5-turbo": "OpenAI GPT-3.5 Turbo",

        # Anthropic
        "claude-3-opus": "Anthropic Claude 3 Opus",
        "claude-3-sonnet": "Anthropic Claude 3 Sonnet",
        "claude-3-haiku": "Anthropic Claude 3 Haiku",
        "claude-2.1": "Anthropic Claude 2.1",

        # Google
        "gemini-pro": "Google Gemini Pro",
        "gemini-pro-vision": "Google Gemini Pro Vision",

        # Meta
        "llama2-70b": "Meta Llama 2 70B",
        "llama2-13b": "Meta Llama 2 13B",
        "llama2-7b": "Meta Llama 2 7B",
        "codellama-34b": "Meta Code Llama 34B",

        # Mistral
        "mistral-medium": "Mistral Medium",
        "mistral-small": "Mistral Small",
        "mixtral-8x7b": "Mixtral 8x7B",

        # Local models
        "local:llama2": "Local Llama 2",
        "local:mistral": "Local Mistral",
        "local:phi-2": "Local Phi-2",
    }

    def get_all_models(self) -> Dict[str, str]:
        """Get all available models including detected tools.

        Returns a copy of :attr:`MODELS` extended with one
        ``tool:<name>`` entry per locally detected AI tool.
        """
        models = dict(self.MODELS)

        # Detected tools are addressable with a "tool:" prefix so they can
        # share the same selection code path as regular models.
        if self.detected_tools:
            for tool in self.detected_tools:
                models[f"tool:{tool.name}"] = f"{tool.display_name} (Tool)"

        return models

    def __init__(self, console: Optional[Console] = None):
        """Initialize the REPL.

        Args:
            console: Optional Rich console to print to; a new one is
                created when omitted.
        """
        self.console = console or Console()
        self.config_dir = Path.home() / ".hanzo"
        self.config_file = self.config_dir / "config.json"
        self.auth_file = self.config_dir / "auth.json"

        # Load persisted configuration and credentials (empty dicts when
        # the files are missing or corrupt).
        self.config = self.load_config()
        self.auth = self.load_auth()

        # Tool detection is optional — the detector module may be absent.
        self.tool_detector = ToolDetector(console) if ToolDetector else None
        self.detected_tools = []
        self.current_tool = None

        # Background task manager is likewise optional.
        self.task_manager = BackgroundTaskManager(console) if BackgroundTaskManager else None

        # Detect available tools and pick the default "model": a detected
        # tool wins over the configured cloud model when one is present.
        if self.tool_detector:
            self.detected_tools = self.tool_detector.detect_all()
            default_tool = self.tool_detector.get_default_tool()

            if default_tool:
                self.current_model = f"tool:{default_tool.name}"
                self.current_tool = default_tool
                self.console.print(f"[green]✓ Detected {default_tool.display_name} as default AI assistant[/green]")
            else:
                # Fallback to the configured (or hard default) cloud model.
                self.current_model = self.config.get("default_model", "gpt-3.5-turbo")
        else:
            # No tool detector available, use regular models only.
            self.current_model = self.config.get("default_model", "gpt-3.5-turbo")

        # Prompt session with persistent history and history-based
        # auto-suggestions.
        self.session = PromptSession(
            history=FileHistory(str(self.config_dir / ".repl_history")),
            auto_suggest=AutoSuggestFromHistory(),
        )

        # Command dispatch table: name -> async handler(args: str).
        self.commands = {
            "help": self.show_help,
            "exit": self.exit_repl,
            "quit": self.exit_repl,
            "clear": self.clear_screen,
            "status": self.show_status,
            "model": self.change_model,
            "models": self.list_models,
            "tools": self.list_tools,
            "agents": self.list_tools,  # Alias for tools
            "login": self.login,
            "logout": self.logout,
            "config": self.show_config,
            "tasks": self.show_tasks,
            "kill": self.kill_task,
            "quick": self.quick_model_select,
        }

        self.running = False

    def load_config(self) -> Dict[str, Any]:
        """Load configuration from file, returning {} when missing or corrupt."""
        if self.config_file.exists():
            try:
                return json.loads(self.config_file.read_text())
            except (OSError, json.JSONDecodeError):
                # A corrupt or unreadable config should not prevent the
                # REPL from starting; fall through to an empty config.
                pass
        return {}

    def save_config(self):
        """Persist the current configuration to ``config.json``."""
        self.config_dir.mkdir(exist_ok=True)
        self.config_file.write_text(json.dumps(self.config, indent=2))

    def load_auth(self) -> Dict[str, Any]:
        """Load authentication data, returning {} when missing or corrupt."""
        if self.auth_file.exists():
            try:
                return json.loads(self.auth_file.read_text())
            except (OSError, json.JSONDecodeError):
                # Treat unreadable credentials as "not logged in" rather
                # than crashing at startup.
                pass
        return {}

    def save_auth(self):
        """Persist authentication data to ``auth.json``."""
        self.config_dir.mkdir(exist_ok=True)
        self.auth_file.write_text(json.dumps(self.auth, indent=2))

    def get_prompt(self) -> str:
        """Get the simple input prompt string."""
        # A plain "> " prompt; any framing is handled by prompt_toolkit.
        return "> "

    def is_authenticated(self) -> bool:
        """Check if the user is authenticated by any supported mechanism."""
        # Environment variable takes precedence.
        if os.getenv("HANZO_API_KEY"):
            return True

        # Persisted API key.
        if self.auth.get("api_key"):
            return True

        # Interactive login session.
        if self.auth.get("logged_in"):
            return True

        return False

    def get_model_info(self) -> str:
        """Return a Rich-markup string describing the current model/tool."""
        model = self.current_model

        # A "tool:" model means we are delegating to a detected AI tool.
        if model.startswith("tool:"):
            if self.current_tool:
                return f"[dim cyan]agent: {self.current_tool.display_name}[/dim cyan]"
            else:
                tool_name = model.replace("tool:", "")
                return f"[dim cyan]agent: {tool_name}[/dim cyan]"

        # Infer the provider from the model id prefix.
        if model.startswith("gpt"):
            provider = "openai"
        elif model.startswith("claude"):
            provider = "anthropic"
        elif model.startswith("gemini"):
            provider = "google"
        elif model.startswith("llama") or model.startswith("codellama"):
            provider = "meta"
        elif model.startswith("mistral") or model.startswith("mixtral"):
            provider = "mistral"
        elif model.startswith("local:"):
            provider = "local"
        else:
            provider = "unknown"

        return f"[dim]model: {provider}/{model}[/dim]"

    async def run(self):
        """Run the enhanced REPL loop until the user exits."""
        self.running = True

        # Tab-completion over built-in commands, model ids, and the
        # pass-through hanzo CLI subcommands.
        commands = list(self.commands.keys())
        models = list(self.MODELS.keys())
        cli_commands = ["chat", "ask", "agent", "node", "mcp", "network",
                        "auth", "config", "tools", "miner", "serve", "net",
                        "dev", "router"]

        completer = WordCompleter(
            commands + models + cli_commands,
            ignore_case=True,
        )

        while self.running:
            try:
                # Show the active model/agent above the prompt.
                self.console.print(self.get_model_info())

                command = await self.session.prompt_async(
                    self.get_prompt(),
                    completer=completer,
                    vi_mode=True  # Enable vi mode for better navigation
                )

                if not command.strip():
                    continue

                # Slash commands (/model, /status, ...) have their own map.
                if command.startswith("/"):
                    await self.handle_slash_command(command[1:])
                    continue

                # Split into command word and the rest as its arguments.
                parts = command.strip().split(maxsplit=1)
                cmd = parts[0].lower()
                args = parts[1] if len(parts) > 1 else ""

                # Dispatch: built-in command, CLI pass-through, or chat.
                if cmd in self.commands:
                    await self.commands[cmd](args)
                elif cmd in cli_commands:
                    await self.execute_command(cmd, args)
                else:
                    # Anything else is treated as a chat message.
                    await self.chat_with_ai(command)

            except KeyboardInterrupt:
                # Ctrl-C cancels the current line but keeps the REPL alive.
                continue
            except EOFError:
                # Ctrl-D exits the loop.
                break
            except Exception as e:
                # Surface unexpected errors without killing the session.
                self.console.print(f"[red]Error: {e}[/red]")

    async def handle_slash_command(self, command: str):
        """Handle slash commands like /model, /status, etc.

        Args:
            command: The command text with the leading "/" stripped.
        """
        parts = command.strip().split(maxsplit=1)
        cmd = parts[0].lower()
        args = parts[1] if len(parts) > 1 else ""

        # Short aliases for common slash commands.
        slash_map = {
            "m": "model",
            "s": "status",
            "h": "help",
            "q": "quit",
            "c": "clear",
            "models": "models",
            "login": "login",
            "logout": "logout",
        }

        mapped_cmd = slash_map.get(cmd, cmd)

        if mapped_cmd in self.commands:
            await self.commands[mapped_cmd](args)
        else:
            self.console.print(f"[yellow]Unknown command: /{cmd}[/yellow]")
            self.console.print("[dim]Type /help for available commands[/dim]")

    async def show_status(self, args: str = ""):
        """Show comprehensive status: auth, model, local services, API."""
        table = Table(title="System Status", box=box.ROUNDED)
        table.add_column("Component", style="cyan")
        table.add_column("Status", style="green")
        table.add_column("Details", style="dim")

        # Authentication status.
        if self.is_authenticated():
            auth_status = "✅ Authenticated"
            auth_details = self.auth.get("email", "API Key configured")
        else:
            auth_status = "❌ Not authenticated"
            auth_details = "Run /login to authenticate"
        table.add_row("Authentication", auth_status, auth_details)

        # Current model.
        model_name = self.MODELS.get(self.current_model, self.current_model)
        table.add_row("Current Model", f"🤖 {self.current_model}", model_name)

        # Router health probe (local service on port 4000).
        try:
            response = httpx.get("http://localhost:4000/health", timeout=1)
            router_status = "✅ Running" if response.status_code == 200 else "⚠️ Unhealthy"
            router_details = "Port 4000"
        except httpx.HTTPError:
            # Connection refused / timeout — the router is not reachable.
            router_status = "❌ Offline"
            router_details = "Run 'hanzo router start'"
        table.add_row("Router", router_status, router_details)

        # Node health probe (local service on port 8000).
        try:
            response = httpx.get("http://localhost:8000/health", timeout=1)
            node_status = "✅ Running" if response.status_code == 200 else "⚠️ Unhealthy"
            node_details = "Port 8000"
        except httpx.HTTPError:
            node_status = "❌ Offline"
            node_details = "Run 'hanzo node start'"
        table.add_row("Node", node_status, node_details)

        # Cloud API key configuration.
        if os.getenv("HANZO_API_KEY"):
            api_status = "✅ Configured"
            api_details = "Using Hanzo Cloud API"
        else:
            api_status = "⚠️ Not configured"
            api_details = "Set HANZO_API_KEY environment variable"
        table.add_row("Cloud API", api_status, api_details)

        self.console.print(table)

        # Show additional info.
        if self.auth.get("last_login"):
            self.console.print(f"\n[dim]Last login: {self.auth['last_login']}[/dim]")

    async def change_model(self, args: str = ""):
        """Change the current model or tool.

        Args:
            args: A model id, tool name, ``tool:<name>``, or (when prompted
                interactively) a 1-based number from the /models listing.
        """
        if not args:
            # No argument: show the menu and prompt for a selection.
            await self.list_models("")
            self.console.print("\n[cyan]Enter model/tool name or number:[/cyan]")

            try:
                selection = await self.session.prompt_async("> ")

                if selection.isdigit():
                    num = int(selection)

                    # Numbers 1..len(tools) select a detected tool; the
                    # remainder index into the model catalog.
                    if self.detected_tools and 1 <= num <= len(self.detected_tools):
                        tool = self.detected_tools[num - 1]
                        args = f"tool:{tool.name}"
                    else:
                        model_idx = num - len(self.detected_tools) - 1 if self.detected_tools else num - 1
                        models_list = list(self.MODELS.keys())
                        if 0 <= model_idx < len(models_list):
                            args = models_list[model_idx]
                        else:
                            self.console.print("[red]Invalid selection[/red]")
                            return
                else:
                    args = selection
            except (KeyboardInterrupt, EOFError):
                return

        # Tool selection: either an explicit "tool:" prefix or a bare name
        # matching one of the detected tools.
        # NOTE: parenthesized deliberately — the original expression parsed
        # as "(... or ...) if detected_tools else False", which misrouted
        # "tool:" arguments when no tools were detected.
        is_tool = args.startswith("tool:") or (
            bool(self.detected_tools) and args in [t.name for t in self.detected_tools]
        )
        if is_tool:
            tool_name = args.replace("tool:", "") if args.startswith("tool:") else args

            # Find the tool by internal or display name.
            tool = None
            for t in self.detected_tools:
                if t.name == tool_name or t.display_name.lower() == tool_name.lower():
                    tool = t
                    break

            if tool:
                self.current_model = f"tool:{tool.name}"
                self.current_tool = tool
                self.config["default_model"] = self.current_model
                self.save_config()
                self.console.print(f"[green]✅ Switched to {tool.display_name}[/green]")
            else:
                self.console.print(f"[red]Tool not found: {tool_name}[/red]")
                self.console.print("[dim]Use /tools to see available tools[/dim]")

        # Regular model (catalog entry or any "local:" model id).
        elif args in self.MODELS or args.startswith("local:"):
            self.current_model = args
            self.current_tool = None
            self.config["default_model"] = args
            self.save_config()

            model_name = self.MODELS.get(args, args)
            self.console.print(f"[green]✅ Switched to {model_name}[/green]")

        else:
            self.console.print(f"[red]Unknown model or tool: {args}[/red]")
            self.console.print("[dim]Use /models or /tools to see available options[/dim]")

    async def list_tools(self, args: str = ""):
        """List available AI tools detected on this machine."""
        if self.tool_detector:
            self.tool_detector.show_available_tools()
        else:
            self.console.print("[yellow]Tool detection not available[/yellow]")

    async def list_models(self, args: str = ""):
        """List available models (and detected tools) in a table."""
        # Detected tools are listed first, numbered 1..N, so their numbers
        # line up with the numeric selection in change_model().
        if self.detected_tools:
            self.console.print("[bold cyan]AI Coding Assistants (Detected):[/bold cyan]")
            for i, tool in enumerate(self.detected_tools, 1):
                marker = "→" if self.current_model == f"tool:{tool.name}" else " "
                self.console.print(f"  {marker} {i}. {tool.display_name} ({tool.provider})")
            self.console.print()

        table = Table(title="Language Models", box=box.ROUNDED)
        table.add_column("#", style="dim")
        table.add_column("Model ID", style="cyan")
        table.add_column("Name", style="white")
        table.add_column("Provider", style="yellow")

        # Model numbering continues after the tool numbering above.
        start_idx = len(self.detected_tools) + 1 if self.detected_tools else 1
        for i, (model_id, model_name) in enumerate(self.MODELS.items(), start_idx):
            # Derive the provider label from the model id prefix.
            if model_id.startswith("gpt"):
                provider = "OpenAI"
            elif model_id.startswith("claude"):
                provider = "Anthropic"
            elif model_id.startswith("gemini"):
                provider = "Google"
            elif model_id.startswith("llama") or model_id.startswith("codellama"):
                provider = "Meta"
            elif model_id.startswith("mistral") or model_id.startswith("mixtral"):
                provider = "Mistral"
            elif model_id.startswith("local:"):
                provider = "Local"
            else:
                provider = "Other"

            # Highlight the currently selected model.
            if model_id == self.current_model:
                table.add_row(
                    str(i),
                    f"[bold green]→ {model_id}[/bold green]",
                    f"[bold]{model_name}[/bold]",
                    provider
                )
            else:
                table.add_row(str(i), model_id, model_name, provider)

        self.console.print(table)
        self.console.print("\n[dim]Use /model <name> or /model <number> to switch[/dim]")

    async def login(self, args: str = ""):
        """Login to Hanzo Cloud interactively."""
        self.console.print("[cyan]Hanzo Authentication[/cyan]\n")

        # Short-circuit when already authenticated.
        if self.is_authenticated():
            self.console.print("[yellow]Already authenticated[/yellow]")
            if self.auth.get("email"):
                self.console.print(f"Logged in as: {self.auth['email']}")
            return

        try:
            email = await self.session.prompt_async("Email: ")

            # Password entry with hidden input.
            from prompt_toolkit import prompt
            password = prompt("Password: ", is_password=True)

            self.console.print("\n[dim]Authenticating...[/dim]")

            # TODO: Implement actual authentication.
            # For now the login is simulated and always succeeds.
            await asyncio.sleep(1)

            # Persist the session.
            self.auth["email"] = email
            self.auth["logged_in"] = True
            self.auth["last_login"] = datetime.now().isoformat()
            self.save_auth()

            self.console.print("[green]✅ Successfully logged in![/green]")

        except (KeyboardInterrupt, EOFError):
            self.console.print("\n[yellow]Login cancelled[/yellow]")

    async def logout(self, args: str = ""):
        """Logout from Hanzo, clearing persisted and in-process credentials."""
        if not self.is_authenticated():
            self.console.print("[yellow]Not logged in[/yellow]")
            return

        # Clear persisted auth state.
        self.auth = {}
        self.save_auth()

        # Clear the API key from this process's environment if set.
        if "HANZO_API_KEY" in os.environ:
            del os.environ["HANZO_API_KEY"]

        self.console.print("[green]✅ Successfully logged out[/green]")

    async def show_config(self, args: str = ""):
        """Show the current configuration as pretty-printed JSON."""
        config_text = json.dumps(self.config, indent=2)
        self.console.print(Panel(config_text, title="Configuration", box=box.ROUNDED))

    async def show_help(self, args: str = ""):
        """Show enhanced help rendered as Markdown."""
        help_text = """
# Hanzo Enhanced REPL

## Slash Commands:
- `/model [name]` - Change AI model (or `/m`)
- `/models` - List available models
- `/tools` - List available AI tools
- `/quick` - Quick model selector (arrow keys)
- `/tasks` - Show background tasks
- `/kill [id]` - Kill background task
- `/status` - Show system status (or `/s`)
- `/login` - Login to Hanzo Cloud
- `/logout` - Logout from Hanzo
- `/config` - Show configuration
- `/help` - Show this help (or `/h`)
- `/clear` - Clear screen (or `/c`)
- `/quit` - Exit REPL (or `/q`)

## Quick Model Selection:
- Press ↓ arrow key for quick model selector
- Use ↑/↓ to navigate, Enter to select
- Esc to cancel

## Model Selection:
- Use `/model gpt-4` to switch to GPT-4
- Use `/model 3` to select model by number
- Current model shown in prompt: `hanzo [gpt] >`

## Authentication:
- 🔓 = Authenticated (logged in or API key set)
- 🔒 = Not authenticated
- Use `/login` to authenticate with Hanzo Cloud

## Tips:
- Type any message to chat with current model
- Use Tab for command completion
- Use Up/Down arrows for history
"""
        self.console.print(Markdown(help_text))

    async def clear_screen(self, args: str = ""):
        """Clear the screen."""
        self.console.clear()

    async def exit_repl(self, args: str = ""):
        """Exit the REPL loop."""
        self.running = False
        self.console.print("[yellow]Goodbye! 👋[/yellow]")

    async def execute_command(self, cmd: str, args: str):
        """Execute a hanzo CLI subcommand in a subprocess.

        Args:
            cmd: The hanzo subcommand (e.g. "ask", "node").
            args: Raw argument string appended to the command line.
        """
        # Imported here to avoid circular imports at module load.
        import subprocess

        full_cmd = f"hanzo {cmd} {args}".strip()
        self.console.print(f"[dim]Executing: {full_cmd}[/dim]")

        try:
            # NOTE(security): shell=True with interpolated user input is
            # susceptible to shell injection; args should be sanitized or
            # the call migrated to an argv list.
            result = subprocess.run(
                full_cmd,
                shell=True,
                capture_output=True,
                text=True
            )

            if result.stdout:
                self.console.print(result.stdout)
            if result.stderr:
                self.console.print(f"[red]{result.stderr}[/red]")

        except Exception as e:
            self.console.print(f"[red]Error executing command: {e}[/red]")

    async def chat_with_ai(self, message: str):
        """Chat with AI using the current model or detected tool.

        Args:
            message: The user's free-form chat message.
        """
        # A selected tool handles the message directly; on failure we fall
        # back to the cloud model path.
        if self.current_model.startswith("tool:") and self.current_tool:
            self.console.print(f"[dim]Using {self.current_tool.display_name}...[/dim]")

            success, output = self.tool_detector.execute_with_tool(self.current_tool, message)

            if success:
                self.console.print(output)
            else:
                self.console.print(f"[yellow]{self.current_tool.display_name} failed, trying cloud model...[/yellow]")
                await self.execute_command("ask", f"--cloud --model gpt-3.5-turbo {message}")
        else:
            # Regular model goes through `hanzo ask`.
            await self.execute_command("ask", f"--cloud --model {self.current_model} {message}")

    async def quick_model_select(self, args: str = ""):
        """Quick model selector driven by arrow keys."""
        if not QuickModelSelector:
            self.console.print("[yellow]Quick selector not available[/yellow]")
            return

        # Offer detected tools alongside the model catalog.
        tools = [(f"tool:{t.name}", t.display_name) for t in self.detected_tools] if self.detected_tools else []
        models = list(self.MODELS.items())

        selector = QuickModelSelector(models, tools, self.current_model)
        selected = await selector.run()

        if selected:
            # Reuse the normal switching logic (persists the choice).
            await self.change_model(selected)

    async def show_tasks(self, args: str = ""):
        """Show background tasks."""
        if self.task_manager:
            self.task_manager.list_tasks()
        else:
            self.console.print("[yellow]Task manager not available[/yellow]")

    async def kill_task(self, args: str = ""):
        """Kill a background task by id, or all tasks with "all".

        When called without an argument, lists tasks and prompts for one.
        """
        if not self.task_manager:
            self.console.print("[yellow]Task manager not available[/yellow]")
            return

        if args:
            if args.lower() == "all":
                self.task_manager.kill_all()
            else:
                self.task_manager.kill_task(args)
        else:
            # Show tasks and prompt for a selection.
            self.task_manager.list_tasks()
            self.console.print("\n[cyan]Enter task ID to kill (or 'all' for all tasks):[/cyan]")
            try:
                task_id = await self.session.prompt_async("> ")
                if task_id:
                    if task_id.lower() == "all":
                        self.task_manager.kill_all()
                    else:
                        self.task_manager.kill_task(task_id)
            except (KeyboardInterrupt, EOFError):
                pass