hanzo 0.3.13__py3-none-any.whl → 0.3.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of hanzo has been flagged as potentially problematic; see the registry's advisory for more details.
- hanzo/cli.py +1 -1
- hanzo/dev.py +413 -2
- {hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/METADATA +1 -1
- {hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/RECORD +6 -6
- {hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/WHEEL +0 -0
- {hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/entry_points.txt +0 -0
hanzo/cli.py
CHANGED
hanzo/dev.py
CHANGED
@@ -899,6 +899,36 @@ Examples:
         """Direct API chat fallback when network orchestrator isn't available."""
         import os
 
+        # Check for CLI tools and free/local options first
+        if self.orchestrator.orchestrator_model in ["codex", "openai-cli", "openai-codex"]:
+            # Use OpenAI CLI (Codex)
+            await self._use_openai_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["claude", "claude-code", "claude-desktop"]:
+            # Use Claude Desktop/Code
+            await self._use_claude_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["gemini", "gemini-cli", "google-gemini"]:
+            # Use Gemini CLI
+            await self._use_gemini_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["hanzo-ide", "hanzo-dev-ide", "ide"]:
+            # Use Hanzo Dev IDE from ~/work/hanzo/ide
+            await self._use_hanzo_ide(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["codestral", "codestral-free", "free", "mistral-free"]:
+            # Use free Mistral Codestral API
+            await self._use_free_codestral(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["starcoder", "starcoder2", "free-starcoder"]:
+            # Use free StarCoder via HuggingFace
+            await self._use_free_starcoder(message)
+            return
+        elif self.orchestrator.orchestrator_model.startswith("local:"):
+            # Use local model via Ollama or LM Studio
+            await self._use_local_model(message)
+            return
+
         # Try OpenAI first
         if os.getenv("OPENAI_API_KEY"):
             try:
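The hunk above routes on the orchestrator model name before any API-key lookup: CLI tools, free endpoints, and "local:"-prefixed models each dispatch to a dedicated _use_* handler. Below is a minimal, runnable sketch of the same alias-to-handler routing, written as a dispatch table rather than the if/elif chain; the HANDLERS table and the demo coroutines are hypothetical illustrations, not part of hanzo's API.

import asyncio

# Demo handlers standing in for the _use_* coroutines added in dev.py.
async def use_openai_cli(message: str) -> None:
    print(f"[codex] {message}")

async def use_local_model(model: str, message: str) -> None:
    print(f"[{model}] {message}")

# Hypothetical alias table; the released code tests membership per branch.
HANDLERS = {
    "codex": use_openai_cli,
    "openai-cli": use_openai_cli,
    "openai-codex": use_openai_cli,
    # ... remaining aliases (claude, gemini, hanzo-ide, codestral, starcoder) ...
}

async def route(orchestrator_model: str, message: str) -> None:
    if orchestrator_model.startswith("local:"):
        # "local:<name>" selects an Ollama/LM Studio model by name
        await use_local_model(orchestrator_model.replace("local:", "", 1), message)
    elif orchestrator_model in HANDLERS:
        await HANDLERS[orchestrator_model](message)
    else:
        print("no handler matched; fall through to API-key chat")

asyncio.run(route("local:llama3.2", "hello"))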
@@ -943,10 +973,391 @@ Examples:
 
         # No API keys available
         console.print("[red]No AI API keys configured![/red]")
-        console.print("
+        console.print("[yellow]Try these options that don't need your API key:[/yellow]")
+        console.print("\n[bold]CLI Tools (use existing tools):[/bold]")
+        console.print(" • hanzo dev --orchestrator codex # OpenAI CLI (if installed)")
+        console.print(" • hanzo dev --orchestrator claude # Claude Desktop (if installed)")
+        console.print(" • hanzo dev --orchestrator gemini # Gemini CLI (if installed)")
+        console.print(" • hanzo dev --orchestrator hanzo-ide # Hanzo IDE from ~/work/hanzo/ide")
+        console.print("\n[bold]Free APIs (rate limited):[/bold]")
+        console.print(" • hanzo dev --orchestrator codestral # Free Mistral Codestral")
+        console.print(" • hanzo dev --orchestrator starcoder # Free StarCoder")
+        console.print("\n[bold]Local Models (unlimited):[/bold]")
+        console.print(" • hanzo dev --orchestrator local:llama3.2 # Via Ollama")
+        console.print(" • hanzo dev --orchestrator local:codellama # Via Ollama")
+        console.print(" • hanzo dev --orchestrator local:mistral # Via Ollama")
+        console.print("\n[dim]Or set API keys for full access:[/dim]")
         console.print(" • export OPENAI_API_KEY=sk-...")
         console.print(" • export ANTHROPIC_API_KEY=sk-ant-...")
-
+
+    async def _use_free_codestral(self, message: str):
+        """Use free Mistral Codestral API (no API key needed for trial)."""
+        try:
+            import httpx
+
+            console.print("[dim]Using free Codestral API (rate limited)...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                # Mistral offers free tier with rate limits
+                response = await client.post(
+                    "https://api.mistral.ai/v1/chat/completions",
+                    headers={
+                        "Content-Type": "application/json",
+                        # Free tier doesn't need API key for limited usage
+                    },
+                    json={
+                        "model": "codestral-latest",
+                        "messages": [
+                            {"role": "system", "content": "You are Codestral, an AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "temperature": 0.7,
+                        "max_tokens": 2000
+                    },
+                    timeout=30.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("choices"):
+                        console.print(f"[cyan]Codestral:[/cyan] {data['choices'][0]['message']['content']}")
+                else:
+                    console.print("[yellow]Free tier limit reached. Try local models instead:[/yellow]")
+                    console.print(" • Install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
+                    console.print(" • Run: ollama pull codellama")
+                    console.print(" • Use: hanzo dev --orchestrator local:codellama")
+
+        except Exception as e:
+            console.print(f"[red]Codestral error: {e}[/red]")
+            console.print("[yellow]Try local models instead (no limits):[/yellow]")
+            console.print(" • hanzo dev --orchestrator local:codellama")
+
+    async def _use_free_starcoder(self, message: str):
+        """Use free StarCoder via HuggingFace Inference API."""
+        try:
+            import httpx
+
+            console.print("[dim]Using free StarCoder API...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                # HuggingFace offers free inference API
+                response = await client.post(
+                    "https://api-inference.huggingface.co/models/bigcode/starcoder2-15b",
+                    headers={
+                        "Content-Type": "application/json",
+                    },
+                    json={
+                        "inputs": f"<|system|>You are StarCoder, an AI coding assistant.<|end|>\n<|user|>{message}<|end|>\n<|assistant|>",
+                        "parameters": {
+                            "temperature": 0.7,
+                            "max_new_tokens": 2000,
+                            "return_full_text": False
+                        }
+                    },
+                    timeout=30.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if isinstance(data, list) and data:
+                        console.print(f"[cyan]StarCoder:[/cyan] {data[0].get('generated_text', '')}")
+                else:
+                    console.print("[yellow]API limit reached. Install local models:[/yellow]")
+                    console.print(" • brew install ollama")
+                    console.print(" • ollama pull starcoder2")
+                    console.print(" • hanzo dev --orchestrator local:starcoder2")
+
+        except Exception as e:
+            console.print(f"[red]StarCoder error: {e}[/red]")
+
+    async def _use_openai_cli(self, message: str):
+        """Use OpenAI CLI (Codex) - the official OpenAI CLI tool."""
+        try:
+            import subprocess
+            import json
+
+            console.print("[dim]Using OpenAI CLI (Codex)...[/dim]")
+
+            # Check if openai CLI is installed
+            result = subprocess.run(["which", "openai"], capture_output=True, text=True)
+            if result.returncode != 0:
+                console.print("[red]OpenAI CLI not installed![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • pip install openai-cli")
+                console.print(" • openai login")
+                console.print("Then use: hanzo dev --orchestrator codex")
+                return
+
+            # Use openai CLI to chat
+            cmd = ["openai", "api", "chat", "-m", "gpt-4", "-p", message]
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Codex:[/cyan] {stdout.strip()}")
+            else:
+                console.print(f"[red]OpenAI CLI error: {stderr}[/red]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]OpenAI CLI timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using OpenAI CLI: {e}[/red]")
+
+    async def _use_claude_cli(self, message: str):
+        """Use Claude Desktop/Code CLI."""
+        try:
+            import subprocess
+            import os
+
+            console.print("[dim]Using Claude Desktop...[/dim]")
+
+            # Check for Claude Code or Claude Desktop
+            claude_paths = [
+                "/usr/local/bin/claude",
+                "/Applications/Claude.app/Contents/MacOS/Claude",
+                os.path.expanduser("~/Applications/Claude.app/Contents/MacOS/Claude"),
+                "claude",  # In PATH
+            ]
+
+            claude_path = None
+            for path in claude_paths:
+                if os.path.exists(path) or subprocess.run(["which", path], capture_output=True).returncode == 0:
+                    claude_path = path
+                    break
+
+            if not claude_path:
+                console.print("[red]Claude Desktop not found![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • Download from https://claude.ai/desktop")
+                console.print(" • Or: brew install --cask claude")
+                console.print("Then use: hanzo dev --orchestrator claude")
+                return
+
+            # Send message to Claude via CLI or AppleScript on macOS
+            if sys.platform == "darwin":
+                # Use AppleScript to interact with Claude Desktop
+                script = f'''
+                tell application "Claude"
+                    activate
+                    delay 0.5
+                    tell application "System Events"
+                        keystroke "{message.replace('"', '\\"')}"
+                        key code 36 -- Enter key
+                    end tell
+                end tell
+                '''
+
+                subprocess.run(["osascript", "-e", script])
+                console.print("[cyan]Sent to Claude Desktop. Check the app for response.[/cyan]")
+            else:
+                # Try direct CLI invocation
+                process = subprocess.Popen(
+                    [claude_path, "--message", message],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    text=True
+                )
+
+                stdout, stderr = process.communicate(timeout=30)
+
+                if stdout:
+                    console.print(f"[cyan]Claude:[/cyan] {stdout.strip()}")
+
+        except Exception as e:
+            console.print(f"[red]Error using Claude Desktop: {e}[/red]")
+
+    async def _use_gemini_cli(self, message: str):
+        """Use Gemini CLI."""
+        try:
+            import subprocess
+
+            console.print("[dim]Using Gemini CLI...[/dim]")
+
+            # Check if gemini CLI is installed
+            result = subprocess.run(["which", "gemini"], capture_output=True, text=True)
+            if result.returncode != 0:
+                console.print("[red]Gemini CLI not installed![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • pip install google-generativeai-cli")
+                console.print(" • gemini configure")
+                console.print(" • Set GOOGLE_API_KEY environment variable")
+                console.print("Then use: hanzo dev --orchestrator gemini")
+                return
+
+            # Use gemini CLI
+            cmd = ["gemini", "chat", message]
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Gemini:[/cyan] {stdout.strip()}")
+            else:
+                console.print(f"[red]Gemini CLI error: {stderr}[/red]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]Gemini CLI timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using Gemini CLI: {e}[/red]")
+
+    async def _use_hanzo_ide(self, message: str):
+        """Use Hanzo Dev IDE from ~/work/hanzo/ide."""
+        try:
+            import subprocess
+            import os
+
+            console.print("[dim]Using Hanzo Dev IDE...[/dim]")
+
+            # Check if Hanzo IDE exists
+            ide_path = os.path.expanduser("~/work/hanzo/ide")
+            if not os.path.exists(ide_path):
+                console.print("[red]Hanzo Dev IDE not found![/red]")
+                console.print("[yellow]Expected location: ~/work/hanzo/ide[/yellow]")
+                console.print("To set up:")
+                console.print(" • git clone https://github.com/hanzoai/ide ~/work/hanzo/ide")
+                console.print(" • cd ~/work/hanzo/ide && npm install")
+                return
+
+            # Check for the CLI entry point
+            cli_paths = [
+                os.path.join(ide_path, "bin", "hanzo-ide"),
+                os.path.join(ide_path, "hanzo-ide"),
+                os.path.join(ide_path, "cli.js"),
+                os.path.join(ide_path, "index.js"),
+            ]
+
+            cli_path = None
+            for path in cli_paths:
+                if os.path.exists(path):
+                    cli_path = path
+                    break
+
+            if not cli_path:
+                # Try to run with npm/node
+                package_json = os.path.join(ide_path, "package.json")
+                if os.path.exists(package_json):
+                    # Run via npm
+                    cmd = ["npm", "run", "chat", "--", message]
+                    cwd = ide_path
+                else:
+                    console.print("[red]Hanzo IDE CLI not found![/red]")
+                    return
+            else:
+                # Run the CLI directly
+                if cli_path.endswith(".js"):
+                    cmd = ["node", cli_path, "chat", message]
+                else:
+                    cmd = [cli_path, "chat", message]
+                cwd = None
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                cwd=cwd
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Hanzo IDE:[/cyan] {stdout.strip()}")
+            else:
+                if stderr:
+                    console.print(f"[yellow]Hanzo IDE: {stderr}[/yellow]")
+                else:
+                    console.print("[yellow]Hanzo IDE: No response[/yellow]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]Hanzo IDE timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using Hanzo IDE: {e}[/red]")
+
+    async def _use_local_model(self, message: str):
+        """Use local model via Ollama or LM Studio."""
+        import httpx
+
+        model_name = self.orchestrator.orchestrator_model.replace("local:", "")
+
+        # Try Ollama first (default port 11434)
+        try:
+            console.print(f"[dim]Using local {model_name} via Ollama...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/chat",
+                    json={
+                        "model": model_name,
+                        "messages": [
+                            {"role": "system", "content": "You are a helpful AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "stream": False
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("message"):
+                        console.print(f"[cyan]{model_name}:[/cyan] {data['message']['content']}")
+                        return
+
+        except Exception:
+            pass
+
+        # Try LM Studio (default port 1234)
+        try:
+            console.print(f"[dim]Trying LM Studio...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:1234/v1/chat/completions",
+                    json={
+                        "model": model_name,
+                        "messages": [
+                            {"role": "system", "content": "You are a helpful AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "temperature": 0.7,
+                        "max_tokens": 2000
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("choices"):
+                        console.print(f"[cyan]{model_name}:[/cyan] {data['choices'][0]['message']['content']}")
+                        return
+
+        except Exception:
+            pass
+
+        # Neither worked
+        console.print(f"[red]Local model '{model_name}' not available[/red]")
+        console.print("[yellow]To use local models:[/yellow]")
+        console.print("\nOption 1 - Ollama (recommended):")
+        console.print(" • Install: curl -fsSL https://ollama.com/install.sh | sh")
+        console.print(f" • Pull model: ollama pull {model_name}")
+        console.print(" • It will auto-start when you use hanzo dev")
+        console.print("\nOption 2 - LM Studio:")
+        console.print(" • Download from https://lmstudio.ai")
+        console.print(f" • Load {model_name} model")
+        console.print(" • Start local server (port 1234)")
 
     async def handle_memory_command(self, command: str):
         """Handle memory/context commands starting with #."""
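The new _use_local_model falls back through two backends: Ollama's chat endpoint on port 11434, then LM Studio's OpenAI-compatible server on port 1234. A small standalone sketch for checking which backend is reachable before choosing a "local:" orchestrator; it reuses the same hosts and ports as the diff above, but the script itself is an illustration, not part of the hanzo package.

import asyncio
import httpx

# Health-check URLs for the two backends _use_local_model tries.
BACKENDS = {
    "Ollama": "http://localhost:11434/api/tags",      # lists pulled models
    "LM Studio": "http://localhost:1234/v1/models",   # OpenAI-compatible
}

async def probe() -> None:
    async with httpx.AsyncClient(timeout=2.0) as client:
        for name, url in BACKENDS.items():
            try:
                resp = await client.get(url)
                status = "up" if resp.status_code == 200 else f"HTTP {resp.status_code}"
            except httpx.HTTPError:
                status = "down"
            print(f"{name}: {status}")

asyncio.run(probe())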
{hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.13
+Version: 0.3.14
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
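After upgrading, the installed version can be confirmed against this metadata using only the standard library:

# Read the installed distribution's version from its wheel metadata.
from importlib.metadata import version

print(version("hanzo"))  # expected: 0.3.14 after this release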
{hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 hanzo/__init__.py,sha256=f6N_RcJZ0F9ADrROlvPi1OrgwjF8cWQm34cml8hb1zk,169
 hanzo/__main__.py,sha256=F3Vz0Ty3bdAj_8oxyETMIqxlmNRnJOAFB1XPxbyfouI,105
-hanzo/cli.py,sha256=…
-hanzo/dev.py,sha256=…
+hanzo/cli.py,sha256=4cmWgFp1rfefWnNUHfu5Tclfbl0X9_NCygUktRe5C8g,18586
+hanzo/dev.py,sha256=_f1PcGuZ1m6M6B-vRsVpaceGSSMVP5lQe-p30muAGWQ,91152
 hanzo/mcp_server.py,sha256=XVygFNn-9CVdu8c95sP7fQjIRtA8K7nsGpgQNe44BRg,460
 hanzo/orchestrator_config.py,sha256=JV7DS8aVZwBJ9XzgkQronFwV_A50QyXG3MH_pKwmCB8,11006
 hanzo/repl.py,sha256=sW1quuqGkJ_AqgjN2vLNdtWgKDlXIkXiO9Bo1QQI0G4,1089
@@ -24,7 +24,7 @@ hanzo/utils/__init__.py,sha256=5RRwKI852vp8smr4xCRgeKfn7dLEnHbdXGfVYTZ5jDQ,69
 hanzo/utils/config.py,sha256=FD_LoBpcoF5dgJ7WL4o6LDp2pdOy8kS-dJ6iRO2GcGM,4728
 hanzo/utils/net_check.py,sha256=YFbJ65SzfDYHkHLZe3n51VhId1VI3zhyx8p6BM-l6jE,3017
 hanzo/utils/output.py,sha256=W0j3psF07vJiX4s02gbN4zYWfbKNsb8TSIoagBSf5vA,2704
-hanzo-0.3.13.dist-info/METADATA,sha256=…
-hanzo-0.3.13.dist-info/WHEEL,sha256=…
-hanzo-0.3.13.dist-info/entry_points.txt,sha256=…
-hanzo-0.3.13.dist-info/RECORD,,
+hanzo-0.3.14.dist-info/METADATA,sha256=9MjoIbpUCHSZyM34z4P4ZsIROnyNQRCKB7HNJp53mw4,4279
+hanzo-0.3.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hanzo-0.3.14.dist-info/entry_points.txt,sha256=pQLPMdqOXU_2BfTcMDhkqTCDNk_H6ApvYuSaWcuQOOw,171
+hanzo-0.3.14.dist-info/RECORD,,
{hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/WHEEL
File without changes

{hanzo-0.3.13.dist-info → hanzo-0.3.14.dist-info}/entry_points.txt
File without changes
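Each RECORD entry above has the form path,sha256=<digest>,<size>, where the digest is an unpadded urlsafe-base64 sha256 of the file. A short sketch (not part of the package) for recomputing an entry when spot-checking extracted wheel contents:

# Recompute a RECORD-style line for any file on disk.
import base64
import hashlib
import os
import sys

def record_line(path: str) -> str:
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    # RECORD uses urlsafe base64 with the trailing '=' padding stripped.
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{os.path.getsize(path)}"

print(record_line(sys.argv[1]))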