hanzo 0.3.13-py3-none-any.whl → 0.3.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo might be problematic.
- hanzo/cli.py +1 -1
- hanzo/dev.py +551 -11
- {hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/METADATA +1 -1
- {hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/RECORD +6 -6
- {hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/WHEEL +0 -0
- {hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/entry_points.txt +0 -0
hanzo/cli.py CHANGED

hanzo/dev.py CHANGED
@@ -857,7 +857,19 @@ Examples:
     async def chat_with_agents(self, message: str):
         """Send message to AI agents for natural chat."""
         try:
-            #
+            # For codex and other CLI tools, go straight to direct API chat
+            if hasattr(self.orchestrator, 'orchestrator_model'):
+                model = self.orchestrator.orchestrator_model
+                if model in ["codex", "openai-cli", "openai-codex", "claude", "claude-code",
+                             "claude-desktop", "gemini", "gemini-cli", "google-gemini",
+                             "hanzo-ide", "hanzo-dev-ide", "ide", "codestral", "codestral-free",
+                             "free", "mistral-free", "starcoder", "starcoder2", "free-starcoder"] or \
+                   model.startswith("local:"):
+                    # Use direct API/CLI chat for these models
+                    await self._direct_api_chat(message)
+                    return
+
+            # Show thinking indicator for network orchestrators
             console.print("[dim]Thinking...[/dim]")
 
             # Check if we have a network orchestrator with actual AI
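The early return added here routes purely on the model string: a fixed alias list plus a "local:" prefix check. A minimal standalone sketch of that predicate, assuming nothing beyond the list copied from the hunk above (CLI_MODELS and routes_to_direct_chat are hypothetical names for illustration, not hanzo API):

# The alias list is transcribed from the diff; only the wrapper function is invented.
CLI_MODELS = ["codex", "openai-cli", "openai-codex", "claude", "claude-code",
              "claude-desktop", "gemini", "gemini-cli", "google-gemini",
              "hanzo-ide", "hanzo-dev-ide", "ide", "codestral", "codestral-free",
              "free", "mistral-free", "starcoder", "starcoder2", "free-starcoder"]

def routes_to_direct_chat(model: str) -> bool:
    """True when chat_with_agents should bypass the network orchestrator."""
    return model in CLI_MODELS or model.startswith("local:")

assert routes_to_direct_chat("codex")
assert routes_to_direct_chat("local:llama3.2")
assert not routes_to_direct_chat("gpt-4")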
@@ -876,7 +888,7 @@ Examples:
                 console.print("[yellow]No response from agent[/yellow]")
 
         elif hasattr(self.orchestrator, 'execute_with_critique'):
-            # Use multi-Claude orchestrator
+            # Use multi-Claude orchestrator - but now it will use real AI!
             result = await self.orchestrator.execute_with_critique(message)
 
             if result.get("output"):
@@ -899,6 +911,36 @@ Examples:
         """Direct API chat fallback when network orchestrator isn't available."""
         import os
 
+        # Check for CLI tools and free/local options first
+        if self.orchestrator.orchestrator_model in ["codex", "openai-cli", "openai-codex"]:
+            # Use OpenAI CLI (Codex)
+            await self._use_openai_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["claude", "claude-code", "claude-desktop"]:
+            # Use Claude Desktop/Code
+            await self._use_claude_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["gemini", "gemini-cli", "google-gemini"]:
+            # Use Gemini CLI
+            await self._use_gemini_cli(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["hanzo-ide", "hanzo-dev-ide", "ide"]:
+            # Use Hanzo Dev IDE from ~/work/hanzo/ide
+            await self._use_hanzo_ide(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["codestral", "codestral-free", "free", "mistral-free"]:
+            # Use free Mistral Codestral API
+            await self._use_free_codestral(message)
+            return
+        elif self.orchestrator.orchestrator_model in ["starcoder", "starcoder2", "free-starcoder"]:
+            # Use free StarCoder via HuggingFace
+            await self._use_free_starcoder(message)
+            return
+        elif self.orchestrator.orchestrator_model.startswith("local:"):
+            # Use local model via Ollama or LM Studio
+            await self._use_local_model(message)
+            return
+
         # Try OpenAI first
         if os.getenv("OPENAI_API_KEY"):
             try:
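The elif ladder above maps many aliases onto seven handlers. Purely as an illustration of the same mapping (handler names and aliases are copied from the diff; the lookup table itself is an assumption, not code that ships in hanzo):

# Illustrative only: the elif ladder in _direct_api_chat as a lookup table.
ROUTES = {}
for handler, aliases in {
    "_use_openai_cli": ["codex", "openai-cli", "openai-codex"],
    "_use_claude_cli": ["claude", "claude-code", "claude-desktop"],
    "_use_gemini_cli": ["gemini", "gemini-cli", "google-gemini"],
    "_use_hanzo_ide": ["hanzo-ide", "hanzo-dev-ide", "ide"],
    "_use_free_codestral": ["codestral", "codestral-free", "free", "mistral-free"],
    "_use_free_starcoder": ["starcoder", "starcoder2", "free-starcoder"],
}.items():
    for alias in aliases:
        ROUTES[alias] = handler

def resolve_handler(model: str) -> str:
    # "local:<name>" models all take the Ollama/LM Studio path
    if model.startswith("local:"):
        return "_use_local_model"
    # unknown models fall through to the API-key path in _direct_api_chat
    return ROUTES.get(model, "api-fallback")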
@@ -943,10 +985,391 @@ Examples:
 
         # No API keys available
         console.print("[red]No AI API keys configured![/red]")
-        console.print("
+        console.print("[yellow]Try these options that don't need your API key:[/yellow]")
+        console.print("\n[bold]CLI Tools (use existing tools):[/bold]")
+        console.print(" • hanzo dev --orchestrator codex # OpenAI CLI (if installed)")
+        console.print(" • hanzo dev --orchestrator claude # Claude Desktop (if installed)")
+        console.print(" • hanzo dev --orchestrator gemini # Gemini CLI (if installed)")
+        console.print(" • hanzo dev --orchestrator hanzo-ide # Hanzo IDE from ~/work/hanzo/ide")
+        console.print("\n[bold]Free APIs (rate limited):[/bold]")
+        console.print(" • hanzo dev --orchestrator codestral # Free Mistral Codestral")
+        console.print(" • hanzo dev --orchestrator starcoder # Free StarCoder")
+        console.print("\n[bold]Local Models (unlimited):[/bold]")
+        console.print(" • hanzo dev --orchestrator local:llama3.2 # Via Ollama")
+        console.print(" • hanzo dev --orchestrator local:codellama # Via Ollama")
+        console.print(" • hanzo dev --orchestrator local:mistral # Via Ollama")
+        console.print("\n[dim]Or set API keys for full access:[/dim]")
         console.print(" • export OPENAI_API_KEY=sk-...")
         console.print(" • export ANTHROPIC_API_KEY=sk-ant-...")
-
+
+    async def _use_free_codestral(self, message: str):
+        """Use free Mistral Codestral API (no API key needed for trial)."""
+        try:
+            import httpx
+
+            console.print("[dim]Using free Codestral API (rate limited)...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                # Mistral offers free tier with rate limits
+                response = await client.post(
+                    "https://api.mistral.ai/v1/chat/completions",
+                    headers={
+                        "Content-Type": "application/json",
+                        # Free tier doesn't need API key for limited usage
+                    },
+                    json={
+                        "model": "codestral-latest",
+                        "messages": [
+                            {"role": "system", "content": "You are Codestral, an AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "temperature": 0.7,
+                        "max_tokens": 2000
+                    },
+                    timeout=30.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("choices"):
+                        console.print(f"[cyan]Codestral:[/cyan] {data['choices'][0]['message']['content']}")
+                else:
+                    console.print("[yellow]Free tier limit reached. Try local models instead:[/yellow]")
+                    console.print(" • Install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
+                    console.print(" • Run: ollama pull codellama")
+                    console.print(" • Use: hanzo dev --orchestrator local:codellama")
+
+        except Exception as e:
+            console.print(f"[red]Codestral error: {e}[/red]")
+            console.print("[yellow]Try local models instead (no limits):[/yellow]")
+            console.print(" • hanzo dev --orchestrator local:codellama")
+
+    async def _use_free_starcoder(self, message: str):
+        """Use free StarCoder via HuggingFace Inference API."""
+        try:
+            import httpx
+
+            console.print("[dim]Using free StarCoder API...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                # HuggingFace offers free inference API
+                response = await client.post(
+                    "https://api-inference.huggingface.co/models/bigcode/starcoder2-15b",
+                    headers={
+                        "Content-Type": "application/json",
+                    },
+                    json={
+                        "inputs": f"<|system|>You are StarCoder, an AI coding assistant.<|end|>\n<|user|>{message}<|end|>\n<|assistant|>",
+                        "parameters": {
+                            "temperature": 0.7,
+                            "max_new_tokens": 2000,
+                            "return_full_text": False
+                        }
+                    },
+                    timeout=30.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if isinstance(data, list) and data:
+                        console.print(f"[cyan]StarCoder:[/cyan] {data[0].get('generated_text', '')}")
+                else:
+                    console.print("[yellow]API limit reached. Install local models:[/yellow]")
+                    console.print(" • brew install ollama")
+                    console.print(" • ollama pull starcoder2")
+                    console.print(" • hanzo dev --orchestrator local:starcoder2")
+
+        except Exception as e:
+            console.print(f"[red]StarCoder error: {e}[/red]")
+
+    async def _use_openai_cli(self, message: str):
+        """Use OpenAI CLI (Codex) - the official OpenAI CLI tool."""
+        try:
+            import subprocess
+            import json
+
+            console.print("[dim]Using OpenAI CLI (Codex)...[/dim]")
+
+            # Check if openai CLI is installed
+            result = subprocess.run(["which", "openai"], capture_output=True, text=True)
+            if result.returncode != 0:
+                console.print("[red]OpenAI CLI not installed![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • pip install openai-cli")
+                console.print(" • openai login")
+                console.print("Then use: hanzo dev --orchestrator codex")
+                return
+
+            # Use openai CLI to chat
+            cmd = ["openai", "api", "chat", "-m", "gpt-4", "-p", message]
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Codex:[/cyan] {stdout.strip()}")
+            else:
+                console.print(f"[red]OpenAI CLI error: {stderr}[/red]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]OpenAI CLI timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using OpenAI CLI: {e}[/red]")
+
+    async def _use_claude_cli(self, message: str):
+        """Use Claude Desktop/Code CLI."""
+        try:
+            import subprocess
+            import os
+
+            console.print("[dim]Using Claude Desktop...[/dim]")
+
+            # Check for Claude Code or Claude Desktop
+            claude_paths = [
+                "/usr/local/bin/claude",
+                "/Applications/Claude.app/Contents/MacOS/Claude",
+                os.path.expanduser("~/Applications/Claude.app/Contents/MacOS/Claude"),
+                "claude",  # In PATH
+            ]
+
+            claude_path = None
+            for path in claude_paths:
+                if os.path.exists(path) or subprocess.run(["which", path], capture_output=True).returncode == 0:
+                    claude_path = path
+                    break
+
+            if not claude_path:
+                console.print("[red]Claude Desktop not found![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • Download from https://claude.ai/desktop")
+                console.print(" • Or: brew install --cask claude")
+                console.print("Then use: hanzo dev --orchestrator claude")
+                return
+
+            # Send message to Claude via CLI or AppleScript on macOS
+            if sys.platform == "darwin":
+                # Use AppleScript to interact with Claude Desktop
+                script = f'''
+                tell application "Claude"
+                    activate
+                    delay 0.5
+                    tell application "System Events"
+                        keystroke "{message.replace('"', '\\"')}"
+                        key code 36 -- Enter key
+                    end tell
+                end tell
+                '''
+
+                subprocess.run(["osascript", "-e", script])
+                console.print("[cyan]Sent to Claude Desktop. Check the app for response.[/cyan]")
+            else:
+                # Try direct CLI invocation
+                process = subprocess.Popen(
+                    [claude_path, "--message", message],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    text=True
+                )
+
+                stdout, stderr = process.communicate(timeout=30)
+
+                if stdout:
+                    console.print(f"[cyan]Claude:[/cyan] {stdout.strip()}")
+
+        except Exception as e:
+            console.print(f"[red]Error using Claude Desktop: {e}[/red]")
+
+    async def _use_gemini_cli(self, message: str):
+        """Use Gemini CLI."""
+        try:
+            import subprocess
+
+            console.print("[dim]Using Gemini CLI...[/dim]")
+
+            # Check if gemini CLI is installed
+            result = subprocess.run(["which", "gemini"], capture_output=True, text=True)
+            if result.returncode != 0:
+                console.print("[red]Gemini CLI not installed![/red]")
+                console.print("[yellow]To install:[/yellow]")
+                console.print(" • pip install google-generativeai-cli")
+                console.print(" • gemini configure")
+                console.print(" • Set GOOGLE_API_KEY environment variable")
+                console.print("Then use: hanzo dev --orchestrator gemini")
+                return
+
+            # Use gemini CLI
+            cmd = ["gemini", "chat", message]
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Gemini:[/cyan] {stdout.strip()}")
+            else:
+                console.print(f"[red]Gemini CLI error: {stderr}[/red]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]Gemini CLI timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using Gemini CLI: {e}[/red]")
+
+    async def _use_hanzo_ide(self, message: str):
+        """Use Hanzo Dev IDE from ~/work/hanzo/ide."""
+        try:
+            import subprocess
+            import os
+
+            console.print("[dim]Using Hanzo Dev IDE...[/dim]")
+
+            # Check if Hanzo IDE exists
+            ide_path = os.path.expanduser("~/work/hanzo/ide")
+            if not os.path.exists(ide_path):
+                console.print("[red]Hanzo Dev IDE not found![/red]")
+                console.print("[yellow]Expected location: ~/work/hanzo/ide[/yellow]")
+                console.print("To set up:")
+                console.print(" • git clone https://github.com/hanzoai/ide ~/work/hanzo/ide")
+                console.print(" • cd ~/work/hanzo/ide && npm install")
+                return
+
+            # Check for the CLI entry point
+            cli_paths = [
+                os.path.join(ide_path, "bin", "hanzo-ide"),
+                os.path.join(ide_path, "hanzo-ide"),
+                os.path.join(ide_path, "cli.js"),
+                os.path.join(ide_path, "index.js"),
+            ]
+
+            cli_path = None
+            for path in cli_paths:
+                if os.path.exists(path):
+                    cli_path = path
+                    break
+
+            if not cli_path:
+                # Try to run with npm/node
+                package_json = os.path.join(ide_path, "package.json")
+                if os.path.exists(package_json):
+                    # Run via npm
+                    cmd = ["npm", "run", "chat", "--", message]
+                    cwd = ide_path
+                else:
+                    console.print("[red]Hanzo IDE CLI not found![/red]")
+                    return
+            else:
+                # Run the CLI directly
+                if cli_path.endswith(".js"):
+                    cmd = ["node", cli_path, "chat", message]
+                else:
+                    cmd = [cli_path, "chat", message]
+                cwd = None
+
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                cwd=cwd
+            )
+
+            stdout, stderr = process.communicate(timeout=30)
+
+            if process.returncode == 0 and stdout:
+                console.print(f"[cyan]Hanzo IDE:[/cyan] {stdout.strip()}")
+            else:
+                if stderr:
+                    console.print(f"[yellow]Hanzo IDE: {stderr}[/yellow]")
+                else:
+                    console.print("[yellow]Hanzo IDE: No response[/yellow]")
+
+        except subprocess.TimeoutExpired:
+            console.print("[yellow]Hanzo IDE timed out[/yellow]")
+        except Exception as e:
+            console.print(f"[red]Error using Hanzo IDE: {e}[/red]")
+
+    async def _use_local_model(self, message: str):
+        """Use local model via Ollama or LM Studio."""
+        import httpx
+
+        model_name = self.orchestrator.orchestrator_model.replace("local:", "")
+
+        # Try Ollama first (default port 11434)
+        try:
+            console.print(f"[dim]Using local {model_name} via Ollama...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/chat",
+                    json={
+                        "model": model_name,
+                        "messages": [
+                            {"role": "system", "content": "You are a helpful AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "stream": False
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("message"):
+                        console.print(f"[cyan]{model_name}:[/cyan] {data['message']['content']}")
+                        return
+
+        except Exception:
+            pass
+
+        # Try LM Studio (default port 1234)
+        try:
+            console.print(f"[dim]Trying LM Studio...[/dim]")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:1234/v1/chat/completions",
+                    json={
+                        "model": model_name,
+                        "messages": [
+                            {"role": "system", "content": "You are a helpful AI coding assistant."},
+                            {"role": "user", "content": message}
+                        ],
+                        "temperature": 0.7,
+                        "max_tokens": 2000
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("choices"):
+                        console.print(f"[cyan]{model_name}:[/cyan] {data['choices'][0]['message']['content']}")
+                        return
+
+        except Exception:
+            pass
+
+        # Neither worked
+        console.print(f"[red]Local model '{model_name}' not available[/red]")
+        console.print("[yellow]To use local models:[/yellow]")
+        console.print("\nOption 1 - Ollama (recommended):")
+        console.print(" • Install: curl -fsSL https://ollama.com/install.sh | sh")
+        console.print(f" • Pull model: ollama pull {model_name}")
+        console.print(" • It will auto-start when you use hanzo dev")
+        console.print("\nOption 2 - LM Studio:")
+        console.print(" • Download from https://lmstudio.ai")
+        console.print(f" • Load {model_name} model")
+        console.print(" • Start local server (port 1234)")
 
     async def handle_memory_command(self, command: str):
         """Handle memory/context commands starting with #."""
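_use_local_model tries Ollama on port 11434 first, then LM Studio on port 1234. A hedged sketch of probing those two servers in the same order, assuming only the default ports named in the diff (first_available_backend and the probe URLs are illustrative, not hanzo API):

import httpx
from typing import Optional

def first_available_backend() -> Optional[str]:
    """Return which local server responds, in the order _use_local_model tries them."""
    probes = [
        ("ollama", "http://localhost:11434/api/tags"),    # Ollama's model-list endpoint
        ("lmstudio", "http://localhost:1234/v1/models"),  # LM Studio's OpenAI-compatible endpoint
    ]
    for name, url in probes:
        try:
            if httpx.get(url, timeout=2.0).status_code == 200:
                return name
        except httpx.HTTPError:
            continue
    return None

if __name__ == "__main__":
    print(first_available_backend() or "no local backend running")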
@@ -1890,13 +2313,130 @@ class MultiClaudeOrchestrator(HanzoDevOrchestrator):
         return result
 
     async def _send_to_instance(self, instance: Dict, prompt: str) -> Dict:
-        """Send a prompt to a specific Claude instance."""
-        #
-
-
-
-
-
+        """Send a prompt to a specific Claude instance using configured model."""
+        # Simple direct approach - use the configured orchestrator model
+        if self.orchestrator_model == "codex":
+            # Use OpenAI CLI
+            return await self._call_openai_cli(prompt)
+        elif self.orchestrator_model in ["claude", "claude-code", "claude-desktop"]:
+            # Use Claude Desktop
+            return await self._call_claude_cli(prompt)
+        elif self.orchestrator_model in ["gemini", "gemini-cli"]:
+            # Use Gemini CLI
+            return await self._call_gemini_cli(prompt)
+        elif self.orchestrator_model.startswith("local:"):
+            # Use local model
+            return await self._call_local_model(prompt)
+        else:
+            # Try API-based models
+            return await self._call_api_model(prompt)
+
+    async def _call_openai_cli(self, prompt: str) -> Dict:
+        """Call OpenAI CLI and return structured response."""
+        try:
+            import subprocess
+            result = subprocess.run(
+                ["openai", "api", "chat", "-m", "gpt-4", "-p", prompt],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0 and result.stdout:
+                return {"output": result.stdout.strip(), "success": True}
+        except Exception as e:
+            logger.error(f"OpenAI CLI error: {e}")
+        return {"output": "OpenAI CLI not available. Install with: pip install openai-cli", "success": False}
+
+    async def _call_claude_cli(self, prompt: str) -> Dict:
+        """Call Claude Desktop and return structured response."""
+        try:
+            import subprocess
+            import sys
+            if sys.platform == "darwin":
+                # macOS - use AppleScript
+                script = f'tell application "Claude" to activate'
+                subprocess.run(["osascript", "-e", script])
+                return {"output": "Sent to Claude Desktop. Check app for response.", "success": True}
+        except Exception as e:
+            logger.error(f"Claude CLI error: {e}")
+        return {"output": "Claude Desktop not available. Install from https://claude.ai/desktop", "success": False}
+
+    async def _call_gemini_cli(self, prompt: str) -> Dict:
+        """Call Gemini CLI and return structured response."""
+        try:
+            import subprocess
+            result = subprocess.run(
+                ["gemini", "chat", prompt],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0 and result.stdout:
+                return {"output": result.stdout.strip(), "success": True}
+        except Exception as e:
+            logger.error(f"Gemini CLI error: {e}")
+        return {"output": "Gemini CLI not available. Install with: pip install google-generativeai-cli", "success": False}
+
+    async def _call_local_model(self, prompt: str) -> Dict:
+        """Call local model via Ollama and return structured response."""
+        try:
+            import httpx
+            model_name = self.orchestrator_model.replace("local:", "")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/chat",
+                    json={
+                        "model": model_name,
+                        "messages": [{"role": "user", "content": prompt}],
+                        "stream": False
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("message"):
+                        return {"output": data["message"]["content"], "success": True}
+        except Exception as e:
+            logger.error(f"Local model error: {e}")
+        return {"output": f"Local model not available. Install Ollama and run: ollama pull {self.orchestrator_model.replace('local:', '')}", "success": False}
+
+    async def _call_api_model(self, prompt: str) -> Dict:
+        """Call API-based model and return structured response."""
+        import os
+
+        # Try OpenAI
+        if os.getenv("OPENAI_API_KEY"):
+            try:
+                from openai import AsyncOpenAI
+                client = AsyncOpenAI()
+                response = await client.chat.completions.create(
+                    model="gpt-4",
+                    messages=[{"role": "user", "content": prompt}],
+                    max_tokens=2000
+                )
+                if response.choices:
+                    return {"output": response.choices[0].message.content, "success": True}
+            except Exception as e:
+                logger.error(f"OpenAI API error: {e}")
+
+        # Try Anthropic
+        if os.getenv("ANTHROPIC_API_KEY"):
+            try:
+                from anthropic import AsyncAnthropic
+                client = AsyncAnthropic()
+                response = await client.messages.create(
+                    model="claude-3-5-sonnet-20241022",
+                    messages=[{"role": "user", "content": prompt}],
+                    max_tokens=2000
+                )
+                if response.content:
+                    return {"output": response.content[0].text, "success": True}
+            except Exception as e:
+                logger.error(f"Anthropic API error: {e}")
+
+        return {"output": "No API keys configured. Set OPENAI_API_KEY or ANTHROPIC_API_KEY", "success": False}
 
     async def _validate_improvement(self, original: Dict, improved: Dict) -> bool:
         """Validate that an improvement doesn't degrade quality."""
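Every _call_* helper added above returns the same {"output": str, "success": bool} shape, so callers can fall through from one backend to the next. A minimal sketch of consuming that contract (chain_calls and the handler list are illustrative, not part of hanzo/dev.py):

from typing import Awaitable, Callable, Dict, List

async def chain_calls(prompt: str,
                      handlers: List[Callable[[str], Awaitable[Dict]]]) -> Dict:
    """Try handlers in order until one reports success; return the last result."""
    result: Dict = {"output": "no handlers configured", "success": False}
    for handler in handlers:
        result = await handler(prompt)
        if result.get("success"):
            break
    return result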
{hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.13
+Version: 0.3.15
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/RECORD CHANGED
@@ -1,7 +1,7 @@
 hanzo/__init__.py,sha256=f6N_RcJZ0F9ADrROlvPi1OrgwjF8cWQm34cml8hb1zk,169
 hanzo/__main__.py,sha256=F3Vz0Ty3bdAj_8oxyETMIqxlmNRnJOAFB1XPxbyfouI,105
-hanzo/cli.py,sha256=
-hanzo/dev.py,sha256=
+hanzo/cli.py,sha256=JG1bBiCnc1lCnsl_lEJqXKp7iDj1KUn3s4hEu5pZkNI,18586
+hanzo/dev.py,sha256=akZqLo2Ox9HSyi-NOasmfb06-qTxdz7pDlP7Cc9-UxQ,97324
 hanzo/mcp_server.py,sha256=XVygFNn-9CVdu8c95sP7fQjIRtA8K7nsGpgQNe44BRg,460
 hanzo/orchestrator_config.py,sha256=JV7DS8aVZwBJ9XzgkQronFwV_A50QyXG3MH_pKwmCB8,11006
 hanzo/repl.py,sha256=sW1quuqGkJ_AqgjN2vLNdtWgKDlXIkXiO9Bo1QQI0G4,1089
@@ -24,7 +24,7 @@ hanzo/utils/__init__.py,sha256=5RRwKI852vp8smr4xCRgeKfn7dLEnHbdXGfVYTZ5jDQ,69
 hanzo/utils/config.py,sha256=FD_LoBpcoF5dgJ7WL4o6LDp2pdOy8kS-dJ6iRO2GcGM,4728
 hanzo/utils/net_check.py,sha256=YFbJ65SzfDYHkHLZe3n51VhId1VI3zhyx8p6BM-l6jE,3017
 hanzo/utils/output.py,sha256=W0j3psF07vJiX4s02gbN4zYWfbKNsb8TSIoagBSf5vA,2704
-hanzo-0.3.13.dist-info/METADATA,sha256=
-hanzo-0.3.13.dist-info/WHEEL,sha256=
-hanzo-0.3.13.dist-info/entry_points.txt,sha256=
-hanzo-0.3.13.dist-info/RECORD,,
+hanzo-0.3.15.dist-info/METADATA,sha256=Fua1HkddM33hyQNQBUKbsfjdPDsURkJJPLPeuUMrTwM,4279
+hanzo-0.3.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hanzo-0.3.15.dist-info/entry_points.txt,sha256=pQLPMdqOXU_2BfTcMDhkqTCDNk_H6ApvYuSaWcuQOOw,171
+hanzo-0.3.15.dist-info/RECORD,,
{hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/WHEEL
File without changes

{hanzo-0.3.13.dist-info → hanzo-0.3.15.dist-info}/entry_points.txt
File without changes