hanzo 0.3.14__py3-none-any.whl → 0.3.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo has been flagged as potentially problematic.
- hanzo/cli.py +1 -1
- hanzo/dev.py +140 -11
- {hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/METADATA +1 -1
- {hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/RECORD +6 -6
- {hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/WHEEL +0 -0
- {hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/entry_points.txt +0 -0
hanzo/cli.py
CHANGED
hanzo/dev.py
CHANGED
@@ -857,7 +857,19 @@ Examples:
     async def chat_with_agents(self, message: str):
         """Send message to AI agents for natural chat."""
         try:
-            #
+            # For codex and other CLI tools, go straight to direct API chat
+            if hasattr(self.orchestrator, 'orchestrator_model'):
+                model = self.orchestrator.orchestrator_model
+                if model in ["codex", "openai-cli", "openai-codex", "claude", "claude-code",
+                             "claude-desktop", "gemini", "gemini-cli", "google-gemini",
+                             "hanzo-ide", "hanzo-dev-ide", "ide", "codestral", "codestral-free",
+                             "free", "mistral-free", "starcoder", "starcoder2", "free-starcoder"] or \
+                   model.startswith("local:"):
+                    # Use direct API/CLI chat for these models
+                    await self._direct_api_chat(message)
+                    return
+
+            # Show thinking indicator for network orchestrators
             console.print("[dim]Thinking...[/dim]")
 
             # Check if we have a network orchestrator with actual AI
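The gate added here reduces to a membership test over known CLI/IDE model names plus a "local:" prefix check. A minimal standalone sketch of that predicate (the helper name is_direct_chat_model and the constant are illustrative, not part of hanzo):

# Sketch only: mirrors the routing predicate in the hunk above.
DIRECT_CHAT_MODELS = frozenset({
    "codex", "openai-cli", "openai-codex", "claude", "claude-code",
    "claude-desktop", "gemini", "gemini-cli", "google-gemini",
    "hanzo-ide", "hanzo-dev-ide", "ide", "codestral", "codestral-free",
    "free", "mistral-free", "starcoder", "starcoder2", "free-starcoder",
})

def is_direct_chat_model(model: str) -> bool:
    # Direct API/CLI chat applies to the named models and any "local:<name>" model.
    return model in DIRECT_CHAT_MODELS or model.startswith("local:")

A set lookup keeps the check O(1) and is easier to maintain than the inline list literal in the condition.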
@@ -876,7 +888,7 @@ Examples:
                 console.print("[yellow]No response from agent[/yellow]")
 
             elif hasattr(self.orchestrator, 'execute_with_critique'):
-                # Use multi-Claude orchestrator
+                # Use multi-Claude orchestrator - but now it will use real AI!
                 result = await self.orchestrator.execute_with_critique(message)
 
                 if result.get("output"):
@@ -1088,8 +1100,8 @@ Examples:
             console.print("Then use: hanzo dev --orchestrator codex")
             return
 
-        # Use openai CLI to chat
-        cmd = ["openai", "api", "chat", "-m", "gpt-4", "-
+        # Use openai CLI to chat - correct syntax
+        cmd = ["openai", "api", "chat.completions.create", "-m", "gpt-4", "-g", message]
 
         process = subprocess.Popen(
            cmd,
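The corrected command matches the invocation used later in _call_openai_cli. A hedged sketch of the same call pattern with subprocess.run (the flags are taken verbatim from the diff and assume the legacy openai CLI; the wrapper name ask_gpt4 is hypothetical):

# Sketch only: same argv as the diff; passing a list (not shell=True)
# means the user message cannot be interpreted by a shell.
import subprocess

def ask_gpt4(message: str) -> str:
    result = subprocess.run(
        ["openai", "api", "chat.completions.create", "-m", "gpt-4", "-g", message],
        capture_output=True, text=True, timeout=30,
    )
    return result.stdout.strip() if result.returncode == 0 else ""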
@@ -2301,13 +2313,130 @@ class MultiClaudeOrchestrator(HanzoDevOrchestrator):
         return result
 
     async def _send_to_instance(self, instance: Dict, prompt: str) -> Dict:
-        """Send a prompt to a specific Claude instance."""
-        #
-
-
-
-
-
+        """Send a prompt to a specific Claude instance using configured model."""
+        # Simple direct approach - use the configured orchestrator model
+        if self.orchestrator_model == "codex":
+            # Use OpenAI CLI
+            return await self._call_openai_cli(prompt)
+        elif self.orchestrator_model in ["claude", "claude-code", "claude-desktop"]:
+            # Use Claude Desktop
+            return await self._call_claude_cli(prompt)
+        elif self.orchestrator_model in ["gemini", "gemini-cli"]:
+            # Use Gemini CLI
+            return await self._call_gemini_cli(prompt)
+        elif self.orchestrator_model.startswith("local:"):
+            # Use local model
+            return await self._call_local_model(prompt)
+        else:
+            # Try API-based models
+            return await self._call_api_model(prompt)
+
+    async def _call_openai_cli(self, prompt: str) -> Dict:
+        """Call OpenAI CLI and return structured response."""
+        try:
+            import subprocess
+            result = subprocess.run(
+                ["openai", "api", "chat.completions.create", "-m", "gpt-4", "-g", prompt],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0 and result.stdout:
+                return {"output": result.stdout.strip(), "success": True}
+        except Exception as e:
+            logger.error(f"OpenAI CLI error: {e}")
+        return {"output": "OpenAI CLI not available. Install with: pip install openai-cli", "success": False}
+
+    async def _call_claude_cli(self, prompt: str) -> Dict:
+        """Call Claude Desktop and return structured response."""
+        try:
+            import subprocess
+            import sys
+            if sys.platform == "darwin":
+                # macOS - use AppleScript
+                script = f'tell application "Claude" to activate'
+                subprocess.run(["osascript", "-e", script])
+                return {"output": "Sent to Claude Desktop. Check app for response.", "success": True}
+        except Exception as e:
+            logger.error(f"Claude CLI error: {e}")
+        return {"output": "Claude Desktop not available. Install from https://claude.ai/desktop", "success": False}
+
+    async def _call_gemini_cli(self, prompt: str) -> Dict:
+        """Call Gemini CLI and return structured response."""
+        try:
+            import subprocess
+            result = subprocess.run(
+                ["gemini", "chat", prompt],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0 and result.stdout:
+                return {"output": result.stdout.strip(), "success": True}
+        except Exception as e:
+            logger.error(f"Gemini CLI error: {e}")
+        return {"output": "Gemini CLI not available. Install with: pip install google-generativeai-cli", "success": False}
+
+    async def _call_local_model(self, prompt: str) -> Dict:
+        """Call local model via Ollama and return structured response."""
+        try:
+            import httpx
+            model_name = self.orchestrator_model.replace("local:", "")
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/chat",
+                    json={
+                        "model": model_name,
+                        "messages": [{"role": "user", "content": prompt}],
+                        "stream": False
+                    },
+                    timeout=60.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("message"):
+                        return {"output": data["message"]["content"], "success": True}
+        except Exception as e:
+            logger.error(f"Local model error: {e}")
+        return {"output": f"Local model not available. Install Ollama and run: ollama pull {self.orchestrator_model.replace('local:', '')}", "success": False}
+
+    async def _call_api_model(self, prompt: str) -> Dict:
+        """Call API-based model and return structured response."""
+        import os
+
+        # Try OpenAI
+        if os.getenv("OPENAI_API_KEY"):
+            try:
+                from openai import AsyncOpenAI
+                client = AsyncOpenAI()
+                response = await client.chat.completions.create(
+                    model="gpt-4",
+                    messages=[{"role": "user", "content": prompt}],
+                    max_tokens=2000
+                )
+                if response.choices:
+                    return {"output": response.choices[0].message.content, "success": True}
+            except Exception as e:
+                logger.error(f"OpenAI API error: {e}")
+
+        # Try Anthropic
+        if os.getenv("ANTHROPIC_API_KEY"):
+            try:
+                from anthropic import AsyncAnthropic
+                client = AsyncAnthropic()
+                response = await client.messages.create(
+                    model="claude-3-5-sonnet-20241022",
+                    messages=[{"role": "user", "content": prompt}],
+                    max_tokens=2000
+                )
+                if response.content:
+                    return {"output": response.content[0].text, "success": True}
+            except Exception as e:
+                logger.error(f"Anthropic API error: {e}")
+
+        return {"output": "No API keys configured. Set OPENAI_API_KEY or ANTHROPIC_API_KEY", "success": False}
 
     async def _validate_improvement(self, original: Dict, improved: Dict) -> bool:
         """Validate that an improvement doesn't degrade quality."""
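The new _send_to_instance is a plain if/elif dispatch on orchestrator_model. The same routing can be written as a lookup table; a hypothetical refactor sketch (CLI_DISPATCH and dispatch_prompt are illustrative, not in the release):

# Sketch only: table-driven version of the if/elif chain above.
CLI_DISPATCH = {
    "codex": "_call_openai_cli",
    "claude": "_call_claude_cli",
    "claude-code": "_call_claude_cli",
    "claude-desktop": "_call_claude_cli",
    "gemini": "_call_gemini_cli",
    "gemini-cli": "_call_gemini_cli",
}

async def dispatch_prompt(self, prompt: str) -> dict:
    if self.orchestrator_model.startswith("local:"):
        return await self._call_local_model(prompt)
    handler = CLI_DISPATCH.get(self.orchestrator_model, "_call_api_model")
    return await getattr(self, handler)(prompt)

Either shape falls through to _call_api_model, which tries OPENAI_API_KEY and then ANTHROPIC_API_KEY before giving up.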
{hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.14
+Version: 0.3.16
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 hanzo/__init__.py,sha256=f6N_RcJZ0F9ADrROlvPi1OrgwjF8cWQm34cml8hb1zk,169
 hanzo/__main__.py,sha256=F3Vz0Ty3bdAj_8oxyETMIqxlmNRnJOAFB1XPxbyfouI,105
-hanzo/cli.py,sha256=
-hanzo/dev.py,sha256=
+hanzo/cli.py,sha256=79v-Ucxub1NsJuvjGdyQBASyr59pGSM88n7US4LBjZk,18586
+hanzo/dev.py,sha256=VYuxBQZi7IeVNle52GyN1bdeXMgZk5YSy-q6QiOluyA,97379
 hanzo/mcp_server.py,sha256=XVygFNn-9CVdu8c95sP7fQjIRtA8K7nsGpgQNe44BRg,460
 hanzo/orchestrator_config.py,sha256=JV7DS8aVZwBJ9XzgkQronFwV_A50QyXG3MH_pKwmCB8,11006
 hanzo/repl.py,sha256=sW1quuqGkJ_AqgjN2vLNdtWgKDlXIkXiO9Bo1QQI0G4,1089
@@ -24,7 +24,7 @@ hanzo/utils/__init__.py,sha256=5RRwKI852vp8smr4xCRgeKfn7dLEnHbdXGfVYTZ5jDQ,69
 hanzo/utils/config.py,sha256=FD_LoBpcoF5dgJ7WL4o6LDp2pdOy8kS-dJ6iRO2GcGM,4728
 hanzo/utils/net_check.py,sha256=YFbJ65SzfDYHkHLZe3n51VhId1VI3zhyx8p6BM-l6jE,3017
 hanzo/utils/output.py,sha256=W0j3psF07vJiX4s02gbN4zYWfbKNsb8TSIoagBSf5vA,2704
-hanzo-0.3.14.dist-info/METADATA,sha256=
-hanzo-0.3.14.dist-info/WHEEL,sha256=
-hanzo-0.3.14.dist-info/entry_points.txt,sha256=
-hanzo-0.3.14.dist-info/RECORD,,
+hanzo-0.3.16.dist-info/METADATA,sha256=pVsilNXuBk6Rhg7f9GgrPEwyZbqHoYNsJVfzxoa69k0,4279
+hanzo-0.3.16.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hanzo-0.3.16.dist-info/entry_points.txt,sha256=pQLPMdqOXU_2BfTcMDhkqTCDNk_H6ApvYuSaWcuQOOw,171
+hanzo-0.3.16.dist-info/RECORD,,
{hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/WHEEL
File without changes
{hanzo-0.3.14.dist-info → hanzo-0.3.16.dist-info}/entry_points.txt
File without changes