hanzo 0.3.19__tar.gz → 0.3.21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hanzo might be problematic.

Files changed (34)
  1. {hanzo-0.3.19 → hanzo-0.3.21}/PKG-INFO +1 -1
  2. {hanzo-0.3.19 → hanzo-0.3.21}/pyproject.toml +1 -1
  3. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/cli.py +1 -1
  4. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/dev.py +37 -1
  5. hanzo-0.3.21/src/hanzo/fallback_handler.py +249 -0
  6. hanzo-0.3.21/src/hanzo/memory_manager.py +425 -0
  7. hanzo-0.3.21/src/hanzo/rate_limiter.py +332 -0
  8. hanzo-0.3.21/src/hanzo/streaming.py +271 -0
  9. {hanzo-0.3.19 → hanzo-0.3.21}/.gitignore +0 -0
  10. {hanzo-0.3.19 → hanzo-0.3.21}/README.md +0 -0
  11. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/__init__.py +0 -0
  12. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/__main__.py +0 -0
  13. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/__init__.py +0 -0
  14. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/agent.py +0 -0
  15. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/auth.py +0 -0
  16. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/chat.py +0 -0
  17. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/cluster.py +0 -0
  18. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/config.py +0 -0
  19. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/mcp.py +0 -0
  20. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/miner.py +0 -0
  21. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/network.py +0 -0
  22. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/repl.py +0 -0
  23. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/commands/tools.py +0 -0
  24. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/interactive/__init__.py +0 -0
  25. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/interactive/dashboard.py +0 -0
  26. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/interactive/repl.py +0 -0
  27. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/mcp_server.py +0 -0
  28. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/orchestrator_config.py +0 -0
  29. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/repl.py +0 -0
  30. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/router/__init__.py +0 -0
  31. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/utils/__init__.py +0 -0
  32. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/utils/config.py +0 -0
  33. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/utils/net_check.py +0 -0
  34. {hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/utils/output.py +0 -0
{hanzo-0.3.19 → hanzo-0.3.21}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.19
+Version: 0.3.21
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.19 → hanzo-0.3.21}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "hanzo"
-version = "0.3.19"
+version = "0.3.21"
 description = "Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime"
 authors = [
     {name = "Hanzo AI", email = "dev@hanzo.ai"},
{hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/cli.py
@@ -26,7 +26,7 @@ from .utils.output import console
 from .interactive.repl import HanzoREPL
 
 # Version
-__version__ = "0.3.19"
+__version__ = "0.3.21"
 
 
 @click.group(invoke_without_command=True)
{hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/dev.py
@@ -661,6 +661,11 @@ class HanzoDevREPL:
             "help": self.cmd_help,
             "exit": self.cmd_exit,
         }
+
+        # Initialize memory manager
+        from .memory_manager import MemoryManager
+        workspace = getattr(orchestrator, 'workspace_dir', '/tmp/hanzo')
+        self.memory_manager = MemoryManager(workspace)
 
     async def run(self):
         """Run the REPL."""
{hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/dev.py
@@ -741,7 +746,10 @@ class HanzoDevREPL:
 
             elif user_input.startswith("#"):
                 # Handle memory/context commands
-                await self.handle_memory_command(user_input[1:].strip())
+                from .memory_manager import handle_memory_command
+                handled = handle_memory_command(user_input, self.memory_manager, console)
+                if not handled:
+                    console.print("[yellow]Unknown memory command. Use #memory help[/yellow]")
 
             else:
                 # Natural chat - send directly to AI agents
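
handle_memory_command is likewise imported from the unshown memory_manager.py. The call site pins down its contract: it receives the raw "#"-prefixed input, the manager, and the console, and returns a truthy value when it recognized the command. A plausible dispatcher under those assumptions; the "#memory show/clear/help" subcommands are illustrative only:

# Hypothetical dispatcher matching the call:
#   handled = handle_memory_command(user_input, self.memory_manager, console)
def handle_memory_command(user_input: str, manager: "MemoryManager", console) -> bool:
    parts = user_input.lstrip("#").strip().split()
    if not parts or parts[0] != "memory":
        return False  # caller prints the "Unknown memory command" warning
    sub = parts[1] if len(parts) > 1 else "help"
    if sub == "show":
        console.print(manager.summarize_for_ai() or "[dim]No memory yet[/dim]")
    elif sub == "clear":
        manager.messages.clear()
        console.print("[green]Memory cleared[/green]")
    else:  # "help" and anything unrecognized
        console.print("Usage: #memory show | clear | help")
    return True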
{hanzo-0.3.19 → hanzo-0.3.21}/src/hanzo/dev.py
@@ -906,6 +914,34 @@ Examples:
     async def chat_with_agents(self, message: str):
         """Send message to AI agents for natural chat."""
         try:
+            # Add message to memory
+            self.memory_manager.add_message("user", message)
+
+            # Get memory context
+            memory_context = self.memory_manager.summarize_for_ai()
+
+            # Enhance message with context
+            if memory_context:
+                enhanced_message = f"{memory_context}\n\nUser: {message}"
+            else:
+                enhanced_message = message
+
+            # Try smart fallback if no specific model configured
+            if not hasattr(self.orchestrator, 'orchestrator_model') or \
+               self.orchestrator.orchestrator_model == "auto":
+                # Use streaming if available
+                from .streaming import stream_with_fallback
+                response = await stream_with_fallback(enhanced_message, console)
+
+                if response:
+                    # Save AI response to memory
+                    self.memory_manager.add_message("assistant", response)
+                    # Response already displayed by streaming handler
+                    return
+                else:
+                    console.print("[red]No AI options available. Please configure API keys or install tools.[/red]")
+                    return
+
             # For codex and other CLI tools, go straight to direct API chat
             if hasattr(self.orchestrator, 'orchestrator_model'):
                 model = self.orchestrator.orchestrator_model
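
streaming.py (+271 lines) is also new but its body is not shown. From the call site, chat_with_agents awaits stream_with_fallback(enhanced_message, console), expects the handler to display the response itself, and treats a falsy return as "no backend worked". A sketch of that contract only, using smart_chat from fallback_handler.py below as a stand-in for whatever streaming path the real module implements:

# Hypothetical shape of streaming.stream_with_fallback; the real module
# presumably streams tokens incrementally, this sketch just honors the
# same call contract (print the reply, return the full text or None).
from typing import Optional


async def stream_with_fallback(message: str, console=None) -> Optional[str]:
    from .fallback_handler import smart_chat

    response = await smart_chat(message, console)
    if response and console:
        # The dev.py call site assumes the response was already displayed here
        console.print(response)
    return response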
hanzo-0.3.21/src/hanzo/fallback_handler.py
@@ -0,0 +1,249 @@
+"""
+Intelligent fallback handler for Hanzo Dev.
+Automatically tries available AI options when primary fails.
+"""
+
+import os
+import shutil
+import subprocess
+from typing import Optional, Dict, Any
+from pathlib import Path
+
+class FallbackHandler:
+    """Handles automatic fallback to available AI options."""
+
+    def __init__(self):
+        self.available_options = self._detect_available_options()
+        self.fallback_order = self._determine_fallback_order()
+
+    def _detect_available_options(self) -> Dict[str, bool]:
+        """Detect which AI options are available."""
+        options = {
+            "openai_api": bool(os.getenv("OPENAI_API_KEY")),
+            "anthropic_api": bool(os.getenv("ANTHROPIC_API_KEY")),
+            "google_api": bool(os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")),
+            "openai_cli": shutil.which("openai") is not None,
+            "claude_cli": shutil.which("claude") is not None,
+            "gemini_cli": shutil.which("gemini") is not None,
+            "ollama": self._check_ollama(),
+            "hanzo_ide": Path.home().joinpath("work/hanzo/ide").exists(),
+            "free_apis": True,  # Always available (Codestral, StarCoder)
+        }
+        return options
+
+    def _check_ollama(self) -> bool:
+        """Check if Ollama is running and has models."""
+        try:
+            import httpx
+            with httpx.Client(timeout=2.0) as client:
+                response = client.get("http://localhost:11434/api/tags")
+                if response.status_code == 200:
+                    data = response.json()
+                    return len(data.get("models", [])) > 0
+        except:
+            pass
+        return False
+
+    def _determine_fallback_order(self) -> list:
+        """Determine the order of fallback options based on availability."""
+        order = []
+
+        # Priority 1: API keys (fastest, most reliable)
+        if self.available_options["openai_api"]:
+            order.append(("openai_api", "gpt-4"))
+        if self.available_options["anthropic_api"]:
+            order.append(("anthropic_api", "claude-3-5-sonnet"))
+        if self.available_options["google_api"]:
+            order.append(("google_api", "gemini-pro"))
+
+        # Priority 2: CLI tools (no API key needed)
+        if self.available_options["openai_cli"]:
+            order.append(("openai_cli", "codex"))
+        if self.available_options["claude_cli"]:
+            order.append(("claude_cli", "claude-desktop"))
+        if self.available_options["gemini_cli"]:
+            order.append(("gemini_cli", "gemini"))
+
+        # Priority 3: Local models (free, but requires setup)
+        if self.available_options["ollama"]:
+            order.append(("ollama", "local:llama3.2"))
+        if self.available_options["hanzo_ide"]:
+            order.append(("hanzo_ide", "hanzo-ide"))
+
+        # Priority 4: Free cloud APIs (rate limited)
+        if self.available_options["free_apis"]:
+            order.append(("free_api", "codestral-free"))
+            order.append(("free_api", "starcoder2"))
+
+        return order
+
+    def get_best_option(self) -> Optional[tuple]:
+        """Get the best available AI option."""
+        if self.fallback_order:
+            return self.fallback_order[0]
+        return None
+
+    def get_next_option(self, failed_option: str) -> Optional[tuple]:
+        """Get the next fallback option after one fails."""
+        for i, (option_type, model) in enumerate(self.fallback_order):
+            if model == failed_option and i + 1 < len(self.fallback_order):
+                return self.fallback_order[i + 1]
+        return None
+
+    def suggest_setup(self) -> str:
+        """Suggest setup instructions for unavailable options."""
+        suggestions = []
+
+        if not self.available_options["openai_api"]:
+            suggestions.append("• Set OPENAI_API_KEY for GPT-4/GPT-5 access")
+
+        if not self.available_options["anthropic_api"]:
+            suggestions.append("• Set ANTHROPIC_API_KEY for Claude access")
+
+        if not self.available_options["ollama"]:
+            suggestions.append("• Install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
+            suggestions.append("  Then run: ollama pull llama3.2")
+
+        if not self.available_options["openai_cli"]:
+            suggestions.append("• Install OpenAI CLI: pip install openai-cli")
+
+        if not self.available_options["claude_cli"]:
+            suggestions.append("• Install Claude Desktop from https://claude.ai/download")
+
+        return "\n".join(suggestions) if suggestions else "All AI options are available!"
+
+    def print_status(self, console):
+        """Print the current status of available AI options."""
+        from rich.table import Table
+
+        table = Table(title="Available AI Options", show_header=True,
+                      header_style="bold magenta")
+        table.add_column("Option", style="cyan", width=20)
+        table.add_column("Status", width=10)
+        table.add_column("Model", width=20)
+
+        status_map = {
+            "openai_api": ("OpenAI API", "gpt-4"),
+            "anthropic_api": ("Anthropic API", "claude-3-5"),
+            "google_api": ("Google API", "gemini-pro"),
+            "openai_cli": ("OpenAI CLI", "codex"),
+            "claude_cli": ("Claude Desktop", "claude"),
+            "gemini_cli": ("Gemini CLI", "gemini"),
+            "ollama": ("Ollama Local", "llama3.2"),
+            "hanzo_ide": ("Hanzo IDE", "hanzo-dev"),
+            "free_apis": ("Free APIs", "codestral/starcoder"),
+        }
+
+        for key, available in self.available_options.items():
+            if key in status_map:
+                name, model = status_map[key]
+                status = "✅" if available else "❌"
+                table.add_row(name, status, model if available else "Not available")
+
+        console.print(table)
+
+        if self.fallback_order:
+            console.print(f"\n[green]Primary option: {self.fallback_order[0][1]}[/green]")
+            if len(self.fallback_order) > 1:
+                fallbacks = ", ".join([opt[1] for opt in self.fallback_order[1:]])
+                console.print(f"[yellow]Fallback options: {fallbacks}[/yellow]")
+        else:
+            console.print("\n[red]No AI options available![/red]")
+            console.print("\n[yellow]Setup suggestions:[/yellow]")
+            console.print(self.suggest_setup())
+
+
+async def smart_chat(message: str, console=None) -> Optional[str]:
+    """
+    Smart chat that automatically tries available AI options.
+    Returns the AI response or None if all options fail.
+    """
+    from .rate_limiter import smart_limiter
+
+    handler = FallbackHandler()
+
+    if console:
+        console.print("\n[dim]Detecting available AI options...[/dim]")
+
+    best_option = handler.get_best_option()
+    if not best_option:
+        if console:
+            handler.print_status(console)
+        return None
+
+    option_type, model = best_option
+
+    # Try the primary option with rate limiting
+    try:
+        if option_type == "openai_api":
+            async def call_openai():
+                from openai import AsyncOpenAI
+                client = AsyncOpenAI()
+                response = await client.chat.completions.create(
+                    model="gpt-4",
+                    messages=[{"role": "user", "content": message}],
+                    max_tokens=500
+                )
+                return response.choices[0].message.content
+
+            return await smart_limiter.execute_with_limit("openai", call_openai)
+
+        elif option_type == "anthropic_api":
+            from anthropic import AsyncAnthropic
+            client = AsyncAnthropic()
+            response = await client.messages.create(
+                model="claude-3-5-sonnet-20241022",
+                messages=[{"role": "user", "content": message}],
+                max_tokens=500
+            )
+            return response.content[0].text
+
+        elif option_type == "openai_cli":
+            # Use OpenAI CLI
+            result = subprocess.run(
+                ["openai", "api", "chat.completions.create", "-m", "gpt-4", "-g", message],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0:
+                return result.stdout.strip()
+
+        elif option_type == "ollama":
+            # Use Ollama
+            import httpx
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/generate",
+                    json={"model": "llama3.2", "prompt": message, "stream": False},
+                    timeout=30.0
+                )
+                if response.status_code == 200:
+                    return response.json().get("response", "")
+
+        elif option_type == "free_api":
+            # Try free Codestral API
+            import httpx
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "https://codestral.mistral.ai/v1/fim/completions",
+                    headers={"Content-Type": "application/json"},
+                    json={"prompt": message, "suffix": "", "max_tokens": 500},
+                    timeout=30.0
+                )
+                if response.status_code == 200:
+                    return response.json().get("choices", [{}])[0].get("text", "")
+
+    except Exception as e:
+        if console:
+            console.print(f"[yellow]Primary option {model} failed: {e}[/yellow]")
+            console.print("[dim]Trying fallback...[/dim]")
+
+    # Try next fallback
+    next_option = handler.get_next_option(model)
+    if next_option:
+        # Recursively try the next option
+        handler.fallback_order.remove(best_option)
+        return await smart_chat(message, console)
+
+    return None
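
smart_chat depends on a smart_limiter object from the new rate_limiter.py (+332 lines, also not shown here); the only constraint visible in the diff is an awaitable execute_with_limit(provider, coroutine_factory). Two things worth noting when reading the fallback path: the recursive smart_chat call constructs a fresh FallbackHandler, so the fallback_order.remove(best_option) on the local handler does not appear to carry into the retry, and several detected option types (claude_cli, gemini_cli, hanzo_ide) have no matching branch in the try block. A minimal driver under those assumptions, purely illustrative:

# Illustrative driver for fallback_handler.py; not part of the package.
# smart_limiter's execute_with_limit(name, fn) signature is inferred from
# the single call site above.
import asyncio
from rich.console import Console

from hanzo.fallback_handler import FallbackHandler, smart_chat


async def main():
    console = Console()
    FallbackHandler().print_status(console)  # table of detected options

    reply = await smart_chat("Summarize this repo's layout.", console)
    if reply is None:
        console.print("[red]Every configured backend failed.[/red]")


if __name__ == "__main__":
    asyncio.run(main())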