hanzo 0.3.19__py3-none-any.whl → 0.3.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo might be problematic.
- hanzo/cli.py +1 -1
- hanzo/dev.py +44 -1
- hanzo/fallback_handler.py +244 -0
- hanzo/memory_manager.py +425 -0
- {hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/METADATA +1 -1
- {hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/RECORD +8 -6
- {hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/WHEEL +0 -0
- {hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/entry_points.txt +0 -0
hanzo/cli.py CHANGED
hanzo/dev.py CHANGED
@@ -661,6 +661,11 @@ class HanzoDevREPL:
             "help": self.cmd_help,
             "exit": self.cmd_exit,
         }
+
+        # Initialize memory manager
+        from .memory_manager import MemoryManager
+        workspace = getattr(orchestrator, 'workspace_dir', '/tmp/hanzo')
+        self.memory_manager = MemoryManager(workspace)

     async def run(self):
         """Run the REPL."""
@@ -741,7 +746,10 @@ class HanzoDevREPL:

         elif user_input.startswith("#"):
             # Handle memory/context commands
-
+            from .memory_manager import handle_memory_command
+            handled = handle_memory_command(user_input, self.memory_manager, console)
+            if not handled:
+                console.print("[yellow]Unknown memory command. Use #memory help[/yellow]")

         else:
             # Natural chat - send directly to AI agents
@@ -906,6 +914,41 @@ Examples:
     async def chat_with_agents(self, message: str):
         """Send message to AI agents for natural chat."""
         try:
+            # Add message to memory
+            self.memory_manager.add_message("user", message)
+
+            # Get memory context
+            memory_context = self.memory_manager.summarize_for_ai()
+
+            # Enhance message with context
+            if memory_context:
+                enhanced_message = f"{memory_context}\n\nUser: {message}"
+            else:
+                enhanced_message = message
+
+            # Try smart fallback if no specific model configured
+            if not hasattr(self.orchestrator, 'orchestrator_model') or \
+               self.orchestrator.orchestrator_model == "auto":
+                from .fallback_handler import smart_chat
+                response = await smart_chat(enhanced_message, console)
+                if response:
+                    # Save AI response to memory
+                    self.memory_manager.add_message("assistant", response)
+
+                    from rich.panel import Panel
+                    console.print()
+                    console.print(Panel(
+                        response,
+                        title="[bold cyan]AI Response[/bold cyan]",
+                        title_align="left",
+                        border_style="dim cyan",
+                        padding=(1, 2)
+                    ))
+                    return
+                else:
+                    console.print("[red]No AI options available. Please configure API keys or install tools.[/red]")
+                    return
+
             # For codex and other CLI tools, go straight to direct API chat
             if hasattr(self.orchestrator, 'orchestrator_model'):
                 model = self.orchestrator.orchestrator_model
hanzo/fallback_handler.py ADDED
@@ -0,0 +1,244 @@
+"""
+Intelligent fallback handler for Hanzo Dev.
+Automatically tries available AI options when primary fails.
+"""
+
+import os
+import shutil
+import subprocess
+from typing import Optional, Dict, Any
+from pathlib import Path
+
+class FallbackHandler:
+    """Handles automatic fallback to available AI options."""
+
+    def __init__(self):
+        self.available_options = self._detect_available_options()
+        self.fallback_order = self._determine_fallback_order()
+
+    def _detect_available_options(self) -> Dict[str, bool]:
+        """Detect which AI options are available."""
+        options = {
+            "openai_api": bool(os.getenv("OPENAI_API_KEY")),
+            "anthropic_api": bool(os.getenv("ANTHROPIC_API_KEY")),
+            "google_api": bool(os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")),
+            "openai_cli": shutil.which("openai") is not None,
+            "claude_cli": shutil.which("claude") is not None,
+            "gemini_cli": shutil.which("gemini") is not None,
+            "ollama": self._check_ollama(),
+            "hanzo_ide": Path.home().joinpath("work/hanzo/ide").exists(),
+            "free_apis": True,  # Always available (Codestral, StarCoder)
+        }
+        return options
+
+    def _check_ollama(self) -> bool:
+        """Check if Ollama is running and has models."""
+        try:
+            import httpx
+            with httpx.Client(timeout=2.0) as client:
+                response = client.get("http://localhost:11434/api/tags")
+                if response.status_code == 200:
+                    data = response.json()
+                    return len(data.get("models", [])) > 0
+        except:
+            pass
+        return False
+
+    def _determine_fallback_order(self) -> list:
+        """Determine the order of fallback options based on availability."""
+        order = []
+
+        # Priority 1: API keys (fastest, most reliable)
+        if self.available_options["openai_api"]:
+            order.append(("openai_api", "gpt-4"))
+        if self.available_options["anthropic_api"]:
+            order.append(("anthropic_api", "claude-3-5-sonnet"))
+        if self.available_options["google_api"]:
+            order.append(("google_api", "gemini-pro"))
+
+        # Priority 2: CLI tools (no API key needed)
+        if self.available_options["openai_cli"]:
+            order.append(("openai_cli", "codex"))
+        if self.available_options["claude_cli"]:
+            order.append(("claude_cli", "claude-desktop"))
+        if self.available_options["gemini_cli"]:
+            order.append(("gemini_cli", "gemini"))
+
+        # Priority 3: Local models (free, but requires setup)
+        if self.available_options["ollama"]:
+            order.append(("ollama", "local:llama3.2"))
+        if self.available_options["hanzo_ide"]:
+            order.append(("hanzo_ide", "hanzo-ide"))
+
+        # Priority 4: Free cloud APIs (rate limited)
+        if self.available_options["free_apis"]:
+            order.append(("free_api", "codestral-free"))
+            order.append(("free_api", "starcoder2"))
+
+        return order
+
+    def get_best_option(self) -> Optional[tuple]:
+        """Get the best available AI option."""
+        if self.fallback_order:
+            return self.fallback_order[0]
+        return None
+
+    def get_next_option(self, failed_option: str) -> Optional[tuple]:
+        """Get the next fallback option after one fails."""
+        for i, (option_type, model) in enumerate(self.fallback_order):
+            if model == failed_option and i + 1 < len(self.fallback_order):
+                return self.fallback_order[i + 1]
+        return None
+
+    def suggest_setup(self) -> str:
+        """Suggest setup instructions for unavailable options."""
+        suggestions = []
+
+        if not self.available_options["openai_api"]:
+            suggestions.append("• Set OPENAI_API_KEY for GPT-4/GPT-5 access")
+
+        if not self.available_options["anthropic_api"]:
+            suggestions.append("• Set ANTHROPIC_API_KEY for Claude access")
+
+        if not self.available_options["ollama"]:
+            suggestions.append("• Install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
+            suggestions.append("  Then run: ollama pull llama3.2")
+
+        if not self.available_options["openai_cli"]:
+            suggestions.append("• Install OpenAI CLI: pip install openai-cli")
+
+        if not self.available_options["claude_cli"]:
+            suggestions.append("• Install Claude Desktop from https://claude.ai/download")
+
+        return "\n".join(suggestions) if suggestions else "All AI options are available!"
+
+    def print_status(self, console):
+        """Print the current status of available AI options."""
+        from rich.table import Table
+
+        table = Table(title="Available AI Options", show_header=True,
+                      header_style="bold magenta")
+        table.add_column("Option", style="cyan", width=20)
+        table.add_column("Status", width=10)
+        table.add_column("Model", width=20)
+
+        status_map = {
+            "openai_api": ("OpenAI API", "gpt-4"),
+            "anthropic_api": ("Anthropic API", "claude-3-5"),
+            "google_api": ("Google API", "gemini-pro"),
+            "openai_cli": ("OpenAI CLI", "codex"),
+            "claude_cli": ("Claude Desktop", "claude"),
+            "gemini_cli": ("Gemini CLI", "gemini"),
+            "ollama": ("Ollama Local", "llama3.2"),
+            "hanzo_ide": ("Hanzo IDE", "hanzo-dev"),
+            "free_apis": ("Free APIs", "codestral/starcoder"),
+        }
+
+        for key, available in self.available_options.items():
+            if key in status_map:
+                name, model = status_map[key]
+                status = "✅" if available else "❌"
+                table.add_row(name, status, model if available else "Not available")
+
+        console.print(table)
+
+        if self.fallback_order:
+            console.print(f"\n[green]Primary option: {self.fallback_order[0][1]}[/green]")
+            if len(self.fallback_order) > 1:
+                fallbacks = ", ".join([opt[1] for opt in self.fallback_order[1:]])
+                console.print(f"[yellow]Fallback options: {fallbacks}[/yellow]")
+        else:
+            console.print("\n[red]No AI options available![/red]")
+            console.print("\n[yellow]Setup suggestions:[/yellow]")
+            console.print(self.suggest_setup())
+
+
+async def smart_chat(message: str, console=None) -> Optional[str]:
+    """
+    Smart chat that automatically tries available AI options.
+    Returns the AI response or None if all options fail.
+    """
+    handler = FallbackHandler()
+
+    if console:
+        console.print("\n[dim]Detecting available AI options...[/dim]")
+
+    best_option = handler.get_best_option()
+    if not best_option:
+        if console:
+            handler.print_status(console)
+        return None
+
+    option_type, model = best_option
+
+    # Try the primary option
+    try:
+        if option_type == "openai_api":
+            from openai import AsyncOpenAI
+            client = AsyncOpenAI()
+            response = await client.chat.completions.create(
+                model="gpt-4",
+                messages=[{"role": "user", "content": message}],
+                max_tokens=500
+            )
+            return response.choices[0].message.content
+
+        elif option_type == "anthropic_api":
+            from anthropic import AsyncAnthropic
+            client = AsyncAnthropic()
+            response = await client.messages.create(
+                model="claude-3-5-sonnet-20241022",
+                messages=[{"role": "user", "content": message}],
+                max_tokens=500
+            )
+            return response.content[0].text
+
+        elif option_type == "openai_cli":
+            # Use OpenAI CLI
+            result = subprocess.run(
+                ["openai", "api", "chat.completions.create", "-m", "gpt-4", "-g", message],
+                capture_output=True,
+                text=True,
+                timeout=30
+            )
+            if result.returncode == 0:
+                return result.stdout.strip()
+
+        elif option_type == "ollama":
+            # Use Ollama
+            import httpx
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://localhost:11434/api/generate",
+                    json={"model": "llama3.2", "prompt": message, "stream": False},
+                    timeout=30.0
+                )
+                if response.status_code == 200:
+                    return response.json().get("response", "")
+
+        elif option_type == "free_api":
+            # Try free Codestral API
+            import httpx
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "https://codestral.mistral.ai/v1/fim/completions",
+                    headers={"Content-Type": "application/json"},
+                    json={"prompt": message, "suffix": "", "max_tokens": 500},
+                    timeout=30.0
+                )
+                if response.status_code == 200:
+                    return response.json().get("choices", [{}])[0].get("text", "")
+
+    except Exception as e:
+        if console:
+            console.print(f"[yellow]Primary option {model} failed: {e}[/yellow]")
+            console.print("[dim]Trying fallback...[/dim]")
+
+    # Try next fallback
+    next_option = handler.get_next_option(model)
+    if next_option:
+        # Recursively try the next option
+        handler.fallback_order.remove(best_option)
+        return await smart_chat(message, console)
+
+    return None
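The handler is also usable on its own. A small sketch of driving it directly, assuming this wheel and rich are installed (print_status already imports rich.table internally); the prompt text is illustrative:

import asyncio

from rich.console import Console

from hanzo.fallback_handler import FallbackHandler, smart_chat

console = Console()

# Show what was detected and the derived priority order.
handler = FallbackHandler()
handler.print_status(console)

best = handler.get_best_option()
if best:
    option_type, model = best
    console.print(f"First choice: {model} (via {option_type})")

# End to end: smart_chat builds its own handler and recurses through fallbacks.
reply = asyncio.run(smart_chat("Say hello in one sentence.", console))
console.print(reply or "[red]All options failed[/red]")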
hanzo/memory_manager.py ADDED
@@ -0,0 +1,425 @@
+"""
+Memory management system for Hanzo Dev.
+Provides persistent context and memory like Claude Desktop.
+"""
+
+import json
+import os
+from pathlib import Path
+from typing import Dict, List, Any, Optional
+from datetime import datetime
+from dataclasses import dataclass, asdict
+import hashlib
+
+@dataclass
+class MemoryItem:
+    """A single memory item."""
+    id: str
+    content: str
+    type: str  # 'context', 'instruction', 'fact', 'code'
+    created_at: str
+    tags: List[str]
+    priority: int = 0  # Higher priority items are kept longer
+
+    def to_dict(self) -> Dict:
+        return asdict(self)
+
+    @classmethod
+    def from_dict(cls, data: Dict) -> 'MemoryItem':
+        return cls(**data)
+
+
+class MemoryManager:
+    """Manages persistent memory and context for AI conversations."""
+
+    def __init__(self, workspace_dir: str = None):
+        """Initialize memory manager."""
+        if workspace_dir:
+            self.memory_dir = Path(workspace_dir) / ".hanzo" / "memory"
+        else:
+            self.memory_dir = Path.home() / ".hanzo" / "memory"
+
+        self.memory_dir.mkdir(parents=True, exist_ok=True)
+        self.memory_file = self.memory_dir / "context.json"
+        self.session_file = self.memory_dir / "session.json"
+
+        self.memories: List[MemoryItem] = []
+        self.session_context: Dict[str, Any] = {}
+
+        self.load_memories()
+        self.load_session()
+
+    def load_memories(self):
+        """Load persistent memories from disk."""
+        if self.memory_file.exists():
+            try:
+                with open(self.memory_file, 'r') as f:
+                    data = json.load(f)
+                    self.memories = [MemoryItem.from_dict(item) for item in data.get('memories', [])]
+            except Exception as e:
+                print(f"Error loading memories: {e}")
+                self.memories = []
+        else:
+            # Initialize with default memories
+            self._init_default_memories()
+
+    def _init_default_memories(self):
+        """Initialize with helpful default memories."""
+        defaults = [
+            MemoryItem(
+                id=self._generate_id("system"),
+                content="I am Hanzo Dev, an AI coding assistant with multiple orchestrator modes.",
+                type="instruction",
+                created_at=datetime.now().isoformat(),
+                tags=["system", "identity"],
+                priority=10
+            ),
+            MemoryItem(
+                id=self._generate_id("capabilities"),
+                content="I can read/write files, search code, run commands, and use various AI models.",
+                type="fact",
+                created_at=datetime.now().isoformat(),
+                tags=["system", "capabilities"],
+                priority=9
+            ),
+            MemoryItem(
+                id=self._generate_id("help"),
+                content="Use /help for commands, #memory for context management, or just chat naturally.",
+                type="instruction",
+                created_at=datetime.now().isoformat(),
+                tags=["system", "usage"],
+                priority=8
+            ),
+        ]
+        self.memories = defaults
+        self.save_memories()
+
+    def save_memories(self):
+        """Save memories to disk."""
+        try:
+            data = {
+                'memories': [m.to_dict() for m in self.memories],
+                'updated_at': datetime.now().isoformat()
+            }
+            with open(self.memory_file, 'w') as f:
+                json.dump(data, f, indent=2)
+        except Exception as e:
+            print(f"Error saving memories: {e}")
+
+    def load_session(self):
+        """Load current session context."""
+        if self.session_file.exists():
+            try:
+                with open(self.session_file, 'r') as f:
+                    self.session_context = json.load(f)
+            except:
+                self.session_context = {}
+        else:
+            self.session_context = {
+                'started_at': datetime.now().isoformat(),
+                'messages': [],
+                'current_task': None,
+                'preferences': {}
+            }
+
+    def save_session(self):
+        """Save session context."""
+        try:
+            with open(self.session_file, 'w') as f:
+                json.dump(self.session_context, f, indent=2)
+        except Exception as e:
+            print(f"Error saving session: {e}")
+
+    def add_memory(self, content: str, type: str = "context", tags: List[str] = None, priority: int = 0) -> str:
+        """Add a new memory item."""
+        memory_id = self._generate_id(content)
+
+        # Check if similar memory exists
+        for mem in self.memories:
+            if mem.content == content:
+                return mem.id  # Don't duplicate
+
+        memory = MemoryItem(
+            id=memory_id,
+            content=content,
+            type=type,
+            created_at=datetime.now().isoformat(),
+            tags=tags or [],
+            priority=priority
+        )
+
+        self.memories.append(memory)
+        self.save_memories()
+
+        return memory_id
+
+    def remove_memory(self, memory_id: str) -> bool:
+        """Remove a memory by ID."""
+        for i, mem in enumerate(self.memories):
+            if mem.id == memory_id:
+                del self.memories[i]
+                self.save_memories()
+                return True
+        return False
+
+    def clear_memories(self, keep_system: bool = True):
+        """Clear all memories, optionally keeping system memories."""
+        if keep_system:
+            self.memories = [m for m in self.memories if "system" in m.tags]
+        else:
+            self.memories = []
+        self.save_memories()
+
+    def get_memories(self, type: str = None, tags: List[str] = None) -> List[MemoryItem]:
+        """Get memories filtered by type or tags."""
+        result = self.memories
+
+        if type:
+            result = [m for m in result if m.type == type]
+
+        if tags:
+            result = [m for m in result if any(tag in m.tags for tag in tags)]
+
+        # Sort by priority and creation date
+        result.sort(key=lambda m: (-m.priority, m.created_at), reverse=True)
+
+        return result
+
+    def get_context_string(self, max_tokens: int = 2000) -> str:
+        """Get a formatted context string for AI prompts."""
+        # Sort memories by priority
+        sorted_memories = sorted(self.memories, key=lambda m: -m.priority)
+
+        context_parts = []
+        token_count = 0
+
+        for memory in sorted_memories:
+            # Rough token estimation (4 chars = 1 token)
+            memory_tokens = len(memory.content) // 4
+
+            if token_count + memory_tokens > max_tokens:
+                break
+
+            if memory.type == "instruction":
+                context_parts.append(f"INSTRUCTION: {memory.content}")
+            elif memory.type == "fact":
+                context_parts.append(f"FACT: {memory.content}")
+            elif memory.type == "code":
+                context_parts.append(f"CODE CONTEXT:\n{memory.content}")
+            else:
+                context_parts.append(memory.content)
+
+            token_count += memory_tokens
+
+        return "\n\n".join(context_parts)
+
+    def add_message(self, role: str, content: str):
+        """Add a message to session history."""
+        self.session_context['messages'].append({
+            'role': role,
+            'content': content,
+            'timestamp': datetime.now().isoformat()
+        })
+
+        # Keep only last 50 messages
+        if len(self.session_context['messages']) > 50:
+            self.session_context['messages'] = self.session_context['messages'][-50:]
+
+        self.save_session()
+
+    def get_recent_messages(self, count: int = 10) -> List[Dict]:
+        """Get recent messages from session."""
+        return self.session_context['messages'][-count:]
+
+    def set_preference(self, key: str, value: Any):
+        """Set a user preference."""
+        self.session_context['preferences'][key] = value
+        self.save_session()
+
+    def get_preference(self, key: str, default: Any = None) -> Any:
+        """Get a user preference."""
+        return self.session_context['preferences'].get(key, default)
+
+    def _generate_id(self, content: str) -> str:
+        """Generate a unique ID for a memory item."""
+        hash_input = f"{content}{datetime.now().isoformat()}"
+        return hashlib.md5(hash_input.encode()).hexdigest()[:8]
+
+    def summarize_for_ai(self) -> str:
+        """Create a summary suitable for AI context."""
+        summary = []
+
+        # Add system memories
+        system_memories = self.get_memories(tags=["system"])
+        if system_memories:
+            summary.append("SYSTEM CONTEXT:")
+            for mem in system_memories[:3]:  # Top 3 system memories
+                summary.append(f"- {mem.content}")
+
+        # Add recent instructions
+        instructions = self.get_memories(type="instruction")
+        if instructions:
+            summary.append("\nINSTRUCTIONS:")
+            for mem in instructions[:3]:  # Top 3 instructions
+                summary.append(f"- {mem.content}")
+
+        # Add important facts
+        facts = self.get_memories(type="fact")
+        if facts:
+            summary.append("\nKEY FACTS:")
+            for mem in facts[:5]:  # Top 5 facts
+                summary.append(f"- {mem.content}")
+
+        # Add current task if set
+        if self.session_context.get('current_task'):
+            summary.append(f"\nCURRENT TASK: {self.session_context['current_task']}")
+
+        return "\n".join(summary)
+
+    def export_memories(self, file_path: str):
+        """Export memories to a file."""
+        data = {
+            'memories': [m.to_dict() for m in self.memories],
+            'session': self.session_context,
+            'exported_at': datetime.now().isoformat()
+        }
+
+        with open(file_path, 'w') as f:
+            json.dump(data, f, indent=2)
+
+    def import_memories(self, file_path: str):
+        """Import memories from a file."""
+        with open(file_path, 'r') as f:
+            data = json.load(f)
+
+        # Merge memories (avoid duplicates)
+        existing_ids = {m.id for m in self.memories}
+
+        for mem_data in data.get('memories', []):
+            if mem_data['id'] not in existing_ids:
+                self.memories.append(MemoryItem.from_dict(mem_data))
+
+        # Merge session preferences
+        if 'session' in data and 'preferences' in data['session']:
+            self.session_context['preferences'].update(data['session']['preferences'])
+
+        self.save_memories()
+        self.save_session()
+
+
+def handle_memory_command(command: str, memory_manager: MemoryManager, console) -> bool:
+    """
+    Handle #memory commands.
+    Returns True if command was handled, False otherwise.
+    """
+    from rich.table import Table
+    from rich.panel import Panel
+
+    parts = command.strip().split(maxsplit=2)
+
+    if len(parts) == 1 or parts[1] == "show":
+        # Show current memories
+        memories = memory_manager.get_memories()
+
+        if not memories:
+            console.print("[yellow]No memories stored.[/yellow]")
+            return True
+
+        table = Table(title="Current Memories", show_header=True,
+                      header_style="bold magenta")
+        table.add_column("ID", style="cyan", width=10)
+        table.add_column("Type", width=12)
+        table.add_column("Content", width=50)
+        table.add_column("Priority", width=8)
+
+        for mem in memories[:10]:  # Show top 10
+            content = mem.content[:47] + "..." if len(mem.content) > 50 else mem.content
+            table.add_row(mem.id, mem.type, content, str(mem.priority))
+
+        console.print(table)
+
+        if len(memories) > 10:
+            console.print(f"[dim]... and {len(memories) - 10} more[/dim]")
+
+        return True
+
+    elif parts[1] == "add":
+        if len(parts) < 3:
+            console.print("[red]Usage: #memory add <content>[/red]")
+            return True
+
+        content = parts[2]
+        memory_id = memory_manager.add_memory(content, type="context")
+        console.print(f"[green]Added memory: {memory_id}[/green]")
+        return True
+
+    elif parts[1] == "remove":
+        if len(parts) < 3:
+            console.print("[red]Usage: #memory remove <id>[/red]")
+            return True
+
+        memory_id = parts[2]
+        if memory_manager.remove_memory(memory_id):
+            console.print(f"[green]Removed memory: {memory_id}[/green]")
+        else:
+            console.print(f"[red]Memory not found: {memory_id}[/red]")
+        return True
+
+    elif parts[1] == "clear":
+        memory_manager.clear_memories(keep_system=True)
+        console.print("[green]Cleared all non-system memories.[/green]")
+        return True
+
+    elif parts[1] == "save":
+        memory_manager.save_memories()
+        memory_manager.save_session()
+        console.print("[green]Memories saved.[/green]")
+        return True
+
+    elif parts[1] == "export":
+        if len(parts) < 3:
+            file_path = "hanzo_memories.json"
+        else:
+            file_path = parts[2]
+
+        memory_manager.export_memories(file_path)
+        console.print(f"[green]Exported memories to {file_path}[/green]")
+        return True
+
+    elif parts[1] == "import":
+        if len(parts) < 3:
+            console.print("[red]Usage: #memory import <file_path>[/red]")
+            return True
+
+        file_path = parts[2]
+        try:
+            memory_manager.import_memories(file_path)
+            console.print(f"[green]Imported memories from {file_path}[/green]")
+        except Exception as e:
+            console.print(f"[red]Error importing: {e}[/red]")
+        return True
+
+    elif parts[1] == "context":
+        # Show AI context
+        context = memory_manager.summarize_for_ai()
+        console.print(Panel(context, title="[bold cyan]AI Context[/bold cyan]",
+                            title_align="left", border_style="dim cyan"))
+        return True
+
+    elif parts[1] == "help":
+        help_text = """Memory Commands:
+  #memory [show] - Show current memories
+  #memory add <text> - Add new memory
+  #memory remove <id> - Remove memory by ID
+  #memory clear - Clear all memories (keep system)
+  #memory save - Save memories to disk
+  #memory export [file] - Export memories to file
+  #memory import <file> - Import memories from file
+  #memory context - Show AI context summary
+  #memory help - Show this help"""
+
+        console.print(Panel(help_text, title="[bold cyan]Memory Help[/bold cyan]",
+                            title_align="left", border_style="dim cyan"))
+        return True
+
+    return False
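As a quick orientation to the new API, a sketch of exercising the manager both programmatically and through the #memory command path, assuming this wheel and rich are installed; the fact text is illustrative:

from rich.console import Console

from hanzo.memory_manager import MemoryManager, handle_memory_command

console = Console()
mm = MemoryManager()  # no workspace given, so it stores under ~/.hanzo/memory

# Programmatic path: a prioritized fact flows into the AI-facing summary.
mm.add_memory("Project targets Python 3.11", type="fact",
              tags=["project"], priority=5)
print(mm.summarize_for_ai())

# REPL path: the same store behind the '#memory' commands.
handle_memory_command("#memory show", mm, console)
handle_memory_command("#memory add Prefer concise answers", mm, console)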
{hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.19
+Version: 0.3.20
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.19.dist-info → hanzo-0.3.20.dist-info}/RECORD CHANGED
@@ -1,8 +1,10 @@
 hanzo/__init__.py,sha256=f6N_RcJZ0F9ADrROlvPi1OrgwjF8cWQm34cml8hb1zk,169
 hanzo/__main__.py,sha256=F3Vz0Ty3bdAj_8oxyETMIqxlmNRnJOAFB1XPxbyfouI,105
-hanzo/cli.py,sha256=
-hanzo/dev.py,sha256=
+hanzo/cli.py,sha256=IOwil9LIy5HR_08Yei9-u6ScNG0y0hacDJIeY9GNKMs,18586
+hanzo/dev.py,sha256=yOB_v6aF2VxE_W9yyFWEHEQRdzSs6H0h3LTwDI0hMhI,102995
+hanzo/fallback_handler.py,sha256=1xhkAI6_ZI1klB2YXYf3tiJCAYhLW8W7lqcaKYDkg0w,9917
 hanzo/mcp_server.py,sha256=XVygFNn-9CVdu8c95sP7fQjIRtA8K7nsGpgQNe44BRg,460
+hanzo/memory_manager.py,sha256=BjnHN0Fu6Lqg3aKGTVPKfQxDBjtengjwG3ac8zoirrQ,15101
 hanzo/orchestrator_config.py,sha256=JV7DS8aVZwBJ9XzgkQronFwV_A50QyXG3MH_pKwmCB8,11006
 hanzo/repl.py,sha256=sW1quuqGkJ_AqgjN2vLNdtWgKDlXIkXiO9Bo1QQI0G4,1089
 hanzo/commands/__init__.py,sha256=7rh94TPNhdq4gJBJS0Ayf0fGNChQYCQCJcJPmYYehiQ,182
@@ -24,7 +26,7 @@ hanzo/utils/__init__.py,sha256=5RRwKI852vp8smr4xCRgeKfn7dLEnHbdXGfVYTZ5jDQ,69
 hanzo/utils/config.py,sha256=FD_LoBpcoF5dgJ7WL4o6LDp2pdOy8kS-dJ6iRO2GcGM,4728
 hanzo/utils/net_check.py,sha256=YFbJ65SzfDYHkHLZe3n51VhId1VI3zhyx8p6BM-l6jE,3017
 hanzo/utils/output.py,sha256=W0j3psF07vJiX4s02gbN4zYWfbKNsb8TSIoagBSf5vA,2704
-hanzo-0.3.
-hanzo-0.3.
-hanzo-0.3.
-hanzo-0.3.
+hanzo-0.3.20.dist-info/METADATA,sha256=bJW_-S27FING_R4QNuRn2RvfURWYspGnW8CoalqdFOI,4279
+hanzo-0.3.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hanzo-0.3.20.dist-info/entry_points.txt,sha256=pQLPMdqOXU_2BfTcMDhkqTCDNk_H6ApvYuSaWcuQOOw,171
+hanzo-0.3.20.dist-info/RECORD,,
File without changes
|
|
File without changes
|
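For anyone auditing the new files, the RECORD hashes above follow the wheel spec: urlsafe base64 of the SHA-256 digest with padding stripped, followed by the byte size. A small sketch to recompute an entry from an unpacked wheel (the path is illustrative, relative to the wheel root):

import base64
import hashlib
from pathlib import Path

def record_line(path: str) -> str:
    """Rebuild a RECORD-style entry: path,sha256=<urlsafe b64 digest>,size."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

# Compare against the RECORD line for hanzo/memory_manager.py above.
print(record_line("hanzo/memory_manager.py"))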