hanzo 0.3.21__py3-none-any.whl → 0.3.23__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of hanzo might be problematic.
- hanzo/base_agent.py +517 -0
- hanzo/batch_orchestrator.py +988 -0
- hanzo/cli.py +1 -1
- hanzo/commands/repl.py +5 -2
- hanzo/dev.py +463 -261
- hanzo/fallback_handler.py +78 -52
- hanzo/memory_manager.py +145 -122
- hanzo/model_registry.py +399 -0
- hanzo/rate_limiter.py +59 -74
- hanzo/streaming.py +91 -70
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/METADATA +1 -1
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/RECORD +14 -11
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/WHEEL +0 -0
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/entry_points.txt +0 -0
hanzo/streaming.py
CHANGED
@@ -3,42 +3,44 @@ Streaming response handler for Hanzo Dev.
 Provides real-time feedback as AI generates responses.
 """
 
+import time
 import asyncio
-from typing import
-
+from typing import Callable, Optional, AsyncGenerator
+
 from rich.live import Live
 from rich.panel import Panel
+from rich.console import Console
 from rich.markdown import Markdown
-import time
 
 
 class StreamingHandler:
     """Handles streaming responses from AI models."""
-
+
     def __init__(self, console: Console = None):
         """Initialize streaming handler."""
         self.console = console or Console()
         self.current_response = ""
         self.is_streaming = False
-
+
     async def stream_openai(self, client, messages: list, model: str = "gpt-4") -> str:
         """Stream response from OpenAI API."""
         try:
             stream = await client.chat.completions.create(
-                model=model,
-                messages=messages,
-                stream=True,
-                max_tokens=1000
+                model=model, messages=messages, stream=True, max_tokens=1000
             )
-
+
             self.current_response = ""
             self.is_streaming = True
-
+
             with Live(
-                Panel(
-
+                Panel(
+                    "",
+                    title="[bold cyan]AI Response[/bold cyan]",
+                    title_align="left",
+                    border_style="dim cyan",
+                ),
                 console=self.console,
-                refresh_per_second=10
+                refresh_per_second=10,
             ) as live:
                 async for chunk in stream:
                     if chunk.choices[0].delta.content:
@@ -49,34 +51,38 @@ class StreamingHandler:
                                 title="[bold cyan]AI Response[/bold cyan]",
                                 title_align="left",
                                 border_style="dim cyan",
-                                padding=(1, 2)
+                                padding=(1, 2),
                             )
                         )
-
+
             self.is_streaming = False
             return self.current_response
-
+
         except Exception as e:
             self.console.print(f"[red]Streaming error: {e}[/red]")
             self.is_streaming = False
             return None
-
-    async def stream_anthropic(
+
+    async def stream_anthropic(
+        self, client, messages: list, model: str = "claude-3-5-sonnet-20241022"
+    ) -> str:
         """Stream response from Anthropic API."""
         try:
             self.current_response = ""
             self.is_streaming = True
-
+
             with Live(
-                Panel(
-
+                Panel(
+                    "",
+                    title="[bold cyan]AI Response[/bold cyan]",
+                    title_align="left",
+                    border_style="dim cyan",
+                ),
                 console=self.console,
-                refresh_per_second=10
+                refresh_per_second=10,
             ) as live:
                 async with client.messages.stream(
-                    model=model,
-                    messages=messages,
-                    max_tokens=1000
+                    model=model, messages=messages, max_tokens=1000
                 ) as stream:
                     async for text in stream.text_stream:
                         self.current_response += text
@@ -86,42 +92,47 @@ class StreamingHandler:
                                 title="[bold cyan]AI Response[/bold cyan]",
                                 title_align="left",
                                 border_style="dim cyan",
-                                padding=(1, 2)
+                                padding=(1, 2),
                             )
                         )
-
+
             self.is_streaming = False
             return self.current_response
-
+
         except Exception as e:
             self.console.print(f"[red]Streaming error: {e}[/red]")
             self.is_streaming = False
             return None
-
+
     async def stream_ollama(self, message: str, model: str = "llama3.2") -> str:
         """Stream response from Ollama local model."""
         import httpx
-
+
         try:
             self.current_response = ""
             self.is_streaming = True
-
+
             with Live(
-                Panel(
-
+                Panel(
+                    "",
+                    title="[bold cyan]AI Response (Local)[/bold cyan]",
+                    title_align="left",
+                    border_style="dim cyan",
+                ),
                 console=self.console,
-                refresh_per_second=10
+                refresh_per_second=10,
             ) as live:
                 async with httpx.AsyncClient() as client:
                     async with client.stream(
                         "POST",
                         "http://localhost:11434/api/generate",
                         json={"model": model, "prompt": message, "stream": True},
-                        timeout=60.0
+                        timeout=60.0,
                     ) as response:
                         async for line in response.aiter_lines():
                             if line:
                                 import json
+
                                 data = json.loads(line)
                                 if "response" in data:
                                     self.current_response += data["response"]
@@ -131,52 +142,56 @@ class StreamingHandler:
                                             title="[bold cyan]AI Response (Local)[/bold cyan]",
                                             title_align="left",
                                             border_style="dim cyan",
-                                            padding=(1, 2)
+                                            padding=(1, 2),
                                         )
                                     )
                                 if data.get("done", False):
                                     break
-
+
             self.is_streaming = False
             return self.current_response
-
+
         except Exception as e:
             self.console.print(f"[red]Ollama streaming error: {e}[/red]")
             self.is_streaming = False
             return None
-
+
     async def simulate_streaming(self, text: str, delay: float = 0.02) -> str:
         """Simulate streaming for non-streaming APIs."""
         self.current_response = ""
         self.is_streaming = True
-
+
         words = text.split()
-
+
         with Live(
-            Panel(
-
+            Panel(
+                "",
+                title="[bold cyan]AI Response[/bold cyan]",
+                title_align="left",
+                border_style="dim cyan",
+            ),
             console=self.console,
-            refresh_per_second=20
+            refresh_per_second=20,
         ) as live:
             for i, word in enumerate(words):
                 self.current_response += word
                 if i < len(words) - 1:
                     self.current_response += " "
-
+
                 live.update(
                     Panel(
                         Markdown(self.current_response),
                         title="[bold cyan]AI Response[/bold cyan]",
                         title_align="left",
                         border_style="dim cyan",
-                        padding=(1, 2)
+                        padding=(1, 2),
                     )
                 )
                 await asyncio.sleep(delay)
-
+
         self.is_streaming = False
         return self.current_response
-
+
     def stop_streaming(self):
         """Stop current streaming operation."""
         self.is_streaming = False
@@ -186,34 +201,38 @@ class StreamingHandler:
 
 class TypewriterEffect:
     """Provides typewriter effect for text output."""
-
+
     def __init__(self, console: Console = None):
         self.console = console or Console()
-
+
     async def type_text(self, text: str, speed: float = 0.03):
         """Type text with typewriter effect."""
         for char in text:
             self.console.print(char, end="")
             await asyncio.sleep(speed)
         self.console.print()  # New line at end
-
+
     async def type_code(self, code: str, language: str = "python", speed: float = 0.01):
         """Type code with syntax highlighting."""
         from rich.syntax import Syntax
-
+
         # Build up code progressively
         current_code = ""
-        lines = code.split(
-
+        lines = code.split("\n")
+
         with Live(console=self.console, refresh_per_second=30) as live:
             for line in lines:
                 for char in line:
                     current_code += char
-                    syntax = Syntax(
+                    syntax = Syntax(
+                        current_code, language, theme="monokai", line_numbers=True
+                    )
                     live.update(syntax)
                     await asyncio.sleep(speed)
-                current_code +=
-                syntax = Syntax(
+                current_code += "\n"
+                syntax = Syntax(
+                    current_code, language, theme="monokai", line_numbers=True
+                )
                 live.update(syntax)
 
 
@@ -222,50 +241,52 @@ async def stream_with_fallback(message: str, console: Console = None) -> Optiona
     Stream response with automatic fallback to available options.
     """
     import os
+
     handler = StreamingHandler(console)
-
+
     # Try OpenAI streaming
     if os.getenv("OPENAI_API_KEY"):
         try:
             from openai import AsyncOpenAI
+
             client = AsyncOpenAI()
             return await handler.stream_openai(
-                client,
-                [{"role": "user", "content": message}]
+                client, [{"role": "user", "content": message}]
             )
         except Exception as e:
             if console:
                 console.print(f"[yellow]OpenAI streaming failed: {e}[/yellow]")
-
+
     # Try Anthropic streaming
     if os.getenv("ANTHROPIC_API_KEY"):
         try:
             from anthropic import AsyncAnthropic
+
             client = AsyncAnthropic()
             return await handler.stream_anthropic(
-                client,
-                [{"role": "user", "content": message}]
+                client, [{"role": "user", "content": message}]
             )
         except Exception as e:
             if console:
                 console.print(f"[yellow]Anthropic streaming failed: {e}[/yellow]")
-
+
     # Try Ollama streaming
     try:
         return await handler.stream_ollama(message)
     except:
         pass
-
+
     # Fallback to non-streaming with simulated effect
     if console:
         console.print("[yellow]Falling back to non-streaming mode[/yellow]")
-
+
     # Get response from fallback handler
     from .fallback_handler import smart_chat
+
    response = await smart_chat(message, console)
-
+
     if response:
         # Simulate streaming
         return await handler.simulate_streaming(response)
-
-    return None
+
+    return None
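The streaming.py changes above are largely mechanical reformatting (trailing commas, wrapped arguments, explicit imports); the module's control flow is unchanged. For orientation, here is a minimal sketch of driving the module's public entry point, stream_with_fallback, which tries OpenAI, then Anthropic, then a local Ollama server, and finally simulates streaming over a non-streaming response. The sample prompt and the asyncio.run entry point are illustrative, not part of the package:

import asyncio

from rich.console import Console

from hanzo.streaming import stream_with_fallback


async def main() -> None:
    console = Console()
    # Tries OpenAI, then Anthropic, then a local Ollama server, and finally
    # falls back to simulated streaming; returns None if every backend fails.
    response = await stream_with_fallback("Summarize this repository", console)
    if response is None:
        console.print("[red]No backend produced a response[/red]")


if __name__ == "__main__":
    asyncio.run(main())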
{hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hanzo
-Version: 0.3.21
+Version: 0.3.23
 Summary: Hanzo AI - Complete AI Infrastructure Platform with CLI, Router, MCP, and Agent Runtime
 Project-URL: Homepage, https://hanzo.ai
 Project-URL: Repository, https://github.com/hanzoai/python-sdk
{hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/RECORD
CHANGED
@@ -1,14 +1,17 @@
 hanzo/__init__.py,sha256=f6N_RcJZ0F9ADrROlvPi1OrgwjF8cWQm34cml8hb1zk,169
 hanzo/__main__.py,sha256=F3Vz0Ty3bdAj_8oxyETMIqxlmNRnJOAFB1XPxbyfouI,105
-hanzo/
-hanzo/
-hanzo/
+hanzo/base_agent.py,sha256=GIKnJU0rCC7Jgmahi0ynxBKAfscb6CQBP261_aXDl1g,15325
+hanzo/batch_orchestrator.py,sha256=dCuWyEA9ulM5SWfz5Ecep-IK615AO56pXmLOCz3fv6c,35817
+hanzo/cli.py,sha256=5e_ee4_dOuiVWf--_ycsyBETkMOnrn0a30kORwwTTcY,18586
+hanzo/dev.py,sha256=56ug80N_UDaOuXkq9-QwjZkqn4nc2h-NNrLBqwFSFYc,105807
+hanzo/fallback_handler.py,sha256=avGujRF5a_x2Q-Q5ebQG87f54DcYfPFwlMMFhG2V45k,10181
 hanzo/mcp_server.py,sha256=XVygFNn-9CVdu8c95sP7fQjIRtA8K7nsGpgQNe44BRg,460
-hanzo/memory_manager.py,sha256=
+hanzo/memory_manager.py,sha256=eoaMS7f7rb2n9QjwcsCOz4EyyYx-RZ1gJi5Yzwc-aWU,14883
+hanzo/model_registry.py,sha256=q5i9ARhdn2io_5FvDsA8wsvpZ3_KdyaPVYQN__Nsui4,11921
 hanzo/orchestrator_config.py,sha256=JV7DS8aVZwBJ9XzgkQronFwV_A50QyXG3MH_pKwmCB8,11006
-hanzo/rate_limiter.py,sha256=
+hanzo/rate_limiter.py,sha256=jK166q_IAukHMyT-k0BQVVO2JxmkVpnJjTHqR17Q-7A,10858
 hanzo/repl.py,sha256=sW1quuqGkJ_AqgjN2vLNdtWgKDlXIkXiO9Bo1QQI0G4,1089
-hanzo/streaming.py,sha256=
+hanzo/streaming.py,sha256=McLFkJZ0EEcnGKJU0jE3aJj-JO0sfCGS6Q44TcPV1LI,10209
 hanzo/commands/__init__.py,sha256=7rh94TPNhdq4gJBJS0Ayf0fGNChQYCQCJcJPmYYehiQ,182
 hanzo/commands/agent.py,sha256=DXCfuxHfmC90IoIOL6BJyp7h2yNUo-VIxrfl4OMh8CU,3480
 hanzo/commands/auth.py,sha256=JrM-EV4XDHzNDJeGJMjAr69T0Rxez53HEzlNo0jQ8nE,11187
@@ -18,7 +21,7 @@ hanzo/commands/config.py,sha256=xAzM6n9GhdVIqtn7JrHfLRzj1sshmxCujo7iet2hHqE,7490
 hanzo/commands/mcp.py,sha256=u1uEKDY6gUIa7VymEnRzy0ZphdIKYoNwPSeffZaiKnk,7418
 hanzo/commands/miner.py,sha256=_mZT9nQcT2QSSxI0rDDKuSBVdsg_uE_N_j3PXOHoj-Q,11677
 hanzo/commands/network.py,sha256=wJDxGIxJqc6FzQhbAn0Mw-WGCPUeCOsxmdU6GCmOhgM,11408
-hanzo/commands/repl.py,sha256=
+hanzo/commands/repl.py,sha256=Frc7tVTThFz0M_vwyA-m4pmfGDUr0WZGsxdu6g0ylNU,6081
 hanzo/commands/tools.py,sha256=fG27wRweVmaFJowBpmwp5PgkRUtIF8bIlu_hGWr69Ss,10393
 hanzo/interactive/__init__.py,sha256=ENHkGOqu-JYI05lqoOKDczJGl96oq6nM476EPhflAbI,74
 hanzo/interactive/dashboard.py,sha256=XB5H_PMlReriCip-wW9iuUiJQOAtSATFG8EyhhFhItU,3842
@@ -28,7 +31,7 @@ hanzo/utils/__init__.py,sha256=5RRwKI852vp8smr4xCRgeKfn7dLEnHbdXGfVYTZ5jDQ,69
 hanzo/utils/config.py,sha256=FD_LoBpcoF5dgJ7WL4o6LDp2pdOy8kS-dJ6iRO2GcGM,4728
 hanzo/utils/net_check.py,sha256=YFbJ65SzfDYHkHLZe3n51VhId1VI3zhyx8p6BM-l6jE,3017
 hanzo/utils/output.py,sha256=W0j3psF07vJiX4s02gbN4zYWfbKNsb8TSIoagBSf5vA,2704
-hanzo-0.3.
-hanzo-0.3.
-hanzo-0.3.
-hanzo-0.3.
+hanzo-0.3.23.dist-info/METADATA,sha256=eGqS7GX-lV1HYIJpSPq7KdKkOEph9tAYyE57JJfQWgM,4279
+hanzo-0.3.23.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hanzo-0.3.23.dist-info/entry_points.txt,sha256=pQLPMdqOXU_2BfTcMDhkqTCDNk_H6ApvYuSaWcuQOOw,171
+hanzo-0.3.23.dist-info/RECORD,,
{hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/WHEEL
File without changes
{hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/entry_points.txt
File without changes
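Each RECORD entry above has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the urlsafe-base64 SHA-256 of the file with trailing "=" padding stripped (PEP 376 / PEP 427). A quick sketch for recomputing a digest to verify an entry; the example path in the comment assumes you run it against an unpacked 0.3.23 wheel:

import base64
import hashlib


def record_hash(path: str) -> str:
    # urlsafe base64 of the SHA-256 digest, "=" padding stripped,
    # matching the sha256=... field in the RECORD lines above
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


# e.g. record_hash("hanzo/streaming.py") on an unpacked 0.3.23 wheel should
# return "sha256=McLFkJZ0EEcnGKJU0jE3aJj-JO0sfCGS6Q44TcPV1LI"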