agentviz 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,272 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentviz
3
+ Version: 0.3.0
4
+ Summary: Real-time 3D visualization for multi-agent AI systems
5
+ License: MIT
6
+ Project-URL: Homepage, https://github.com/tonystark3110/AGENTVIZ
7
+ Project-URL: Repository, https://github.com/tonystark3110/AGENTVIZ
8
+ Project-URL: Issues, https://github.com/tonystark3110/AGENTVIZ/issues
9
+ Project-URL: Change Log, https://github.com/tonystark3110/AGENTVIZ/releases
10
+ Keywords: ai-agents,multi-agent,visualization,mcp,llm,observability,tracing,opentelemetry
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Topic :: Software Development :: Libraries
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Python: >=3.10
21
+ Description-Content-Type: text/markdown
22
+ Requires-Dist: fastapi>=0.110.0
23
+ Requires-Dist: uvicorn[standard]>=0.29.0
24
+ Requires-Dist: websockets>=12.0
25
+ Requires-Dist: httpx>=0.27.0
26
+ Provides-Extra: langchain
27
+ Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
28
+ Provides-Extra: langgraph
29
+ Requires-Dist: langgraph>=0.1.0; extra == "langgraph"
30
+ Provides-Extra: crewai
31
+ Requires-Dist: crewai>=0.28.0; extra == "crewai"
32
+ Provides-Extra: openai-agents
33
+ Requires-Dist: openai-agents>=0.0.3; extra == "openai-agents"
34
+ Provides-Extra: otel
35
+ Requires-Dist: opentelemetry-sdk>=1.20.0; extra == "otel"
36
+ Provides-Extra: mcp
37
+ Provides-Extra: all
38
+ Requires-Dist: langchain-core>=0.1.0; extra == "all"
39
+ Requires-Dist: langgraph>=0.1.0; extra == "all"
40
+ Requires-Dist: crewai>=0.28.0; extra == "all"
41
+ Requires-Dist: openai-agents>=0.0.3; extra == "all"
42
+ Requires-Dist: opentelemetry-sdk>=1.20.0; extra == "all"
43
+
44
+ # agentviz
45
+
46
+ Real-time 3D visualization for multi-agent AI systems. Drop one decorator on your agent functions and watch them appear as robots in a live 3D scene — calls, responses, token streams, errors, and latency all rendered as they happen.
47
+
48
+ ```
49
+ pip install agentviz
50
+ agentviz serve
51
+ ```
52
+
53
+ ---
54
+
55
+ ## How it works
56
+
57
+ 1. You run `agentviz serve` — starts a local FastAPI server + opens the 3D UI in your browser
58
+ 2. You decorate your agent functions with `@agentviz.trace`
59
+ 3. Every call, response, error, and token stream is sent to the server and rendered live
60
+
61
+ The server can be self-hosted anywhere (Linode, Railway, Docker). Multiple agents on different machines can all connect to the same room.
62
+
63
+ ---
64
+
65
+ ## Quick start
66
+
67
+ ### Simplest — one decorator
68
+
69
+ ```python
70
+ import agentviz
71
+
72
+ agentviz.init(server="http://localhost:8000")
73
+
74
+ @agentviz.trace
75
+ async def fetch_data(query: str) -> str:
76
+     return await db.query(query)
77
+
78
+ @agentviz.trace(name="Planner", to="orchestrator", color="#9B59B6")
79
+ async def plan(goal: str) -> str:
80
+     return await llm.plan(goal)
81
+ ```
82
+
83
+ That's it. Run your agents — they show up in the UI automatically.
84
+
85
+ ### Environment variables (zero code changes)
86
+
87
+ ```bash
88
+ export AGENTVIZ_SERVER=http://localhost:8000
89
+ export AGENTVIZ_PROJECT=my-team
90
+ ```
91
+
92
+ Then just use `@agentviz.trace` with no `init()` call.
93
+
94
+ ### WebSocket SDK (full control)
95
+
96
+ ```python
97
+ from agentviz import AgentVizClient
98
+
99
+ async with AgentVizClient(
100
+     server="ws://localhost:8000/agent-ws",
101
+     name="DataFetcher",
102
+     color="#E74C3C",
103
+ ) as client:
104
+     call_id = await client.emit_call(to="orchestrator", message="Fetching records…")
105
+     result = await do_work()
106
+     await client.emit_response(to="orchestrator", call_id=call_id, result=result)
107
+ ```
108
+
109
+ ### HTTP client (serverless / AWS Lambda / Cloud Run)
110
+
111
+ ```python
112
+ from agentviz import HttpAgentVizClient
113
+
114
+ client = HttpAgentVizClient(server="https://my-agentviz.railway.app", name="Lambda")
115
+
116
+ call_id = client.emit_call(to="orchestrator", message="Processing event…")
117
+ result = process(event)
118
+ client.emit_response(to="orchestrator", call_id=call_id, result=result)
119
+ ```
120
+
121
+ ### Token streaming
122
+
123
+ ```python
124
+ async for chunk in llm.stream(prompt):
125
+     await client.emit_token(chunk.delta)
126
+ await client.emit_stream_end()
127
+ ```
128
+
129
+ Tokens accumulate in a speech bubble above the robot in real-time.
130
+
131
+ ---
132
+
133
+ ## Features
134
+
135
+ - **`@agentviz.trace`** — works on any `async` or `sync` function, no boilerplate
136
+ - **Trace trees** — nested calls automatically build a parent→child hierarchy (via `contextvars`)
137
+ - **Token streaming** — live speech bubble above each robot as the LLM generates
138
+ - **Error visualization** — red glow + shake animation on agent errors
139
+ - **Latency labels** — floating ms labels between agents, color-coded by speed
140
+ - **Dynamic agents** — robots spawn and despawn as agents connect and disconnect
141
+ - **5 layouts** — `semicircle`, `pipeline`, `star`, `mesh`, `grid` — switch live from the UI
142
+ - **Room isolation** — `?room=project-name` separates teams on the same server
143
+ - **Session recording** — SQLite-backed, replay any past session from the UI
144
+ - **No orchestrator required** — works for peer-to-peer autonomous agent systems
145
+
146
+ ---
147
+
148
+ ## Integrations
149
+
150
+ ### LangChain
151
+
152
+ ```python
153
+ from agentviz.integrations.langchain import AgentVizCallbackHandler
154
+ from agentviz import HttpAgentVizClient
155
+
156
+ client = HttpAgentVizClient(server="http://localhost:8000", name="LangChainAgent")
157
+ handler = AgentVizCallbackHandler(client)
158
+
159
+ chain.invoke({"input": "..."}, config={"callbacks": [handler]})
160
+ ```
161
+
162
+ ### LangGraph
163
+
164
+ ```python
165
+ from agentviz.integrations.langgraph import get_langgraph_callbacks
166
+
167
+ callbacks = get_langgraph_callbacks(client)
168
+ graph.invoke(state, config={"callbacks": callbacks})
169
+ ```
170
+
171
+ ### OpenAI Agents SDK
172
+
173
+ ```python
174
+ from agentviz.integrations.openai_agents import patch_openai_agents
175
+ import agentviz
176
+
177
+ agentviz.init(server="http://localhost:8000")
178
+ patch_openai_agents() # patches globally — all agents auto-traced from here
179
+ ```
180
+
181
+ ### OpenTelemetry
182
+
183
+ ```python
184
+ from opentelemetry.sdk.trace import TracerProvider
185
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
186
+ from agentviz.integrations.otel import AgentVizSpanExporter
187
+
188
+ provider = TracerProvider()
189
+ provider.add_span_processor(BatchSpanProcessor(AgentVizSpanExporter()))
190
+ ```
191
+
192
+ Every OTEL span becomes a call/response/error event in the 3D scene automatically.
193
+
194
+ ### MCP server (Claude Desktop / Cursor)
195
+
196
+ Add to your MCP host config:
197
+
198
+ ```json
199
+ {
200
+   "mcpServers": {
201
+     "agentviz": {
202
+       "command": "python",
203
+       "args": ["-m", "agentviz.mcp_server"],
204
+       "env": {
205
+         "AGENTVIZ_SERVER": "http://localhost:8000"
206
+       }
207
+     }
208
+   }
209
+ }
210
+ ```
211
+
212
+ Then use `agentviz_emit_call`, `agentviz_emit_response`, `agentviz_emit_token` etc. as tools from within Claude.
213
+
214
+ ---
215
+
216
+ ## Self-hosting
217
+
218
+ ### Docker
219
+
220
+ ```bash
221
+ docker build -t agentviz .
222
+ docker run -p 8000:8000 agentviz
223
+ ```
224
+
225
+ ### docker-compose
226
+
227
+ ```bash
228
+ docker-compose up
229
+ ```
230
+
231
+ ### Railway / Render / Fly.io
232
+
233
+ Push the repo and set the start command to:
234
+
235
+ ```
236
+ agentviz serve --host 0.0.0.0 --port $PORT --no-browser
237
+ ```
238
+
239
+ ---
240
+
241
+ ## CLI
242
+
243
+ ```
244
+ agentviz serve # start server, open browser
245
+ agentviz serve --port 9000 # custom port
246
+ agentviz serve --no-browser # headless (for servers)
247
+ agentviz serve --demo # also start demo agents
248
+ agentviz --version
249
+ ```
250
+
251
+ Or:
252
+
253
+ ```
254
+ python -m agentviz serve
255
+ ```
256
+
257
+ ---
258
+
259
+ ## Multi-room / multi-team
260
+
261
+ Each URL `?room=<name>` gets its own isolated scene, agent registry, and session history. Share a single deployed server across multiple teams:
262
+
263
+ ```
264
+ https://agentviz.mycompany.com/?room=search-team
265
+ https://agentviz.mycompany.com/?room=billing-team
266
+ ```
267
+
268
+ ---
269
+
270
+ ## License
271
+
272
+ MIT
@@ -0,0 +1,229 @@
1
+ # agentviz
2
+
3
+ Real-time 3D visualization for multi-agent AI systems. Drop one decorator on your agent functions and watch them appear as robots in a live 3D scene — calls, responses, token streams, errors, and latency all rendered as they happen.
4
+
5
+ ```
6
+ pip install agentviz
7
+ agentviz serve
8
+ ```
9
+
10
+ ---
11
+
12
+ ## How it works
13
+
14
+ 1. You run `agentviz serve` — starts a local FastAPI server + opens the 3D UI in your browser
15
+ 2. You decorate your agent functions with `@agentviz.trace`
16
+ 3. Every call, response, error, and token stream is sent to the server and rendered live
17
+
18
+ The server can be self-hosted anywhere (Linode, Railway, Docker). Multiple agents on different machines can all connect to the same room.
19
+
20
+ ---
21
+
22
+ ## Quick start
23
+
24
+ ### Simplest — one decorator
25
+
26
+ ```python
27
+ import agentviz
28
+
29
+ agentviz.init(server="http://localhost:8000")
30
+
31
+ @agentviz.trace
32
+ async def fetch_data(query: str) -> str:
33
+     return await db.query(query)
34
+
35
+ @agentviz.trace(name="Planner", to="orchestrator", color="#9B59B6")
36
+ async def plan(goal: str) -> str:
37
+     return await llm.plan(goal)
38
+ ```
39
+
40
+ That's it. Run your agents — they show up in the UI automatically.
41
+
42
+ ### Environment variables (zero code changes)
43
+
44
+ ```bash
45
+ export AGENTVIZ_SERVER=http://localhost:8000
46
+ export AGENTVIZ_PROJECT=my-team
47
+ ```
48
+
49
+ Then just use `@agentviz.trace` with no `init()` call.
50
+
51
+ ### WebSocket SDK (full control)
52
+
53
+ ```python
54
+ from agentviz import AgentVizClient
55
+
56
+ async with AgentVizClient(
57
+     server="ws://localhost:8000/agent-ws",
58
+     name="DataFetcher",
59
+     color="#E74C3C",
60
+ ) as client:
61
+     call_id = await client.emit_call(to="orchestrator", message="Fetching records…")
62
+     result = await do_work()
63
+     await client.emit_response(to="orchestrator", call_id=call_id, result=result)
64
+ ```
65
+
66
+ ### HTTP client (serverless / AWS Lambda / Cloud Run)
67
+
68
+ ```python
69
+ from agentviz import HttpAgentVizClient
70
+
71
+ client = HttpAgentVizClient(server="https://my-agentviz.railway.app", name="Lambda")
72
+
73
+ call_id = client.emit_call(to="orchestrator", message="Processing event…")
74
+ result = process(event)
75
+ client.emit_response(to="orchestrator", call_id=call_id, result=result)
76
+ ```
77
+
78
+ ### Token streaming
79
+
80
+ ```python
81
+ async for chunk in llm.stream(prompt):
82
+     await client.emit_token(chunk.delta)
83
+ await client.emit_stream_end()
84
+ ```
85
+
86
+ Tokens accumulate in a speech bubble above the robot in real-time.
87
+
88
+ ---
89
+
90
+ ## Features
91
+
92
+ - **`@agentviz.trace`** — works on any `async` or `sync` function, no boilerplate
93
+ - **Trace trees** — nested calls automatically build a parent→child hierarchy (via `contextvars`)
94
+ - **Token streaming** — live speech bubble above each robot as the LLM generates
95
+ - **Error visualization** — red glow + shake animation on agent errors
96
+ - **Latency labels** — floating ms labels between agents, color-coded by speed
97
+ - **Dynamic agents** — robots spawn and despawn as agents connect and disconnect
98
+ - **5 layouts** — `semicircle`, `pipeline`, `star`, `mesh`, `grid` — switch live from the UI
99
+ - **Room isolation** — `?room=project-name` separates teams on the same server
100
+ - **Session recording** — SQLite-backed, replay any past session from the UI
101
+ - **No orchestrator required** — works for peer-to-peer autonomous agent systems
102
+
103
+ ---
104
+
105
+ ## Integrations
106
+
107
+ ### LangChain
108
+
109
+ ```python
110
+ from agentviz.integrations.langchain import AgentVizCallbackHandler
111
+ from agentviz import HttpAgentVizClient
112
+
113
+ client = HttpAgentVizClient(server="http://localhost:8000", name="LangChainAgent")
114
+ handler = AgentVizCallbackHandler(client)
115
+
116
+ chain.invoke({"input": "..."}, config={"callbacks": [handler]})
117
+ ```
118
+
119
+ ### LangGraph
120
+
121
+ ```python
122
+ from agentviz.integrations.langgraph import get_langgraph_callbacks
123
+
124
+ callbacks = get_langgraph_callbacks(client)
125
+ graph.invoke(state, config={"callbacks": callbacks})
126
+ ```
127
+
128
+ ### OpenAI Agents SDK
129
+
130
+ ```python
131
+ from agentviz.integrations.openai_agents import patch_openai_agents
132
+ import agentviz
133
+
134
+ agentviz.init(server="http://localhost:8000")
135
+ patch_openai_agents() # patches globally — all agents auto-traced from here
136
+ ```
137
+
138
+ ### OpenTelemetry
139
+
140
+ ```python
141
+ from opentelemetry.sdk.trace import TracerProvider
142
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
143
+ from agentviz.integrations.otel import AgentVizSpanExporter
144
+
145
+ provider = TracerProvider()
146
+ provider.add_span_processor(BatchSpanProcessor(AgentVizSpanExporter()))
147
+ ```
148
+
149
+ Every OTEL span becomes a call/response/error event in the 3D scene automatically.
150
+
151
+ ### MCP server (Claude Desktop / Cursor)
152
+
153
+ Add to your MCP host config:
154
+
155
+ ```json
156
+ {
157
+   "mcpServers": {
158
+     "agentviz": {
159
+       "command": "python",
160
+       "args": ["-m", "agentviz.mcp_server"],
161
+       "env": {
162
+         "AGENTVIZ_SERVER": "http://localhost:8000"
163
+       }
164
+     }
165
+   }
166
+ }
167
+ ```
168
+
169
+ Then use `agentviz_emit_call`, `agentviz_emit_response`, `agentviz_emit_token` etc. as tools from within Claude.
170
+
171
+ ---
172
+
173
+ ## Self-hosting
174
+
175
+ ### Docker
176
+
177
+ ```bash
178
+ docker build -t agentviz .
179
+ docker run -p 8000:8000 agentviz
180
+ ```
181
+
182
+ ### docker-compose
183
+
184
+ ```bash
185
+ docker-compose up
186
+ ```
187
+
188
+ ### Railway / Render / Fly.io
189
+
190
+ Push the repo and set the start command to:
191
+
192
+ ```
193
+ agentviz serve --host 0.0.0.0 --port $PORT --no-browser
194
+ ```
195
+
196
+ ---
197
+
198
+ ## CLI
199
+
200
+ ```
201
+ agentviz serve # start server, open browser
202
+ agentviz serve --port 9000 # custom port
203
+ agentviz serve --no-browser # headless (for servers)
204
+ agentviz serve --demo # also start demo agents
205
+ agentviz --version
206
+ ```
207
+
208
+ Or:
209
+
210
+ ```
211
+ python -m agentviz serve
212
+ ```
213
+
214
+ ---
215
+
216
+ ## Multi-room / multi-team
217
+
218
+ Each URL `?room=<name>` gets its own isolated scene, agent registry, and session history. Share a single deployed server across multiple teams:
219
+
220
+ ```
221
+ https://agentviz.mycompany.com/?room=search-team
222
+ https://agentviz.mycompany.com/?room=billing-team
223
+ ```
224
+
225
+ ---
226
+
227
+ ## License
228
+
229
+ MIT
@@ -0,0 +1,53 @@
1
+ """
2
+ AgentViz — Real-time 3D visualization for multi-agent AI systems.
3
+
4
+ LangChain-style quick start
5
+ ----------------------------
6
+ import agentviz
7
+
8
+ # Option A: env vars (no code changes needed)
9
+ # export AGENTVIZ_SERVER=https://agentviz.yourdomain.com
10
+ # export AGENTVIZ_PROJECT=my-team
11
+
12
+ # Option B: one-line programmatic config
13
+ agentviz.init(server="https://agentviz.yourdomain.com", project="my-team")
14
+
15
+ # Decorate any function — async or sync
16
+ @agentviz.trace
17
+ async def fetch_data(query: str) -> str:
18
+     return await db.query(query)
19
+
20
+ @agentviz.trace(name="Planner", to="orchestrator", color="#9B59B6")
21
+ async def plan(goal: str) -> str:
22
+     return await llm.plan(goal)
23
+
24
+ # Auto-patch every installed framework at once
25
+ agentviz.instrument_all()
26
+
27
+ Manual SDK (full control)
28
+ --------------------------
29
+ from agentviz import AgentVizClient # WebSocket — local / same-network
30
+ from agentviz import HttpAgentVizClient # HTTP — cloud / serverless
31
+ """
32
+
33
+ # ── LangChain-style top-level API ─────────────────────────────────────────────
34
+ from .config import init, configure
35
+ from .tracing import trace, Agent, instrument_all
36
+
37
+ # ── Low-level SDK clients ─────────────────────────────────────────────────────
38
+ from .sdk import AgentVizClient
39
+ from .http_client import HttpAgentVizClient
40
+
41
+ __all__ = [
42
+ # LangChain-style
43
+ "init",
44
+ "configure",
45
+ "trace",
46
+ "Agent",
47
+ "instrument_all",
48
+ # SDK clients
49
+ "AgentVizClient",
50
+ "HttpAgentVizClient",
51
+ ]
52
+
53
+ __version__ = "0.3.0"
@@ -0,0 +1,4 @@
1
+ # Allows: python -m agentviz serve
2
+ from agentviz.cli import cli
3
+
4
+ cli()