agensights 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agensights-0.3.0/.gitignore +11 -0
- agensights-0.3.0/LICENSE +21 -0
- agensights-0.3.0/PKG-INFO +312 -0
- agensights-0.3.0/README.md +279 -0
- agensights-0.3.0/agensights/__init__.py +45 -0
- agensights-0.3.0/agensights/client.py +215 -0
- agensights-0.3.0/agensights/instrument.py +371 -0
- agensights-0.3.0/agensights/integrations.py +458 -0
- agensights-0.3.0/agensights/models.py +46 -0
- agensights-0.3.0/agensights/patch.py +645 -0
- agensights-0.3.0/agensights/trace.py +283 -0
- agensights-0.3.0/pyproject.toml +53 -0
- agensights-0.3.0/tests/__init__.py +0 -0
- agensights-0.3.0/tests/test_client.py +485 -0
- agensights-0.3.0/tests/test_instrument.py +497 -0
agensights-0.3.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 AgenSights

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
agensights-0.3.0/PKG-INFO
ADDED
@@ -0,0 +1,312 @@
Metadata-Version: 2.4
Name: agensights
Version: 0.3.0
Summary: AgenSights SDK - AI Agent Observability. Zero-friction tracking for LLM calls, agents, and tools.
Project-URL: Homepage, https://github.com/agensights/agensights-python
Project-URL: Repository, https://github.com/agensights/agensights-python
Project-URL: Documentation, https://docs.agensights.dev
Project-URL: Bug Tracker, https://github.com/agensights/agensights-python/issues
Author-email: AgenSights <support@agensights.dev>
License: MIT
License-File: LICENSE
Keywords: ai-agents,analytics,anthropic,langchain,llm,monitoring,observability,openai,tracing
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Monitoring
Classifier: Typing :: Typed
Requires-Python: >=3.9
Requires-Dist: httpx>=0.24.0
Provides-Extra: dev
Requires-Dist: pytest-httpx>=0.21.0; extra == 'dev'
Requires-Dist: pytest>=7.0; extra == 'dev'
Provides-Extra: langchain
Requires-Dist: langchain-core>=0.1.0; extra == 'langchain'
Description-Content-Type: text/markdown

# AgenSights Python SDK

Python SDK for [AgenSights](https://github.com/agensights/agensights-python) - AI Agent Observability.

Track LLM calls, tool invocations, and multi-step agent executions with zero-friction auto-instrumentation or manual tracking.

## Installation

```bash
pip install agensights
```

Or install from source:

```bash
pip install -e .
```

## Quick Start — Universal Init (Recommended)

One line at the top of your app patches every supported LLM provider automatically:

```python
import agensights

agensights.init(api_key="sk-dev-xxx")

# That's it. Every OpenAI, Anthropic, Bedrock, Google, Mistral,
# Cohere, and LiteLLM call is now tracked automatically.
```

You can also configure via environment variables (no code changes needed):

```bash
export AGENSIGHTS_API_KEY="sk-dev-xxx"
export AGENSIGHTS_BASE_URL="https://api.agensights.dev/api/v1"
```

```python
import agensights
agensights.init()  # picks up from env vars
```

## Auto-Instrumentation (Per-Client)

Wrap your LLM client once and every call is tracked automatically.

### OpenAI

```python
from openai import OpenAI
from agensights import instrument_openai

client = instrument_openai(
    OpenAI(api_key="sk-xxx"),
    agensights_api_key="sk-dev-xxx",
    agent_name="my-assistant",
)

# Every call is now automatically tracked
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)

# Embeddings are tracked too
embeddings = client.embeddings.create(
    model="text-embedding-3-small",
    input="Hello world",
)
```

### Anthropic

```python
import anthropic
from agensights import instrument_anthropic

client = instrument_anthropic(
    anthropic.Anthropic(api_key="sk-ant-xxx"),
    agensights_api_key="sk-dev-xxx",
    agent_name="claude-agent",
)

# Automatically tracked
message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}],
)
```

### LangChain

```python
from langchain_openai import ChatOpenAI
from agensights.integrations import LangChainCallbackHandler

handler = LangChainCallbackHandler(
    api_key="sk-dev-xxx",
    agent_name="langchain-agent",
)

llm = ChatOpenAI(model="gpt-4o", callbacks=[handler])

# All LLM and tool calls are tracked via callbacks
response = llm.invoke("Hello!")
```

## Agent Hierarchy Tracking

Track multi-agent workflows with automatic parent-child relationships:

```python
from agensights import instrument_openai
from openai import OpenAI

client = instrument_openai(OpenAI(), agensights_api_key="sk-dev-xxx")

with client.trace("find_laptop") as trace:
    with trace.agent("planner") as planner:
        with planner.agent("researcher") as researcher:
            with researcher.tool("web_search"):
                results = do_search("laptops")  # latency auto-measured
            # LLM call auto-captured under researcher agent
            summary = client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": f"Summarize: {results}"}],
            )
        with planner.agent("writer") as writer:
            result = client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": "Write recommendation"}],
            )
```

This produces a full trace tree in the dashboard with parent-child spans linked automatically.

## Manual Tracking

For full control, use the `AgenSights` client directly.

### Single Calls

```python
from agensights import AgenSights

client = AgenSights(api_key="sk-prod-xxx")

# Track a single LLM call
client.track_llm(model="gpt-4o", input_tokens=100, output_tokens=50, latency_ms=300)

# Track a tool call
client.track_tool(tool_name="web_search", latency_ms=150)

# Always close when done
client.close()
```

### Tracing Multi-Step Executions

Use `client.trace()` to group related calls under a single trace:

```python
from agensights import AgenSights

client = AgenSights(api_key="sk-prod-xxx")

with client.trace("support_agent", workflow_id="ticket-456") as t:
    # Track an LLM call
    t.llm_call(model="gpt-4o", input_tokens=100, output_tokens=50, latency_ms=300)

    # Track a tool call
    t.tool_call(tool_name="web_search", latency_ms=150)

    # Use spans for automatic duration tracking
    with t.span("data_processing") as s:
        # ... your code here ...
        pass  # duration is recorded automatically

client.close()
```

### Nested Agent Spans

```python
with client.trace("orchestrator") as t:
    planner = t.agent("planner")
    researcher = planner.agent("researcher")  # sub-agent
    researcher.tool(name="search_api", latency_ms=150)
    researcher.llm_call(model="gpt-4o", input_tokens=100, output_tokens=50, latency_ms=300)

    writer = planner.agent("writer")
    writer.llm_call(model="claude-3-5-sonnet", input_tokens=200, output_tokens=100, latency_ms=400)
```

### Using the Client as a Context Manager

```python
with AgenSights(api_key="sk-prod-xxx") as client:
    client.track_llm(model="gpt-4o", input_tokens=100, output_tokens=50, latency_ms=300)
# Client is automatically closed and flushed
```

## Configuration

### Environment Variables

| Variable | Description |
|----------|-------------|
| `AGENSIGHTS_API_KEY` | Your AgenSights API key (used when `api_key` is not passed) |
| `AGENSIGHTS_BASE_URL` | Backend API base URL (default: `https://api.agensights.com/api/v1`) |

### Client Parameters

| Parameter | Default | Description |
|-----------|---------|-------------|
| `api_key` | `AGENSIGHTS_API_KEY` env var | Your AgenSights API key |
| `base_url` | `AGENSIGHTS_BASE_URL` env var | Backend API base URL |
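
Both parameters can also be passed explicitly, in which case the explicit values are used instead of the environment. A minimal sketch (the base URL below is a placeholder, not a real endpoint):

```python
from agensights import AgenSights

# Explicit arguments take the place of AGENSIGHTS_API_KEY / AGENSIGHTS_BASE_URL.
client = AgenSights(
    api_key="sk-prod-xxx",
    base_url="https://observability.example.com/api/v1",  # placeholder URL
)
```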

### Auto-Instrumentation Parameters

| Parameter | Default | Description |
|-----------|---------|-------------|
| `agensights_api_key` | `None` | API key (or pass `agensights_client` instead) |
| `agensights_client` | `None` | Pre-configured `AgenSights` instance |
| `agent_name` | `None` | Name to tag all events with |
| `base_url` | `None` | Override backend URL (falls back to env var) |

## Error Tracking

Errors are automatically captured during auto-instrumentation. For manual tracking:

```python
client.track_llm(
    model="gpt-4o",
    input_tokens=100,
    output_tokens=0,
    latency_ms=500,
    status="error",
    error_code="rate_limit",
)
```

## How It Works

- **Universal init** (`agensights.init()`) patches all supported LLM providers at the module level.
- **Auto-instrumentation** wraps LLM client methods (e.g., `chat.completions.create`) to capture model, tokens, latency, and errors transparently.
- Events are buffered locally and sent in batches to the AgenSights backend.
- The buffer flushes automatically every 5 seconds or when 100 events are accumulated.
- Call `client.flush()` to force an immediate send.
- Call `client.close()` to flush and release resources, as sketched below.
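
In a short-lived script the 5-second timer may never fire, so it is worth forcing delivery on the way out. A minimal sketch using only the calls documented above:

```python
from agensights import AgenSights

client = AgenSights(api_key="sk-prod-xxx")
try:
    client.track_llm(model="gpt-4o", input_tokens=100, output_tokens=50, latency_ms=300)
    client.flush()  # send buffered events now rather than waiting for the 5 s timer
finally:
    client.close()  # final flush, then release resources
```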

## Supported Providers

| Provider | `agensights.init()` | `instrument_*()` |
|----------|---------------------|-------------------|
| OpenAI | Auto-patched | `instrument_openai()` |
| Anthropic | Auto-patched | `instrument_anthropic()` |
| AWS Bedrock | Auto-patched | via `init()` |
| Google Gemini | Auto-patched | via `init()` |
| Mistral AI | Auto-patched | via `init()` |
| Cohere | Auto-patched | via `init()` |
| LiteLLM | Auto-patched | via `init()` |
| LangChain | — | `LangChainCallbackHandler` |
| CrewAI | — | `CrewAITracker` |
| AutoGen | — | `AutoGenTracker` |
| Google ADK | — | `GoogleADKTracker` |
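
For the rows marked "via `init()`" there is no standalone wrapper; the module-level patch is the only integration point. As an illustration (a sketch assuming the `litellm` package is installed; nothing here is SDK-specific API beyond `init()`), a plain LiteLLM call needs no wrapping once `init()` has run:

```python
import agensights
import litellm

agensights.init(api_key="sk-dev-xxx")

# Tracked by the module-level patch applied by init(); no client wrapping needed.
response = litellm.completion(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
)
```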

## Development

```bash
pip install -e ".[dev]"
pytest
```

## License

MIT - see [LICENSE](LICENSE) for details.

agensights-0.3.0/README.md
ADDED
@@ -0,0 +1,279 @@
(Identical to the README body embedded in PKG-INFO above, lines 34-312.)

agensights-0.3.0/agensights/__init__.py
ADDED
@@ -0,0 +1,45 @@
```python
"""AgenSights SDK - AI Agent Observability. Zero friction."""

from .client import AgenSights
from .trace import AgentSpan, Span, Trace
from .patch import init, shutdown, get_client
from .instrument import (
    instrument_openai,
    instrument_anthropic,
    AutoTrace,
    AutoAgent,
    get_current_trace,
    get_current_agent,
)
from .integrations import (
    LangChainCallbackHandler,
    CrewAITracker,
    AutoGenTracker,
    GoogleADKTracker,
)

__all__ = [
    # Universal init (primary API — one line setup)
    "init",
    "shutdown",
    "get_client",
    # Core
    "AgenSights",
    # Instance-level instrumentation (legacy — still works)
    "instrument_openai",
    "instrument_anthropic",
    # Framework integrations
    "LangChainCallbackHandler",
    "CrewAITracker",
    "AutoGenTracker",
    "GoogleADKTracker",
    # Manual tracking (advanced)
    "Trace",
    "Span",
    "AgentSpan",
    "AutoTrace",
    "AutoAgent",
    "get_current_trace",
    "get_current_agent",
]
__version__ = "0.3.0"
```
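
This file defines the package's public surface; `init`, `shutdown`, and `get_client` come from `patch.py`. A sketch of how that module-level lifecycle might be driven, under two assumptions not documented in the README: that `get_client()` returns the global `AgenSights` client created by `init()`, and that `shutdown()` is the module-level counterpart of `client.close()`:

```python
import agensights

agensights.init(api_key="sk-dev-xxx")  # patches supported providers globally

# ... application code makes OpenAI / Anthropic / LiteLLM calls here ...

# Assumption: get_client() exposes the global client configured by init().
client = agensights.get_client()
if client is not None:
    client.flush()  # force-send anything still buffered

agensights.shutdown()  # assumption: flushes remaining events and unpatches providers
```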