agentbasis-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentbasis/__init__.py +87 -0
- agentbasis/client.py +134 -0
- agentbasis/config.py +33 -0
- agentbasis/context.py +259 -0
- agentbasis/decorators.py +80 -0
- agentbasis/frameworks/langchain/__init__.py +109 -0
- agentbasis/frameworks/langchain/callback.py +373 -0
- agentbasis/frameworks/pydanticai/__init__.py +32 -0
- agentbasis/frameworks/pydanticai/instrumentation.py +233 -0
- agentbasis/llms/anthropic/__init__.py +18 -0
- agentbasis/llms/anthropic/messages.py +298 -0
- agentbasis/llms/gemini/__init__.py +18 -0
- agentbasis/llms/gemini/chat.py +326 -0
- agentbasis/llms/openai/__init__.py +18 -0
- agentbasis/llms/openai/chat.py +235 -0
- agentbasis-0.1.0.dist-info/METADATA +220 -0
- agentbasis-0.1.0.dist-info/RECORD +19 -0
- agentbasis-0.1.0.dist-info/WHEEL +5 -0
- agentbasis-0.1.0.dist-info/top_level.txt +1 -0
agentbasis/frameworks/langchain/callback.py
@@ -0,0 +1,373 @@
from typing import Any, Dict, List, Optional, Sequence, Union
from uuid import UUID
import json

from opentelemetry import trace, context as otel_context
from opentelemetry.trace import Status, StatusCode, Span

from agentbasis.context import inject_context_to_span

# Try to import LangChain types. If not available, we create dummy classes
# so the code doesn't crash on import (though instrument() will check this).
try:
    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.outputs import LLMResult
    from langchain_core.documents import Document
    from langchain_core.messages import BaseMessage
except ImportError:
    class BaseCallbackHandler:
        pass
    LLMResult = Any
    Document = Any
    BaseMessage = Any


def _get_tracer():
    """Get the tracer lazily at runtime to ensure it uses the configured provider."""
    return trace.get_tracer("agentbasis.frameworks.langchain")


def _safe_json_dumps(obj: Any) -> str:
    """Safely serialize an object to JSON string, falling back to str() if needed."""
    try:
        return json.dumps(obj, default=str)
    except (TypeError, ValueError):
        return str(obj)


def _extract_llm_info(serialized: Dict[str, Any]) -> Dict[str, Any]:
    """Extract model name and other LLM info from serialized data."""
    info = {}

    # Try various paths where model name might be stored
    if "kwargs" in serialized:
        kwargs = serialized["kwargs"]
        if "model_name" in kwargs:
            info["model"] = kwargs["model_name"]
        elif "model" in kwargs:
            info["model"] = kwargs["model"]

    # Get the class name
    if "name" in serialized:
        info["class_name"] = serialized["name"]
    elif "id" in serialized and isinstance(serialized["id"], list):
        info["class_name"] = serialized["id"][-1]

    return info


def _extract_token_usage(response: Any) -> Dict[str, int]:
    """Extract token usage from LLM response."""
    usage = {}

    if hasattr(response, "llm_output") and response.llm_output:
        llm_output = response.llm_output
        if isinstance(llm_output, dict):
            # OpenAI-style token usage
            if "token_usage" in llm_output:
                token_usage = llm_output["token_usage"]
                usage["prompt_tokens"] = token_usage.get("prompt_tokens", 0)
                usage["completion_tokens"] = token_usage.get("completion_tokens", 0)
                usage["total_tokens"] = token_usage.get("total_tokens", 0)
            # Direct usage field
            elif "usage" in llm_output:
                token_usage = llm_output["usage"]
                usage["prompt_tokens"] = token_usage.get("prompt_tokens", 0)
                usage["completion_tokens"] = token_usage.get("completion_tokens", 0)
                usage["total_tokens"] = token_usage.get("total_tokens", 0)

    return usage


def _extract_response_content(response: Any) -> str:
    """Extract the text content from an LLM response."""
    if hasattr(response, "generations") and response.generations:
        # Get first generation from first prompt
        first_gen_list = response.generations[0]
        if first_gen_list:
            first_gen = first_gen_list[0]
            if hasattr(first_gen, "text"):
                return first_gen.text
            elif hasattr(first_gen, "message") and hasattr(first_gen.message, "content"):
                return first_gen.message.content
    return str(response)


class AgentBasisCallbackHandler(BaseCallbackHandler):
    """
    Callback handler for LangChain that sends telemetry to AgentBasis via OpenTelemetry.

    Supports:
    - LLM calls (on_llm_start/end/error)
    - Chain execution (on_chain_start/end/error)
    - Tool invocations (on_tool_start/end/error)
    - Retriever operations (on_retriever_start/end/error)
    - Parent-child span relationships (nested traces)

    Usage:
        from agentbasis.frameworks.langchain import AgentBasisCallbackHandler

        handler = AgentBasisCallbackHandler()
        chain.invoke({"query": "..."}, config={"callbacks": [handler]})

    Trace Structure Example:
        └─ langchain.chain.RetrievalQA
           ├─ langchain.retriever.VectorStoreRetriever
           └─ langchain.llm.ChatOpenAI
    """

    def __init__(self):
        super().__init__()
        # Track active spans by run_id to close them later
        self.spans: Dict[UUID, Span] = {}
        # Track span contexts for parent-child relationships
        self.span_contexts: Dict[UUID, otel_context.Context] = {}

    def _start_span(self, span_name: str, parent_run_id: Optional[UUID] = None) -> Span:
        """
        Start a new span, optionally as a child of a parent span.

        Automatically injects user/session context (user_id, session_id,
        conversation_id, metadata) from agentbasis.context.

        Args:
            span_name: Name for the new span
            parent_run_id: The run_id of the parent operation (if any)

        Returns:
            The newly created span
        """
        tracer = _get_tracer()

        # Check if we have a parent span to nest under
        if parent_run_id and parent_run_id in self.spans:
            parent_span = self.spans[parent_run_id]
            # Create a context with the parent span
            parent_context = trace.set_span_in_context(parent_span)
            # Start the new span as a child
            span = tracer.start_span(span_name, context=parent_context)
        else:
            # No parent, start a root span
            span = tracer.start_span(span_name)

        # Inject user/session context (user_id, session_id, etc.)
        inject_context_to_span(span)

        return span

    def _store_span(self, run_id: Optional[UUID], span: Span) -> None:
        """Store a span for later retrieval and for use as a parent."""
        if run_id:
            self.spans[run_id] = span

    def _end_span(self, run_id: Optional[UUID], status: Status) -> Optional[Span]:
        """End a span and remove it from tracking."""
        span = self.spans.pop(run_id, None) if run_id else None
        if span:
            span.set_status(status)
            span.end()
        return span

    # ==================== LLM Callbacks ====================

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Run when LLM starts running."""
        run_id = kwargs.get("run_id")
        parent_run_id = kwargs.get("parent_run_id")

        # Extract model info
        llm_info = _extract_llm_info(serialized)
        model = llm_info.get("model", "unknown")
        class_name = llm_info.get("class_name", "LLM")

        span_name = f"langchain.llm.{class_name}"
        span = self._start_span(span_name, parent_run_id)

        # Set attributes
        span.set_attribute("llm.system", "langchain")
        span.set_attribute("llm.request.model", model)
        span.set_attribute("llm.request.prompts", _safe_json_dumps(prompts))
        span.set_attribute("llm.request.prompt_count", len(prompts))

        self._store_span(run_id, span)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
        """Run when LLM ends running."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            # Extract and set response content
            content = _extract_response_content(response)
            span.set_attribute("llm.response.content", content)

            # Extract and set token usage
            usage = _extract_token_usage(response)
            if usage:
                span.set_attribute("llm.usage.prompt_tokens", usage.get("prompt_tokens", 0))
                span.set_attribute("llm.usage.completion_tokens", usage.get("completion_tokens", 0))
                span.set_attribute("llm.usage.total_tokens", usage.get("total_tokens", 0))

            span.set_status(Status(StatusCode.OK))
            span.end()

    def on_llm_error(self, error: BaseException, **kwargs: Any) -> Any:
        """Run when LLM errors."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()

    # ==================== Chain Callbacks ====================

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
        """Run when chain starts running."""
        run_id = kwargs.get("run_id")
        parent_run_id = kwargs.get("parent_run_id")

        # Get chain name from serialized data
        chain_name = serialized.get("name")
        if not chain_name and "id" in serialized and isinstance(serialized["id"], list):
            chain_name = serialized["id"][-1]
        chain_name = chain_name or "Chain"

        span_name = f"langchain.chain.{chain_name}"
        span = self._start_span(span_name, parent_run_id)

        span.set_attribute("langchain.chain.name", chain_name)
        span.set_attribute("langchain.chain.inputs", _safe_json_dumps(inputs))

        self._store_span(run_id, span)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when chain ends running."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.set_attribute("langchain.chain.outputs", _safe_json_dumps(outputs))
            span.set_status(Status(StatusCode.OK))
            span.end()

    def on_chain_error(self, error: BaseException, **kwargs: Any) -> Any:
        """Run when chain errors."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()

    # ==================== Tool Callbacks ====================

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Run when tool starts running."""
        run_id = kwargs.get("run_id")
        parent_run_id = kwargs.get("parent_run_id")

        # Get tool name
        tool_name = serialized.get("name")
        if not tool_name and "id" in serialized and isinstance(serialized["id"], list):
            tool_name = serialized["id"][-1]
        tool_name = tool_name or "Tool"

        span_name = f"langchain.tool.{tool_name}"
        span = self._start_span(span_name, parent_run_id)

        span.set_attribute("langchain.tool.name", tool_name)
        span.set_attribute("langchain.tool.input", input_str)

        # Get tool description if available
        if "description" in serialized:
            span.set_attribute("langchain.tool.description", serialized["description"])

        self._store_span(run_id, span)

    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
        """Run when tool ends running."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.set_attribute("langchain.tool.output", str(output))
            span.set_status(Status(StatusCode.OK))
            span.end()

    def on_tool_error(self, error: BaseException, **kwargs: Any) -> Any:
        """Run when tool errors."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()

    # ==================== Retriever Callbacks ====================

    def on_retriever_start(
        self, serialized: Dict[str, Any], query: str, **kwargs: Any
    ) -> Any:
        """Run when retriever starts running."""
        run_id = kwargs.get("run_id")
        parent_run_id = kwargs.get("parent_run_id")

        # Get retriever name
        retriever_name = serialized.get("name")
        if not retriever_name and "id" in serialized and isinstance(serialized["id"], list):
            retriever_name = serialized["id"][-1]
        retriever_name = retriever_name or "Retriever"

        span_name = f"langchain.retriever.{retriever_name}"
        span = self._start_span(span_name, parent_run_id)

        span.set_attribute("langchain.retriever.name", retriever_name)
        span.set_attribute("langchain.retriever.query", query)

        self._store_span(run_id, span)

    def on_retriever_end(self, documents: Sequence[Document], **kwargs: Any) -> Any:
        """Run when retriever ends running."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            # Record document count
            span.set_attribute("langchain.retriever.document_count", len(documents))

            # Extract document metadata and content summaries
            doc_summaries = []
            for i, doc in enumerate(documents[:10]):  # Limit to first 10 docs
                doc_info = {"index": i}
                if hasattr(doc, "page_content"):
                    # Truncate content for span attribute
                    content = doc.page_content[:500] + "..." if len(doc.page_content) > 500 else doc.page_content
                    doc_info["content_preview"] = content
                if hasattr(doc, "metadata") and doc.metadata:
                    doc_info["metadata"] = doc.metadata
                doc_summaries.append(doc_info)

            span.set_attribute("langchain.retriever.documents", _safe_json_dumps(doc_summaries))
            span.set_status(Status(StatusCode.OK))
            span.end()

    def on_retriever_error(self, error: BaseException, **kwargs: Any) -> Any:
        """Run when retriever errors."""
        run_id = kwargs.get("run_id")
        span = self.spans.pop(run_id, None)

        if span:
            span.record_exception(error)
            span.set_status(Status(StatusCode.ERROR, str(error)))
            span.end()
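For reference, the parent-child nesting that `_start_span()` performs can be reproduced with plain OpenTelemetry. The sketch below is illustrative only: the `ConsoleSpanExporter` setup stands in for whatever exporter `agentbasis.init()` actually configures, and the span names simply mirror the docstring's trace-structure example.

# Minimal standalone sketch of the parent-context pattern used by _start_span().
# Assumes only opentelemetry-sdk is installed; the console exporter is a
# stand-in for the exporter agentbasis.init() would configure.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer("agentbasis.frameworks.langchain")

# Root span, as created by on_chain_start when there is no parent_run_id.
chain_span = tracer.start_span("langchain.chain.RetrievalQA")

# Child span: put the parent into a Context, exactly as _start_span() does.
parent_ctx = trace.set_span_in_context(chain_span)
llm_span = tracer.start_span("langchain.llm.ChatOpenAI", context=parent_ctx)

# Callbacks end spans child-first: on_llm_end fires before on_chain_end.
llm_span.end()
chain_span.end()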
agentbasis/frameworks/pydanticai/__init__.py
@@ -0,0 +1,32 @@
"""
Pydantic AI integration for AgentBasis.

This module provides instrumentation for Pydantic AI agents, enabling
automatic tracing and observability via OpenTelemetry.

Usage:
    import agentbasis
    from agentbasis.frameworks.pydanticai import instrument

    agentbasis.init(api_key="...", agent_id="...")
    instrument()

    # All Pydantic AI agents are now traced
    from pydantic_ai import Agent
    agent = Agent("openai:gpt-4")
    result = agent.run_sync("Hello!")
"""

from .instrumentation import (
    instrument,
    get_instrumentation_settings,
    get_metadata_callback,
    create_traced_agent,
)

__all__ = [
    "instrument",
    "get_instrumentation_settings",
    "get_metadata_callback",
    "create_traced_agent",
]
agentbasis/frameworks/pydanticai/instrumentation.py
@@ -0,0 +1,233 @@
"""
Pydantic AI instrumentation for AgentBasis.

This module provides integration with Pydantic AI's built-in OpenTelemetry
instrumentation, ensuring traces flow to AgentBasis.

Pydantic AI already emits OTel traces via its instrumentation system. Since
AgentBasis sets up a global TracerProvider in agentbasis.init(), we just need
to enable Pydantic AI's instrumentation and the traces will automatically
flow to AgentBasis.
"""

from typing import Any, Callable, Dict, Optional, Union
import warnings

from agentbasis.context import get_context_attributes


# Track if we've already instrumented globally
_instrumented = False


def instrument(
    include_content: bool = True,
    include_binary_content: bool = False,
) -> None:
    """
    Enable AgentBasis instrumentation for all Pydantic AI agents.

    This configures Pydantic AI to send traces to AgentBasis via OpenTelemetry.
    Call this after agentbasis.init() and before creating any agents.

    Args:
        include_content: Whether to include prompts and completions in traces.
            Set to False for privacy (default: True)
        include_binary_content: Whether to include binary content like images.
            Can increase trace size significantly (default: False)

    Example:
        import agentbasis
        from agentbasis.frameworks.pydanticai import instrument

        agentbasis.init(api_key="...", agent_id="...")
        instrument()

        # Now all Pydantic AI agents will be traced
        from pydantic_ai import Agent
        agent = Agent("openai:gpt-4")
        result = agent.run_sync("Hello!")

    Example with privacy controls:
        instrument(include_content=False)  # Don't log prompts/completions
    """
    global _instrumented

    if _instrumented:
        return

    try:
        from pydantic_ai import Agent

        # Get instrumentation settings
        settings = get_instrumentation_settings(
            include_content=include_content,
            include_binary_content=include_binary_content,
        )

        # Enable instrumentation globally for all agents
        Agent.instrument_all(settings)

        _instrumented = True

    except ImportError:
        warnings.warn(
            "pydantic-ai is not installed. Install it with: pip install pydantic-ai",
            ImportWarning
        )
    except AttributeError:
        # Older versions of pydantic-ai might not have instrument_all
        warnings.warn(
            "Your version of pydantic-ai does not support global instrumentation. "
            "Please upgrade to the latest version or use per-agent instrumentation.",
            UserWarning
        )


def get_instrumentation_settings(
    include_content: bool = True,
    include_binary_content: bool = False,
) -> Any:
    """
    Get instrumentation settings for a specific Pydantic AI agent.

    Use this when you want to configure instrumentation per-agent
    rather than globally, or when you need custom settings.

    Args:
        include_content: Whether to include prompts and completions in traces
        include_binary_content: Whether to include binary content like images

    Returns:
        InstrumentationSettings object to pass to Agent(..., instrument=...).
        Returns True if InstrumentationSettings is not available (basic mode).

    Example:
        from agentbasis.frameworks.pydanticai import get_instrumentation_settings
        from pydantic_ai import Agent

        # Basic usage
        agent = Agent(
            "openai:gpt-4",
            instrument=get_instrumentation_settings()
        )

        # With privacy controls (no prompt/completion logging)
        agent = Agent(
            "openai:gpt-4",
            instrument=get_instrumentation_settings(include_content=False)
        )
    """
    try:
        from pydantic_ai.agent import InstrumentationSettings

        return InstrumentationSettings(
            include_content=include_content,
            include_binary_content=include_binary_content,
        )
    except ImportError:
        # If InstrumentationSettings is not available, return True for basic instrumentation
        return True


def get_metadata_callback() -> Callable[..., Dict[str, Any]]:
    """
    Get a metadata callback that injects AgentBasis context.

    This callback can be passed to Pydantic AI's metadata parameter
    to automatically include user_id, session_id, etc. in traces.

    The callback reads from AgentBasis context at runtime, so it will
    pick up any context set via agentbasis.set_user(), set_session(), etc.

    Returns:
        A callable that returns context metadata

    Example:
        import agentbasis
        from agentbasis.frameworks.pydanticai import get_metadata_callback
        from pydantic_ai import Agent

        agent = Agent(
            "openai:gpt-4",
            metadata=get_metadata_callback()
        )

        # Set context before running
        agentbasis.set_user("user-123")
        agentbasis.set_session("session-456")

        # This run will include user_id and session_id in metadata
        result = agent.run_sync("Hello!")
    """
    def _get_agentbasis_metadata(*args, **kwargs) -> Dict[str, Any]:
        """
        Callback function that retrieves AgentBasis context attributes.

        This is called by Pydantic AI before each agent run to get metadata.
        """
        # Get current context from AgentBasis
        context_attrs = get_context_attributes()

        # Convert to a simpler format for Pydantic AI metadata
        metadata = {}

        if "agentbasis.user.id" in context_attrs:
            metadata["user_id"] = context_attrs["agentbasis.user.id"]

        if "agentbasis.session.id" in context_attrs:
            metadata["session_id"] = context_attrs["agentbasis.session.id"]

        if "agentbasis.conversation.id" in context_attrs:
            metadata["conversation_id"] = context_attrs["agentbasis.conversation.id"]

        if "agentbasis.metadata" in context_attrs:
            # Include custom metadata (already JSON serialized)
            metadata["custom"] = context_attrs["agentbasis.metadata"]

        return metadata

    return _get_agentbasis_metadata


def create_traced_agent(
    model: str,
    include_content: bool = True,
    **agent_kwargs
) -> Any:
    """
    Convenience function to create a Pydantic AI agent with AgentBasis tracing.

    This creates an agent with instrumentation and metadata callback
    pre-configured for AgentBasis.

    Args:
        model: The model to use (e.g., "openai:gpt-4", "anthropic:claude-3-opus")
        include_content: Whether to include prompts/completions in traces
        **agent_kwargs: Additional arguments to pass to Agent()

    Returns:
        A configured Pydantic AI Agent

    Example:
        from agentbasis.frameworks.pydanticai import create_traced_agent

        agent = create_traced_agent(
            "openai:gpt-4",
            system_prompt="You are a helpful assistant."
        )
        result = agent.run_sync("Hello!")
    """
    try:
        from pydantic_ai import Agent

        return Agent(
            model,
            instrument=get_instrumentation_settings(include_content=include_content),
            metadata=get_metadata_callback(),
            **agent_kwargs
        )
    except ImportError:
        raise ImportError(
            "pydantic-ai is not installed. Install it with: pip install pydantic-ai"
        )
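Since the mapping from context attributes to metadata keys is the main job of _get_agentbasis_metadata(), a quick way to see it in action is to call the returned callback directly. This is a hedged sketch: it assumes agentbasis is installed and that set_user()/set_session() populate the "agentbasis.user.id"/"agentbasis.session.id" attributes as the docstrings above describe.

# Hedged sketch: inspect what the metadata callback returns once context is set.
# Assumes agentbasis is installed and configured (e.g. agentbasis.init() called).
import agentbasis
from agentbasis.frameworks.pydanticai import get_metadata_callback

agentbasis.set_user("user-123")
agentbasis.set_session("session-456")

callback = get_metadata_callback()
# Expected shape, per _get_agentbasis_metadata above, roughly:
# {'user_id': 'user-123', 'session_id': 'session-456'}
print(callback())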
agentbasis/llms/anthropic/__init__.py
@@ -0,0 +1,18 @@
from .messages import instrument_messages, instrument_async_messages


def instrument():
    """
    Auto-instruments the Anthropic SDK (both sync and async).
    Call this function after `agentbasis.init()` and before using `anthropic`.

    This instruments:
    - Anthropic().messages.create() (sync)
    - AsyncAnthropic().messages.create() (async)
    """
    try:
        import anthropic
        instrument_messages(anthropic)
        instrument_async_messages(anthropic)
    except ImportError:
        pass
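The actual patching logic lives in agentbasis/llms/anthropic/messages.py (+298 lines, not reproduced in this excerpt). As an illustration only, wrap-style instrumentation of a messages.create-like callable typically looks like the following; _wrap_create and the attribute names here are assumptions for the sketch, not the package's code.

# Illustrative only: a generic wrap-and-trace pattern for a messages.create-style
# callable. The real instrument_messages() implementation is in messages.py.
import functools
from opentelemetry import trace

def _wrap_create(original_create):
    """Hypothetical helper: wrap an LLM-call method in an OpenTelemetry span."""
    @functools.wraps(original_create)
    def traced_create(*args, **kwargs):
        tracer = trace.get_tracer("agentbasis.llms.anthropic")
        with tracer.start_as_current_span("anthropic.messages.create") as span:
            span.set_attribute("llm.request.model", str(kwargs.get("model", "unknown")))
            return original_create(*args, **kwargs)
    return traced_create

# Demo against a stand-in, so the sketch runs without the anthropic SDK:
def fake_create(**kwargs):
    return {"role": "assistant", "content": "ok"}

print(_wrap_create(fake_create)(model="claude-3-opus", max_tokens=16))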