agentbasis 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentbasis/__init__.py +87 -0
- agentbasis/client.py +134 -0
- agentbasis/config.py +33 -0
- agentbasis/context.py +259 -0
- agentbasis/decorators.py +80 -0
- agentbasis/frameworks/langchain/__init__.py +109 -0
- agentbasis/frameworks/langchain/callback.py +373 -0
- agentbasis/frameworks/pydanticai/__init__.py +32 -0
- agentbasis/frameworks/pydanticai/instrumentation.py +233 -0
- agentbasis/llms/anthropic/__init__.py +18 -0
- agentbasis/llms/anthropic/messages.py +298 -0
- agentbasis/llms/gemini/__init__.py +18 -0
- agentbasis/llms/gemini/chat.py +326 -0
- agentbasis/llms/openai/__init__.py +18 -0
- agentbasis/llms/openai/chat.py +235 -0
- agentbasis-0.1.0.dist-info/METADATA +220 -0
- agentbasis-0.1.0.dist-info/RECORD +19 -0
- agentbasis-0.1.0.dist-info/WHEEL +5 -0
- agentbasis-0.1.0.dist-info/top_level.txt +1 -0
agentbasis/llms/openai/chat.py
@@ -0,0 +1,235 @@
from typing import Any, Generator, AsyncGenerator
import functools
import time
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode, Span

from agentbasis.context import inject_context_to_span


def _get_tracer():
    """
    Get the tracer lazily at runtime.
    This ensures the tracer is retrieved after agentbasis.init() has configured the provider.
    """
    return trace.get_tracer("agentbasis.llms.openai")


def _set_request_attributes(span: Span, model: str, messages: list, is_streaming: bool = False):
    """
    Set common request attributes on a span.
    """
    # Inject user/session context
    inject_context_to_span(span)

    span.set_attribute("llm.system", "openai")
    span.set_attribute("llm.request.model", model)
    span.set_attribute("llm.request.messages", str(messages))
    if is_streaming:
        span.set_attribute("llm.request.streaming", True)


def _set_response_attributes(span: Span, response):
    """
    Set common response attributes on a span (for non-streaming responses).
    """
    if response.choices:
        content = response.choices[0].message.content
        span.set_attribute("llm.response.content", str(content))

    if response.usage:
        span.set_attribute("llm.usage.prompt_tokens", response.usage.prompt_tokens)
        span.set_attribute("llm.usage.completion_tokens", response.usage.completion_tokens)
        span.set_attribute("llm.usage.total_tokens", response.usage.total_tokens)


def _wrap_sync_stream(stream, span: Span, start_time: float) -> Generator:
    """
    Wrap a synchronous streaming response to track chunks and finalize span.
    """
    content_parts = []
    chunk_count = 0
    first_token_time = None

    try:
        for chunk in stream:
            chunk_count += 1

            # Track time to first token
            if first_token_time is None:
                first_token_time = time.time()
                span.set_attribute("llm.response.first_token_ms",
                                   int((first_token_time - start_time) * 1000))

            # Extract content from chunk
            if chunk.choices and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if hasattr(delta, 'content') and delta.content:
                    content_parts.append(delta.content)

            yield chunk

        # Stream complete - finalize span
        full_content = "".join(content_parts)
        span.set_attribute("llm.response.content", full_content)
        span.set_attribute("llm.response.chunk_count", chunk_count)
        span.set_status(Status(StatusCode.OK))

    except Exception as e:
        span.record_exception(e)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        raise
    finally:
        span.end()


async def _wrap_async_stream(stream, span: Span, start_time: float) -> AsyncGenerator:
    """
    Wrap an asynchronous streaming response to track chunks and finalize span.
    """
    content_parts = []
    chunk_count = 0
    first_token_time = None

    try:
        async for chunk in stream:
            chunk_count += 1

            # Track time to first token
            if first_token_time is None:
                first_token_time = time.time()
                span.set_attribute("llm.response.first_token_ms",
                                   int((first_token_time - start_time) * 1000))

            # Extract content from chunk
            if chunk.choices and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if hasattr(delta, 'content') and delta.content:
                    content_parts.append(delta.content)

            yield chunk

        # Stream complete - finalize span
        full_content = "".join(content_parts)
        span.set_attribute("llm.response.content", full_content)
        span.set_attribute("llm.response.chunk_count", chunk_count)
        span.set_status(Status(StatusCode.OK))

    except Exception as e:
        span.record_exception(e)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        raise
    finally:
        span.end()


def instrument_chat(openai_module: Any):
    """
    Instruments the synchronous OpenAI Chat Completions API with OpenTelemetry.
    Handles both regular and streaming responses.
    """
    try:
        from openai.resources.chat.completions import Completions
    except ImportError:
        return

    original_create = Completions.create

    @functools.wraps(original_create)
    def wrapped_create(self, *args, **kwargs):
        tracer = _get_tracer()
        model = kwargs.get("model", "unknown")
        messages = kwargs.get("messages", [])
        is_streaming = kwargs.get("stream", False)

        span_name = f"openai.chat.completions.create {model}"

        if is_streaming:
            # For streaming, we need to manually manage the span lifecycle
            span = tracer.start_span(span_name)
            start_time = time.time()
            _set_request_attributes(span, model, messages, is_streaming=True)

            try:
                stream = original_create(self, *args, **kwargs)
                # Return wrapped generator that will finalize span when exhausted
                return _wrap_sync_stream(stream, span, start_time)
            except Exception as e:
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                span.end()
                raise
        else:
            # Non-streaming: use context manager as before
            with tracer.start_as_current_span(span_name) as span:
                _set_request_attributes(span, model, messages)

                try:
                    response = original_create(self, *args, **kwargs)
                    _set_response_attributes(span, response)
                    span.set_status(Status(StatusCode.OK))
                    return response

                except Exception as e:
                    span.record_exception(e)
                    span.set_status(Status(StatusCode.ERROR, str(e)))
                    raise

    Completions.create = wrapped_create


def instrument_async_chat(openai_module: Any):
    """
    Instruments the asynchronous OpenAI Chat Completions API with OpenTelemetry.
    Handles both regular and streaming responses.
    """
    try:
        from openai.resources.chat.completions import AsyncCompletions
    except ImportError:
        return

    original_async_create = AsyncCompletions.create

    @functools.wraps(original_async_create)
    async def wrapped_async_create(self, *args, **kwargs):
        tracer = _get_tracer()
        model = kwargs.get("model", "unknown")
        messages = kwargs.get("messages", [])
        is_streaming = kwargs.get("stream", False)

        span_name = f"openai.chat.completions.create {model}"

        if is_streaming:
            # For streaming, we need to manually manage the span lifecycle
            span = tracer.start_span(span_name)
            start_time = time.time()
            span.set_attribute("llm.request.async", True)
            _set_request_attributes(span, model, messages, is_streaming=True)

            try:
                stream = await original_async_create(self, *args, **kwargs)
                # Return wrapped async generator that will finalize span when exhausted
                return _wrap_async_stream(stream, span, start_time)
            except Exception as e:
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                span.end()
                raise
        else:
            # Non-streaming: use context manager as before
            with tracer.start_as_current_span(span_name) as span:
                span.set_attribute("llm.request.async", True)
                _set_request_attributes(span, model, messages)

                try:
                    response = await original_async_create(self, *args, **kwargs)
                    _set_response_attributes(span, response)
                    span.set_status(Status(StatusCode.OK))
                    return response

                except Exception as e:
                    span.record_exception(e)
                    span.set_status(Status(StatusCode.ERROR, str(e)))
                    raise

    AsyncCompletions.create = wrapped_async_create
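A note on the streaming path above: `wrapped_create` cannot finalize the span when it returns, since no tokens have arrived yet; it hands the open span to `_wrap_sync_stream`, whose `finally` block ends the span only once the caller exhausts (or abandons) the generator. A minimal sketch of that lifecycle, driving the private helper directly with stand-in chunk objects (the `_Fake*` classes are illustrative, not part of the package):

```python
# Illustrative: _Fake* classes mimic OpenAI stream chunks; any configured
# OTel provider (e.g. via agentbasis.init()) will record the span.
import time

from opentelemetry import trace

from agentbasis.llms.openai.chat import _wrap_sync_stream  # helper shown above


class _FakeDelta:
    def __init__(self, content):
        self.content = content


class _FakeChoice:
    def __init__(self, content):
        self.delta = _FakeDelta(content)


class _FakeChunk:
    def __init__(self, content):
        self.choices = [_FakeChoice(content)]


span = trace.get_tracer("demo").start_span("demo.stream")
chunks = iter([_FakeChunk("Hel"), _FakeChunk("lo")])
for chunk in _wrap_sync_stream(chunks, span, time.time()):
    pass  # consume the stream; the span ends in the helper's finally block
# The span now carries llm.response.content="Hello", chunk_count=2, status OK.
```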
agentbasis-0.1.0.dist-info/METADATA
@@ -0,0 +1,220 @@
Metadata-Version: 2.4
Name: agentbasis
Version: 0.1.0
Summary: Management & Observability SDK for AI Agents
Author-email: AgentBasis <support@agentbasis.co>
Maintainer-email: AgentBasis <support@agentbasis.co>
Project-URL: Homepage, https://agentbasis.co
Project-URL: Documentation, https://docs.agentbasis.co
Project-URL: Repository, https://github.com/AgentBasis/agentbasis-python-sdk
Project-URL: Bug Tracker, https://github.com/AgentBasis/agentbasis-python-sdk/issues
Keywords: ai,agents,observability,tracing,opentelemetry,llm,openai,anthropic,langchain,monitoring,telemetry
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Monitoring
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: requests>=2.25.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: opentelemetry-api>=1.20.0
Requires-Dist: opentelemetry-sdk>=1.20.0
Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.20.0

# AgentBasis Python SDK

**Management & Observability SDK for AI Agents in production**

The **AgentBasis Python SDK** provides a simple, lightweight way to track the performance, traces, sessions, and behavior of AI agents. It sends data using the **OpenTelemetry (OTel)** standard, making it compatible with AgentBasis and other observability backends.

This is the **foundation SDK** that enables deep observability for coded agents built with:
- Pure Python
- LLM Providers:
  - OpenAI
  - Anthropic
  - Gemini
- Frameworks:
  - LangChain
  - Pydantic AI

## Installation

```bash
pip install agentbasis
```

## Quick Start

### 1. Initialize the SDK
Start by initializing the SDK with your API key and Agent ID. This usually goes at the top of your main application file.

```python
import agentbasis

# Initialize with your API Key and Agent ID
agentbasis.init(
    api_key="your-api-key-here",
    agent_id="your-agent-id-here"
)
```

### 2. Manual Tracking (Decorators)
Use the `@trace` decorator to automatically track any function.

```python
from agentbasis import trace

@trace
def chat_with_user(query):
    # Your agent logic here
    return "Response to: " + query

# When you call this, data is automatically sent to AgentBasis
chat_with_user("Hello world")
```
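Conceptually, a tracing decorator like this wraps the function body in an OpenTelemetry span, mirroring the pattern the instrumentation in `agentbasis/llms/openai/chat.py` uses for LLM calls. A minimal sketch (not the SDK's actual source; `trace_sketch` and the tracer name are illustrative):

```python
# Illustrative sketch of a tracing decorator; not agentbasis's real implementation.
import functools

from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode


def trace_sketch(func):
    tracer = trace.get_tracer("sketch")  # hypothetical tracer name

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # One span per call, named after the function
        with tracer.start_as_current_span(func.__name__) as span:
            try:
                result = func(*args, **kwargs)
                span.set_status(Status(StatusCode.OK))
                return result
            except Exception as e:
                # Record the failure on the span, then re-raise
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise

    return wrapper
```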
### 3. OpenAI Integration
Automatically track all your OpenAI calls (models, tokens, prompts) with one line of code.

```python
from agentbasis.llms.openai import instrument

# Enable OpenAI instrumentation
instrument()

# Now just use the OpenAI client as normal
from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}]
)
```
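Streaming calls are traced as well; per the instrumentation in `agentbasis/llms/openai/chat.py` (included in this release), the span stays open until the stream is consumed and additionally records time-to-first-token and chunk count. Continuing the snippet above:

```python
# Streaming: the span ends when the stream is fully consumed.
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for chunk in stream:
    pass  # the trace gains llm.response.first_token_ms and llm.response.chunk_count
```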
### 4. Anthropic Integration
Automatically track all your Anthropic Claude calls.

```python
from agentbasis.llms.anthropic import instrument

# Enable Anthropic instrumentation
instrument()

# Now just use the Anthropic client as normal
from anthropic import Anthropic
client = Anthropic()
response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello"}]
)
```

### 5. LangChain Integration
Track chains, tools, retrievers, and LLM calls in LangChain with full parent-child span relationships.

```python
from agentbasis.frameworks.langchain import get_callback_handler

# Create a callback handler
handler = get_callback_handler()

# Pass it to your LangChain calls
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4")
response = llm.invoke("Hello world", config={"callbacks": [handler]})
```

For chains and agents, pass the callback handler in the config:

```python
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from agentbasis.frameworks.langchain import get_callback_config

# Use get_callback_config() for convenience
chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("{query}"))
result = chain.invoke({"query": "What is AI?"}, config=get_callback_config())
```

### 6. Pydantic AI Integration
Track Pydantic AI agents with built-in OpenTelemetry support.

```python
from agentbasis.frameworks.pydanticai import instrument

# Enable global instrumentation for all Pydantic AI agents
instrument()

# Your agents are now automatically traced
from pydantic_ai import Agent
agent = Agent("openai:gpt-4")
result = agent.run_sync("Hello!")
```

For per-agent control with user context:

```python
from agentbasis.frameworks.pydanticai import create_traced_agent

# Create an agent pre-configured with tracing and context
agent = create_traced_agent(
    "openai:gpt-4",
    system_prompt="You are a helpful assistant."
)

# Set user context - it will be included in traces
agentbasis.set_user("user-123")
result = agent.run_sync("Hello!")
```

### 7. Track Users & Sessions (Optional)
Associate traces with specific users and sessions to debug issues and see per-user analytics.

```python
# Set the current user (from your auth system)
agentbasis.set_user(current_user.id)

# Optionally set session and conversation IDs
agentbasis.set_session("session-abc")
agentbasis.set_conversation("conv-123")

# All subsequent LLM calls will be tagged with this context
response = client.chat.completions.create(...)
```

Or use the context manager for scoped context:

```python
from agentbasis import context

with context(user_id="user-123", session_id="session-abc"):
    # All traces in this block include the context
    response = client.chat.completions.create(...)
```
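For spans you create yourself, the same user/session context can be attached with `inject_context_to_span` from `agentbasis.context`, the helper the LLM instrumentation in this package calls internally. A sketch (the tracer name is arbitrary):

```python
# Sketch: attach the current user/session context to a custom span.
from opentelemetry import trace

from agentbasis.context import inject_context_to_span

with trace.get_tracer("my-app").start_as_current_span("custom.step") as span:
    inject_context_to_span(span)  # tags the span with the active context
    ...  # your logic here
```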
## Core Concepts

- **OpenTelemetry**: We use OTel under the hood for maximum compatibility.
- **Spans**: Every action (function call, LLM request) is recorded as a Span.
- **Transport**: Data is batched and sent asynchronously to the AgentBasis backend.
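Because the spans are standard OTel, other OpenTelemetry tooling can observe them too. A minimal sketch, assuming `agentbasis.init()` installs an OpenTelemetry SDK `TracerProvider` as the global provider (SDK providers expose `add_span_processor`):

```python
# Assumes the global provider is an OTel SDK TracerProvider set up by agentbasis.init().
from opentelemetry import trace
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = trace.get_tracer_provider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
# Instrumented calls now also print their spans to stdout.
```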
## Documentation

For full documentation, visit [docs.agentbasis.co](https://docs.agentbasis.co).

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and guidelines.

## License

MIT License - see [LICENSE](LICENSE) for details.
agentbasis-0.1.0.dist-info/RECORD
@@ -0,0 +1,19 @@
agentbasis/__init__.py,sha256=CskAzB1oO9YdoHkr6e4pizEtB3k8N9lMqpfv8yoslzY,2406
agentbasis/client.py,sha256=70Z_TDNA5MjqQtz6ncJ0rRQJxyJR89qjWt9OUK_mIg8,4759
agentbasis/config.py,sha256=PxhhcBrLA6E8UR03R2KiPqq7KV8mCdydo87i1SkaveA,1354
agentbasis/context.py,sha256=zrHHlNa2-84eiHMrAguBXgIkLZ9xMF1TYalzf1FgsBs,7959
agentbasis/decorators.py,sha256=jO0sWu4YaGBo0EPdgdMqM-7BPKxDw3-idWEgG-RowEo,2736
agentbasis/frameworks/langchain/__init__.py,sha256=M0y_Ylh1Os62rVZwRkFdzwNxD_rFrybUDdvrNANzvTs,3385
agentbasis/frameworks/langchain/callback.py,sha256=wYQ5h2BPOmIYUtqmTeS8mBoPTE9_UC0eN9KrN2N0jvU,14788
agentbasis/frameworks/pydanticai/__init__.py,sha256=t2A0ptvypQnd_wU9fRODpnUJA8Ez3DRhTCs-kVIRkLQ,785
agentbasis/frameworks/pydanticai/instrumentation.py,sha256=opW5OYq_FJ3zXQLdpoNdKCeX4op22R9cXhsU3Oa3HOg,7993
agentbasis/llms/anthropic/__init__.py,sha256=kGSmvlZv9OC8-qJOzZhGfkoNveXEvdpCfBxtqIvkCUM,538
agentbasis/llms/anthropic/messages.py,sha256=Uz1042c_bbpAcoE-9jOOH2CMQJ94x98COzWCsYC4Jno,11922
agentbasis/llms/gemini/__init__.py,sha256=My4V_p4CeckB7JW4HIy_JSjJTia17GlgZ3HCpHgoDpA,648
agentbasis/llms/gemini/chat.py,sha256=2J2nEvTkeuI6qXDJdM6Cq5vopmVb5wQn0Ls6cU-2-xM,13039
agentbasis/llms/openai/__init__.py,sha256=ciehM0Qnl0HKcmnvrSvH62orE-iY_Z-l0XD56qEOCkg,594
agentbasis/llms/openai/chat.py,sha256=HHrjuElQX4UALLJuSnu9fNJl6EssIYXiOJ5CLNpg1WQ,8773
agentbasis-0.1.0.dist-info/METADATA,sha256=djYSYzRXAhJ8RUWUUXX96-tI2SPjEZzHZyFO9A43n5w,6721
agentbasis-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
agentbasis-0.1.0.dist-info/top_level.txt,sha256=tqdyIQGlh-ZaQ4joP96SeZof0zSV1RwWbqrSQWl3ztc,11
agentbasis-0.1.0.dist-info/RECORD,,
agentbasis-0.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
agentbasis