agentos-python 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentos/__init__.py +58 -0
- agentos/client.py +273 -0
- agentos/compat/__init__.py +4 -0
- agentos/compat/braintrust.py +97 -0
- agentos/compat/helicone.py +72 -0
- agentos/compat/langfuse.py +317 -0
- agentos/compat/langsmith.py +79 -0
- agentos/compat/otel.py +167 -0
- agentos/compat/weave.py +93 -0
- agentos/config.py +34 -0
- agentos/decorators.py +372 -0
- agentos/events.py +449 -0
- agentos/integrations/__init__.py +15 -0
- agentos/integrations/anthropic.py +130 -0
- agentos/integrations/crewai.py +221 -0
- agentos/integrations/langchain.py +307 -0
- agentos/integrations/litellm.py +123 -0
- agentos/integrations/llamaindex.py +174 -0
- agentos/integrations/openai.py +149 -0
- agentos/integrations/pydantic_ai.py +236 -0
- agentos/py.typed +0 -0
- agentos/tracing.py +168 -0
- agentos/transport.py +285 -0
- agentos/types.py +101 -0
- agentos_python-0.1.0.dist-info/METADATA +155 -0
- agentos_python-0.1.0.dist-info/RECORD +28 -0
- agentos_python-0.1.0.dist-info/WHEEL +4 -0
- agentos_python-0.1.0.dist-info/licenses/LICENSE +21 -0
agentos/__init__.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
"""Agent OS Python SDK — AI agent observability platform.

Usage::

    from agentos import AgentOS

    client = AgentOS(api_key="aos_...")
    client.llm_call("my-agent", model="gpt-4o", system="openai")
    client.flush()
"""

# Public re-exports: sync/async clients and the env-var singleton accessor.
from agentos.client import AgentOS, AsyncAgentOS, get_client
from agentos.config import AgentOSConfig
# Decorator-based instrumentation helpers.
from agentos.decorators import observe, trace_agent, trace_context, trace_tool
# One-line wrappers that auto-instrument vendor SDK clients.
from agentos.integrations.anthropic import wrap_anthropic
from agentos.integrations.openai import wrap_openai
from agentos.tracing import TraceContext, span, trace
# Event payload types and enums shared across the SDK.
from agentos.types import (
    ErrorInfo,
    EvalEvaluator,
    EventPayload,
    EventType,
    FinishReason,
    LLMMessage,
    SecurityAction,
    SecurityAlertType,
    Severity,
    ToolStatus,
)

__version__ = "0.1.0"

# Explicit public API surface (controls ``from agentos import *``).
__all__ = [
    "__version__",
    "AgentOS",
    "AgentOSConfig",
    "AsyncAgentOS",
    "ErrorInfo",
    "EvalEvaluator",
    "EventPayload",
    "EventType",
    "FinishReason",
    "LLMMessage",
    "SecurityAction",
    "SecurityAlertType",
    "Severity",
    "ToolStatus",
    "TraceContext",
    "get_client",
    "observe",
    "span",
    "trace",
    "trace_agent",
    "trace_context",
    "trace_tool",
    "wrap_anthropic",
    "wrap_openai",
]
|
agentos/client.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
"""AgentOS client — the main public API for the SDK.
|
|
2
|
+
|
|
3
|
+
Usage::
|
|
4
|
+
|
|
5
|
+
from agentos import AgentOS
|
|
6
|
+
|
|
7
|
+
client = AgentOS(api_key="aos_...")
|
|
8
|
+
client.llm_call("my-agent", model="gpt-4o", system="openai", input_tokens=100)
|
|
9
|
+
client.flush()
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
import atexit
|
|
15
|
+
import logging
|
|
16
|
+
import os
|
|
17
|
+
from collections.abc import Generator
|
|
18
|
+
from contextlib import contextmanager
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from agentos import events as _events
|
|
22
|
+
from agentos.config import AgentOSConfig
|
|
23
|
+
from agentos.tracing import TraceContext
|
|
24
|
+
from agentos.tracing import span as _span
|
|
25
|
+
from agentos.tracing import trace as _trace
|
|
26
|
+
from agentos.transport import AsyncTransport, Transport
|
|
27
|
+
|
|
28
|
+
# Module-level logger; capture failures are logged here rather than raised.
logger = logging.getLogger("agentos")

# SDK identity stamped onto every outgoing event (see ``_enqueue``).
SDK_NAME = "agentos-python"
SDK_VERSION = "0.1.0"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class AgentOS:
    """Synchronous Agent OS client.

    All capture methods are non-blocking — events are queued and sent
    in the background. Call ``flush()`` to ensure delivery.
    """

    def __init__(
        self,
        api_key: str | None = None,
        base_url: str = "https://api.agentos.dev",
        **kwargs: Any,
    ) -> None:
        # An explicit key wins; otherwise fall back to the environment.
        key = api_key if api_key else os.environ.get("AGENTOS_API_KEY", "")
        self._config = AgentOSConfig(api_key=key, base_url=base_url, **kwargs)
        self._transport: Transport | None = None

        # Only start the background transport when actually configured.
        if self._config.enabled and self._config.api_key:
            self._transport = Transport(self._config, version=SDK_VERSION)
            atexit.register(self.shutdown)

    def _enqueue(self, event: dict[str, Any]) -> None:
        """Stamp SDK metadata and enqueue for delivery."""
        if self._transport is None:
            return
        event["sdk_name"] = SDK_NAME
        event["sdk_version"] = SDK_VERSION
        environment = self._config.environment
        if environment:
            # setdefault: never clobber an environment set by the caller.
            event.setdefault("environment", environment)
        try:
            self._transport.enqueue(event)
        except Exception:
            logger.exception("Failed to enqueue event")

    def _redact(self, kwargs: dict[str, Any]) -> None:
        """Drop prompt/response content when content capture is disabled."""
        if not self._config.capture_content:
            for field in ("input", "output"):
                kwargs.pop(field, None)

    # --- Convenience methods (one per event type) ---

    def capture(self, event: dict[str, Any]) -> None:
        """Enqueue a raw event dict."""
        self._enqueue(event)

    def llm_call(self, agent_id: str, *, model: str, system: str, **kwargs: Any) -> None:
        """Capture an LLM call event."""
        self._redact(kwargs)
        self._enqueue(_events.llm_call(agent_id, model=model, system=system, **kwargs))

    def tool_call(self, agent_id: str, *, tool_name: str, **kwargs: Any) -> None:
        """Capture a tool call event."""
        self._redact(kwargs)
        self._enqueue(_events.tool_call(agent_id, tool_name=tool_name, **kwargs))

    def handoff(self, agent_id: str, *, target_agent_id: str, **kwargs: Any) -> None:
        """Capture an agent handoff event."""
        payload = _events.handoff(agent_id, target_agent_id=target_agent_id, **kwargs)
        self._enqueue(payload)

    def retrieval_query(self, agent_id: str, *, source: str, **kwargs: Any) -> None:
        """Capture a retrieval query event."""
        payload = _events.retrieval_query(agent_id, source=source, **kwargs)
        self._enqueue(payload)

    def eval(self, agent_id: str, *, eval_name: str, score: float, **kwargs: Any) -> None:
        """Capture an evaluation event."""
        payload = _events.eval_result(agent_id, eval_name=eval_name, score=score, **kwargs)
        self._enqueue(payload)

    def security_alert(
        self, agent_id: str, *, alert_type: str, severity: str, **kwargs: Any
    ) -> None:
        """Capture a security alert event."""
        payload = _events.security_alert(
            agent_id, alert_type=alert_type, severity=severity, **kwargs
        )
        self._enqueue(payload)

    def flag_check(self, agent_id: str, *, flag_key: str, flag_value: Any, **kwargs: Any) -> None:
        """Capture a flag check event."""
        payload = _events.flag_check(
            agent_id, flag_key=flag_key, flag_value=flag_value, **kwargs
        )
        self._enqueue(payload)

    def business_event(self, agent_id: str, *, event_name: str, **kwargs: Any) -> None:
        """Capture a business event."""
        payload = _events.business_event(agent_id, event_name=event_name, **kwargs)
        self._enqueue(payload)

    # --- Trace context ---

    @contextmanager
    def trace(self, agent_id: str, **kwargs: Any) -> Generator[TraceContext, None, None]:
        """Start a trace. Yields a TraceContext."""
        with _trace(agent_id, **kwargs) as tc:
            yield tc

    @contextmanager
    def span(self, **kwargs: Any) -> Generator[TraceContext, None, None]:
        """Start a child span within the current trace."""
        with _span(**kwargs) as tc:
            yield tc

    # --- Lifecycle ---

    def flush(self) -> None:
        """Block until all queued events are sent."""
        transport = self._transport
        if transport:
            transport.flush()

    def shutdown(self) -> None:
        """Flush and close the client. Called automatically at exit."""
        transport = self._transport
        self._transport = None
        if transport:
            transport.shutdown()

    @property
    def enabled(self) -> bool:
        # Enabled means both configured on and transport successfully created.
        return self._config.enabled and self._transport is not None

    @property
    def queue_size(self) -> int:
        transport = self._transport
        return transport.queue_size if transport else 0
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
class AsyncAgentOS:
    """Asynchronous Agent OS client.

    Usage::

        client = AsyncAgentOS(api_key="aos_...")
        await client.llm_call("my-agent", model="gpt-4o", system="openai")
        await client.flush()
        await client.shutdown()
    """

    def __init__(
        self,
        api_key: str | None = None,
        base_url: str = "https://api.agentos.dev",
        **kwargs: Any,
    ) -> None:
        # An explicit key wins; otherwise fall back to the environment.
        key = api_key if api_key else os.environ.get("AGENTOS_API_KEY", "")
        self._config = AgentOSConfig(api_key=key, base_url=base_url, **kwargs)
        self._transport: AsyncTransport | None = None

        # Only start the transport when configured; no atexit hook here —
        # async shutdown must be awaited by the caller.
        if self._config.enabled and self._config.api_key:
            self._transport = AsyncTransport(self._config, version=SDK_VERSION)

    async def _enqueue(self, event: dict[str, Any]) -> None:
        """Stamp SDK metadata and enqueue for delivery."""
        if self._transport is None:
            return
        event["sdk_name"] = SDK_NAME
        event["sdk_version"] = SDK_VERSION
        environment = self._config.environment
        if environment:
            # setdefault: never clobber an environment set by the caller.
            event.setdefault("environment", environment)
        try:
            await self._transport.enqueue(event)
        except Exception:
            logger.exception("Failed to enqueue event")

    def _redact(self, kwargs: dict[str, Any]) -> None:
        """Drop prompt/response content when content capture is disabled."""
        if not self._config.capture_content:
            for field in ("input", "output"):
                kwargs.pop(field, None)

    async def capture(self, event: dict[str, Any]) -> None:
        """Enqueue a raw event dict."""
        await self._enqueue(event)

    async def llm_call(self, agent_id: str, *, model: str, system: str, **kwargs: Any) -> None:
        """Capture an LLM call event."""
        self._redact(kwargs)
        await self._enqueue(_events.llm_call(agent_id, model=model, system=system, **kwargs))

    async def tool_call(self, agent_id: str, *, tool_name: str, **kwargs: Any) -> None:
        """Capture a tool call event."""
        self._redact(kwargs)
        await self._enqueue(_events.tool_call(agent_id, tool_name=tool_name, **kwargs))

    async def handoff(self, agent_id: str, *, target_agent_id: str, **kwargs: Any) -> None:
        """Capture an agent handoff event."""
        payload = _events.handoff(agent_id, target_agent_id=target_agent_id, **kwargs)
        await self._enqueue(payload)

    async def retrieval_query(self, agent_id: str, *, source: str, **kwargs: Any) -> None:
        """Capture a retrieval query event."""
        payload = _events.retrieval_query(agent_id, source=source, **kwargs)
        await self._enqueue(payload)

    async def eval(self, agent_id: str, *, eval_name: str, score: float, **kwargs: Any) -> None:
        """Capture an evaluation event."""
        payload = _events.eval_result(agent_id, eval_name=eval_name, score=score, **kwargs)
        await self._enqueue(payload)

    async def security_alert(
        self, agent_id: str, *, alert_type: str, severity: str, **kwargs: Any
    ) -> None:
        """Capture a security alert event."""
        payload = _events.security_alert(
            agent_id, alert_type=alert_type, severity=severity, **kwargs
        )
        await self._enqueue(payload)

    async def flag_check(
        self, agent_id: str, *, flag_key: str, flag_value: Any, **kwargs: Any
    ) -> None:
        """Capture a flag check event."""
        payload = _events.flag_check(
            agent_id, flag_key=flag_key, flag_value=flag_value, **kwargs
        )
        await self._enqueue(payload)

    async def business_event(self, agent_id: str, *, event_name: str, **kwargs: Any) -> None:
        """Capture a business event."""
        payload = _events.business_event(agent_id, event_name=event_name, **kwargs)
        await self._enqueue(payload)

    async def flush(self) -> None:
        """Block until all queued events are sent."""
        transport = self._transport
        if transport:
            await transport.flush()

    async def shutdown(self) -> None:
        """Flush and close the client."""
        transport = self._transport
        self._transport = None
        if transport:
            await transport.shutdown()

    @property
    def enabled(self) -> bool:
        # Enabled means both configured on and transport successfully created.
        return self._config.enabled and self._transport is not None

    @property
    def queue_size(self) -> int:
        transport = self._transport
        return transport.queue_size if transport else 0
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
# ---------------------------------------------------------------------------
# Global singleton (env-var auto-init)
# ---------------------------------------------------------------------------

_global_client: AgentOS | None = None


def get_client() -> AgentOS | None:
    """Return the global client, auto-initializing from env vars if needed.

    Set ``AGENTOS_API_KEY`` to enable auto-init. Returns None if not configured.
    """
    global _global_client
    if _global_client is not None:
        return _global_client

    api_key = os.environ.get("AGENTOS_API_KEY")
    if not api_key:
        # Not configured — stay uninitialized so a later env change can apply.
        return None

    _global_client = AgentOS(
        api_key=api_key,
        base_url=os.environ.get("AGENTOS_BASE_URL", "https://api.agentos.dev"),
        enabled=os.environ.get("AGENTOS_ENABLED", "true").lower() == "true",
        environment=os.environ.get("AGENTOS_ENVIRONMENT"),
    )
    return _global_client
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""Braintrust-compatible API shim.
|
|
2
|
+
|
|
3
|
+
Provides ``init``, ``@traced``, ``wrap_openai``, and ``Eval``.
|
|
4
|
+
|
|
5
|
+
Usage::
|
|
6
|
+
|
|
7
|
+
from agentos.compat.braintrust import init, traced, Eval
|
|
8
|
+
|
|
9
|
+
experiment = init("my-project", api_key="aos_...")
|
|
10
|
+
|
|
11
|
+
@traced
|
|
12
|
+
def my_pipeline(input: str) -> str: ...
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from collections.abc import Callable
|
|
18
|
+
from typing import Any, TypeVar
|
|
19
|
+
|
|
20
|
+
from agentos.client import AgentOS, get_client
|
|
21
|
+
from agentos.decorators import observe
|
|
22
|
+
from agentos.integrations.openai import wrap_openai as _native_wrap_openai
|
|
23
|
+
|
|
24
|
+
# Generic callable type used to preserve decorated-function signatures.
F = TypeVar("F", bound=Callable[..., Any])
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def init(
    project: str,
    *,
    api_key: str | None = None,
    **kwargs: Any,
) -> AgentOS:
    """Initialize a Braintrust-compatible experiment.

    Returns an AgentOS client configured as a global singleton.
    """
    # Imported here to reach the module attribute holding the singleton.
    import agentos.client

    client = AgentOS(api_key=api_key if api_key else "", **kwargs)
    agentos.client._global_client = client
    return client
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def traced(fn: F) -> F:
    """Braintrust-compatible ``@traced`` decorator."""
    # Delegate to the native observe() decorator with default settings.
    decorator = observe()
    return decorator(fn)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def wrap_openai(openai_client: Any, **kwargs: Any) -> Any:
    """Braintrust-compatible ``wrap_openai``."""
    # Thin alias over the native wrapper; options pass straight through.
    wrapped = _native_wrap_openai(openai_client, **kwargs)
    return wrapped
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class Eval:
    """Braintrust-compatible eval runner.

    Usage::

        Eval(
            "project",
            data=lambda: [{"input": "2+2?", "expected": "4"}],
            task=lambda input: call_llm(input),
            scores=[lambda output, expected: float(output == expected)],
        )
    """

    def __init__(
        self,
        project: str,
        *,
        data: Callable[[], list[dict[str, Any]]],
        task: Callable[..., Any],
        scores: list[Callable[..., float]],
        api_key: str | None = None,
        **kwargs: Any,
    ) -> None:
        # Prefer the global client; fall back to an ad-hoc one if a key
        # was supplied. flush_interval=0 keeps delivery prompt for a
        # short-lived eval run.
        client = get_client()
        if client is None and api_key:
            client = AgentOS(api_key=api_key, flush_interval=0)

        for item in data():
            output = task(item.get("input"))
            expected = item.get("expected")

            for scorer in scores:
                value = scorer(output, expected)
                if client:
                    client.eval(
                        project,
                        eval_name=scorer.__name__,
                        score=float(value),
                    )

        if client:
            client.flush()
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Helicone-compatible header translator and proxy config.
|
|
2
|
+
|
|
3
|
+
Usage::
|
|
4
|
+
|
|
5
|
+
from agentos.compat.helicone import get_proxy_config, translate_headers
|
|
6
|
+
|
|
7
|
+
config = get_proxy_config(api_key="aos_...", agent_id="my-agent")
|
|
8
|
+
client = openai.OpenAI(base_url=config["base_url"], default_headers=config["headers"])
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def get_proxy_config(
    *,
    api_key: str,
    agent_id: str = "default",
    base_url: str = "https://proxy.agentos.dev/v1",
) -> dict[str, Any]:
    """Return proxy config dict matching Helicone's usage pattern.

    Returns:
        Dict with ``base_url`` and ``headers`` keys, ready to pass to OpenAI client.
    """
    headers = {
        "X-API-Key": api_key,
        "X-Agent-Id": agent_id,
    }
    return {"base_url": base_url, "headers": headers}
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def translate_headers(
    helicone_headers: dict[str, str],
    *,
    api_key: str | None = None,
) -> dict[str, str]:
    """Translate Helicone-style headers to AgentOS headers.

    Mapping:
        - ``Helicone-Auth`` → ``X-API-Key`` (extracts Bearer token)
        - ``Helicone-Session-Id`` → ``X-Session-Id``
        - ``Helicone-Property-agent`` → ``X-Agent-Id``
        - ``Helicone-Property-*`` → ``X-Property-*``

    Args:
        helicone_headers: Headers using Helicone's naming scheme
            (matched case-insensitively).
        api_key: Overrides any key extracted from ``Helicone-Auth`` and is
            used as a fallback when no auth header is present.

    Returns:
        AgentOS-style headers. Unrecognized headers are dropped.
    """
    result: dict[str, str] = {}

    for key, value in helicone_headers.items():
        lower = key.lower()

        if lower == "helicone-auth":
            # Strip an optional "Bearer " scheme prefix, any casing, from the
            # START of the value only. (A blanket str.replace would also
            # remove the substring from the middle of a token and missed
            # casings such as "BEARER".)
            token = value.strip()
            if token.lower().startswith("bearer "):
                token = token[len("bearer "):].strip()
            result["X-API-Key"] = api_key or token

        elif lower == "helicone-session-id":
            result["X-Session-Id"] = value

        elif lower.startswith("helicone-property-"):
            # Slice by prefix length so the property name keeps its
            # original casing regardless of the header's casing.
            prop_name = key[len("Helicone-Property-"):]
            if prop_name.lower() == "agent":
                result["X-Agent-Id"] = value
            else:
                result[f"X-Property-{prop_name}"] = value

    # Fallback: inject the explicit key when no auth header supplied one.
    if api_key and "X-API-Key" not in result:
        result["X-API-Key"] = api_key

    return result
|