getstacklens 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- getstacklens/__init__.py +350 -0
- getstacklens/_client.py +118 -0
- getstacklens/exceptions.py +25 -0
- getstacklens/prompts.py +61 -0
- getstacklens/py.typed +0 -0
- getstacklens/tracer.py +306 -0
- getstacklens-0.0.1.dist-info/METADATA +196 -0
- getstacklens-0.0.1.dist-info/RECORD +10 -0
- getstacklens-0.0.1.dist-info/WHEEL +4 -0
- getstacklens-0.0.1.dist-info/licenses/LICENSE +31 -0
getstacklens/__init__.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
1
|
+
"""
|
|
2
|
+
StackLens Python SDK
|
|
3
|
+
====================
|
|
4
|
+
|
|
5
|
+
Observability and governance for your AI stack.
|
|
6
|
+
|
|
7
|
+
Quickstart::
|
|
8
|
+
|
|
9
|
+
import getstacklens
|
|
10
|
+
|
|
11
|
+
getstacklens.configure(api_key="sl-xxxx")
|
|
12
|
+
getstacklens.trace("my-llm-call", model="gpt-4o", provider="openai",
|
|
13
|
+
input_tokens=150, output_tokens=200)
|
|
14
|
+
|
|
15
|
+
Full example with context manager::
|
|
16
|
+
|
|
17
|
+
with getstacklens.start_trace("agent-run") as span:
|
|
18
|
+
response = openai_client.chat.completions.create(
|
|
19
|
+
model="gpt-4o",
|
|
20
|
+
messages=[{"role": "user", "content": "Hello"}],
|
|
21
|
+
)
|
|
22
|
+
span.record_llm(
|
|
23
|
+
model="gpt-4o",
|
|
24
|
+
provider="openai",
|
|
25
|
+
input_tokens=response.usage.prompt_tokens,
|
|
26
|
+
output_tokens=response.usage.completion_tokens,
|
|
27
|
+
completion=response.choices[0].message.content,
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
Docs: https://getstacklens.ai/docs
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
from __future__ import annotations
|
|
34
|
+
|
|
35
|
+
from contextlib import asynccontextmanager, contextmanager
|
|
36
|
+
from datetime import datetime
|
|
37
|
+
from typing import AsyncIterator, Iterator
|
|
38
|
+
|
|
39
|
+
from .exceptions import (
|
|
40
|
+
ApiError,
|
|
41
|
+
AuthError,
|
|
42
|
+
ConfigurationError,
|
|
43
|
+
NetworkError,
|
|
44
|
+
StackLensError,
|
|
45
|
+
)
|
|
46
|
+
from .prompts import AsyncPromptsClient, PromptsClient
|
|
47
|
+
from .tracer import AsyncTracer, Span, Tracer
|
|
48
|
+
|
|
49
|
+
__version__ = "0.0.1"

# Names exported by ``from getstacklens import *`` — the public SDK surface.
__all__ = [
    "configure",
    "trace",
    "start_trace",
    "prompts",
    "atrace",
    "astart_trace",
    "aprompts",
    "Tracer",
    "AsyncTracer",
    "Span",
    "PromptsClient",
    "AsyncPromptsClient",
    "StackLensError",
    "ConfigurationError",
    "AuthError",
    "ApiError",
    "NetworkError",
]

# Default API base URL for the hosted StackLens service; overridable via
# ``configure(endpoint=...)`` for self-hosted deployments.
_DEFAULT_ENDPOINT = "https://api.getstacklens.ai"

# Module-level singletons used by the top-level helper functions.
# All four remain None until ``configure()`` is called.
_tracer: Tracer | None = None
_prompts_client: PromptsClient | None = None
_async_tracer: AsyncTracer | None = None
_async_prompts_client: AsyncPromptsClient | None = None
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def configure(api_key: str, endpoint: str = _DEFAULT_ENDPOINT) -> None:
    """
    Configure the StackLens SDK.

    Call this once at application startup, before any tracing or prompt
    calls. It builds the module-level sync and async tracer/prompt clients
    that the top-level helper functions delegate to.

    Args:
        api_key: Your StackLens API key (starts with ``sl-``). Generate one
            from the StackLens dashboard under Settings → API Keys.
        endpoint: Override the API base URL for self-hosted deployments.
            Defaults to ``https://api.getstacklens.ai``.

    Example::

        import getstacklens
        getstacklens.configure(api_key="sl-xxxx")

        # Self-hosted:
        getstacklens.configure(api_key="sl-xxxx", endpoint="https://api.your-domain.com")
    """
    global _tracer, _prompts_client, _async_tracer, _async_prompts_client
    client_kwargs = {"api_key": api_key, "endpoint": endpoint}
    _tracer = Tracer(**client_kwargs)
    _prompts_client = PromptsClient(**client_kwargs)
    _async_tracer = AsyncTracer(**client_kwargs)
    _async_prompts_client = AsyncPromptsClient(**client_kwargs)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _require_tracer() -> Tracer:
    # Guard helper: give a clear, actionable error when configure() was
    # never called instead of an AttributeError on None.
    if _tracer is not None:
        return _tracer
    raise ConfigurationError(
        "StackLens is not configured. "
        "Call getstacklens.configure(api_key='sl-...') before tracing."
    )
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def trace(
    name: str,
    *,
    model: str,
    provider: str,
    input_tokens: int,
    output_tokens: int,
    total_tokens: int | None = None,
    cost_usd: float = 0.0,
    attributes: dict[str, str] | None = None,
    tags: list[str] | None = None,
    status: str = "ok",
    start_time: datetime | None = None,
    end_time: datetime | None = None,
) -> str:
    """
    Record a single LLM call and send it to StackLens. Returns the trace ID.

    This is the simplest tracing path — one call, no context managers needed.

    Args:
        name: A descriptive name for this operation (e.g. ``"chat-completion"``).
        model: The model name (e.g. ``"gpt-4o"``, ``"claude-3-5-sonnet"``).
        provider: The provider name (e.g. ``"openai"``, ``"anthropic"``, ``"gemini"``).
        input_tokens: Number of input/prompt tokens.
        output_tokens: Number of output/completion tokens.
        total_tokens: Total tokens (computed from input + output if omitted).
        cost_usd: Estimated cost in USD.
        attributes: Arbitrary key-value pairs attached to the span.
        tags: String tags for filtering in the dashboard.
        status: ``'ok'`` (default) or ``'error'``.
        start_time: When the LLM call started. Record before the call for
            accurate latency.
        end_time: When the LLM call ended. Defaults to now if omitted.

    Returns:
        The trace ID string.

    Example::

        start = datetime.now(timezone.utc)
        response = client.chat.completions.create(...)
        getstacklens.trace(
            "my-llm-call",
            model="gpt-4o",
            provider="openai",
            input_tokens=150,
            output_tokens=200,
            start_time=start,
        )
    """
    tracer = _require_tracer()
    # Forward everything to the configured tracer unchanged.
    call_kwargs = dict(
        model=model,
        provider=provider,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        cost_usd=cost_usd,
        attributes=attributes,
        tags=tags,
        status=status,
        start_time=start_time,
        end_time=end_time,
    )
    return tracer.record(name, **call_kwargs)
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
@contextmanager
def start_trace(name: str) -> Iterator[Span]:
    """
    Context manager for tracing a multi-step or agent operation.

    The span is sent to StackLens when the context exits. If an exception
    is raised, the span status is automatically set to ``'error'``.

    Args:
        name: A descriptive name for this trace (e.g. ``"agent-run"``).

    Yields:
        :class:`~getstacklens.tracer.Span` — call
        :meth:`~getstacklens.tracer.Span.record_llm` on it to attach
        LLM metadata.

    Example::

        with getstacklens.start_trace("agent-run") as span:
            response = openai_client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": "Hello"}],
            )
            span.record_llm(
                model="gpt-4o",
                provider="openai",
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens,
                completion=response.choices[0].message.content,
            )
    """
    tracer = _require_tracer()
    with tracer.start_trace(name) as span:
        yield span
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def _require_async_tracer() -> AsyncTracer:
    # Guard helper: give a clear, actionable error when configure() was
    # never called instead of an AttributeError on None.
    if _async_tracer is not None:
        return _async_tracer
    raise ConfigurationError(
        "StackLens is not configured. "
        "Call getstacklens.configure(api_key='sl-...') before tracing."
    )
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
async def atrace(
    name: str,
    *,
    model: str,
    provider: str,
    input_tokens: int,
    output_tokens: int,
    total_tokens: int | None = None,
    cost_usd: float = 0.0,
    attributes: dict[str, str] | None = None,
    tags: list[str] | None = None,
    status: str = "ok",
    start_time: datetime | None = None,
    end_time: datetime | None = None,
) -> str:
    """
    Async version of :func:`trace`. Record a single LLM call. Returns the trace ID.

    Use this in asyncio / FastAPI applications instead of the sync :func:`trace`.

    Example::

        trace_id = await getstacklens.atrace(
            "chat-completion",
            model="gpt-4o",
            provider="openai",
            input_tokens=150,
            output_tokens=200,
        )
    """
    tracer = _require_async_tracer()
    # Forward everything to the configured async tracer unchanged.
    call_kwargs = dict(
        model=model,
        provider=provider,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        cost_usd=cost_usd,
        attributes=attributes,
        tags=tags,
        status=status,
        start_time=start_time,
        end_time=end_time,
    )
    return await tracer.arecord(name, **call_kwargs)
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
@asynccontextmanager
async def astart_trace(name: str) -> AsyncIterator[Span]:
    """
    Async context manager for tracing a multi-step or agent operation.

    Use this in asyncio / FastAPI applications instead of the sync
    :func:`start_trace`.

    Example::

        async with getstacklens.astart_trace("agent-run") as span:
            response = await async_client.chat.completions.create(...)
            span.record_llm(
                model="gpt-4o",
                provider="openai",
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens,
            )
    """
    tracer = _require_async_tracer()
    async with tracer.astart_trace(name) as span:
        yield span
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
class _PromptsNamespace:
    """Access FlowOps versioned prompts. Use ``getstacklens.prompts.get()``."""

    def get(self, name: str, *, env: str = "production") -> str:
        """
        Fetch the active prompt for the given name and environment.

        Args:
            name: The prompt name as configured in FlowOps.
            env: The environment — ``'dev'``, ``'staging'``, or
                ``'production'`` (default).

        Returns:
            The prompt content string.

        Raises:
            ConfigurationError: If :func:`configure` has not been called.

        Example::

            system_prompt = getstacklens.prompts.get("support-system-prompt")
            user_prompt = getstacklens.prompts.get("onboarding-email", env="staging")
        """
        client = _prompts_client
        if client is None:
            raise ConfigurationError(
                "StackLens is not configured. "
                "Call getstacklens.configure(api_key='sl-...') first."
            )
        return client.get(name, env=env)


# Singleton namespace exposed as ``getstacklens.prompts``.
prompts = _PromptsNamespace()
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
class _AsyncPromptsNamespace:
    """Async access to FlowOps versioned prompts. Use ``await getstacklens.aprompts.get()``."""

    async def get(self, name: str, *, env: str = "production") -> str:
        """
        Async version of :meth:`_PromptsNamespace.get`.

        Args:
            name: The prompt name as configured in FlowOps.
            env: The environment — ``'dev'``, ``'staging'``, or
                ``'production'`` (default).

        Returns:
            The prompt content string.

        Raises:
            ConfigurationError: If :func:`configure` has not been called.

        Example::

            system_prompt = await getstacklens.aprompts.get("support-system-prompt")
        """
        client = _async_prompts_client
        if client is None:
            raise ConfigurationError(
                "StackLens is not configured. "
                "Call getstacklens.configure(api_key='sl-...') first."
            )
        return await client.get(name, env=env)


# Singleton namespace exposed as ``getstacklens.aprompts``.
aprompts = _AsyncPromptsNamespace()
|
getstacklens/_client.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
"""Internal HTTP client. Not part of the public API."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import atexit
|
|
7
|
+
import time
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
|
|
11
|
+
from .exceptions import ApiError, AuthError, NetworkError
|
|
12
|
+
|
|
13
|
+
# Per-request timeout (seconds) applied to both sync and async httpx clients.
_DEFAULT_TIMEOUT = 10.0
# Total attempts per request: the initial try plus one retry.
_RETRY_ATTEMPTS = 2
_RETRY_DELAY = 0.5  # seconds; only applied on 5xx and network errors
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _parse_response(resp: httpx.Response) -> dict:
|
|
19
|
+
"""Raise the appropriate SDK exception or return the parsed JSON body."""
|
|
20
|
+
if resp.status_code == 401:
|
|
21
|
+
raise AuthError("Invalid API key.")
|
|
22
|
+
if resp.status_code == 403:
|
|
23
|
+
raise AuthError("API key does not have the required scope.")
|
|
24
|
+
if not resp.is_success:
|
|
25
|
+
try:
|
|
26
|
+
msg = resp.json().get("error", resp.text)
|
|
27
|
+
except Exception:
|
|
28
|
+
msg = resp.text
|
|
29
|
+
raise ApiError(resp.status_code, msg)
|
|
30
|
+
return resp.json()
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class _HttpClient:
    """Synchronous HTTP transport for the StackLens API.

    Retries once on 5xx responses, timeouts, and network errors (see
    ``_RETRY_ATTEMPTS`` / ``_RETRY_DELAY``); 4xx responses are never retried.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._base = endpoint.rstrip("/")
        self._http = httpx.Client(
            headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
            timeout=_DEFAULT_TIMEOUT,
        )
        # Best-effort cleanup of the connection pool at interpreter exit.
        # NOTE(review): this also keeps each instance alive until exit —
        # acceptable for the configure() singleton; verify if many clients
        # are ever created.
        atexit.register(self.close)

    def post(self, path: str, payload: dict) -> dict:
        return self._request("POST", path, json=payload)

    def get(self, path: str, params: dict | None = None) -> dict:
        return self._request("GET", path, params=params)

    def _request(self, method: str, path: str, **kwargs) -> dict:
        url = f"{self._base}{path}"
        failure: Exception | None = None
        for attempt in range(1, _RETRY_ATTEMPTS + 1):
            retryable = attempt < _RETRY_ATTEMPTS
            try:
                resp = self._http.request(method, url, **kwargs)
            except (httpx.TimeoutException, httpx.NetworkError) as exc:
                failure = exc
                if retryable:
                    time.sleep(_RETRY_DELAY)
                continue
            # Retry on server errors (5xx) but not on client errors (4xx).
            if resp.status_code >= 500 and retryable:
                time.sleep(_RETRY_DELAY)
                continue
            return _parse_response(resp)
        # Every attempt raised a transport error.
        raise NetworkError(
            f"Could not reach StackLens API at {self._base}: {failure}"
        ) from failure

    def close(self) -> None:
        try:
            self._http.close()
        except Exception:
            pass
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class _AsyncHttpClient:
    """Async HTTP transport for the StackLens API.

    Same retry policy as :class:`_HttpClient`: one retry on 5xx responses,
    timeouts, and network errors; 4xx responses are never retried.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._base = endpoint.rstrip("/")
        self._http = httpx.AsyncClient(
            headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
            timeout=_DEFAULT_TIMEOUT,
        )

    async def post(self, path: str, payload: dict) -> dict:
        return await self._request("POST", path, json=payload)

    async def get(self, path: str, params: dict | None = None) -> dict:
        return await self._request("GET", path, params=params)

    async def _request(self, method: str, path: str, **kwargs) -> dict:
        url = f"{self._base}{path}"
        failure: Exception | None = None
        for attempt in range(1, _RETRY_ATTEMPTS + 1):
            retryable = attempt < _RETRY_ATTEMPTS
            try:
                resp = await self._http.request(method, url, **kwargs)
            except (httpx.TimeoutException, httpx.NetworkError) as exc:
                failure = exc
                if retryable:
                    await asyncio.sleep(_RETRY_DELAY)
                continue
            # Retry on server errors (5xx) but not on client errors (4xx).
            if resp.status_code >= 500 and retryable:
                await asyncio.sleep(_RETRY_DELAY)
                continue
            return _parse_response(resp)
        # Every attempt raised a transport error.
        raise NetworkError(
            f"Could not reach StackLens API at {self._base}: {failure}"
        ) from failure

    async def close(self) -> None:
        try:
            await self._http.aclose()
        except Exception:
            pass
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""StackLens SDK exceptions."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class StackLensError(Exception):
    """Base exception for all StackLens SDK errors."""


class ConfigurationError(StackLensError):
    """Raised when the SDK is not configured. Call getstacklens.configure() first."""


class AuthError(StackLensError):
    """Raised when the API key is invalid or does not have the required scope."""


class ApiError(StackLensError):
    """Raised when the StackLens API returns an error response.

    Attributes:
        status_code: HTTP status code returned by the API.
        message: Raw error message extracted from the response body.
    """

    def __init__(self, status_code: int, message: str) -> None:
        self.status_code = status_code
        # Keep the raw message accessible programmatically; previously it was
        # only recoverable by parsing str(exc).
        self.message = message
        super().__init__(f"API error {status_code}: {message}")


class NetworkError(StackLensError):
    """Raised when the StackLens API cannot be reached (timeout, DNS, connection refused)."""
|
getstacklens/prompts.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""FlowOps prompt client."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ._client import _AsyncHttpClient, _HttpClient
|
|
6
|
+
|
|
7
|
+
# FlowOps endpoint for looking up the active prompt by its configured name.
_PROMPTS_PATH = "/api/v1/flowops/v1/prompts/by-name"
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class PromptsClient:
    """
    FlowOps client for fetching versioned prompts at runtime.

    Prefer using the module-level ``getstacklens.prompts.get()`` helper over
    instantiating this directly.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._http = _HttpClient(api_key=api_key, endpoint=endpoint)

    def get(self, name: str, *, env: str = "production") -> str:
        """
        Fetch the active prompt content for the given name and environment.

        Args:
            name: The prompt name as configured in FlowOps.
            env: The environment — ``'dev'``, ``'staging'``, or
                ``'production'`` (default).

        Returns:
            The prompt content string.

        Example::

            system_prompt = getstacklens.prompts.get("support-system-prompt", env="production")
        """
        body = self._http.get(f"{_PROMPTS_PATH}/{name}", params={"env": env})
        return body["content"]
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class AsyncPromptsClient:
    """
    Async FlowOps client for fetching versioned prompts at runtime.

    Prefer using the module-level ``getstacklens.aprompts.get()`` helper over
    instantiating this directly.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._http = _AsyncHttpClient(api_key=api_key, endpoint=endpoint)

    async def get(self, name: str, *, env: str = "production") -> str:
        """
        Async version of :meth:`PromptsClient.get`.

        Example::

            system_prompt = await getstacklens.aprompts.get("support-system-prompt")
        """
        body = await self._http.get(f"{_PROMPTS_PATH}/{name}", params={"env": env})
        return body["content"]
|
getstacklens/py.typed
ADDED
|
File without changes
|
getstacklens/tracer.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
"""StackTrace tracing client."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import uuid
|
|
6
|
+
from contextlib import asynccontextmanager, contextmanager
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from typing import AsyncIterator, Iterator
|
|
9
|
+
|
|
10
|
+
from ._client import _AsyncHttpClient, _HttpClient
|
|
11
|
+
|
|
12
|
+
# StackTrace ingestion endpoint; span payloads are POSTed here.
_TRACES_PATH = "/api/v1/stacktrace/v1/traces"
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class Span:
|
|
16
|
+
"""
|
|
17
|
+
A single span within a trace.
|
|
18
|
+
|
|
19
|
+
Create spans via :func:`getstacklens.start_trace` rather than instantiating directly.
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
def __init__(
|
|
23
|
+
self,
|
|
24
|
+
name: str,
|
|
25
|
+
trace_id: str,
|
|
26
|
+
span_id: str,
|
|
27
|
+
parent_span_id: str | None,
|
|
28
|
+
) -> None:
|
|
29
|
+
self._name = name
|
|
30
|
+
self._trace_id = trace_id
|
|
31
|
+
self._span_id = span_id
|
|
32
|
+
self._parent_span_id = parent_span_id
|
|
33
|
+
self._start = datetime.now(timezone.utc)
|
|
34
|
+
self._llm: dict | None = None
|
|
35
|
+
self._attributes: dict[str, str] = {}
|
|
36
|
+
self._tags: list[str] = []
|
|
37
|
+
self._status = "ok"
|
|
38
|
+
|
|
39
|
+
def set_attribute(self, key: str, value: str) -> "Span":
|
|
40
|
+
"""Attach a key-value attribute to this span."""
|
|
41
|
+
self._attributes[str(key)] = str(value)
|
|
42
|
+
return self
|
|
43
|
+
|
|
44
|
+
def add_tag(self, *tags: str) -> "Span":
|
|
45
|
+
"""Add one or more string tags to this span."""
|
|
46
|
+
self._tags.extend(tags)
|
|
47
|
+
return self
|
|
48
|
+
|
|
49
|
+
def set_status(self, status: str) -> "Span":
|
|
50
|
+
"""Set span status: ``'ok'`` (default) or ``'error'``."""
|
|
51
|
+
self._status = status
|
|
52
|
+
return self
|
|
53
|
+
|
|
54
|
+
def record_llm(
|
|
55
|
+
self,
|
|
56
|
+
*,
|
|
57
|
+
model: str,
|
|
58
|
+
provider: str,
|
|
59
|
+
input_tokens: int,
|
|
60
|
+
output_tokens: int,
|
|
61
|
+
total_tokens: int | None = None,
|
|
62
|
+
cost_usd: float = 0.0,
|
|
63
|
+
temperature: float | None = None,
|
|
64
|
+
max_tokens: int | None = None,
|
|
65
|
+
prompt: str | None = None,
|
|
66
|
+
completion: str | None = None,
|
|
67
|
+
is_streaming: bool = False,
|
|
68
|
+
finish_reason: str | None = None,
|
|
69
|
+
) -> "Span":
|
|
70
|
+
"""
|
|
71
|
+
Record LLM call metadata for this span.
|
|
72
|
+
|
|
73
|
+
Example::
|
|
74
|
+
|
|
75
|
+
with getstacklens.start_trace("chat") as span:
|
|
76
|
+
response = openai_client.chat.completions.create(...)
|
|
77
|
+
span.record_llm(
|
|
78
|
+
model="gpt-4o",
|
|
79
|
+
provider="openai",
|
|
80
|
+
input_tokens=response.usage.prompt_tokens,
|
|
81
|
+
output_tokens=response.usage.completion_tokens,
|
|
82
|
+
)
|
|
83
|
+
"""
|
|
84
|
+
self._llm = {
|
|
85
|
+
"model": model,
|
|
86
|
+
"provider": provider,
|
|
87
|
+
"inputTokens": input_tokens,
|
|
88
|
+
"outputTokens": output_tokens,
|
|
89
|
+
"totalTokens": total_tokens
|
|
90
|
+
if total_tokens is not None
|
|
91
|
+
else input_tokens + output_tokens,
|
|
92
|
+
"estimatedCostUsd": cost_usd,
|
|
93
|
+
"temperature": temperature,
|
|
94
|
+
"maxTokens": max_tokens,
|
|
95
|
+
"promptContent": prompt,
|
|
96
|
+
"completionContent": completion,
|
|
97
|
+
"isStreaming": is_streaming,
|
|
98
|
+
"finishReason": finish_reason,
|
|
99
|
+
}
|
|
100
|
+
return self
|
|
101
|
+
|
|
102
|
+
def _to_payload(self) -> dict:
|
|
103
|
+
end = datetime.now(timezone.utc)
|
|
104
|
+
return {
|
|
105
|
+
"traceId": self._trace_id,
|
|
106
|
+
"spanId": self._span_id,
|
|
107
|
+
"parentSpanId": self._parent_span_id,
|
|
108
|
+
"name": self._name,
|
|
109
|
+
"kind": "llm" if self._llm else "internal",
|
|
110
|
+
"status": self._status,
|
|
111
|
+
"startTime": self._start.isoformat(),
|
|
112
|
+
"endTime": end.isoformat(),
|
|
113
|
+
"attributes": self._attributes,
|
|
114
|
+
"tags": self._tags,
|
|
115
|
+
"llmSpan": self._llm,
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class Tracer:
    """
    StackTrace client.

    Prefer using the module-level :func:`getstacklens.trace` and
    :func:`getstacklens.start_trace` helpers over instantiating this directly.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._http = _HttpClient(api_key=api_key, endpoint=endpoint)

    def record(
        self,
        name: str,
        *,
        model: str,
        provider: str,
        input_tokens: int,
        output_tokens: int,
        total_tokens: int | None = None,
        cost_usd: float = 0.0,
        attributes: dict[str, str] | None = None,
        tags: list[str] | None = None,
        status: str = "ok",
        start_time: datetime | None = None,
        end_time: datetime | None = None,
    ) -> str:
        """
        Send a single LLM span. Returns the trace ID.

        For accurate latency, record ``start_time`` before the LLM call and
        ``end_time`` after::

            start = datetime.now(timezone.utc)
            response = client.chat.completions.create(...)
            tracer.record("chat", model="gpt-4o", provider="openai",
                          input_tokens=..., output_tokens=...,
                          start_time=start, end_time=datetime.now(timezone.utc))

        If omitted, both timestamps are set to the moment ``record()`` is
        called and the span will show 0 ms duration.
        """
        trace_id = str(uuid.uuid4())
        span_id = str(uuid.uuid4())
        recorded_at = datetime.now(timezone.utc)
        payload = {
            "traceId": trace_id,
            "spanId": span_id,
            "parentSpanId": None,
            "name": name,
            "kind": "llm",
            "status": status,
            "startTime": (start_time or recorded_at).isoformat(),
            "endTime": (end_time or recorded_at).isoformat(),
            "attributes": attributes or {},
            "tags": tags or [],
            "llmSpan": {
                "model": model,
                "provider": provider,
                "inputTokens": input_tokens,
                "outputTokens": output_tokens,
                # Default total to the sum when the caller did not supply it.
                "totalTokens": total_tokens
                if total_tokens is not None
                else input_tokens + output_tokens,
                "estimatedCostUsd": cost_usd,
                "temperature": None,
                "maxTokens": None,
                "promptContent": None,
                "completionContent": None,
                "isStreaming": False,
                "finishReason": None,
            },
        }
        self._http.post(_TRACES_PATH, payload)
        return trace_id

    @contextmanager
    def start_trace(self, name: str) -> Iterator[Span]:
        """
        Context manager for tracing a multi-step or agent operation.

        Flushes the span to StackLens when the context exits (including on
        error). A flush failure is raised to the caller only on a clean exit;
        if the body itself raised, the flush failure is suppressed so it
        cannot mask the application's exception.

        Example::

            with tracer.start_trace("my-agent-run") as span:
                response = client.chat.completions.create(...)
                span.record_llm(model="gpt-4o", provider="openai",
                                input_tokens=150, output_tokens=200)
        """
        span = Span(
            name=name,
            trace_id=str(uuid.uuid4()),
            span_id=str(uuid.uuid4()),
            parent_span_id=None,
        )
        flush_error: Exception | None = None
        try:
            yield span
        except Exception:
            span.set_status("error")
            raise
        finally:
            # BUGFIX: posting directly in ``finally`` meant a NetworkError /
            # ApiError raised by the flush would REPLACE an exception already
            # propagating from the user's code. Capture the flush failure
            # here and only surface it after a clean exit.
            try:
                self._http.post(_TRACES_PATH, span._to_payload())
            except Exception as exc:
                flush_error = exc
        if flush_error is not None:
            raise flush_error
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
class AsyncTracer:
    """
    Async StackTrace client for use with asyncio / FastAPI / async frameworks.

    Prefer the module-level :func:`getstacklens.atrace` and
    :func:`getstacklens.astart_trace` helpers over instantiating this directly.
    """

    def __init__(self, api_key: str, endpoint: str) -> None:
        self._http = _AsyncHttpClient(api_key=api_key, endpoint=endpoint)

    async def arecord(
        self,
        name: str,
        *,
        model: str,
        provider: str,
        input_tokens: int,
        output_tokens: int,
        total_tokens: int | None = None,
        cost_usd: float = 0.0,
        attributes: dict[str, str] | None = None,
        tags: list[str] | None = None,
        status: str = "ok",
        start_time: datetime | None = None,
        end_time: datetime | None = None,
    ) -> str:
        """Async version of :meth:`Tracer.record`. Returns the trace ID.

        If ``start_time``/``end_time`` are omitted, both default to the moment
        this method is called and the span will show 0 ms duration.
        """
        trace_id = str(uuid.uuid4())
        span_id = str(uuid.uuid4())
        recorded_at = datetime.now(timezone.utc)
        payload = {
            "traceId": trace_id,
            "spanId": span_id,
            "parentSpanId": None,
            "name": name,
            "kind": "llm",
            "status": status,
            "startTime": (start_time or recorded_at).isoformat(),
            "endTime": (end_time or recorded_at).isoformat(),
            "attributes": attributes or {},
            "tags": tags or [],
            "llmSpan": {
                "model": model,
                "provider": provider,
                "inputTokens": input_tokens,
                "outputTokens": output_tokens,
                # Default total to the sum when the caller did not supply it.
                "totalTokens": total_tokens
                if total_tokens is not None
                else input_tokens + output_tokens,
                "estimatedCostUsd": cost_usd,
                "temperature": None,
                "maxTokens": None,
                "promptContent": None,
                "completionContent": None,
                "isStreaming": False,
                "finishReason": None,
            },
        }
        await self._http.post(_TRACES_PATH, payload)
        return trace_id

    @asynccontextmanager
    async def astart_trace(self, name: str) -> AsyncIterator[Span]:
        """
        Async context manager for tracing a multi-step or agent operation.

        Flushes the span to StackLens when the context exits (including on
        error). A flush failure is raised to the caller only on a clean exit;
        if the body itself raised, the flush failure is suppressed so it
        cannot mask the application's exception.

        Example::

            async with tracer.astart_trace("my-agent-run") as span:
                response = await async_openai_client.chat.completions.create(...)
                span.record_llm(model="gpt-4o", provider="openai",
                                input_tokens=150, output_tokens=200)
        """
        span = Span(
            name=name,
            trace_id=str(uuid.uuid4()),
            span_id=str(uuid.uuid4()),
            parent_span_id=None,
        )
        flush_error: Exception | None = None
        try:
            yield span
        except Exception:
            span.set_status("error")
            raise
        finally:
            # BUGFIX: posting directly in ``finally`` meant a NetworkError /
            # ApiError raised by the flush would REPLACE an exception already
            # propagating from the user's code. Capture the flush failure
            # here and only surface it after a clean exit.
            try:
                await self._http.post(_TRACES_PATH, span._to_payload())
            except Exception as exc:
                flush_error = exc
        if flush_error is not None:
            raise flush_error
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: getstacklens
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Python SDK for StackLens — observability and governance for your AI stack
|
|
5
|
+
Project-URL: Homepage, https://getstacklens.ai
|
|
6
|
+
Project-URL: Documentation, https://getstacklens.ai/docs
|
|
7
|
+
Project-URL: Repository, https://github.com/getstacklens-ai/stacklens-sdk-python
|
|
8
|
+
Project-URL: Bug Tracker, https://github.com/getstacklens-ai/stacklens-sdk-python/issues
|
|
9
|
+
License: StackLens Source-Available License
|
|
10
|
+
|
|
11
|
+
Copyright (c) 2026 StackLens Private Limited. All rights reserved.
|
|
12
|
+
|
|
13
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
14
|
+
of this software and associated files (the "Software"), to use and run it
|
|
15
|
+
for personal or commercial purposes, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
PERMITTED:
|
|
18
|
+
- Using and running the Software as-is
|
|
19
|
+
- Integrating the Software into your own applications
|
|
20
|
+
- Reading and referencing the source code
|
|
21
|
+
- Submitting corrections or improvements via pull request to the official
|
|
22
|
+
repository at https://github.com/getstacklens-ai/stacklens-sdk-python
|
|
23
|
+
|
|
24
|
+
NOT PERMITTED without prior written permission from StackLens Private Limited:
|
|
25
|
+
- Modifying or creating derivative works based on the Software
|
|
26
|
+
- Redistributing the Software, in whole or in part
|
|
27
|
+
- Sublicensing the Software
|
|
28
|
+
- Selling the Software or incorporating it into a competing product or service
|
|
29
|
+
- Publishing a fork or modified copy of the Software
|
|
30
|
+
|
|
31
|
+
CONTRIBUTIONS:
|
|
32
|
+
All contributions submitted via pull request are made under the terms of this
|
|
33
|
+
license. By submitting a pull request, you agree to transfer copyright of your
|
|
34
|
+
contribution to StackLens Private Limited and grant StackLens Private Limited the right to use,
|
|
35
|
+
modify, and publish your contribution under any license.
|
|
36
|
+
|
|
37
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
38
|
+
IMPLIED. IN NO EVENT SHALL STACKLENS PRIVATE LIMITED BE LIABLE FOR ANY CLAIM, DAMAGES,
|
|
39
|
+
OR OTHER LIABILITY ARISING FROM USE OF THE SOFTWARE.
|
|
40
|
+
License-File: LICENSE
|
|
41
|
+
Keywords: ai,governance,llm,observability,prompt-management,tracing
|
|
42
|
+
Classifier: Development Status :: 3 - Alpha
|
|
43
|
+
Classifier: Intended Audience :: Developers
|
|
44
|
+
Classifier: License :: Other/Proprietary License
|
|
45
|
+
Classifier: Programming Language :: Python :: 3
|
|
46
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
47
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
48
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
49
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
50
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
51
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
52
|
+
Classifier: Typing :: Typed
|
|
53
|
+
Requires-Python: >=3.9
|
|
54
|
+
Requires-Dist: httpx>=0.27
|
|
55
|
+
Provides-Extra: dev
|
|
56
|
+
Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
|
|
57
|
+
Requires-Dist: pytest>=8; extra == 'dev'
|
|
58
|
+
Requires-Dist: respx>=0.21; extra == 'dev'
|
|
59
|
+
Description-Content-Type: text/markdown
|
|
60
|
+
|
|
61
|
+
# stacklens-sdk-python
|
|
62
|
+
|
|
63
|
+
Python SDK for [StackLens](https://getstacklens.ai) — observability and governance for your AI stack.
|
|
64
|
+
|
|
65
|
+
Trace LLM calls, fetch versioned prompts, and enforce AI governance policies — in three lines of Python.
|
|
66
|
+
|
|
67
|
+
## Installation
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
pip install getstacklens
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
Requires Python 3.9+.
|
|
74
|
+
|
|
75
|
+
## Quickstart
|
|
76
|
+
|
|
77
|
+
```python
|
|
78
|
+
import getstacklens
|
|
79
|
+
|
|
80
|
+
getstacklens.configure(api_key="sl-xxxx")
|
|
81
|
+
getstacklens.trace("my-llm-call", model="gpt-4o", provider="openai", input_tokens=150, output_tokens=200)
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
Get your API key from the [StackLens dashboard](https://app.getstacklens.ai) under **Settings → API Keys**.
|
|
85
|
+
|
|
86
|
+
## Tracing LLM calls
|
|
87
|
+
|
|
88
|
+
### Simple trace (one line)
|
|
89
|
+
|
|
90
|
+
For accurate latency, record `start_time` before the call and pass it in:
|
|
91
|
+
|
|
92
|
+
```python
|
|
93
|
+
from datetime import datetime, timezone
|
|
94
|
+
import getstacklens
|
|
95
|
+
|
|
96
|
+
getstacklens.configure(api_key="sl-xxxx")
|
|
97
|
+
|
|
98
|
+
start = datetime.now(timezone.utc)
|
|
99
|
+
response = openai_client.chat.completions.create(
|
|
100
|
+
model="gpt-4o",
|
|
101
|
+
messages=[{"role": "user", "content": "Summarise this document."}],
|
|
102
|
+
)
|
|
103
|
+
getstacklens.trace(
|
|
104
|
+
"chat-completion",
|
|
105
|
+
model="gpt-4o",
|
|
106
|
+
provider="openai",
|
|
107
|
+
input_tokens=response.usage.prompt_tokens,
|
|
108
|
+
output_tokens=response.usage.completion_tokens,
|
|
109
|
+
start_time=start,
|
|
110
|
+
)
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
### Context manager (recommended for agent workflows)
|
|
114
|
+
|
|
115
|
+
```python
|
|
116
|
+
import openai
|
|
117
|
+
import getstacklens
|
|
118
|
+
|
|
119
|
+
getstacklens.configure(api_key="sl-xxxx")
|
|
120
|
+
client = openai.OpenAI()
|
|
121
|
+
|
|
122
|
+
with getstacklens.start_trace("customer-support-agent") as span:
|
|
123
|
+
response = client.chat.completions.create(
|
|
124
|
+
model="gpt-4o",
|
|
125
|
+
messages=[{"role": "user", "content": "How do I reset my password?"}],
|
|
126
|
+
)
|
|
127
|
+
span.record_llm(
|
|
128
|
+
model="gpt-4o",
|
|
129
|
+
provider="openai",
|
|
130
|
+
input_tokens=response.usage.prompt_tokens,
|
|
131
|
+
output_tokens=response.usage.completion_tokens,
|
|
132
|
+
completion=response.choices[0].message.content,
|
|
133
|
+
)
|
|
134
|
+
span.set_attribute("user_id", "u_123")
|
|
135
|
+
span.add_tag("support", "production")
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
If an exception is raised inside the context, the span status is automatically set to `error`.
|
|
139
|
+
|
|
140
|
+
## Fetching versioned prompts (FlowOps)
|
|
141
|
+
|
|
142
|
+
Manage prompts in the StackLens dashboard, then fetch them at runtime — no deploys needed.
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
import getstacklens
|
|
146
|
+
|
|
147
|
+
getstacklens.configure(api_key="sl-xxxx")
|
|
148
|
+
|
|
149
|
+
# Fetch the active prompt for the production environment
|
|
150
|
+
system_prompt = getstacklens.prompts.get("support-system-prompt", env="production")
|
|
151
|
+
|
|
152
|
+
# Use in an LLM call
|
|
153
|
+
response = client.chat.completions.create(
|
|
154
|
+
model="gpt-4o",
|
|
155
|
+
messages=[
|
|
156
|
+
{"role": "system", "content": system_prompt},
|
|
157
|
+
{"role": "user", "content": user_message},
|
|
158
|
+
],
|
|
159
|
+
)
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
Available environments: `"dev"`, `"staging"`, `"production"` (default).
|
|
163
|
+
|
|
164
|
+
## Self-hosted deployments
|
|
165
|
+
|
|
166
|
+
Point the SDK at your own StackLens instance:
|
|
167
|
+
|
|
168
|
+
```python
|
|
169
|
+
getstacklens.configure(
|
|
170
|
+
api_key="sl-xxxx",
|
|
171
|
+
endpoint="https://api.your-domain.com",
|
|
172
|
+
)
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
See the [self-hosting guide](https://getstacklens.ai/docs/self-hosting) for setup instructions.
|
|
176
|
+
|
|
177
|
+
## Supported providers
|
|
178
|
+
|
|
179
|
+
Works with any LLM provider — pass the model and provider name you use:
|
|
180
|
+
|
|
181
|
+
| Provider | `provider` value |
|
|
182
|
+
|---|---|
|
|
183
|
+
| OpenAI | `"openai"` |
|
|
184
|
+
| Anthropic | `"anthropic"` |
|
|
185
|
+
| Google Gemini | `"gemini"` |
|
|
186
|
+
| Azure OpenAI | `"azure-openai"` |
|
|
187
|
+
| AWS Bedrock | `"bedrock"` |
|
|
188
|
+
| Any other | any string |
|
|
189
|
+
|
|
190
|
+
## Links
|
|
191
|
+
|
|
192
|
+
- [Documentation](https://getstacklens.ai/docs)
|
|
193
|
+
- [StackLens Platform](https://getstacklens.ai)
|
|
194
|
+
- [Dashboard](https://app.getstacklens.ai)
|
|
195
|
+
- [GitHub](https://github.com/getstacklens-ai/stacklens-sdk-python)
|
|
196
|
+
- [Report an issue](https://github.com/getstacklens-ai/stacklens-sdk-python/issues)
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
getstacklens/__init__.py,sha256=3SyL_xckUtM9DNnNJr8XWeC6fL6KqzWzeQsAl2Rs6Bo,10528
|
|
2
|
+
getstacklens/_client.py,sha256=UUR2JQg0etl8vYK7YdJQLVZZaBnJqvSJGO_vH2sDx08,4216
|
|
3
|
+
getstacklens/exceptions.py,sha256=M3Z0G15167NK4eCNuA0yVWFoughT5AjEqXXMzK4nnC8,767
|
|
4
|
+
getstacklens/prompts.py,sha256=Iafs6ESP8yhdNjBNa6yJQOOe4KQcnjc5JplVo2mK39o,1878
|
|
5
|
+
getstacklens/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
+
getstacklens/tracer.py,sha256=ZFh8JtOs6CmH-7j735-Wg46htLpATF1Xhm1RQd5u4HU,10203
|
|
7
|
+
getstacklens-0.0.1.dist-info/METADATA,sha256=CGtUJP4SMv546oL7e8NacBftM8-TCJfqc4o0tWAy7nI,6663
|
|
8
|
+
getstacklens-0.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
9
|
+
getstacklens-0.0.1.dist-info/licenses/LICENSE,sha256=UpKdprvuKX8dASu6LYRCSuLf8NiwLU8QCW0INa3wrZI,1505
|
|
10
|
+
getstacklens-0.0.1.dist-info/RECORD,,
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
StackLens Source-Available License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 StackLens Private Limited. All rights reserved.
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated files (the "Software"), to use and run it
|
|
7
|
+
for personal or commercial purposes, subject to the following conditions:
|
|
8
|
+
|
|
9
|
+
PERMITTED:
|
|
10
|
+
- Using and running the Software as-is
|
|
11
|
+
- Integrating the Software into your own applications
|
|
12
|
+
- Reading and referencing the source code
|
|
13
|
+
- Submitting corrections or improvements via pull request to the official
|
|
14
|
+
repository at https://github.com/getstacklens-ai/stacklens-sdk-python
|
|
15
|
+
|
|
16
|
+
NOT PERMITTED without prior written permission from StackLens Private Limited:
|
|
17
|
+
- Modifying or creating derivative works based on the Software
|
|
18
|
+
- Redistributing the Software, in whole or in part
|
|
19
|
+
- Sublicensing the Software
|
|
20
|
+
- Selling the Software or incorporating it into a competing product or service
|
|
21
|
+
- Publishing a fork or modified copy of the Software
|
|
22
|
+
|
|
23
|
+
CONTRIBUTIONS:
|
|
24
|
+
All contributions submitted via pull request are made under the terms of this
|
|
25
|
+
license. By submitting a pull request, you agree to transfer copyright of your
|
|
26
|
+
contribution to StackLens Private Limited and grant StackLens Private Limited the right to use,
|
|
27
|
+
modify, and publish your contribution under any license.
|
|
28
|
+
|
|
29
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
30
|
+
IMPLIED. IN NO EVENT SHALL STACKLENS PRIVATE LIMITED BE LIABLE FOR ANY CLAIM, DAMAGES,
|
|
31
|
+
OR OTHER LIABILITY ARISING FROM USE OF THE SOFTWARE.
|