oopstrace 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
oopstrace/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ from .client import (
2
+ OopstraceClient,
3
+ extract_context,
4
+ flush,
5
+ get_prompt,
6
+ init,
7
+ inject_headers,
8
+ span,
9
+ trace,
10
+ )
11
+
12
+ __all__ = [
13
+ "OopstraceClient",
14
+ "extract_context",
15
+ "flush",
16
+ "get_prompt",
17
+ "init",
18
+ "inject_headers",
19
+ "span",
20
+ "trace",
21
+ ]
oopstrace/client.py ADDED
@@ -0,0 +1,577 @@
1
+ """Oopstrace Python SDK — manual instrumentation with background batching.
2
+
3
+ Usage:
4
+ import oopstrace
5
+
6
+ client = oopstrace.OopstraceClient(api_key="tr_...", project_id="my-project")
7
+
8
+ @client.trace
9
+ def my_agent(user_input: str) -> str:
10
+ with client.span("call-llm", span_kind="LLM"):
11
+ ...
12
+
13
+ # Or use module-level helpers after init:
14
+ oopstrace.init(api_key="tr_...", project_id="my-project")
15
+
16
+ @oopstrace.trace
17
+ def my_fn(): ...
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ import base64
23
+ import contextlib
24
+ import functools
25
+ import inspect
26
+ import json
27
+ import logging
28
+ import os
29
+ import queue
30
+ import threading
31
+ import time
32
+ import uuid
33
+ from contextlib import contextmanager
34
+ from contextvars import ContextVar
35
+ from dataclasses import dataclass, field
36
+ from datetime import UTC, datetime
37
+ from typing import Any, Callable, Generator
38
+
39
+ import httpx
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+ _DEFAULT_ENDPOINT = "https://api.oopstrace.dev"
44
+ _FLUSH_INTERVAL = 2.0 # seconds
45
+ _MAX_BATCH = 100
46
+
47
+
48
+ @dataclass
49
+ class _SpanData:
50
+ span_id: str
51
+ trace_id: str
52
+ parent_span_id: str | None
53
+ name: str
54
+ span_kind: str
55
+ start_ns: int
56
+ end_ns: int | None = None
57
+ status: str = "OK"
58
+ status_message: str | None = None
59
+ input: str | None = None
60
+ output: str | None = None
61
+ metadata: dict | None = None
62
+ model_name: str | None = None
63
+ input_tokens: int | None = None
64
+ output_tokens: int | None = None
65
+ user_id: str | None = None
66
+ session_id: str | None = None
67
+ attributes: dict = field(default_factory=dict)
68
+
69
+
70
+ @dataclass
71
+ class _TraceContext:
72
+ trace_id: str
73
+ root_span_id: str
74
+ current_span_id: str
75
+ user_id: str | None = None
76
+ session_id: str | None = None
77
+
78
+
79
+ _ctx_var: ContextVar[_TraceContext | None] = ContextVar("oopstrace_ctx", default=None)
80
+
81
+
82
+ def _to_b64(hex_str: str) -> str:
83
+ return base64.b64encode(bytes.fromhex(hex_str)).decode()
84
+
85
+
86
+ def _ns_to_str(ns: int) -> str:
87
+ return str(ns)
88
+
89
+
90
def _build_span_otel(span: _SpanData, project_id: str) -> dict:
    """Serialize a _SpanData into an OTLP/JSON span dict.

    Attribute values keep their Python type via the matching OTLP value tag
    (string/bool/int/double); ``None`` values and unsupported types are
    silently dropped.  *project_id* is unused here — the service name is
    attached at the resource level by the exporter instead.
    """
    attrs: list[dict] = []

    # bool must be tested before int (bool is an int subclass); strings are
    # never bools, so this ordering matches the original str-first dispatch.
    _VALUE_TAGS = (
        (bool, "boolValue"),
        (str, "stringValue"),
        (int, "intValue"),
        (float, "doubleValue"),
    )

    def _add(key: str, val: Any) -> None:
        for py_type, tag in _VALUE_TAGS:
            if isinstance(val, py_type):
                attrs.append({"key": key, "value": {tag: val}})
                return

    _add("oopstrace.span.type", span.span_kind)
    _add("oopstrace.span.input", span.input)
    _add("oopstrace.span.output", span.output)
    if span.model_name:
        _add("oopstrace.llm.model", span.model_name)
    _add("gen_ai.usage.input_tokens", span.input_tokens)
    _add("gen_ai.usage.output_tokens", span.output_tokens)
    if span.user_id:
        _add("user.id", span.user_id)
    if span.session_id:
        _add("session.id", span.session_id)
    if span.metadata:
        _add("oopstrace.span.metadata", json.dumps(span.metadata))
    for key, val in span.attributes.items():
        _add(key, val)

    status: dict = {
        "code": "STATUS_CODE_ERROR" if span.status == "ERROR" else "STATUS_CODE_OK"
    }
    if span.status_message:
        status["message"] = span.status_message

    otel_span: dict = {
        "traceId": _to_b64(span.trace_id),
        "spanId": _to_b64(span.span_id),
        "name": span.name,
        "kind": "SPAN_KIND_INTERNAL",
        "startTimeUnixNano": _ns_to_str(span.start_ns),
        # A span that was never closed reports zero duration.
        "endTimeUnixNano": _ns_to_str(span.end_ns or span.start_ns),
        "attributes": attrs,
        "status": status,
    }
    if span.parent_span_id:
        otel_span["parentSpanId"] = _to_b64(span.parent_span_id)

    return otel_span
141
+
142
+
143
class _BatchExporter:
    """Background daemon thread that drains a queue and ships batches every 2 s."""

    def __init__(self, endpoint: str, api_key: str, project_id: str) -> None:
        self._endpoint = endpoint.rstrip("/")
        self._api_key = api_key
        self._project_id = project_id
        # Unbounded queue: enqueue() never blocks the instrumented code path.
        self._q: queue.Queue[_SpanData] = queue.Queue()
        self._stop = threading.Event()
        # Daemon thread so the exporter never blocks interpreter exit; spans
        # still queued at exit are lost unless flush()/shutdown() was called.
        self._t = threading.Thread(target=self._run, daemon=True, name="oopstrace-exporter")
        self._t.start()

    def enqueue(self, span: _SpanData) -> None:
        # Non-blocking hand-off from application threads to the worker.
        self._q.put_nowait(span)

    def flush(self, timeout: float = 5.0) -> None:
        """Drain queue and send synchronously. Blocks up to *timeout* seconds."""
        # NOTE(review): *timeout* only bounds the drain loop; the HTTP POST in
        # _send() adds its own fixed 10 s timeout on top of it.
        deadline = time.monotonic() + timeout
        pending: list[_SpanData] = []
        while time.monotonic() < deadline:
            try:
                pending.append(self._q.get_nowait())
            except queue.Empty:
                break
        if pending:
            self._send(pending)

    def shutdown(self) -> None:
        # Stop the worker loop, push out whatever is still queued, then wait
        # briefly for the thread to finish its current iteration.
        self._stop.set()
        self.flush()
        self._t.join(timeout=3.0)

    def _run(self) -> None:
        # Worker loop: wake every _FLUSH_INTERVAL seconds, pull up to
        # _MAX_BATCH spans off the queue, and ship them in one request.
        while not self._stop.is_set():
            time.sleep(_FLUSH_INTERVAL)
            batch: list[_SpanData] = []
            while len(batch) < _MAX_BATCH:
                try:
                    batch.append(self._q.get_nowait())
                except queue.Empty:
                    break
            if batch:
                self._send(batch)

    def _send(self, spans: list[_SpanData]) -> None:
        # Group spans by trace_id
        by_trace: dict[str, list[_SpanData]] = {}
        for s in spans:
            by_trace.setdefault(s.trace_id, []).append(s)

        # One OTLP "scopeSpans" entry per trace present in this batch.
        scope_spans = [
            {
                "scope": {"name": "oopstrace-sdk", "version": "0.1.0"},
                "spans": [_build_span_otel(s, self._project_id) for s in trace_spans],
            }
            for trace_spans in by_trace.values()
        ]

        # OTLP/JSON envelope; the project id doubles as the service name.
        payload = {
            "resourceSpans": [
                {
                    "resource": {
                        "attributes": [
                            {"key": "service.name", "value": {"stringValue": self._project_id}}
                        ]
                    },
                    "scopeSpans": scope_spans,
                }
            ]
        }

        # Export failures are logged and swallowed: tracing must never take
        # down the host application.
        try:
            with httpx.Client(timeout=10.0) as client:
                resp = client.post(
                    f"{self._endpoint}/api/v1/public/traces",
                    json=payload,
                    headers={
                        "Authorization": f"Bearer {self._api_key}",
                        "Content-Type": "application/json",
                    },
                )
                if resp.status_code >= 400:
                    logger.warning("Oopstrace export failed %s: %s", resp.status_code, resp.text[:200])
        except Exception as exc:
            logger.warning("Oopstrace export error: %s", exc)
228
+
229
+
230
+ def _new_id(bytes_len: int = 16) -> str:
231
+ return uuid.uuid4().bytes[:bytes_len].hex()
232
+
233
+
234
class OopstraceClient:
    """Main SDK client. Create one instance per application.

    Spans are buffered in memory and shipped in batches by a background
    daemon thread. Without an API key (argument or ``OOPSTRACE_API_KEY``)
    the client degrades to a no-op: context managers and decorators still
    work, but nothing is exported.
    """

    def __init__(
        self,
        api_key: str | None = None,
        project_id: str | None = None,
        endpoint: str | None = None,
        enabled: bool = True,
        auto_instrument: bool = False,
    ) -> None:
        """Create a client.

        Args:
            api_key: API key; falls back to ``OOPSTRACE_API_KEY``.
            project_id: Target project; falls back to ``OOPSTRACE_PROJECT_ID``
                (default ``"default"``).
            endpoint: Base URL of the ingestion API; falls back to
                ``OOPSTRACE_ENDPOINT``.
            enabled: Set to ``False`` to disable exporting entirely.
            auto_instrument: Patch installed OpenAI/Anthropic SDKs so their
                calls are traced automatically.
        """
        self._api_key = api_key or os.environ.get("OOPSTRACE_API_KEY", "")
        self._project_id = project_id or os.environ.get("OOPSTRACE_PROJECT_ID", "default")
        self._endpoint = endpoint or os.environ.get("OOPSTRACE_ENDPOINT", _DEFAULT_ENDPOINT)
        # Without an API key nothing could be exported, so disable silently.
        self._enabled = enabled and bool(self._api_key)
        self._exporter: _BatchExporter | None = (
            _BatchExporter(self._endpoint, self._api_key, self._project_id)
            if self._enabled
            else None
        )
        if auto_instrument:
            # Lazy import: only touch third-party SDKs when explicitly asked.
            from oopstrace.patches import apply_all

            apply_all(self)

    # ------------------------------------------------------------------
    # Context managers
    # ------------------------------------------------------------------

    @staticmethod
    def _encode_io(value: Any) -> str | None:
        """JSON-encode a span input/output value; strings and None pass through."""
        if value is None:
            return None
        return value if isinstance(value, str) else json.dumps(value)

    @contextmanager
    def start_trace(
        self,
        name: str,
        *,
        user_id: str | None = None,
        session_id: str | None = None,
        input: Any = None,
        metadata: dict | None = None,
        remote_context: dict | None = None,
    ) -> Generator[_SpanData, None, None]:
        """Open a root span that defines a new trace.

        Pass *remote_context* (from :meth:`extract_context`) to continue a
        distributed trace started by another agent process. The local root
        span will be inserted as a child of the remote span while sharing the
        same ``trace_id``, so all agents appear in one unified causal graph.

        Yields the mutable span record; callers may assign ``output``,
        ``metadata`` etc. on it before the block exits. An exception inside
        the block marks the span ERROR and re-raises.
        """
        if remote_context:
            trace_id = remote_context["trace_id"]
            parent_span_id: str | None = remote_context.get("parent_span_id")
        else:
            trace_id = _new_id(16)
            parent_span_id = None

        span_id = _new_id(8)
        start_ns = time.time_ns()
        ctx = _TraceContext(
            trace_id=trace_id,
            root_span_id=span_id,
            current_span_id=span_id,
            user_id=user_id,
            session_id=session_id,
        )
        tok = _ctx_var.set(ctx)
        span = _SpanData(
            span_id=span_id,
            trace_id=trace_id,
            parent_span_id=parent_span_id,
            name=name,
            span_kind="SPAN",
            start_ns=start_ns,
            user_id=user_id,
            session_id=session_id,
            input=self._encode_io(input),
            metadata=metadata,
        )
        try:
            yield span
        except Exception as exc:
            span.status = "ERROR"
            span.status_message = str(exc)
            raise
        finally:
            # Always close + enqueue the span and restore the previous
            # context, even when the body raised.
            span.end_ns = time.time_ns()
            _ctx_var.reset(tok)
            if self._exporter:
                self._exporter.enqueue(span)

    @contextmanager
    def start_span(
        self,
        name: str,
        *,
        span_kind: str = "SPAN",
        input: Any = None,
        output: Any = None,
        model_name: str | None = None,
        input_tokens: int | None = None,
        output_tokens: int | None = None,
        metadata: dict | None = None,
        attributes: dict | None = None,
    ) -> Generator[_SpanData, None, None]:
        """Open a child span within the current trace.

        When called outside any trace, a root trace is created automatically
        and this span's details are applied to its root span. Yields the
        mutable span record so callers can fill in ``output``, token counts,
        etc. during the block.
        """
        ctx = _ctx_var.get()
        if ctx is None:
            # Auto-create a trace if called outside a trace context.
            # Fix: previously span_kind, input/output, model, token counts,
            # metadata and attributes were silently dropped on this path.
            with self.start_trace(name, input=input, metadata=metadata) as root_span:
                root_span.span_kind = span_kind.upper()
                root_span.output = self._encode_io(output)
                root_span.model_name = model_name
                root_span.input_tokens = input_tokens
                root_span.output_tokens = output_tokens
                if attributes:
                    root_span.attributes.update(attributes)
                yield root_span
            return

        span_id = _new_id(8)
        parent_id = ctx.current_span_id
        ctx.current_span_id = span_id
        start_ns = time.time_ns()

        span = _SpanData(
            span_id=span_id,
            trace_id=ctx.trace_id,
            parent_span_id=parent_id,
            name=name,
            span_kind=span_kind.upper(),
            start_ns=start_ns,
            input=self._encode_io(input),
            output=self._encode_io(output),
            model_name=model_name,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            metadata=metadata,
            attributes=attributes or {},
            user_id=ctx.user_id,
            session_id=ctx.session_id,
        )
        try:
            yield span
        except Exception as exc:
            span.status = "ERROR"
            span.status_message = str(exc)
            raise
        finally:
            span.end_ns = time.time_ns()
            # Restore the parent as the current span for subsequent siblings.
            ctx.current_span_id = parent_id
            if self._exporter:
                self._exporter.enqueue(span)

    # ------------------------------------------------------------------
    # Decorators
    # ------------------------------------------------------------------

    def trace(
        self,
        fn: Callable | None = None,
        *,
        name: str | None = None,
        user_id: str | None = None,
        session_id: str | None = None,
    ):
        """Decorator: wrap the function in a root trace span.

        Usable bare (``@client.trace``) or parameterized
        (``@client.trace(name=...)``), on sync and async functions alike.
        A string return value is recorded as the span's output.
        """

        def _decorator(f: Callable) -> Callable:
            trace_name = name or f.__name__

            @functools.wraps(f)
            def _sync_wrapper(*args, **kwargs):
                with self.start_trace(trace_name, user_id=user_id, session_id=session_id) as s:
                    result = f(*args, **kwargs)
                    if isinstance(result, str):
                        s.output = result
                    return result

            @functools.wraps(f)
            async def _async_wrapper(*args, **kwargs):
                with self.start_trace(trace_name, user_id=user_id, session_id=session_id) as s:
                    result = await f(*args, **kwargs)
                    if isinstance(result, str):
                        s.output = result
                    return result

            return _async_wrapper if inspect.iscoroutinefunction(f) else _sync_wrapper

        return _decorator(fn) if fn is not None else _decorator

    def span(
        self,
        fn: Callable | None = None,
        *,
        name: str | None = None,
        span_kind: str = "SPAN",
    ):
        """Decorator: wrap the function in a child span.

        Usable bare or parameterized, on sync and async functions alike.
        A string return value is recorded as the span's output.
        """

        def _decorator(f: Callable) -> Callable:
            span_name = name or f.__name__

            @functools.wraps(f)
            def _sync_wrapper(*args, **kwargs):
                with self.start_span(span_name, span_kind=span_kind) as s:
                    result = f(*args, **kwargs)
                    if isinstance(result, str):
                        s.output = result
                    return result

            @functools.wraps(f)
            async def _async_wrapper(*args, **kwargs):
                with self.start_span(span_name, span_kind=span_kind) as s:
                    result = await f(*args, **kwargs)
                    if isinstance(result, str):
                        s.output = result
                    return result

            return _async_wrapper if inspect.iscoroutinefunction(f) else _sync_wrapper

        return _decorator(fn) if fn is not None else _decorator

    # ------------------------------------------------------------------
    # Distributed trace propagation (W3C traceparent)
    # ------------------------------------------------------------------

    def inject_headers(self, headers: dict | None = None) -> dict:
        """Inject W3C ``traceparent`` header into *headers* for an outgoing call.

        Use this before making an HTTP request to another agent so the remote
        process can link its spans into the same causal graph::

            headers = client.inject_headers({"Content-Type": "application/json"})
            httpx.post(agent_url, json=payload, headers=headers)

        Returns a new dict; *headers* is not mutated. Outside a trace the
        input headers are returned unchanged (as a copy).
        """
        headers = dict(headers) if headers else {}
        ctx = _ctx_var.get()
        if ctx:
            # version 00, flags 01 (sampled)
            headers["traceparent"] = f"00-{ctx.trace_id}-{ctx.current_span_id}-01"
        return headers

    def extract_context(self, headers: dict) -> dict | None:
        """Parse a W3C ``traceparent`` header from an incoming request.

        Returns a ``remote_context`` dict ready to pass to :meth:`start_trace`,
        or ``None`` if no valid header is present::

            ctx = client.extract_context(request.headers)
            with client.start_trace("AgentB", remote_context=ctx) as span:
                ...
        """
        tp = headers.get("traceparent") or headers.get("Traceparent")
        if not tp:
            return None
        parts = tp.split("-")
        if len(parts) != 4:
            return None
        trace_id, parent_span_id = parts[1], parts[2]
        # Robustness fix: validate id length and hex content, and reject the
        # all-zero ids the W3C spec defines as invalid. Previously malformed
        # ids were accepted and crashed bytes.fromhex() at export time.
        hex_digits = set("0123456789abcdef")
        if len(trace_id) != 32 or len(parent_span_id) != 16:
            return None
        if not (set(trace_id) <= hex_digits and set(parent_span_id) <= hex_digits):
            return None
        if trace_id == "0" * 32 or parent_span_id == "0" * 16:
            return None
        return {"trace_id": trace_id, "parent_span_id": parent_span_id}

    # ------------------------------------------------------------------
    # Prompt management
    # ------------------------------------------------------------------

    def get_prompt(self, slug: str, version: int | None = None) -> str | None:
        """Fetch a prompt template from the Prompt CMS.

        Returns the template string, or ``None`` when the prompt does not
        exist or the request fails (failures are logged, never raised).
        """
        params = {} if version is None else {"version": version}
        try:
            with httpx.Client(timeout=5.0) as client:
                resp = client.get(
                    f"{self._endpoint}/api/v1/public/prompts/{slug}",
                    params=params,
                    headers={"Authorization": f"Bearer {self._api_key}"},
                )
                if resp.status_code == 200:
                    return resp.json().get("template")
                if resp.status_code != 404:
                    # Robustness fix: log unexpected statuses instead of
                    # silently returning None (consistent with the exporter).
                    logger.warning(
                        "Oopstrace get_prompt failed %s: %s", resp.status_code, resp.text[:200]
                    )
        except Exception as exc:
            logger.warning("Oopstrace get_prompt error: %s", exc)
        return None

    def flush(self, timeout: float = 5.0) -> None:
        """Block until all pending spans are exported (no-op when disabled)."""
        if self._exporter:
            self._exporter.flush(timeout=timeout)

    def shutdown(self) -> None:
        """Flush and stop the background exporter thread (no-op when disabled)."""
        if self._exporter:
            self._exporter.shutdown()
519
+
520
+
521
+ # ---------------------------------------------------------------------------
522
+ # Module-level convenience API
523
+ # ---------------------------------------------------------------------------
524
+
525
+ _default_client: OopstraceClient | None = None
526
+
527
+
528
def init(
    api_key: str | None = None,
    project_id: str | None = None,
    endpoint: str | None = None,
    auto_instrument: bool = False,
) -> OopstraceClient:
    """Initialize and return the module-level default client.

    Set ``auto_instrument=True`` to automatically patch OpenAI and Anthropic
    SDK calls — no decorators needed for basic LLM tracing.
    """
    global _default_client
    client = OopstraceClient(
        api_key=api_key,
        project_id=project_id,
        endpoint=endpoint,
        auto_instrument=auto_instrument,
    )
    _default_client = client
    return client
547
+
548
+
549
def _get_client() -> OopstraceClient:
    """Return the default client, lazily creating an env-configured one."""
    global _default_client
    client = _default_client
    if client is None:
        client = OopstraceClient()
        _default_client = client
    return client
554
+
555
+
556
def trace(fn: Callable | None = None, *, name: str | None = None, user_id: str | None = None, session_id: str | None = None):
    """Module-level ``@trace`` that delegates to the default client."""
    client = _get_client()
    return client.trace(fn, name=name, user_id=user_id, session_id=session_id)
558
+
559
+
560
def span(fn: Callable | None = None, *, name: str | None = None, span_kind: str = "SPAN"):
    """Module-level ``@span`` that delegates to the default client."""
    client = _get_client()
    return client.span(fn, name=name, span_kind=span_kind)
562
+
563
+
564
def get_prompt(slug: str, version: int | None = None) -> str | None:
    """Fetch a prompt template via the default client."""
    client = _get_client()
    return client.get_prompt(slug, version=version)
566
+
567
+
568
def flush(timeout: float = 5.0) -> None:
    """Block until the default client has exported all pending spans."""
    client = _get_client()
    client.flush(timeout=timeout)
570
+
571
+
572
def inject_headers(headers: dict | None = None) -> dict:
    """Add a W3C ``traceparent`` header via the default client."""
    client = _get_client()
    return client.inject_headers(headers)
574
+
575
+
576
def extract_context(headers: dict) -> dict | None:
    """Parse an incoming ``traceparent`` header via the default client."""
    client = _get_client()
    return client.extract_context(headers)
@@ -0,0 +1,21 @@
1
+ """Monkey-patching registry for auto-instrumentation.
2
+
3
+ Call apply_all(client) to patch every SDK that is installed.
4
+ Individual patch functions are safe to call when the target library is absent.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import TYPE_CHECKING
10
+
11
+ if TYPE_CHECKING:
12
+ from oopstrace.client import OopstraceClient
13
+
14
+
15
def apply_all(client: "OopstraceClient") -> None:
    """Apply every available auto-instrumentation patch against *client*.

    Each patch function is a no-op when its target SDK is not installed.
    """
    from oopstrace.patches._anthropic import patch_anthropic
    from oopstrace.patches._openai import patch_openai

    for patch in (patch_openai, patch_anthropic):
        patch(client)
@@ -0,0 +1,81 @@
1
+ """Auto-instrumentation patch for the anthropic SDK.
2
+
3
+ Wraps `Messages.create` and `AsyncMessages.create` so that every Anthropic
4
+ call is automatically captured as a child LLM span — no decorators required.
5
+
6
+ The patch is idempotent: calling patch_anthropic() twice is safe.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import functools
12
+ from typing import TYPE_CHECKING, Any
13
+
14
+ if TYPE_CHECKING:
15
+ from oopstrace.client import OopstraceClient
16
+
17
+
18
+ def _extract_last_user_text(messages: list[dict]) -> str:
19
+ """Pull the text content out of the last user message."""
20
+ for msg in reversed(messages):
21
+ if msg.get("role") == "user":
22
+ content = msg.get("content", "")
23
+ if isinstance(content, str):
24
+ return content
25
+ if isinstance(content, list):
26
+ for block in content:
27
+ if isinstance(block, dict) and block.get("type") == "text":
28
+ return block.get("text", "")
29
+ return ""
30
+
31
+
32
def patch_anthropic(client: "OopstraceClient") -> None:
    """Monkey-patch anthropic SDK classes to emit LLM spans automatically."""
    try:
        from anthropic.resources.messages import AsyncMessages, Messages
    except ImportError:
        # anthropic is not installed; nothing to patch.
        return

    # Idempotence guard: both classes are patched together below, so a
    # single marker on Messages is sufficient.
    if getattr(Messages, "_oopstrace_patched", False):
        return

    _orig_sync = Messages.create
    _orig_async = AsyncMessages.create

    @functools.wraps(_orig_sync)
    def _sync_create(self_m, *args: Any, **kwargs: Any):
        # NOTE(review): model/messages are only read from keyword arguments;
        # a positional call would leave input/model empty on the span — confirm
        # that's acceptable for the anthropic SDK's calling conventions.
        model = kwargs.get("model") or ""
        messages = kwargs.get("messages") or []
        inp = _extract_last_user_text(messages)

        with client.start_span("anthropic.messages", span_kind="LLM", input=inp, model_name=model) as s:
            result = _orig_sync(self_m, *args, **kwargs)
            # Record token usage and the first text block of the reply.
            if hasattr(result, "usage") and result.usage:
                s.input_tokens = result.usage.input_tokens
                s.output_tokens = result.usage.output_tokens
            if hasattr(result, "content") and result.content:
                first = result.content[0]
                if hasattr(first, "text"):
                    s.output = first.text
            return result

    @functools.wraps(_orig_async)
    async def _async_create(self_m, *args: Any, **kwargs: Any):
        # Async twin of _sync_create; kept in lockstep with it.
        model = kwargs.get("model") or ""
        messages = kwargs.get("messages") or []
        inp = _extract_last_user_text(messages)

        with client.start_span("anthropic.messages", span_kind="LLM", input=inp, model_name=model) as s:
            result = await _orig_async(self_m, *args, **kwargs)
            if hasattr(result, "usage") and result.usage:
                s.input_tokens = result.usage.input_tokens
                s.output_tokens = result.usage.output_tokens
            if hasattr(result, "content") and result.content:
                first = result.content[0]
                if hasattr(first, "text"):
                    s.output = first.text
            return result

    # Install the wrappers and mark the patch as applied.
    Messages.create = _sync_create
    AsyncMessages.create = _async_create
    Messages._oopstrace_patched = True
@@ -0,0 +1,67 @@
1
+ """Auto-instrumentation patch for the openai SDK (>= 1.0).
2
+
3
+ Wraps `Completions.create` and `AsyncCompletions.create` so that every chat
4
+ call is automatically captured as a child LLM span — no decorators required.
5
+
6
+ The patch is idempotent: calling patch_openai() twice is safe.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import functools
12
+ from typing import TYPE_CHECKING, Any
13
+
14
+ if TYPE_CHECKING:
15
+ from oopstrace.client import OopstraceClient
16
+
17
+
18
def patch_openai(client: "OopstraceClient") -> None:
    """Monkey-patch openai SDK classes to emit LLM spans automatically."""
    try:
        from openai.resources.chat.completions import AsyncCompletions, Completions
    except ImportError:
        # openai (>= 1.0 layout) is not installed; nothing to patch.
        return

    # Idempotence guard: both classes are patched together below, so a
    # single marker on Completions is sufficient.
    if getattr(Completions, "_oopstrace_patched", False):
        return

    _orig_sync = Completions.create
    _orig_async = AsyncCompletions.create

    @functools.wraps(_orig_sync)
    def _sync_create(self_c, *args: Any, **kwargs: Any):
        # NOTE(review): model/messages are only read from keyword arguments;
        # also, `inp` may be a non-str content value for multimodal messages —
        # confirm downstream encoding handles that.
        model = kwargs.get("model") or ""
        messages = kwargs.get("messages") or []
        inp = (messages[-1].get("content") or "") if messages else ""

        with client.start_span("openai.chat", span_kind="LLM", input=inp, model_name=model) as s:
            result = _orig_sync(self_c, *args, **kwargs)
            # Record token usage and the first choice's message content.
            if hasattr(result, "usage") and result.usage:
                s.input_tokens = result.usage.prompt_tokens
                s.output_tokens = result.usage.completion_tokens
            if hasattr(result, "choices") and result.choices:
                content = result.choices[0].message.content
                if content:
                    s.output = content
            return result

    @functools.wraps(_orig_async)
    async def _async_create(self_c, *args: Any, **kwargs: Any):
        # Async twin of _sync_create; kept in lockstep with it.
        model = kwargs.get("model") or ""
        messages = kwargs.get("messages") or []
        inp = (messages[-1].get("content") or "") if messages else ""

        with client.start_span("openai.chat", span_kind="LLM", input=inp, model_name=model) as s:
            result = await _orig_async(self_c, *args, **kwargs)
            if hasattr(result, "usage") and result.usage:
                s.input_tokens = result.usage.prompt_tokens
                s.output_tokens = result.usage.completion_tokens
            if hasattr(result, "choices") and result.choices:
                content = result.choices[0].message.content
                if content:
                    s.output = content
            return result

    # Install the wrappers and mark the patch as applied.
    Completions.create = _sync_create
    AsyncCompletions.create = _async_create
    Completions._oopstrace_patched = True
+ Completions._oopstrace_patched = True
@@ -0,0 +1,77 @@
1
+ Metadata-Version: 2.4
2
+ Name: oopstrace
3
+ Version: 0.1.0
4
+ Summary: Oopstrace Python SDK — LLM observability with automatic batching
5
+ License: MIT
6
+ Requires-Python: >=3.9
7
+ Requires-Dist: httpx>=0.24.0
8
+ Provides-Extra: dev
9
+ Requires-Dist: pytest; extra == 'dev'
10
+ Requires-Dist: pytest-asyncio; extra == 'dev'
11
+ Description-Content-Type: text/markdown
12
+
13
+ # OopsTrace Python SDK
14
+
15
+ LLM observability for Python. Trace every call, span, token count, and cost — visible instantly at [app.oopstrace.com](https://app.oopstrace.com).
16
+
17
+ ## Install
18
+
19
+ ```bash
20
+ pip install oopstrace
21
+ ```
22
+
23
+ ## Quickstart
24
+
25
+ ```python
26
+ import oopstrace
27
+
28
+ oopstrace.init(
29
+ api_key="tr_...", # from app.oopstrace.com → project → Access Keys
30
+ project_id="...", # your project ID
31
+ )
32
+
33
+ @oopstrace.trace
34
+ def run_agent(user_input: str) -> str:
35
+ with oopstrace.span("llm-call", span_kind="LLM") as s:
36
+ s.output = "Hello from OopsTrace"
37
+ return "Hello from OopsTrace"
38
+
39
+ run_agent("test")
40
+ oopstrace.flush()
41
+ ```
42
+
43
+ ## Auto-instrument OpenAI / Anthropic
44
+
45
+ ```python
46
+ oopstrace.init(api_key="tr_...", project_id="...", auto_instrument=True)
47
+
48
+ # All openai.chat.completions.create() and anthropic.messages.create()
49
+ # calls are now traced automatically — no decorators needed.
50
+ ```
51
+
52
+ ## Manual spans
53
+
54
+ ```python
55
+ @oopstrace.trace
56
+ def pipeline(query: str):
57
+ with oopstrace.span("retrieval", span_kind="TOOL") as s:
58
+ docs = retrieve(query)
59
+ s.input = query
60
+ s.output = str(docs)
61
+
62
+ with oopstrace.span("generate", span_kind="LLM") as s:
63
+ answer = llm(query, docs)
64
+ s.model_name = "gpt-4o"
65
+ s.input_tokens, s.output_tokens = 100, 50
66
+ return answer
67
+ ```
68
+
69
+ ## Self-hosted
70
+
71
+ ```python
72
+ oopstrace.init(
73
+ api_key="tr_...",
74
+ project_id="...",
75
+ endpoint="https://app.yourdomain.com", # point at your own server
76
+ )
77
+ ```
@@ -0,0 +1,8 @@
1
+ oopstrace/__init__.py,sha256=FY7BtqOXEDdbDtFV2RbBnEjM9IliSxUXpXsu-7pt1Mw,295
2
+ oopstrace/client.py,sha256=wQbYB51UF3mTD0mwYIqIEpMEZGOTand1WP4ssje71d0,19236
3
+ oopstrace/patches/__init__.py,sha256=IftQngmZhHQxLjZHsx7ipceCJH_H9zQZhL0G9sS9ksg,615
4
+ oopstrace/patches/_anthropic.py,sha256=Xb6U8fSgEret7LmVDzEwKYfe7H77kYS2wAMjnJTUY_4,3080
5
+ oopstrace/patches/_openai.py,sha256=fgExyJdLu2tTVCeEjwrhQH4NBqDurLnqbKjnGUgN7E8,2608
6
+ oopstrace-0.1.0.dist-info/METADATA,sha256=-GpMHbSG3AAGogfvW3muCXr_r8vZYyQr_bM2zSXXyQM,1829
7
+ oopstrace-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
8
+ oopstrace-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.29.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any