lucidicai 2.1.0__tar.gz → 2.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-2.1.0 → lucidicai-2.1.1}/PKG-INFO +1 -1
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/__init__.py +24 -7
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/core/config.py +2 -2
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/context.py +93 -6
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/event.py +9 -5
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/init.py +95 -5
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/litellm_bridge.py +2 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/lucidic_exporter.py +50 -26
- lucidicai-2.1.1/lucidicai/telemetry/openai_patch.py +295 -0
- lucidicai-2.1.1/lucidicai/telemetry/openai_uninstrument.py +87 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/telemetry_init.py +16 -1
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai.egg-info/PKG-INFO +1 -1
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai.egg-info/SOURCES.txt +2 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/setup.py +1 -1
- {lucidicai-2.1.0 → lucidicai-2.1.1}/README.md +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/client.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/resources/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/resources/dataset.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/resources/event.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/api/resources/session.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/core/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/core/errors.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/core/types.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/decorators.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/error_boundary.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/event_builder.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/features/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/features/dataset.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/features/feature_flag.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/sdk/shutdown_manager.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/context_bridge.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/context_capture_processor.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/extract.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/utils/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/telemetry/utils/model_pricing.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/utils/__init__.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/utils/images.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/utils/logger.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai/utils/queue.py +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-2.1.0 → lucidicai-2.1.1}/setup.cfg +0 -0
|
@@ -14,6 +14,10 @@ from .sdk.init import (
|
|
|
14
14
|
init as _init,
|
|
15
15
|
get_session_id as _get_session_id,
|
|
16
16
|
clear_state as _clear_state,
|
|
17
|
+
# Thread-local session management (advanced users)
|
|
18
|
+
set_thread_session,
|
|
19
|
+
clear_thread_session,
|
|
20
|
+
get_thread_session,
|
|
17
21
|
)
|
|
18
22
|
|
|
19
23
|
from .sdk.event import (
|
|
@@ -32,6 +36,7 @@ from .sdk.context import (
|
|
|
32
36
|
session_async,
|
|
33
37
|
run_session,
|
|
34
38
|
run_in_session,
|
|
39
|
+
thread_worker_with_session, # Thread isolation helper
|
|
35
40
|
current_session_id,
|
|
36
41
|
current_parent_event_id,
|
|
37
42
|
)
|
|
@@ -55,12 +60,15 @@ def _update_session(
|
|
|
55
60
|
session_eval=None,
|
|
56
61
|
session_eval_reason=None,
|
|
57
62
|
is_successful=None,
|
|
58
|
-
is_successful_reason=None
|
|
63
|
+
is_successful_reason=None,
|
|
64
|
+
session_id=None # Accept explicit session_id
|
|
59
65
|
):
|
|
60
66
|
"""Update the current session."""
|
|
61
67
|
from .sdk.init import get_resources, get_session_id
|
|
62
|
-
|
|
63
|
-
session_id
|
|
68
|
+
|
|
69
|
+
# Use provided session_id or fall back to context
|
|
70
|
+
if not session_id:
|
|
71
|
+
session_id = get_session_id()
|
|
64
72
|
if not session_id:
|
|
65
73
|
return
|
|
66
74
|
|
|
@@ -87,12 +95,15 @@ def _end_session(
|
|
|
87
95
|
session_eval_reason=None,
|
|
88
96
|
is_successful=None,
|
|
89
97
|
is_successful_reason=None,
|
|
90
|
-
wait_for_flush=True
|
|
98
|
+
wait_for_flush=True,
|
|
99
|
+
session_id=None # Accept explicit session_id
|
|
91
100
|
):
|
|
92
101
|
"""End the current session."""
|
|
93
102
|
from .sdk.init import get_resources, get_session_id, get_event_queue
|
|
94
|
-
|
|
95
|
-
session_id
|
|
103
|
+
|
|
104
|
+
# Use provided session_id or fall back to context
|
|
105
|
+
if not session_id:
|
|
106
|
+
session_id = get_session_id()
|
|
96
107
|
if not session_id:
|
|
97
108
|
return
|
|
98
109
|
|
|
@@ -282,7 +293,7 @@ get_error_history = error_boundary.get_error_history
|
|
|
282
293
|
clear_error_history = error_boundary.clear_error_history
|
|
283
294
|
|
|
284
295
|
# Version
|
|
285
|
-
__version__ = "2.
|
|
296
|
+
__version__ = "2.1.1"
|
|
286
297
|
|
|
287
298
|
# Apply error boundary wrapping to all SDK functions
|
|
288
299
|
from .sdk.error_boundary import wrap_sdk_function
|
|
@@ -371,8 +382,14 @@ __all__ = [
|
|
|
371
382
|
'session_async',
|
|
372
383
|
'run_session',
|
|
373
384
|
'run_in_session',
|
|
385
|
+
'thread_worker_with_session',
|
|
374
386
|
'current_session_id',
|
|
375
387
|
'current_parent_event_id',
|
|
388
|
+
|
|
389
|
+
# Thread-local session management (advanced)
|
|
390
|
+
'set_thread_session',
|
|
391
|
+
'clear_thread_session',
|
|
392
|
+
'get_thread_session',
|
|
376
393
|
|
|
377
394
|
# Error types
|
|
378
395
|
'LucidicError',
|
|
@@ -19,7 +19,7 @@ class Environment(Enum):
|
|
|
19
19
|
@dataclass
|
|
20
20
|
class NetworkConfig:
|
|
21
21
|
"""Network and connection settings"""
|
|
22
|
-
base_url: str = "https://
|
|
22
|
+
base_url: str = "https://backend.lucidic.ai/api"
|
|
23
23
|
timeout: int = 30
|
|
24
24
|
max_retries: int = 3
|
|
25
25
|
backoff_factor: float = 0.5
|
|
@@ -31,7 +31,7 @@ class NetworkConfig:
|
|
|
31
31
|
"""Load network configuration from environment variables"""
|
|
32
32
|
debug = os.getenv("LUCIDIC_DEBUG", "False").lower() == "true"
|
|
33
33
|
return cls(
|
|
34
|
-
base_url="http://localhost:8000/api" if debug else "https://
|
|
34
|
+
base_url="http://localhost:8000/api" if debug else "https://backend.lucidic.ai/api",
|
|
35
35
|
timeout=int(os.getenv("LUCIDIC_TIMEOUT", "30")),
|
|
36
36
|
max_retries=int(os.getenv("LUCIDIC_MAX_RETRIES", "3")),
|
|
37
37
|
backoff_factor=float(os.getenv("LUCIDIC_BACKOFF_FACTOR", "0.5")),
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
"""Async-safe context helpers for session (and step, extensible).
|
|
1
|
+
"""Async-safe and thread-safe context helpers for session (and step, extensible).
|
|
2
2
|
|
|
3
3
|
This module exposes context variables and helpers to bind a Lucidic
|
|
4
4
|
session to the current execution context (threads/async tasks), so
|
|
@@ -11,6 +11,7 @@ import contextvars
|
|
|
11
11
|
from typing import Optional, Iterator, AsyncIterator, Callable, Any, Dict
|
|
12
12
|
import logging
|
|
13
13
|
import os
|
|
14
|
+
import threading
|
|
14
15
|
|
|
15
16
|
|
|
16
17
|
# Context variable for the active Lucidic session id
|
|
@@ -26,32 +27,71 @@ current_parent_event_id: contextvars.ContextVar[Optional[str]] = contextvars.Con
|
|
|
26
27
|
|
|
27
28
|
|
|
28
29
|
def set_active_session(session_id: Optional[str]) -> None:
|
|
29
|
-
"""Bind the given session id to the current execution context.
|
|
30
|
+
"""Bind the given session id to the current execution context.
|
|
31
|
+
|
|
32
|
+
Sets both contextvar and thread-local storage when in a thread.
|
|
33
|
+
"""
|
|
34
|
+
from .init import set_thread_session, is_main_thread
|
|
35
|
+
|
|
30
36
|
current_session_id.set(session_id)
|
|
31
37
|
|
|
38
|
+
# Also set thread-local storage if we're in a non-main thread
|
|
39
|
+
if session_id and not is_main_thread():
|
|
40
|
+
set_thread_session(session_id)
|
|
41
|
+
|
|
32
42
|
|
|
33
43
|
def clear_active_session() -> None:
|
|
34
|
-
"""Clear any active session binding in the current execution context.
|
|
44
|
+
"""Clear any active session binding in the current execution context.
|
|
45
|
+
|
|
46
|
+
Clears both contextvar and thread-local storage when in a thread.
|
|
47
|
+
"""
|
|
48
|
+
from .init import clear_thread_session, is_main_thread
|
|
49
|
+
|
|
35
50
|
current_session_id.set(None)
|
|
36
51
|
|
|
52
|
+
# Also clear thread-local storage if we're in a non-main thread
|
|
53
|
+
if not is_main_thread():
|
|
54
|
+
clear_thread_session()
|
|
55
|
+
|
|
37
56
|
|
|
38
57
|
@contextmanager
|
|
39
58
|
def bind_session(session_id: str) -> Iterator[None]:
|
|
40
|
-
"""Context manager to temporarily bind an active session id.
|
|
59
|
+
"""Context manager to temporarily bind an active session id.
|
|
60
|
+
|
|
61
|
+
Handles both thread-local and context variable storage for proper isolation.
|
|
62
|
+
"""
|
|
63
|
+
from .init import set_thread_session, clear_thread_session, is_main_thread
|
|
64
|
+
|
|
41
65
|
token = current_session_id.set(session_id)
|
|
66
|
+
|
|
67
|
+
# If we're in a non-main thread, also set thread-local storage
|
|
68
|
+
thread_local_set = False
|
|
69
|
+
if not is_main_thread():
|
|
70
|
+
set_thread_session(session_id)
|
|
71
|
+
thread_local_set = True
|
|
72
|
+
|
|
42
73
|
try:
|
|
43
74
|
yield
|
|
44
75
|
finally:
|
|
76
|
+
if thread_local_set:
|
|
77
|
+
clear_thread_session()
|
|
45
78
|
current_session_id.reset(token)
|
|
46
79
|
|
|
47
80
|
|
|
48
81
|
@asynccontextmanager
|
|
49
82
|
async def bind_session_async(session_id: str) -> AsyncIterator[None]:
|
|
50
83
|
"""Async context manager to temporarily bind an active session id."""
|
|
84
|
+
from .init import set_task_session, clear_task_session
|
|
85
|
+
|
|
51
86
|
token = current_session_id.set(session_id)
|
|
87
|
+
|
|
88
|
+
# Also set task-local for async isolation
|
|
89
|
+
set_task_session(session_id)
|
|
90
|
+
|
|
52
91
|
try:
|
|
53
92
|
yield
|
|
54
93
|
finally:
|
|
94
|
+
clear_task_session()
|
|
55
95
|
current_session_id.reset(token)
|
|
56
96
|
|
|
57
97
|
|
|
@@ -81,9 +121,11 @@ def session(**init_params) -> Iterator[None]:
|
|
|
81
121
|
Notes:
|
|
82
122
|
- Ignores any provided auto_end parameter and ends the session on context exit.
|
|
83
123
|
- If LUCIDIC_DEBUG is true, logs a warning about ignoring auto_end.
|
|
124
|
+
- Handles thread-local storage for proper thread isolation.
|
|
84
125
|
"""
|
|
85
126
|
# Lazy import to avoid circular imports
|
|
86
127
|
import lucidicai as lai # type: ignore
|
|
128
|
+
from .init import set_thread_session, clear_thread_session, is_main_thread
|
|
87
129
|
|
|
88
130
|
# Force auto_end to False inside a context manager to control explicit end
|
|
89
131
|
user_auto_end = init_params.get('auto_end', None)
|
|
@@ -95,12 +137,22 @@ def session(**init_params) -> Iterator[None]:
|
|
|
95
137
|
|
|
96
138
|
session_id = lai.init(**init_params)
|
|
97
139
|
token = current_session_id.set(session_id)
|
|
140
|
+
|
|
141
|
+
# If we're in a non-main thread, also set thread-local storage
|
|
142
|
+
thread_local_set = False
|
|
143
|
+
if not is_main_thread():
|
|
144
|
+
set_thread_session(session_id)
|
|
145
|
+
thread_local_set = True
|
|
146
|
+
|
|
98
147
|
try:
|
|
99
148
|
yield
|
|
100
149
|
finally:
|
|
150
|
+
if thread_local_set:
|
|
151
|
+
clear_thread_session()
|
|
101
152
|
current_session_id.reset(token)
|
|
102
153
|
try:
|
|
103
|
-
|
|
154
|
+
# Pass session_id explicitly to avoid context issues
|
|
155
|
+
lai.end_session(session_id=session_id)
|
|
104
156
|
except Exception:
|
|
105
157
|
# Avoid masking the original exception from the with-block
|
|
106
158
|
pass
|
|
@@ -110,6 +162,7 @@ def session(**init_params) -> Iterator[None]:
|
|
|
110
162
|
async def session_async(**init_params) -> AsyncIterator[None]:
|
|
111
163
|
"""Async counterpart of session(...)."""
|
|
112
164
|
import lucidicai as lai # type: ignore
|
|
165
|
+
from .init import set_task_session, clear_task_session
|
|
113
166
|
|
|
114
167
|
user_auto_end = init_params.get('auto_end', None)
|
|
115
168
|
init_params = dict(init_params)
|
|
@@ -120,12 +173,19 @@ async def session_async(**init_params) -> AsyncIterator[None]:
|
|
|
120
173
|
|
|
121
174
|
session_id = lai.init(**init_params)
|
|
122
175
|
token = current_session_id.set(session_id)
|
|
176
|
+
|
|
177
|
+
# Set task-local session for true isolation in async
|
|
178
|
+
set_task_session(session_id)
|
|
179
|
+
|
|
123
180
|
try:
|
|
124
181
|
yield
|
|
125
182
|
finally:
|
|
183
|
+
# Clear task-local session first
|
|
184
|
+
clear_task_session()
|
|
126
185
|
current_session_id.reset(token)
|
|
127
186
|
try:
|
|
128
|
-
|
|
187
|
+
# Pass session_id explicitly to avoid context issues in async
|
|
188
|
+
lai.end_session(session_id=session_id)
|
|
129
189
|
except Exception:
|
|
130
190
|
pass
|
|
131
191
|
|
|
@@ -142,3 +202,30 @@ def run_in_session(session_id: str, fn: Callable[..., Any], *fn_args: Any, **fn_
|
|
|
142
202
|
return fn(*fn_args, **fn_kwargs)
|
|
143
203
|
|
|
144
204
|
|
|
205
|
+
def thread_worker_with_session(session_id: str, target: Callable[..., Any], *args, **kwargs) -> Any:
|
|
206
|
+
"""Wrapper for thread worker functions that ensures proper session isolation.
|
|
207
|
+
|
|
208
|
+
Use this as the target function for threads to ensure each thread gets
|
|
209
|
+
its own session context without bleeding from the parent thread.
|
|
210
|
+
|
|
211
|
+
Example:
|
|
212
|
+
thread = Thread(
|
|
213
|
+
target=thread_worker_with_session,
|
|
214
|
+
args=(session_id, actual_worker_function, arg1, arg2),
|
|
215
|
+
kwargs={'key': 'value'}
|
|
216
|
+
)
|
|
217
|
+
"""
|
|
218
|
+
from .init import set_thread_session, clear_thread_session
|
|
219
|
+
|
|
220
|
+
# Set thread-local session immediately
|
|
221
|
+
set_thread_session(session_id)
|
|
222
|
+
|
|
223
|
+
try:
|
|
224
|
+
# Also bind to contextvar for compatibility
|
|
225
|
+
with bind_session(session_id):
|
|
226
|
+
return target(*args, **kwargs)
|
|
227
|
+
finally:
|
|
228
|
+
# Clean up thread-local storage
|
|
229
|
+
clear_thread_session()
|
|
230
|
+
|
|
231
|
+
|
|
@@ -12,23 +12,27 @@ from ..utils.logger import debug, truncate_id
|
|
|
12
12
|
def create_event(
|
|
13
13
|
type: str = "generic",
|
|
14
14
|
event_id: Optional[str] = None,
|
|
15
|
+
session_id: Optional[str] = None, # accept explicit session_id
|
|
15
16
|
**kwargs
|
|
16
17
|
) -> str:
|
|
17
18
|
"""Create a new event.
|
|
18
|
-
|
|
19
|
+
|
|
19
20
|
Args:
|
|
20
21
|
type: Event type (llm_generation, function_call, error_traceback, generic)
|
|
21
22
|
event_id: Optional client event ID (will generate if not provided)
|
|
23
|
+
session_id: Optional session ID (will use context if not provided)
|
|
22
24
|
**kwargs: Event-specific fields
|
|
23
|
-
|
|
25
|
+
|
|
24
26
|
Returns:
|
|
25
27
|
Event ID (client-generated or provided UUID)
|
|
26
28
|
"""
|
|
27
29
|
# Import here to avoid circular dependency
|
|
28
30
|
from ..sdk.init import get_session_id, get_event_queue
|
|
29
|
-
|
|
30
|
-
#
|
|
31
|
-
|
|
31
|
+
|
|
32
|
+
# Use provided session_id or fall back to context
|
|
33
|
+
if not session_id:
|
|
34
|
+
session_id = get_session_id()
|
|
35
|
+
|
|
32
36
|
if not session_id:
|
|
33
37
|
# No active session, return dummy ID
|
|
34
38
|
debug("[Event] No active session, returning dummy event ID")
|
|
@@ -4,6 +4,9 @@ This module handles SDK initialization, separating concerns from the main __init
|
|
|
4
4
|
"""
|
|
5
5
|
import uuid
|
|
6
6
|
from typing import List, Optional
|
|
7
|
+
import asyncio
|
|
8
|
+
import threading
|
|
9
|
+
from weakref import WeakKeyDictionary
|
|
7
10
|
|
|
8
11
|
from ..api.client import HttpClient
|
|
9
12
|
from ..api.resources.event import EventResource
|
|
@@ -21,14 +24,18 @@ from opentelemetry.sdk.trace import TracerProvider
|
|
|
21
24
|
|
|
22
25
|
class SDKState:
|
|
23
26
|
"""Container for SDK runtime state."""
|
|
24
|
-
|
|
27
|
+
|
|
25
28
|
def __init__(self):
|
|
26
29
|
self.http: Optional[HttpClient] = None
|
|
27
30
|
self.event_queue: Optional[EventQueue] = None
|
|
28
31
|
self.session_id: Optional[str] = None
|
|
29
32
|
self.tracer_provider: Optional[TracerProvider] = None
|
|
30
33
|
self.resources = {}
|
|
31
|
-
|
|
34
|
+
# Task-local storage for async task isolation
|
|
35
|
+
self.task_sessions: WeakKeyDictionary = WeakKeyDictionary()
|
|
36
|
+
# Thread-local storage for thread isolation
|
|
37
|
+
self.thread_local = threading.local()
|
|
38
|
+
|
|
32
39
|
def reset(self):
|
|
33
40
|
"""Reset SDK state."""
|
|
34
41
|
# Shutdown telemetry first to ensure all spans are exported
|
|
@@ -42,17 +49,21 @@ class SDKState:
|
|
|
42
49
|
debug("[SDK] TracerProvider shutdown complete")
|
|
43
50
|
except Exception as e:
|
|
44
51
|
error(f"[SDK] Error shutting down TracerProvider: {e}")
|
|
45
|
-
|
|
52
|
+
|
|
46
53
|
if self.event_queue:
|
|
47
54
|
self.event_queue.shutdown()
|
|
48
55
|
if self.http:
|
|
49
56
|
self.http.close()
|
|
50
|
-
|
|
57
|
+
|
|
51
58
|
self.http = None
|
|
52
59
|
self.event_queue = None
|
|
53
60
|
self.session_id = None
|
|
54
61
|
self.tracer_provider = None
|
|
55
62
|
self.resources = {}
|
|
63
|
+
self.task_sessions.clear()
|
|
64
|
+
# Clear thread-local storage for current thread
|
|
65
|
+
if hasattr(self.thread_local, 'session_id'):
|
|
66
|
+
delattr(self.thread_local, 'session_id')
|
|
56
67
|
|
|
57
68
|
|
|
58
69
|
# Global SDK state
|
|
@@ -243,8 +254,87 @@ def _initialize_telemetry(providers: List[str]) -> None:
|
|
|
243
254
|
info(f"[Telemetry] Initialized for providers: {providers}")
|
|
244
255
|
|
|
245
256
|
|
|
257
|
+
def set_task_session(session_id: str) -> None:
|
|
258
|
+
"""Set session ID for current async task (if in async context)."""
|
|
259
|
+
try:
|
|
260
|
+
if task := asyncio.current_task():
|
|
261
|
+
_sdk_state.task_sessions[task] = session_id
|
|
262
|
+
debug(f"[SDK] Set task-local session {truncate_id(session_id)} for task {task.get_name()}")
|
|
263
|
+
except RuntimeError:
|
|
264
|
+
# Not in async context, ignore
|
|
265
|
+
pass
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
def clear_task_session() -> None:
|
|
269
|
+
"""Clear session ID for current async task (if in async context)."""
|
|
270
|
+
try:
|
|
271
|
+
if task := asyncio.current_task():
|
|
272
|
+
_sdk_state.task_sessions.pop(task, None)
|
|
273
|
+
debug(f"[SDK] Cleared task-local session for task {task.get_name()}")
|
|
274
|
+
except RuntimeError:
|
|
275
|
+
# Not in async context, ignore
|
|
276
|
+
pass
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def set_thread_session(session_id: str) -> None:
|
|
280
|
+
"""Set session ID for current thread.
|
|
281
|
+
|
|
282
|
+
This provides true thread-local storage that doesn't inherit from parent thread.
|
|
283
|
+
"""
|
|
284
|
+
_sdk_state.thread_local.session_id = session_id
|
|
285
|
+
current_thread = threading.current_thread()
|
|
286
|
+
debug(f"[SDK] Set thread-local session {truncate_id(session_id)} for thread {current_thread.name}")
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def clear_thread_session() -> None:
|
|
290
|
+
"""Clear session ID for current thread."""
|
|
291
|
+
if hasattr(_sdk_state.thread_local, 'session_id'):
|
|
292
|
+
delattr(_sdk_state.thread_local, 'session_id')
|
|
293
|
+
current_thread = threading.current_thread()
|
|
294
|
+
debug(f"[SDK] Cleared thread-local session for thread {current_thread.name}")
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def get_thread_session() -> Optional[str]:
|
|
298
|
+
"""Get session ID from thread-local storage."""
|
|
299
|
+
return getattr(_sdk_state.thread_local, 'session_id', None)
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
def is_main_thread() -> bool:
|
|
303
|
+
"""Check if we're running in the main thread."""
|
|
304
|
+
return threading.current_thread() is threading.main_thread()
|
|
305
|
+
|
|
306
|
+
|
|
246
307
|
def get_session_id() -> Optional[str]:
|
|
247
|
-
"""Get the current session ID.
|
|
308
|
+
"""Get the current session ID.
|
|
309
|
+
|
|
310
|
+
Priority:
|
|
311
|
+
1. Task-local session (for async tasks)
|
|
312
|
+
2. Thread-local session (for threads) - NO FALLBACK for threads
|
|
313
|
+
3. SDK state session (for main thread)
|
|
314
|
+
4. Context variable session (fallback for main thread only)
|
|
315
|
+
"""
|
|
316
|
+
# First check task-local storage for async isolation
|
|
317
|
+
try:
|
|
318
|
+
if task := asyncio.current_task():
|
|
319
|
+
if task_session := _sdk_state.task_sessions.get(task):
|
|
320
|
+
debug(f"[SDK] Using task-local session {truncate_id(task_session)}")
|
|
321
|
+
return task_session
|
|
322
|
+
except RuntimeError:
|
|
323
|
+
# Not in async context
|
|
324
|
+
pass
|
|
325
|
+
|
|
326
|
+
# Check if we're in a thread
|
|
327
|
+
if not is_main_thread():
|
|
328
|
+
# For threads, ONLY use thread-local storage - no fallback!
|
|
329
|
+
# This prevents inheriting the parent thread's session
|
|
330
|
+
thread_session = get_thread_session()
|
|
331
|
+
if thread_session:
|
|
332
|
+
debug(f"[SDK] Using thread-local session {truncate_id(thread_session)}")
|
|
333
|
+
else:
|
|
334
|
+
debug(f"[SDK] Thread {threading.current_thread().name} has no thread-local session")
|
|
335
|
+
return thread_session # Return None if not set - don't fall back!
|
|
336
|
+
|
|
337
|
+
# For main thread only: fall back to SDK state or context variable
|
|
248
338
|
return _sdk_state.session_id or current_session_id.get()
|
|
249
339
|
|
|
250
340
|
|
|
@@ -149,6 +149,7 @@ class LucidicLiteLLMCallback(CustomLogger):
|
|
|
149
149
|
# Create event with correct field names
|
|
150
150
|
create_event(
|
|
151
151
|
type="llm_generation",
|
|
152
|
+
session_id=session_id, # Pass session_id explicitly
|
|
152
153
|
provider=provider,
|
|
153
154
|
model=model,
|
|
154
155
|
messages=messages,
|
|
@@ -210,6 +211,7 @@ class LucidicLiteLLMCallback(CustomLogger):
|
|
|
210
211
|
|
|
211
212
|
create_event(
|
|
212
213
|
type="error_traceback",
|
|
214
|
+
session_id=session_id, # Pass session_id explicitly
|
|
213
215
|
error=error_msg,
|
|
214
216
|
traceback="",
|
|
215
217
|
parent_event_id=parent_id, # This will be normalized by EventBuilder
|
|
@@ -41,11 +41,18 @@ class LucidicSpanExporter(SpanExporter):
|
|
|
41
41
|
if not detect_is_llm_span(span):
|
|
42
42
|
verbose(f"[Telemetry] Skipping non-LLM span: {span.name}")
|
|
43
43
|
return
|
|
44
|
-
|
|
44
|
+
|
|
45
45
|
debug(f"[Telemetry] Processing LLM span: {span.name}")
|
|
46
46
|
|
|
47
47
|
attributes = dict(span.attributes or {})
|
|
48
48
|
|
|
49
|
+
# Skip spans that are likely duplicates or incomplete
|
|
50
|
+
# Check if this is a responses.parse span that was already handled
|
|
51
|
+
if span.name == "openai.responses.create" and not attributes.get("lucidic.instrumented"):
|
|
52
|
+
# This might be from incorrect standard instrumentation
|
|
53
|
+
verbose(f"[Telemetry] Skipping potentially duplicate responses span without our marker")
|
|
54
|
+
return
|
|
55
|
+
|
|
49
56
|
# Resolve session id
|
|
50
57
|
target_session_id = attributes.get('lucidic.session_id')
|
|
51
58
|
if not target_session_id:
|
|
@@ -84,7 +91,18 @@ class LucidicSpanExporter(SpanExporter):
|
|
|
84
91
|
provider = self._detect_provider_name(attributes)
|
|
85
92
|
messages = extract_prompts(attributes) or []
|
|
86
93
|
params = self._extract_params(attributes)
|
|
87
|
-
output_text = extract_completions(span, attributes)
|
|
94
|
+
output_text = extract_completions(span, attributes)
|
|
95
|
+
|
|
96
|
+
# Skip spans with no meaningful output (likely incomplete or duplicate instrumentation)
|
|
97
|
+
if not output_text or output_text == "Response received":
|
|
98
|
+
# Only use "Response received" if we have other meaningful data
|
|
99
|
+
if not messages and not attributes.get("lucidic.instrumented"):
|
|
100
|
+
verbose(f"[Telemetry] Skipping span {span.name} with no meaningful content")
|
|
101
|
+
return
|
|
102
|
+
# Use a more descriptive default if we must
|
|
103
|
+
if not output_text:
|
|
104
|
+
output_text = "Response received"
|
|
105
|
+
|
|
88
106
|
input_tokens = self._extract_prompt_tokens(attributes)
|
|
89
107
|
output_tokens = self._extract_completion_tokens(attributes)
|
|
90
108
|
cost = self._calculate_cost(attributes)
|
|
@@ -99,9 +117,10 @@ class LucidicSpanExporter(SpanExporter):
|
|
|
99
117
|
|
|
100
118
|
try:
|
|
101
119
|
# Create immutable event via non-blocking queue
|
|
102
|
-
debug(f"[Telemetry] Creating LLM event with parent_id: {truncate_id(parent_id)}")
|
|
120
|
+
debug(f"[Telemetry] Creating LLM event with parent_id: {truncate_id(parent_id)}, session_id: {truncate_id(target_session_id)}")
|
|
103
121
|
event_id = create_event(
|
|
104
122
|
type="llm_generation",
|
|
123
|
+
session_id=target_session_id, # Pass the session_id explicitly
|
|
105
124
|
occurred_at=occurred_at,
|
|
106
125
|
duration=duration_seconds,
|
|
107
126
|
provider=provider,
|
|
@@ -155,14 +174,15 @@ class LucidicSpanExporter(SpanExporter):
|
|
|
155
174
|
|
|
156
175
|
# Create event
|
|
157
176
|
event_kwargs = {
|
|
177
|
+
'session_id': target_session_id, # Pass session_id explicitly
|
|
158
178
|
'description': description,
|
|
159
179
|
'result': "Processing...", # Will be updated when span ends
|
|
160
180
|
'model': model
|
|
161
181
|
}
|
|
162
|
-
|
|
182
|
+
|
|
163
183
|
if images:
|
|
164
184
|
event_kwargs['screenshots'] = images
|
|
165
|
-
|
|
185
|
+
|
|
166
186
|
return create_event(**event_kwargs)
|
|
167
187
|
|
|
168
188
|
except Exception as e:
|
|
@@ -225,31 +245,35 @@ class LucidicSpanExporter(SpanExporter):
|
|
|
225
245
|
}
|
|
226
246
|
|
|
227
247
|
def _extract_prompt_tokens(self, attributes: Dict[str, Any]) -> int:
|
|
228
|
-
return
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
)
|
|
248
|
+
# Check each attribute and return the first non-None value
|
|
249
|
+
value = attributes.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS)
|
|
250
|
+
if value is not None:
|
|
251
|
+
return value
|
|
252
|
+
value = attributes.get('gen_ai.usage.prompt_tokens')
|
|
253
|
+
if value is not None:
|
|
254
|
+
return value
|
|
255
|
+
value = attributes.get('gen_ai.usage.input_tokens')
|
|
256
|
+
if value is not None:
|
|
257
|
+
return value
|
|
258
|
+
return 0
|
|
233
259
|
|
|
234
260
|
def _extract_completion_tokens(self, attributes: Dict[str, Any]) -> int:
|
|
235
|
-
return
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
)
|
|
261
|
+
# Check each attribute and return the first non-None value
|
|
262
|
+
value = attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS)
|
|
263
|
+
if value is not None:
|
|
264
|
+
return value
|
|
265
|
+
value = attributes.get('gen_ai.usage.completion_tokens')
|
|
266
|
+
if value is not None:
|
|
267
|
+
return value
|
|
268
|
+
value = attributes.get('gen_ai.usage.output_tokens')
|
|
269
|
+
if value is not None:
|
|
270
|
+
return value
|
|
271
|
+
return 0
|
|
240
272
|
|
|
241
273
|
def _calculate_cost(self, attributes: Dict[str, Any]) -> Optional[float]:
|
|
242
|
-
prompt_tokens = (
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
attributes.get('gen_ai.usage.input_tokens') or 0
|
|
246
|
-
)
|
|
247
|
-
completion_tokens = (
|
|
248
|
-
attributes.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS) or
|
|
249
|
-
attributes.get('gen_ai.usage.completion_tokens') or
|
|
250
|
-
attributes.get('gen_ai.usage.output_tokens') or 0
|
|
251
|
-
)
|
|
252
|
-
total_tokens = (prompt_tokens or 0) + (completion_tokens or 0)
|
|
274
|
+
prompt_tokens = self._extract_prompt_tokens(attributes)
|
|
275
|
+
completion_tokens = self._extract_completion_tokens(attributes)
|
|
276
|
+
total_tokens = prompt_tokens + completion_tokens
|
|
253
277
|
if total_tokens > 0:
|
|
254
278
|
model = (
|
|
255
279
|
attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) or
|
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
"""OpenAI responses.parse instrumentation patch.
|
|
2
|
+
|
|
3
|
+
This module provides instrumentation for OpenAI's responses.parse API
|
|
4
|
+
which is not covered by the standard opentelemetry-instrumentation-openai package.
|
|
5
|
+
"""
|
|
6
|
+
import functools
|
|
7
|
+
import logging
|
|
8
|
+
import time
|
|
9
|
+
from typing import Any, Callable, Optional
|
|
10
|
+
|
|
11
|
+
from opentelemetry import trace
|
|
12
|
+
from opentelemetry.trace import Status, StatusCode, SpanKind
|
|
13
|
+
|
|
14
|
+
from ..sdk.context import current_session_id, current_parent_event_id
|
|
15
|
+
from ..utils.logger import debug, verbose, warning
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger("Lucidic")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class OpenAIResponsesPatcher:
|
|
21
|
+
"""Patches OpenAI client to instrument responses.parse method."""
|
|
22
|
+
|
|
23
|
+
def __init__(self, tracer_provider=None):
|
|
24
|
+
"""Initialize the patcher.
|
|
25
|
+
|
|
26
|
+
Args:
|
|
27
|
+
tracer_provider: OpenTelemetry TracerProvider to use
|
|
28
|
+
"""
|
|
29
|
+
self._tracer_provider = tracer_provider or trace.get_tracer_provider()
|
|
30
|
+
self._tracer = self._tracer_provider.get_tracer(__name__)
|
|
31
|
+
self._is_patched = False
|
|
32
|
+
self._original_parse = None
|
|
33
|
+
self._client_refs = [] # Keep track of patched clients for cleanup
|
|
34
|
+
|
|
35
|
+
def patch(self):
|
|
36
|
+
"""Apply the patch to OpenAI client initialization."""
|
|
37
|
+
if self._is_patched:
|
|
38
|
+
debug("[OpenAI Patch] responses.parse already patched")
|
|
39
|
+
return
|
|
40
|
+
|
|
41
|
+
try:
|
|
42
|
+
import openai
|
|
43
|
+
from openai import OpenAI
|
|
44
|
+
|
|
45
|
+
# Store the original __init__
|
|
46
|
+
original_init = OpenAI.__init__
|
|
47
|
+
|
|
48
|
+
@functools.wraps(original_init)
|
|
49
|
+
def patched_init(client_self, *args, **kwargs):
|
|
50
|
+
# Call original initialization
|
|
51
|
+
original_init(client_self, *args, **kwargs)
|
|
52
|
+
|
|
53
|
+
# Patch the responses.parse method on this specific instance
|
|
54
|
+
if hasattr(client_self, 'resources') and hasattr(client_self.resources, 'responses'):
|
|
55
|
+
responses = client_self.resources.responses
|
|
56
|
+
if hasattr(responses, 'parse'):
|
|
57
|
+
# Store original and apply wrapper
|
|
58
|
+
original_parse = responses.parse
|
|
59
|
+
responses.parse = self._create_parse_wrapper(original_parse)
|
|
60
|
+
|
|
61
|
+
# Track this client for cleanup
|
|
62
|
+
self._client_refs.append((responses, original_parse))
|
|
63
|
+
|
|
64
|
+
verbose("[OpenAI Patch] Patched responses.parse on client instance")
|
|
65
|
+
|
|
66
|
+
# Also patch the direct access if available
|
|
67
|
+
if hasattr(client_self, 'responses') and hasattr(client_self.responses, 'parse'):
|
|
68
|
+
original_parse = client_self.responses.parse
|
|
69
|
+
client_self.responses.parse = self._create_parse_wrapper(original_parse)
|
|
70
|
+
self._client_refs.append((client_self.responses, original_parse))
|
|
71
|
+
verbose("[OpenAI Patch] Patched client.responses.parse")
|
|
72
|
+
|
|
73
|
+
# Replace the __init__ method
|
|
74
|
+
OpenAI.__init__ = patched_init
|
|
75
|
+
self._original_init = original_init
|
|
76
|
+
self._is_patched = True
|
|
77
|
+
|
|
78
|
+
logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses.parse")
|
|
79
|
+
|
|
80
|
+
except ImportError:
|
|
81
|
+
logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
|
|
82
|
+
except Exception as e:
|
|
83
|
+
logger.error(f"[OpenAI Patch] Failed to patch responses.parse: {e}")
|
|
84
|
+
|
|
85
|
+
def _create_parse_wrapper(self, original_method: Callable) -> Callable:
    """Create an instrumented wrapper around ``responses.parse``.

    The wrapper opens a CLIENT span, records request attributes
    (model, temperature, prompt messages, response format), invokes the
    original method, then records response/usage attributes via
    ``_set_response_attributes``. On failure the exception is recorded
    on the span and re-raised; the exporter turns the span into an
    error event.

    Args:
        original_method: The original parse method to wrap.

    Returns:
        Wrapped method with OpenTelemetry instrumentation.
    """
    @functools.wraps(original_method)
    def wrapper(*args, **kwargs):
        # Accept positional arguments defensively and pass them through
        # unchanged (the original `**kwargs`-only signature would raise
        # TypeError on any positional call). Attribute extraction below
        # only inspects kwargs, which is how the OpenAI SDK is normally
        # called.
        with self._tracer.start_as_current_span(
            "openai.responses.parse",
            kind=SpanKind.CLIENT
        ) as span:
            start_time = time.time()

            try:
                # Extract request parameters for span attributes.
                model = kwargs.get('model', 'unknown')
                temperature = kwargs.get('temperature', 1.0)
                input_param = kwargs.get('input', [])
                text_format = kwargs.get('text_format')
                instructions = kwargs.get('instructions')

                # Normalize the `input` parameter to a message list.
                if isinstance(input_param, str):
                    messages = [{"role": "user", "content": input_param}]
                elif isinstance(input_param, list):
                    messages = input_param
                else:
                    messages = []

                # Standard gen_ai request attributes.
                span.set_attribute("gen_ai.system", "openai")
                span.set_attribute("gen_ai.request.model", model)
                span.set_attribute("gen_ai.request.temperature", temperature)
                span.set_attribute("gen_ai.operation.name", "responses.parse")

                # Unique markers so the exporter can recognize our
                # instrumentation (vs. the standard instrumentor's spans).
                span.set_attribute("lucidic.instrumented", "responses.parse")
                span.set_attribute("lucidic.patch.version", "1.0")

                if text_format and hasattr(text_format, '__name__'):
                    span.set_attribute("gen_ai.request.response_format", text_format.__name__)

                if instructions:
                    span.set_attribute("gen_ai.request.instructions", str(instructions))

                # Record every prompt message for proper event creation;
                # full content is always included (EventQueue handles
                # large messages downstream).
                for i, msg in enumerate(messages):
                    if isinstance(msg, dict):
                        role = msg.get('role', 'user')
                        content = msg.get('content', '')
                        span.set_attribute(f"gen_ai.prompt.{i}.role", role)
                        span.set_attribute(f"gen_ai.prompt.{i}.content", str(content))

                # Call through to the real parse().
                result = original_method(*args, **kwargs)

                # Record response/usage attributes for the exporter.
                self._set_response_attributes(span, result, model, messages, start_time, text_format)

                span.set_status(Status(StatusCode.OK))
                return result

            except Exception as e:
                # Record the failure on the span; the exporter creates
                # error events from failed spans, so we just re-raise.
                span.set_status(Status(StatusCode.ERROR, str(e)))
                span.record_exception(e)
                raise

    return wrapper
|
+
|
|
163
|
+
def _set_response_attributes(self, span, result, model: str, messages: list, start_time: float, text_format):
|
|
164
|
+
"""Set response attributes on the span for the exporter to use.
|
|
165
|
+
|
|
166
|
+
Args:
|
|
167
|
+
span: OpenTelemetry span
|
|
168
|
+
result: Response from OpenAI
|
|
169
|
+
model: Model name
|
|
170
|
+
messages: Input messages
|
|
171
|
+
start_time: Request start time
|
|
172
|
+
text_format: Response format (Pydantic model)
|
|
173
|
+
"""
|
|
174
|
+
duration = time.time() - start_time
|
|
175
|
+
|
|
176
|
+
# Extract output
|
|
177
|
+
output_text = None
|
|
178
|
+
|
|
179
|
+
# Handle structured output response
|
|
180
|
+
if hasattr(result, 'output_parsed'):
|
|
181
|
+
output_text = str(result.output_parsed)
|
|
182
|
+
|
|
183
|
+
# Always set completion attributes so the exporter can extract them
|
|
184
|
+
span.set_attribute("gen_ai.completion.0.role", "assistant")
|
|
185
|
+
span.set_attribute("gen_ai.completion.0.content", output_text)
|
|
186
|
+
|
|
187
|
+
# Handle usage data
|
|
188
|
+
if hasattr(result, 'usage'):
|
|
189
|
+
usage = result.usage
|
|
190
|
+
|
|
191
|
+
# Debug logging
|
|
192
|
+
debug(f"[OpenAI Patch] Usage object type: {type(usage)}")
|
|
193
|
+
debug(f"[OpenAI Patch] Usage attributes: {[attr for attr in dir(usage) if not attr.startswith('_')]}")
|
|
194
|
+
|
|
195
|
+
# Extract tokens with proper handling
|
|
196
|
+
prompt_tokens = None
|
|
197
|
+
completion_tokens = None
|
|
198
|
+
total_tokens = None
|
|
199
|
+
|
|
200
|
+
# Try different ways to access token data
|
|
201
|
+
if hasattr(usage, 'prompt_tokens'):
|
|
202
|
+
prompt_tokens = usage.prompt_tokens
|
|
203
|
+
elif hasattr(usage, 'input_tokens'):
|
|
204
|
+
prompt_tokens = usage.input_tokens
|
|
205
|
+
|
|
206
|
+
if hasattr(usage, 'completion_tokens'):
|
|
207
|
+
completion_tokens = usage.completion_tokens
|
|
208
|
+
elif hasattr(usage, 'output_tokens'):
|
|
209
|
+
completion_tokens = usage.output_tokens
|
|
210
|
+
|
|
211
|
+
if hasattr(usage, 'total_tokens'):
|
|
212
|
+
total_tokens = usage.total_tokens
|
|
213
|
+
elif prompt_tokens is not None and completion_tokens is not None:
|
|
214
|
+
total_tokens = prompt_tokens + completion_tokens
|
|
215
|
+
|
|
216
|
+
debug(f"[OpenAI Patch] Extracted tokens - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}")
|
|
217
|
+
|
|
218
|
+
# Set usage attributes on span
|
|
219
|
+
if prompt_tokens is not None:
|
|
220
|
+
span.set_attribute("gen_ai.usage.prompt_tokens", prompt_tokens)
|
|
221
|
+
if completion_tokens is not None:
|
|
222
|
+
span.set_attribute("gen_ai.usage.completion_tokens", completion_tokens)
|
|
223
|
+
if total_tokens is not None:
|
|
224
|
+
span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
|
|
225
|
+
|
|
226
|
+
# Set additional metadata for the exporter
|
|
227
|
+
if text_format and hasattr(text_format, '__name__'):
|
|
228
|
+
span.set_attribute("lucidic.response_format", text_format.__name__)
|
|
229
|
+
|
|
230
|
+
# Set duration as attribute
|
|
231
|
+
span.set_attribute("lucidic.duration_seconds", duration)
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
def _should_capture_content(self) -> bool:
|
|
235
|
+
"""Check if message content should be captured.
|
|
236
|
+
|
|
237
|
+
Returns:
|
|
238
|
+
True if content capture is enabled
|
|
239
|
+
"""
|
|
240
|
+
|
|
241
|
+
return True # always capture content for now
|
|
242
|
+
|
|
243
|
+
import os
|
|
244
|
+
# check OTEL standard env var
|
|
245
|
+
otel_capture = os.getenv('OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT', 'false')
|
|
246
|
+
# check Lucidic-specific env var
|
|
247
|
+
lucidic_capture = os.getenv('LUCIDIC_CAPTURE_CONTENT', 'false')
|
|
248
|
+
|
|
249
|
+
return otel_capture.lower() == 'true' or lucidic_capture.lower() == 'true'
|
|
250
|
+
|
|
251
|
+
def unpatch(self):
    """Remove the patch and restore original behavior.

    Restores ``OpenAI.__init__`` (if we captured it) and the ``parse``
    method on every client instance we tracked, then clears the
    tracking list. Safe to call when not patched (no-op).
    """
    if not self._is_patched:
        return

    try:
        # Restore original __init__ if we have it.
        if hasattr(self, '_original_init'):
            from openai import OpenAI
            OpenAI.__init__ = self._original_init

        # Restore original parse methods on tracked clients.
        for responses_obj, original_parse in self._client_refs:
            try:
                responses_obj.parse = original_parse
            except Exception:
                # Narrowed from a bare `except:` (which would also
                # swallow SystemExit/KeyboardInterrupt). Best-effort:
                # the client might have been garbage collected.
                pass

        self._client_refs.clear()
        self._is_patched = False

        logger.info("[OpenAI Patch] Successfully removed responses.parse patch")

    except Exception as e:
        logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
|
+
|
|
278
|
+
|
|
279
|
+
# Process-wide singleton; created lazily by get_responses_patcher().
_patcher_instance: Optional[OpenAIResponsesPatcher] = None


def get_responses_patcher(tracer_provider=None) -> OpenAIResponsesPatcher:
    """Return the process-wide patcher, creating it on first use.

    Args:
        tracer_provider: OpenTelemetry TracerProvider.

    Returns:
        The singleton patcher instance.
    """
    global _patcher_instance
    patcher = _patcher_instance
    if patcher is None:
        patcher = OpenAIResponsesPatcher(tracer_provider)
        _patcher_instance = patcher
    return patcher
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""Utility to uninstrument specific OpenAI methods to prevent duplicates.
|
|
2
|
+
|
|
3
|
+
This module helps prevent the standard OpenTelemetry instrumentation
|
|
4
|
+
from creating duplicate spans for methods we're handling ourselves.
|
|
5
|
+
"""
|
|
6
|
+
import logging
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger("Lucidic")
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def uninstrument_responses(openai_module):
    """Remove any incorrect instrumentation from the responses module.

    The standard OpenTelemetry instrumentation might try to instrument
    responses.create (which doesn't exist) or other responses methods.
    This function strips any such wrappers it can detect, restoring the
    original callables. Best-effort: any failure is logged at DEBUG and
    swallowed.

    Args:
        openai_module: The OpenAI module
    """
    targets = ('create', 'parse')

    def _drop_wrapper(owner, name, label):
        # Unwrap `name` on `owner` when it carries the __wrapped__
        # attribute left behind by functools.wraps-style wrappers.
        if not hasattr(owner, name):
            return
        candidate = getattr(owner, name)
        if hasattr(candidate, '__wrapped__'):
            setattr(owner, name, candidate.__wrapped__)
            logger.debug(f"[OpenAI Uninstrument] Removed wrapper from {label}.{name}")

    try:
        resources = getattr(openai_module, 'resources', None)
        if resources is None:
            return
        responses = getattr(resources, 'responses', None)
        if responses is None:
            return

        for name in targets:
            # Pattern 1: wrapped function exposing __wrapped__.
            _drop_wrapper(responses, name, "responses")

            # Pattern 2: original stashed under _original_<name>.
            backup_attr = f'_original_{name}'
            if hasattr(responses, backup_attr):
                setattr(responses, name, getattr(responses, backup_attr))
                delattr(responses, backup_attr)
                logger.debug(f"[OpenAI Uninstrument] Restored original responses.{name}")

        # The Responses class itself may also have been wrapped.
        responses_cls = getattr(responses, 'Responses', None)
        if responses_cls is not None:
            for name in targets:
                _drop_wrapper(responses_cls, name, "Responses")

    except Exception as e:
        logger.debug(f"[OpenAI Uninstrument] Error while checking responses instrumentation: {e}")
+
|
|
68
|
+
|
|
69
|
+
def clean_openai_instrumentation():
    """Clean up any problematic OpenAI instrumentation.

    This should be called after standard instrumentation but before our
    patches, so the standard instrumentor's (possibly incorrect)
    wrappers around the responses API are removed first. A missing
    openai package is a silent no-op; any other failure is logged at
    DEBUG and swallowed.
    """
    try:
        import openai
        uninstrument_responses(openai)
        # Note: client instances created via OpenAI.__init__ are handled
        # by our own patcher; the previous no-op `if hasattr(openai,
        # 'OpenAI'): pass` dead branch was removed.

    except ImportError:
        pass  # OpenAI not installed
    except Exception as e:
        logger.debug(f"[OpenAI Uninstrument] Error during cleanup: {e}")
|
|
@@ -55,7 +55,22 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
|
|
|
55
55
|
inst.instrument(tracer_provider=tracer_provider, enrich_token_usage=True)
|
|
56
56
|
_global_instrumentors["openai"] = inst
|
|
57
57
|
new_instrumentors["openai"] = inst
|
|
58
|
-
|
|
58
|
+
|
|
59
|
+
# Clean up any problematic instrumentation from standard library
|
|
60
|
+
from .openai_uninstrument import clean_openai_instrumentation
|
|
61
|
+
clean_openai_instrumentation()
|
|
62
|
+
|
|
63
|
+
# Add patch for responses.parse (not covered by standard instrumentation)
|
|
64
|
+
import os
|
|
65
|
+
if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
|
|
66
|
+
from .openai_patch import get_responses_patcher
|
|
67
|
+
patcher = get_responses_patcher(tracer_provider)
|
|
68
|
+
patcher.patch()
|
|
69
|
+
_global_instrumentors["openai_responses_patch"] = patcher
|
|
70
|
+
else:
|
|
71
|
+
logger.info("[Telemetry] Skipping responses.parse patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
|
|
72
|
+
|
|
73
|
+
logger.info("[Telemetry] Instrumented OpenAI (including responses.parse)")
|
|
59
74
|
except Exception as e:
|
|
60
75
|
logger.error(f"Failed to instrument OpenAI: {e}")
|
|
61
76
|
|
|
@@ -34,6 +34,8 @@ lucidicai/telemetry/extract.py
|
|
|
34
34
|
lucidicai/telemetry/litellm_bridge.py
|
|
35
35
|
lucidicai/telemetry/lucidic_exporter.py
|
|
36
36
|
lucidicai/telemetry/openai_agents_instrumentor.py
|
|
37
|
+
lucidicai/telemetry/openai_patch.py
|
|
38
|
+
lucidicai/telemetry/openai_uninstrument.py
|
|
37
39
|
lucidicai/telemetry/telemetry_init.py
|
|
38
40
|
lucidicai/telemetry/utils/__init__.py
|
|
39
41
|
lucidicai/telemetry/utils/model_pricing.py
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|