lmnr 0.6.18-py3-none-any.whl → 0.6.19-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +55 -20
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +23 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +442 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1024 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +297 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +308 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +358 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +319 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +132 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +626 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +1 -3
- lmnr/sdk/browser/browser_use_otel.py +1 -1
- lmnr/sdk/browser/patchright_otel.py +0 -14
- lmnr/sdk/browser/playwright_otel.py +16 -130
- lmnr/sdk/browser/pw_utils.py +45 -31
- lmnr/version.py +1 -1
- {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/METADATA +2 -5
- {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/RECORD +28 -11
- {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/WHEEL +1 -1
- {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/entry_points.txt +0 -0
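For orientation before the hunks below: the headline change in this release is that lmnr now vendors its own copy of the OpenAI instrumentation under lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai. The SDK normally registers this instrumentor itself (see tracing/_instrument_initializers.py in the list above), but a minimal sketch of driving it by hand may help when reading the first hunk. The import path is inferred from the file list and is not documented public API, so treat it as an assumption:

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# Import path inferred from the file list above (an assumption, not documented API).
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai.v1 import (
    OpenAIV1Instrumentor,
)

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))

# BaseInstrumentor.instrument() calls the _instrument() method shown in the
# first hunk below, wrapping the openai client methods with tracing wrappers.
OpenAIV1Instrumentor().instrument(tracer_provider=provider)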
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py
@@ -0,0 +1,358 @@
+from typing import Collection
+
+from opentelemetry._events import get_event_logger
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from ..shared.chat_wrappers import (
+    achat_wrapper,
+    chat_wrapper,
+)
+from ..shared.completion_wrappers import (
+    acompletion_wrapper,
+    completion_wrapper,
+)
+from ..shared.config import Config
+from ..shared.embeddings_wrappers import (
+    aembeddings_wrapper,
+    embeddings_wrapper,
+)
+from ..shared.image_gen_wrappers import (
+    image_gen_metrics_wrapper,
+)
+from ..utils import is_metrics_enabled
+from .assistant_wrappers import (
+    assistants_create_wrapper,
+    messages_list_wrapper,
+    runs_create_and_stream_wrapper,
+    runs_create_wrapper,
+    runs_retrieve_wrapper,
+)
+
+from .responses_wrappers import (
+    async_responses_cancel_wrapper,
+    async_responses_get_or_create_wrapper,
+    responses_cancel_wrapper,
+    responses_get_or_create_wrapper,
+)
+
+from ..version import __version__
+from opentelemetry.instrumentation.utils import unwrap
+from opentelemetry.metrics import get_meter
+from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
+from opentelemetry.semconv_ai import Meters
+from opentelemetry.trace import get_tracer
+from wrapt import wrap_function_wrapper
+
+
+_instruments = ("openai >= 1.0.0",)
+
+
+class OpenAIV1Instrumentor(BaseInstrumentor):
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _try_wrap(self, module, function, wrapper):
+        """
+        Wrap a function if it exists, otherwise do nothing.
+        This is useful for handling cases where the function is not available in
+        the older versions of the library.
+
+        Args:
+            module (str): The module to wrap, e.g. "openai.resources.chat.completions"
+            function (str): "Object.function" to wrap, e.g. "Completions.parse"
+            wrapper (callable): The wrapper to apply to the function.
+        """
+        try:
+            wrap_function_wrapper(module, function, wrapper)
+        except (AttributeError, ModuleNotFoundError):
+            pass
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, __version__, tracer_provider)
+
+        # meter and counters are inited here
+        meter_provider = kwargs.get("meter_provider")
+        meter = get_meter(__name__, __version__, meter_provider)
+
+        if not Config.use_legacy_attributes:
+            event_logger_provider = kwargs.get("event_logger_provider")
+            Config.event_logger = get_event_logger(
+                __name__, __version__, event_logger_provider=event_logger_provider
+            )
+
+        if is_metrics_enabled():
+            tokens_histogram = meter.create_histogram(
+                name=Meters.LLM_TOKEN_USAGE,
+                unit="token",
+                description="Measures number of input and output tokens used",
+            )
+
+            chat_choice_counter = meter.create_counter(
+                name=Meters.LLM_GENERATION_CHOICES,
+                unit="choice",
+                description="Number of choices returned by chat completions call",
+            )
+
+            duration_histogram = meter.create_histogram(
+                name=Meters.LLM_OPERATION_DURATION,
+                unit="s",
+                description="GenAI operation duration",
+            )
+
+            chat_exception_counter = meter.create_counter(
+                name=Meters.LLM_COMPLETIONS_EXCEPTIONS,
+                unit="time",
+                description="Number of exceptions occurred during chat completions",
+            )
+
+            streaming_time_to_first_token = meter.create_histogram(
+                name=GenAIMetrics.GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
+                unit="s",
+                description="Time to first token in streaming chat completions",
+            )
+            streaming_time_to_generate = meter.create_histogram(
+                name=Meters.LLM_STREAMING_TIME_TO_GENERATE,
+                unit="s",
+                description="Time between first token and completion in streaming chat completions",
+            )
+        else:
+            (
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ) = (None, None, None, None, None, None)
+
+        wrap_function_wrapper(
+            "openai.resources.chat.completions",
+            "Completions.create",
+            chat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+
+        wrap_function_wrapper(
+            "openai.resources.completions",
+            "Completions.create",
+            completion_wrapper(tracer),
+        )
+
+        if is_metrics_enabled():
+            embeddings_vector_size_counter = meter.create_counter(
+                name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE,
+                unit="element",
+                description="he size of returned vector",
+            )
+            embeddings_exception_counter = meter.create_counter(
+                name=Meters.LLM_EMBEDDINGS_EXCEPTIONS,
+                unit="time",
+                description="Number of exceptions occurred during embeddings operation",
+            )
+        else:
+            (
+                tokens_histogram,
+                embeddings_vector_size_counter,
+                embeddings_exception_counter,
+            ) = (None, None, None)
+
+        wrap_function_wrapper(
+            "openai.resources.embeddings",
+            "Embeddings.create",
+            embeddings_wrapper(
+                tracer,
+                tokens_histogram,
+                embeddings_vector_size_counter,
+                duration_histogram,
+                embeddings_exception_counter,
+            ),
+        )
+
+        wrap_function_wrapper(
+            "openai.resources.chat.completions",
+            "AsyncCompletions.create",
+            achat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+        wrap_function_wrapper(
+            "openai.resources.completions",
+            "AsyncCompletions.create",
+            acompletion_wrapper(tracer),
+        )
+        wrap_function_wrapper(
+            "openai.resources.embeddings",
+            "AsyncEmbeddings.create",
+            aembeddings_wrapper(
+                tracer,
+                tokens_histogram,
+                embeddings_vector_size_counter,
+                duration_histogram,
+                embeddings_exception_counter,
+            ),
+        )
+        # in newer versions, Completions.parse are out of beta
+        self._try_wrap(
+            "openai.resources.chat.completions",
+            "Completions.parse",
+            chat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+        self._try_wrap(
+            "openai.resources.chat.completions",
+            "AsyncCompletions.parse",
+            achat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+
+        if is_metrics_enabled():
+            image_gen_exception_counter = meter.create_counter(
+                name=Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS,
+                unit="time",
+                description="Number of exceptions occurred during image generations operation",
+            )
+        else:
+            image_gen_exception_counter = None
+
+        wrap_function_wrapper(
+            "openai.resources.images",
+            "Images.generate",
+            image_gen_metrics_wrapper(duration_histogram, image_gen_exception_counter),
+        )
+
+        # Beta APIs may not be available consistently in all versions
+        self._try_wrap(
+            "openai.resources.beta.assistants",
+            "Assistants.create",
+            assistants_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.beta.chat.completions",
+            "Completions.parse",
+            chat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+        self._try_wrap(
+            "openai.resources.beta.chat.completions",
+            "AsyncCompletions.parse",
+            achat_wrapper(
+                tracer,
+                tokens_histogram,
+                chat_choice_counter,
+                duration_histogram,
+                chat_exception_counter,
+                streaming_time_to_first_token,
+                streaming_time_to_generate,
+            ),
+        )
+        self._try_wrap(
+            "openai.resources.beta.threads.runs",
+            "Runs.create",
+            runs_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.beta.threads.runs",
+            "Runs.retrieve",
+            runs_retrieve_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.beta.threads.runs",
+            "Runs.create_and_stream",
+            runs_create_and_stream_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.beta.threads.messages",
+            "Messages.list",
+            messages_list_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "Responses.create",
+            responses_get_or_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "Responses.retrieve",
+            responses_get_or_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "Responses.cancel",
+            responses_cancel_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "AsyncResponses.create",
+            async_responses_get_or_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "AsyncResponses.retrieve",
+            async_responses_get_or_create_wrapper(tracer),
+        )
+        self._try_wrap(
+            "openai.resources.responses",
+            "AsyncResponses.cancel",
+            async_responses_cancel_wrapper(tracer),
+        )
+
+    def _uninstrument(self, **kwargs):
+        unwrap("openai.resources.chat.completions", "Completions.create")
+        unwrap("openai.resources.completions", "Completions.create")
+        unwrap("openai.resources.embeddings", "Embeddings.create")
+        unwrap("openai.resources.chat.completions", "AsyncCompletions.create")
+        unwrap("openai.resources.completions", "AsyncCompletions.create")
+        unwrap("openai.resources.embeddings", "AsyncEmbeddings.create")
+        unwrap("openai.resources.images", "Images.generate")
+
+        # Beta APIs may not be available consistently in all versions
+        try:
+            unwrap("openai.resources.beta.assistants", "Assistants.create")
+            unwrap("openai.resources.beta.chat.completions", "Completions.parse")
+            unwrap("openai.resources.beta.chat.completions", "AsyncCompletions.parse")
+            unwrap("openai.resources.beta.threads.runs", "Runs.create")
+            unwrap("openai.resources.beta.threads.runs", "Runs.retrieve")
+            unwrap("openai.resources.beta.threads.runs", "Runs.create_and_stream")
+            unwrap("openai.resources.beta.threads.messages", "Messages.list")
+            unwrap("openai.resources.responses", "Responses.create")
+            unwrap("openai.resources.responses", "Responses.retrieve")
+            unwrap("openai.resources.responses", "Responses.cancel")
+            unwrap("openai.resources.responses", "AsyncResponses.create")
+            unwrap("openai.resources.responses", "AsyncResponses.retrieve")
+            unwrap("openai.resources.responses", "AsyncResponses.cancel")
+        except ImportError:
+            pass
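The _try_wrap helper above is what lets one instrumentor span several openai releases: wrapping is attempted, and targets that a given version does not expose are silently skipped. Here is a standalone sketch of the same pattern; it uses stdlib json targets instead of openai ones so it runs anywhere, and the target names are illustrative, not from the package:

from wrapt import wrap_function_wrapper

def traced(wrapped, instance, args, kwargs):
    # wrapt wrapper signature: log the call, then delegate unchanged
    print(f"calling {wrapped.__name__}")
    return wrapped(*args, **kwargs)

targets = [
    ("json", "JSONEncoder.encode"),       # exists, so it gets wrapped
    ("json", "JSONEncoder.no_such_api"),  # missing, so it is skipped
]
for module, function in targets:
    try:
        wrap_function_wrapper(module, function, traced)
    except (AttributeError, ModuleNotFoundError):
        pass  # same fallback the instrumentor relies on for older openai versions

import json
json.dumps({"ok": True})  # prints "calling encode" via the wrapper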
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py
@@ -0,0 +1,319 @@
+import logging
+import time
+
+from opentelemetry import context as context_api
+from ..shared import (
+    _set_span_attribute,
+    model_as_dict,
+)
+from ..shared.config import Config
+from ..shared.event_emitter import emit_event
+from ..shared.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from ..utils import (
+    _with_tracer_wrapper,
+    dont_throw,
+    should_emit_events,
+)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
+from opentelemetry.trace import SpanKind, Status, StatusCode
+
+from openai._legacy_response import LegacyAPIResponse
+from openai.types.beta.threads.run import Run
+
+logger = logging.getLogger(__name__)
+
+assistants = {}
+runs = {}
+
+
+@_with_tracer_wrapper
+def assistants_create_wrapper(tracer, wrapped, instance, args, kwargs):
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    response = wrapped(*args, **kwargs)
+
+    assistants[response.id] = {
+        "model": kwargs.get("model"),
+        "instructions": kwargs.get("instructions"),
+    }
+
+    return response
+
+
+@_with_tracer_wrapper
+def runs_create_wrapper(tracer, wrapped, instance, args, kwargs):
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    thread_id = kwargs.get("thread_id")
+    instructions = kwargs.get("instructions")
+
+    try:
+        response = wrapped(*args, **kwargs)
+        response_dict = model_as_dict(response)
+
+        runs[thread_id] = {
+            "start_time": time.time_ns(),
+            "assistant_id": kwargs.get("assistant_id"),
+            "instructions": instructions,
+            "run_id": response_dict.get("id"),
+        }
+
+        return response
+    except Exception as e:
+        runs[thread_id] = {
+            "exception": e,
+            "end_time": time.time_ns(),
+        }
+        raise
+
+
+@_with_tracer_wrapper
+def runs_retrieve_wrapper(tracer, wrapped, instance, args, kwargs):
+    @dont_throw
+    def process_response(response):
+        if type(response) is LegacyAPIResponse:
+            parsed_response = response.parse()
+        else:
+            parsed_response = response
+        assert type(parsed_response) is Run
+
+        if parsed_response.thread_id in runs:
+            thread_id = parsed_response.thread_id
+            runs[thread_id]["end_time"] = time.time_ns()
+            if parsed_response.usage:
+                runs[thread_id]["usage"] = parsed_response.usage
+
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    try:
+        response = wrapped(*args, **kwargs)
+        process_response(response)
+        return response
+    except Exception as e:
+        thread_id = kwargs.get("thread_id")
+        if thread_id in runs:
+            runs[thread_id]["exception"] = e
+            runs[thread_id]["end_time"] = time.time_ns()
+        raise
+
+
+@_with_tracer_wrapper
+def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    id = kwargs.get("thread_id")
+
+    response = wrapped(*args, **kwargs)
+
+    response_dict = model_as_dict(response)
+    if id not in runs:
+        return response
+
+    run = runs[id]
+    messages = sorted(response_dict["data"], key=lambda x: x["created_at"])
+
+    span = tracer.start_span(
+        "openai.assistant.run",
+        kind=SpanKind.CLIENT,
+        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
+        start_time=run.get("start_time"),
+    )
+
+    if exception := run.get("exception"):
+        span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
+        span.record_exception(exception)
+        span.set_status(Status(StatusCode.ERROR, str(exception)))
+        span.end(run.get("end_time"))
+
+    prompt_index = 0
+    if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
+        if Config.enrich_assistant:
+            assistant = model_as_dict(
+                instance._client.beta.assistants.retrieve(run["assistant_id"])
+            )
+            assistants[run["assistant_id"]] = assistant
+        else:
+            assistant = assistants[run["assistant_id"]]
+
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_SYSTEM,
+            "openai",
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_REQUEST_MODEL,
+            assistant["model"],
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_RESPONSE_MODEL,
+            assistant["model"],
+        )
+        if should_emit_events():
+            emit_event(MessageEvent(content=assistant["instructions"], role="system"))
+        else:
+            _set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+            )
+            _set_span_attribute(
+                span,
+                f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                assistant["instructions"],
+            )
+        prompt_index += 1
+    _set_span_attribute(
+        span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
+    )
+    _set_span_attribute(
+        span,
+        f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+        run["instructions"],
+    )
+    emit_event(MessageEvent(content=run["instructions"], role="system"))
+    prompt_index += 1
+
+    completion_index = 0
+    for msg in messages:
+        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
+        content = msg.get("content")
+
+        message_content = content[0].get("text").get("value")
+        message_role = msg.get("role")
+        if message_role in ["user", "system"]:
+            if should_emit_events():
+                emit_event(MessageEvent(content=message_content, role=message_role))
+            else:
+                _set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
+                    message_role,
+                )
+                _set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
+                    message_content,
+                )
+            prompt_index += 1
+        else:
+            if should_emit_events():
+                emit_event(
+                    ChoiceEvent(
+                        index=completion_index,
+                        message={"content": message_content, "role": message_role},
+                    )
+                )
+            else:
+                _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
+                _set_span_attribute(span, f"{prefix}.content", message_content)
+                _set_span_attribute(
+                    span, f"gen_ai.response.{completion_index}.id", msg.get("id")
+                )
+            completion_index += 1
+
+    if run.get("usage"):
+        usage_dict = model_as_dict(run.get("usage"))
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+            usage_dict.get("completion_tokens"),
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+            usage_dict.get("prompt_tokens"),
+        )
+
+    span.end(run.get("end_time"))
+
+    return response
+
+
+@_with_tracer_wrapper
+def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+        return wrapped(*args, **kwargs)
+
+    assistant_id = kwargs.get("assistant_id")
+    instructions = kwargs.get("instructions")
+
+    span = tracer.start_span(
+        "openai.assistant.run_stream",
+        kind=SpanKind.CLIENT,
+        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
+    )
+
+    i = 0
+    if assistants.get(assistant_id) is not None or Config.enrich_assistant:
+        if Config.enrich_assistant:
+            assistant = model_as_dict(
+                instance._client.beta.assistants.retrieve(assistant_id)
+            )
+            assistants[assistant_id] = assistant
+        else:
+            assistant = assistants[assistant_id]
+
+        _set_span_attribute(
+            span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_SYSTEM,
+            "openai",
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_RESPONSE_MODEL,
+            assistants[assistant_id]["model"],
+        )
+        if should_emit_events():
+            emit_event(
+                MessageEvent(
+                    content=assistants[assistant_id]["instructions"], role="system"
+                )
+            )
+        else:
+            _set_span_attribute(
+                span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
+            )
+            _set_span_attribute(
+                span,
+                f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
+                assistants[assistant_id]["instructions"],
+            )
+        i += 1
+    if should_emit_events():
+        emit_event(MessageEvent(content=instructions, role="system"))
+    else:
+        _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
+        _set_span_attribute(
+            span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
+        )
+
+    from ..v1.event_handler_wrapper import (
+        EventHandlerWrapper,
+    )
+
+    kwargs["event_handler"] = EventHandlerWrapper(
+        original_handler=kwargs["event_handler"],
+        span=span,
+    )
+
+    try:
+        response = wrapped(*args, **kwargs)
+        return response
+    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
+        span.set_status(Status(StatusCode.ERROR, str(e)))
+        span.end()
+        raise
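Taken together, these wrappers reconstruct a single "openai.assistant.run" span out of several separate API calls, since the Assistants API has no single request/response pair: creation caches metadata, run creation stamps the start time, retrieval stamps the end time and usage, and listing the messages finally assembles and ends the span. A hedged sketch of the client flow being observed, using standard OpenAI Assistants beta calls; the model name and prompts are placeholders:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# assistants_create_wrapper caches model + instructions keyed by assistant id
assistant = client.beta.assistants.create(
    model="gpt-4o-mini",
    instructions="Answer tersely.",
)
thread = client.beta.threads.create()

# runs_create_wrapper records start_time / assistant_id / run_id keyed by thread id
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
)

# runs_retrieve_wrapper stamps end_time and usage once the run is polled
run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)

# messages_list_wrapper finally assembles and ends the "openai.assistant.run"
# span, attaching prompts, completions, and token usage gathered above
messages = client.beta.threads.messages.list(thread_id=thread.id)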