opentelemetry-instrumentation-openai 0.40.13__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-openai has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (22):
  1. opentelemetry/instrumentation/openai/__init__.py +3 -2
  2. opentelemetry/instrumentation/openai/shared/__init__.py +125 -28
  3. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +191 -55
  4. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +93 -36
  5. opentelemetry/instrumentation/openai/shared/config.py +8 -2
  6. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +79 -28
  7. opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  8. opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  9. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +1 -1
  10. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  11. opentelemetry/instrumentation/openai/utils.py +30 -4
  12. opentelemetry/instrumentation/openai/v0/__init__.py +31 -11
  13. opentelemetry/instrumentation/openai/v1/__init__.py +176 -69
  14. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +121 -42
  15. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +31 -15
  16. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +623 -0
  17. opentelemetry/instrumentation/openai/version.py +1 -1
  18. {opentelemetry_instrumentation_openai-0.40.13.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/METADATA +2 -2
  19. opentelemetry_instrumentation_openai-0.41.0.dist-info/RECORD +21 -0
  20. opentelemetry_instrumentation_openai-0.40.13.dist-info/RECORD +0 -17
  21. {opentelemetry_instrumentation_openai-0.40.13.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/WHEEL +0 -0
  22. {opentelemetry_instrumentation_openai-0.40.13.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/entry_points.txt +0 -0
@@ -1,26 +1,28 @@
1
1
  from typing import Collection
2
2
 
3
+ from opentelemetry._events import get_event_logger
3
4
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
4
- from opentelemetry.trace import get_tracer
5
- from opentelemetry.metrics import get_meter
6
- from wrapt import wrap_function_wrapper
7
- from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
8
-
9
5
  from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
10
- chat_wrapper,
11
6
  achat_wrapper,
7
+ chat_wrapper,
12
8
  )
13
9
  from opentelemetry.instrumentation.openai.shared.completion_wrappers import (
14
- completion_wrapper,
15
10
  acompletion_wrapper,
11
+ completion_wrapper,
16
12
  )
13
+ from opentelemetry.instrumentation.openai.shared.config import Config
17
14
  from opentelemetry.instrumentation.openai.shared.embeddings_wrappers import (
18
- embeddings_wrapper,
19
15
  aembeddings_wrapper,
16
+ embeddings_wrapper,
20
17
  )
21
18
  from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
22
19
  from opentelemetry.instrumentation.openai.version import __version__
20
+ from opentelemetry.instrumentation.utils import unwrap
21
+ from opentelemetry.metrics import get_meter
22
+ from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
23
23
  from opentelemetry.semconv_ai import Meters
24
+ from opentelemetry.trace import get_tracer
25
+ from wrapt import wrap_function_wrapper
24
26
 
25
27
  _instruments = ("openai >= 0.27.0", "openai < 1.0.0")
26
28
 
@@ -36,6 +38,12 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
36
38
  meter_provider = kwargs.get("meter_provider")
37
39
  meter = get_meter(__name__, __version__, meter_provider)
38
40
 
41
+ if not Config.use_legacy_attributes:
42
+ event_logger_provider = kwargs.get("event_logger_provider")
43
+ Config.event_logger = get_event_logger(
44
+ __name__, __version__, event_logger_provider=event_logger_provider
45
+ )
46
+
39
47
  if is_metrics_enabled():
40
48
  tokens_histogram = meter.create_histogram(
41
49
  name=Meters.LLM_TOKEN_USAGE,
@@ -99,9 +107,16 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
99
107
  embeddings_exception_counter,
100
108
  ) = (None, None, None)
101
109
 
102
- wrap_function_wrapper("openai", "Completion.create", completion_wrapper(tracer))
103
110
  wrap_function_wrapper(
104
- "openai", "Completion.acreate", acompletion_wrapper(tracer)
111
+ "openai",
112
+ "Completion.create",
113
+ completion_wrapper(tracer),
114
+ )
115
+
116
+ wrap_function_wrapper(
117
+ "openai",
118
+ "Completion.acreate",
119
+ acompletion_wrapper(tracer),
105
120
  )
106
121
  wrap_function_wrapper(
107
122
  "openai",
@@ -153,4 +168,9 @@ class OpenAIV0Instrumentor(BaseInstrumentor):
153
168
  )
154
169
 
155
170
  def _uninstrument(self, **kwargs):
156
- pass
171
+ unwrap("openai", "Completion.create")
172
+ unwrap("openai", "Completion.acreate")
173
+ unwrap("openai", "ChatCompletion.create")
174
+ unwrap("openai", "ChatCompletion.acreate")
175
+ unwrap("openai", "Embedding.create")
176
+ unwrap("openai", "Embedding.acreate")
@@ -1,40 +1,47 @@
1
1
  from typing import Collection
2
2
 
3
+ from opentelemetry._events import get_event_logger
3
4
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
4
- from opentelemetry.trace import get_tracer
5
-
6
- from opentelemetry.metrics import get_meter
7
- from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
8
-
9
- from wrapt import wrap_function_wrapper
10
-
11
5
  from opentelemetry.instrumentation.openai.shared.chat_wrappers import (
12
- chat_wrapper,
13
6
  achat_wrapper,
7
+ chat_wrapper,
14
8
  )
15
9
  from opentelemetry.instrumentation.openai.shared.completion_wrappers import (
16
- completion_wrapper,
17
10
  acompletion_wrapper,
11
+ completion_wrapper,
18
12
  )
13
+ from opentelemetry.instrumentation.openai.shared.config import Config
19
14
  from opentelemetry.instrumentation.openai.shared.embeddings_wrappers import (
20
- embeddings_wrapper,
21
15
  aembeddings_wrapper,
16
+ embeddings_wrapper,
22
17
  )
23
18
  from opentelemetry.instrumentation.openai.shared.image_gen_wrappers import (
24
19
  image_gen_metrics_wrapper,
25
20
  )
21
+ from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
26
22
  from opentelemetry.instrumentation.openai.v1.assistant_wrappers import (
27
23
  assistants_create_wrapper,
24
+ messages_list_wrapper,
25
+ runs_create_and_stream_wrapper,
28
26
  runs_create_wrapper,
29
27
  runs_retrieve_wrapper,
30
- runs_create_and_stream_wrapper,
31
- messages_list_wrapper,
32
28
  )
33
29
 
34
- from opentelemetry.instrumentation.openai.utils import is_metrics_enabled
35
- from opentelemetry.instrumentation.openai.version import __version__
30
+ from opentelemetry.instrumentation.openai.v1.responses_wrappers import (
31
+ async_responses_cancel_wrapper,
32
+ async_responses_get_or_create_wrapper,
33
+ responses_cancel_wrapper,
34
+ responses_get_or_create_wrapper,
35
+ )
36
36
 
37
+ from opentelemetry.instrumentation.openai.version import __version__
38
+ from opentelemetry.instrumentation.utils import unwrap
39
+ from opentelemetry.metrics import get_meter
40
+ from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
37
41
  from opentelemetry.semconv_ai import Meters
42
+ from opentelemetry.trace import get_tracer
43
+ from wrapt import wrap_function_wrapper
44
+
38
45
 
39
46
  _instruments = ("openai >= 1.0.0",)
40
47
 
@@ -43,6 +50,22 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
43
50
  def instrumentation_dependencies(self) -> Collection[str]:
44
51
  return _instruments
45
52
 
53
+ def _try_wrap(self, module, function, wrapper):
54
+ """
55
+ Wrap a function if it exists, otherwise do nothing.
56
+ This is useful for handling cases where the function is not available in
57
+ the older versions of the library.
58
+
59
+ Args:
60
+ module (str): The module to wrap, e.g. "openai.resources.chat.completions"
61
+ function (str): "Object.function" to wrap, e.g. "Completions.parse"
62
+ wrapper (callable): The wrapper to apply to the function.
63
+ """
64
+ try:
65
+ wrap_function_wrapper(module, function, wrapper)
66
+ except (AttributeError, ModuleNotFoundError):
67
+ pass
68
+
46
69
  def _instrument(self, **kwargs):
47
70
  tracer_provider = kwargs.get("tracer_provider")
48
71
  tracer = get_tracer(__name__, __version__, tracer_provider)
@@ -51,6 +74,12 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
51
74
  meter_provider = kwargs.get("meter_provider")
52
75
  meter = get_meter(__name__, __version__, meter_provider)
53
76
 
77
+ if not Config.use_legacy_attributes:
78
+ event_logger_provider = kwargs.get("event_logger_provider")
79
+ Config.event_logger = get_event_logger(
80
+ __name__, __version__, event_logger_provider=event_logger_provider
81
+ )
82
+
54
83
  if is_metrics_enabled():
55
84
  tokens_histogram = meter.create_histogram(
56
85
  name=Meters.LLM_TOKEN_USAGE,
@@ -175,6 +204,33 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
175
204
  embeddings_exception_counter,
176
205
  ),
177
206
  )
207
+ # in newer versions, Completions.parse are out of beta
208
+ self._try_wrap(
209
+ "openai.resources.chat.completions",
210
+ "Completions.parse",
211
+ chat_wrapper(
212
+ tracer,
213
+ tokens_histogram,
214
+ chat_choice_counter,
215
+ duration_histogram,
216
+ chat_exception_counter,
217
+ streaming_time_to_first_token,
218
+ streaming_time_to_generate,
219
+ ),
220
+ )
221
+ self._try_wrap(
222
+ "openai.resources.chat.completions",
223
+ "AsyncCompletions.parse",
224
+ achat_wrapper(
225
+ tracer,
226
+ tokens_histogram,
227
+ chat_choice_counter,
228
+ duration_histogram,
229
+ chat_exception_counter,
230
+ streaming_time_to_first_token,
231
+ streaming_time_to_generate,
232
+ ),
233
+ )
178
234
 
179
235
  if is_metrics_enabled():
180
236
  image_gen_exception_counter = meter.create_counter(
@@ -192,60 +248,111 @@ class OpenAIV1Instrumentor(BaseInstrumentor):
192
248
  )
193
249
 
194
250
  # Beta APIs may not be available consistently in all versions
195
- try:
196
- wrap_function_wrapper(
197
- "openai.resources.beta.assistants",
198
- "Assistants.create",
199
- assistants_create_wrapper(tracer),
200
- )
201
- wrap_function_wrapper(
202
- "openai.resources.beta.chat.completions",
203
- "Completions.parse",
204
- chat_wrapper(
205
- tracer,
206
- tokens_histogram,
207
- chat_choice_counter,
208
- duration_histogram,
209
- chat_exception_counter,
210
- streaming_time_to_first_token,
211
- streaming_time_to_generate,
212
- ),
213
- )
214
- wrap_function_wrapper(
215
- "openai.resources.beta.chat.completions",
216
- "AsyncCompletions.parse",
217
- achat_wrapper(
218
- tracer,
219
- tokens_histogram,
220
- chat_choice_counter,
221
- duration_histogram,
222
- chat_exception_counter,
223
- streaming_time_to_first_token,
224
- streaming_time_to_generate,
225
- ),
226
- )
227
- wrap_function_wrapper(
228
- "openai.resources.beta.threads.runs",
229
- "Runs.create",
230
- runs_create_wrapper(tracer),
231
- )
232
- wrap_function_wrapper(
233
- "openai.resources.beta.threads.runs",
234
- "Runs.retrieve",
235
- runs_retrieve_wrapper(tracer),
236
- )
237
- wrap_function_wrapper(
238
- "openai.resources.beta.threads.runs",
239
- "Runs.create_and_stream",
240
- runs_create_and_stream_wrapper(tracer),
241
- )
242
- wrap_function_wrapper(
243
- "openai.resources.beta.threads.messages",
244
- "Messages.list",
245
- messages_list_wrapper(tracer),
246
- )
247
- except (AttributeError, ModuleNotFoundError):
248
- pass
251
+ self._try_wrap(
252
+ "openai.resources.beta.assistants",
253
+ "Assistants.create",
254
+ assistants_create_wrapper(tracer),
255
+ )
256
+ self._try_wrap(
257
+ "openai.resources.beta.chat.completions",
258
+ "Completions.parse",
259
+ chat_wrapper(
260
+ tracer,
261
+ tokens_histogram,
262
+ chat_choice_counter,
263
+ duration_histogram,
264
+ chat_exception_counter,
265
+ streaming_time_to_first_token,
266
+ streaming_time_to_generate,
267
+ ),
268
+ )
269
+ self._try_wrap(
270
+ "openai.resources.beta.chat.completions",
271
+ "AsyncCompletions.parse",
272
+ achat_wrapper(
273
+ tracer,
274
+ tokens_histogram,
275
+ chat_choice_counter,
276
+ duration_histogram,
277
+ chat_exception_counter,
278
+ streaming_time_to_first_token,
279
+ streaming_time_to_generate,
280
+ ),
281
+ )
282
+ self._try_wrap(
283
+ "openai.resources.beta.threads.runs",
284
+ "Runs.create",
285
+ runs_create_wrapper(tracer),
286
+ )
287
+ self._try_wrap(
288
+ "openai.resources.beta.threads.runs",
289
+ "Runs.retrieve",
290
+ runs_retrieve_wrapper(tracer),
291
+ )
292
+ self._try_wrap(
293
+ "openai.resources.beta.threads.runs",
294
+ "Runs.create_and_stream",
295
+ runs_create_and_stream_wrapper(tracer),
296
+ )
297
+ self._try_wrap(
298
+ "openai.resources.beta.threads.messages",
299
+ "Messages.list",
300
+ messages_list_wrapper(tracer),
301
+ )
302
+ self._try_wrap(
303
+ "openai.resources.responses",
304
+ "Responses.create",
305
+ responses_get_or_create_wrapper(tracer),
306
+ )
307
+ self._try_wrap(
308
+ "openai.resources.responses",
309
+ "Responses.retrieve",
310
+ responses_get_or_create_wrapper(tracer),
311
+ )
312
+ self._try_wrap(
313
+ "openai.resources.responses",
314
+ "Responses.cancel",
315
+ responses_cancel_wrapper(tracer),
316
+ )
317
+ self._try_wrap(
318
+ "openai.resources.responses",
319
+ "AsyncResponses.create",
320
+ async_responses_get_or_create_wrapper(tracer),
321
+ )
322
+ self._try_wrap(
323
+ "openai.resources.responses",
324
+ "AsyncResponses.retrieve",
325
+ async_responses_get_or_create_wrapper(tracer),
326
+ )
327
+ self._try_wrap(
328
+ "openai.resources.responses",
329
+ "AsyncResponses.cancel",
330
+ async_responses_cancel_wrapper(tracer),
331
+ )
249
332
 
250
333
  def _uninstrument(self, **kwargs):
251
- pass
334
+ unwrap("openai.resources.chat.completions", "Completions.create")
335
+ unwrap("openai.resources.completions", "Completions.create")
336
+ unwrap("openai.resources.embeddings", "Embeddings.create")
337
+ unwrap("openai.resources.chat.completions", "AsyncCompletions.create")
338
+ unwrap("openai.resources.completions", "AsyncCompletions.create")
339
+ unwrap("openai.resources.embeddings", "AsyncEmbeddings.create")
340
+ unwrap("openai.resources.images", "Images.generate")
341
+
342
+ # Beta APIs may not be available consistently in all versions
343
+ try:
344
+ unwrap("openai.resources.beta.assistants", "Assistants.create")
345
+ unwrap("openai.resources.beta.chat.completions", "Completions.parse")
346
+ unwrap("openai.resources.beta.chat.completions", "AsyncCompletions.parse")
347
+ unwrap("openai.resources.beta.threads.runs", "Runs.create")
348
+ unwrap("openai.resources.beta.threads.runs", "Runs.retrieve")
349
+ unwrap("openai.resources.beta.threads.runs", "Runs.create_and_stream")
350
+ unwrap("openai.resources.beta.threads.messages", "Messages.list")
351
+ unwrap("openai.resources.responses", "Responses.create")
352
+ unwrap("openai.resources.responses", "Responses.retrieve")
353
+ unwrap("openai.resources.responses", "Responses.cancel")
354
+ unwrap("openai.resources.responses", "AsyncResponses.create")
355
+ unwrap("openai.resources.responses", "AsyncResponses.retrieve")
356
+ unwrap("openai.resources.responses", "AsyncResponses.cancel")
357
+ except ImportError:
358
+ pass
@@ -1,17 +1,26 @@
1
1
  import logging
2
2
  import time
3
+
3
4
  from opentelemetry import context as context_api
4
5
  from opentelemetry.instrumentation.openai.shared import (
5
6
  _set_span_attribute,
6
7
  model_as_dict,
7
8
  )
8
- from opentelemetry.trace import SpanKind
9
- from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
10
-
11
- from opentelemetry.semconv_ai import SpanAttributes, LLMRequestTypeValues
12
-
13
- from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
14
9
  from opentelemetry.instrumentation.openai.shared.config import Config
10
+ from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
11
+ from opentelemetry.instrumentation.openai.shared.event_models import (
12
+ ChoiceEvent,
13
+ MessageEvent,
14
+ )
15
+ from opentelemetry.instrumentation.openai.utils import (
16
+ _with_tracer_wrapper,
17
+ dont_throw,
18
+ should_emit_events,
19
+ )
20
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
21
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
22
+ from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
23
+ from opentelemetry.trace import SpanKind, Status, StatusCode
15
24
 
16
25
  from openai._legacy_response import LegacyAPIResponse
17
26
  from openai.types.beta.threads.run import Run
@@ -45,17 +54,24 @@ def runs_create_wrapper(tracer, wrapped, instance, args, kwargs):
45
54
  thread_id = kwargs.get("thread_id")
46
55
  instructions = kwargs.get("instructions")
47
56
 
48
- response = wrapped(*args, **kwargs)
49
- response_dict = model_as_dict(response)
57
+ try:
58
+ response = wrapped(*args, **kwargs)
59
+ response_dict = model_as_dict(response)
50
60
 
51
- runs[thread_id] = {
52
- "start_time": time.time_ns(),
53
- "assistant_id": kwargs.get("assistant_id"),
54
- "instructions": instructions,
55
- "run_id": response_dict.get("id"),
56
- }
61
+ runs[thread_id] = {
62
+ "start_time": time.time_ns(),
63
+ "assistant_id": kwargs.get("assistant_id"),
64
+ "instructions": instructions,
65
+ "run_id": response_dict.get("id"),
66
+ }
57
67
 
58
- return response
68
+ return response
69
+ except Exception as e:
70
+ runs[thread_id] = {
71
+ "exception": e,
72
+ "end_time": time.time_ns(),
73
+ }
74
+ raise
59
75
 
60
76
 
61
77
  @_with_tracer_wrapper
@@ -77,10 +93,16 @@ def runs_retrieve_wrapper(tracer, wrapped, instance, args, kwargs):
77
93
  if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
78
94
  return wrapped(*args, **kwargs)
79
95
 
80
- response = wrapped(*args, **kwargs)
81
- process_response(response)
82
-
83
- return response
96
+ try:
97
+ response = wrapped(*args, **kwargs)
98
+ process_response(response)
99
+ return response
100
+ except Exception as e:
101
+ thread_id = kwargs.get("thread_id")
102
+ if thread_id in runs:
103
+ runs[thread_id]["exception"] = e
104
+ runs[thread_id]["end_time"] = time.time_ns()
105
+ raise
84
106
 
85
107
 
86
108
  @_with_tracer_wrapper
@@ -105,6 +127,11 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
105
127
  attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
106
128
  start_time=run.get("start_time"),
107
129
  )
130
+ if exception := run.get("exception"):
131
+ span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
132
+ span.record_exception(exception)
133
+ span.set_status(Status(StatusCode.ERROR, str(exception)))
134
+ span.end(run.get("end_time"))
108
135
 
109
136
  prompt_index = 0
110
137
  if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
@@ -131,17 +158,27 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
131
158
  SpanAttributes.LLM_RESPONSE_MODEL,
132
159
  assistant["model"],
133
160
  )
134
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system")
135
- _set_span_attribute(
136
- span,
137
- f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
138
- assistant["instructions"],
139
- )
161
+ if should_emit_events():
162
+ emit_event(MessageEvent(content=assistant["instructions"], role="system"))
163
+ else:
164
+ _set_span_attribute(
165
+ span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
166
+ )
167
+ _set_span_attribute(
168
+ span,
169
+ f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
170
+ assistant["instructions"],
171
+ )
140
172
  prompt_index += 1
141
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system")
142
173
  _set_span_attribute(
143
- span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content", run["instructions"]
174
+ span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
144
175
  )
176
+ _set_span_attribute(
177
+ span,
178
+ f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
179
+ run["instructions"],
180
+ )
181
+ emit_event(MessageEvent(content=run["instructions"], role="system"))
145
182
  prompt_index += 1
146
183
 
147
184
  completion_index = 0
@@ -152,13 +189,34 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
152
189
  message_content = content[0].get("text").get("value")
153
190
  message_role = msg.get("role")
154
191
  if message_role in ["user", "system"]:
155
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", message_role)
156
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content", message_content)
192
+ if should_emit_events():
193
+ emit_event(MessageEvent(content=message_content, role=message_role))
194
+ else:
195
+ _set_span_attribute(
196
+ span,
197
+ f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
198
+ message_role,
199
+ )
200
+ _set_span_attribute(
201
+ span,
202
+ f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
203
+ message_content,
204
+ )
157
205
  prompt_index += 1
158
206
  else:
159
- _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
160
- _set_span_attribute(span, f"{prefix}.content", message_content)
161
- _set_span_attribute(span, f"gen_ai.response.{completion_index}.id", msg.get("id"))
207
+ if should_emit_events():
208
+ emit_event(
209
+ ChoiceEvent(
210
+ index=completion_index,
211
+ message={"content": message_content, "role": message_role},
212
+ )
213
+ )
214
+ else:
215
+ _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
216
+ _set_span_attribute(span, f"{prefix}.content", message_content)
217
+ _set_span_attribute(
218
+ span, f"gen_ai.response.{completion_index}.id", msg.get("id")
219
+ )
162
220
  completion_index += 1
163
221
 
164
222
  if run.get("usage"):
@@ -216,24 +274,45 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
216
274
  SpanAttributes.LLM_RESPONSE_MODEL,
217
275
  assistants[assistant_id]["model"],
218
276
  )
277
+ if should_emit_events():
278
+ emit_event(
279
+ MessageEvent(
280
+ content=assistants[assistant_id]["instructions"], role="system"
281
+ )
282
+ )
283
+ else:
284
+ _set_span_attribute(
285
+ span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
286
+ )
287
+ _set_span_attribute(
288
+ span,
289
+ f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
290
+ assistants[assistant_id]["instructions"],
291
+ )
292
+ i += 1
293
+ if should_emit_events():
294
+ emit_event(MessageEvent(content=instructions, role="system"))
295
+ else:
219
296
  _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
220
297
  _set_span_attribute(
221
- span,
222
- f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
223
- assistants[assistant_id]["instructions"],
298
+ span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
224
299
  )
225
- i += 1
226
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
227
- _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions)
228
300
 
229
301
  from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
230
302
  EventHandleWrapper,
231
303
  )
232
304
 
233
305
  kwargs["event_handler"] = EventHandleWrapper(
234
- original_handler=kwargs["event_handler"], span=span,
306
+ original_handler=kwargs["event_handler"],
307
+ span=span,
235
308
  )
236
309
 
237
- response = wrapped(*args, **kwargs)
238
-
239
- return response
310
+ try:
311
+ response = wrapped(*args, **kwargs)
312
+ return response
313
+ except Exception as e:
314
+ span.set_attribute(ERROR_TYPE, e.__class__.__name__)
315
+ span.record_exception(e)
316
+ span.set_status(Status(StatusCode.ERROR, str(e)))
317
+ span.end()
318
+ raise