opentelemetry-instrumentation-openai 0.34.1__py3-none-any.whl → 0.49.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of opentelemetry-instrumentation-openai might be problematic.

Files changed (22):
  1. opentelemetry/instrumentation/openai/__init__.py +11 -6
  2. opentelemetry/instrumentation/openai/shared/__init__.py +167 -68
  3. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +544 -231
  4. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +143 -81
  5. opentelemetry/instrumentation/openai/shared/config.py +8 -3
  6. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +91 -30
  7. opentelemetry/instrumentation/openai/shared/event_emitter.py +108 -0
  8. opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  9. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +1 -1
  10. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  11. opentelemetry/instrumentation/openai/utils.py +42 -9
  12. opentelemetry/instrumentation/openai/v0/__init__.py +32 -11
  13. opentelemetry/instrumentation/openai/v1/__init__.py +177 -69
  14. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +208 -109
  15. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +41 -19
  16. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +1073 -0
  17. opentelemetry/instrumentation/openai/version.py +1 -1
  18. {opentelemetry_instrumentation_openai-0.34.1.dist-info → opentelemetry_instrumentation_openai-0.49.3.dist-info}/METADATA +7 -8
  19. opentelemetry_instrumentation_openai-0.49.3.dist-info/RECORD +21 -0
  20. {opentelemetry_instrumentation_openai-0.34.1.dist-info → opentelemetry_instrumentation_openai-0.49.3.dist-info}/WHEEL +1 -1
  21. opentelemetry_instrumentation_openai-0.34.1.dist-info/RECORD +0 -17
  22. {opentelemetry_instrumentation_openai-0.34.1.dist-info → opentelemetry_instrumentation_openai-0.49.3.dist-info}/entry_points.txt +0 -0
opentelemetry/instrumentation/openai/v1/assistant_wrappers.py

@@ -1,17 +1,30 @@
 import logging
 import time
+
 from opentelemetry import context as context_api
+from opentelemetry import trace
 from opentelemetry.instrumentation.openai.shared import (
     _set_span_attribute,
     model_as_dict,
 )
-from opentelemetry.trace import SpanKind
-from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
-
-from opentelemetry.semconv_ai import SpanAttributes, LLMRequestTypeValues
-
-from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
 from opentelemetry.instrumentation.openai.shared.config import Config
+from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
+from opentelemetry.instrumentation.openai.shared.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from opentelemetry.instrumentation.openai.utils import (
+    _with_tracer_wrapper,
+    dont_throw,
+    should_emit_events,
+)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
+from opentelemetry.trace import SpanKind, Status, StatusCode
 
 from openai._legacy_response import LegacyAPIResponse
 from openai.types.beta.threads.run import Run
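
The import changes above swap the Traceloop-specific SpanAttributes.LLM_* constants for the incubating OpenTelemetry GenAI semantic conventions. A quick illustrative sketch of the string keys the new imports resolve to (values as published in opentelemetry-semantic-conventions; the print statements are only for demonstration):

    # Illustrative: the attribute keys behind the newly imported constants.
    from opentelemetry.semconv._incubating.attributes import (
        gen_ai_attributes as GenAIAttributes,
    )
    from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE

    print(GenAIAttributes.GEN_AI_SYSTEM)               # gen_ai.system
    print(GenAIAttributes.GEN_AI_REQUEST_MODEL)        # gen_ai.request.model
    print(GenAIAttributes.GEN_AI_RESPONSE_MODEL)       # gen_ai.response.model
    print(GenAIAttributes.GEN_AI_PROMPT)               # gen_ai.prompt
    print(GenAIAttributes.GEN_AI_COMPLETION)           # gen_ai.completion
    print(GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS)   # gen_ai.usage.input_tokens
    print(GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS)  # gen_ai.usage.output_tokens
    print(ERROR_TYPE)                                  # error.type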
@@ -45,17 +58,24 @@ def runs_create_wrapper(tracer, wrapped, instance, args, kwargs):
     thread_id = kwargs.get("thread_id")
     instructions = kwargs.get("instructions")
 
-    response = wrapped(*args, **kwargs)
-    response_dict = model_as_dict(response)
+    try:
+        response = wrapped(*args, **kwargs)
+        response_dict = model_as_dict(response)
 
-    runs[thread_id] = {
-        "start_time": time.time_ns(),
-        "assistant_id": kwargs.get("assistant_id"),
-        "instructions": instructions,
-        "run_id": response_dict.get("id"),
-    }
+        runs[thread_id] = {
+            "start_time": time.time_ns(),
+            "assistant_id": kwargs.get("assistant_id"),
+            "instructions": instructions,
+            "run_id": response_dict.get("id"),
+        }
 
-    return response
+        return response
+    except Exception as e:
+        runs[thread_id] = {
+            "exception": e,
+            "end_time": time.time_ns(),
+        }
+        raise
 
 
 @_with_tracer_wrapper
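
Note the shape of this change: no span exists yet at runs.create time, so the new except branch stashes the exception in the module-level runs table for messages_list_wrapper to report later. A minimal standalone sketch of that deferred-error pattern (the runs dict and the do_create callable here are illustrative stand-ins, not the package's API):

    import time

    runs = {}  # keyed by thread_id; read later when the span is finally created

    def create_run(thread_id, do_create):
        """Run the wrapped call; on failure, park the exception for later reporting."""
        try:
            response = do_create()
            runs[thread_id] = {"start_time": time.time_ns(), "run_id": response.get("id")}
            return response
        except Exception as e:
            # No span is open here, so remember the failure; the code that
            # eventually starts the span can set error.type and status from it.
            runs[thread_id] = {"exception": e, "end_time": time.time_ns()}
            raise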
@@ -77,10 +97,16 @@ def runs_retrieve_wrapper(tracer, wrapped, instance, args, kwargs):
     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
         return wrapped(*args, **kwargs)
 
-    response = wrapped(*args, **kwargs)
-    process_response(response)
-
-    return response
+    try:
+        response = wrapped(*args, **kwargs)
+        process_response(response)
+        return response
+    except Exception as e:
+        thread_id = kwargs.get("thread_id")
+        if thread_id in runs:
+            runs[thread_id]["exception"] = e
+            runs[thread_id]["end_time"] = time.time_ns()
+        raise
 
 
 @_with_tracer_wrapper
@@ -106,64 +132,114 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
         start_time=run.get("start_time"),
     )
 
-    i = 0
-    if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
-        if Config.enrich_assistant:
-            assistant = model_as_dict(
-                instance._client.beta.assistants.retrieve(run["assistant_id"])
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        if exception := run.get("exception"):
+            span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
+            span.record_exception(exception)
+            span.set_status(Status(StatusCode.ERROR, str(exception)))
+            span.end()
+            return response
+
+        prompt_index = 0
+        if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
+            if Config.enrich_assistant:
+                assistant = model_as_dict(
+                    instance._client.beta.assistants.retrieve(run["assistant_id"])
+                )
+                assistants[run["assistant_id"]] = assistant
+            else:
+                assistant = assistants[run["assistant_id"]]
+
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_SYSTEM,
+                "openai",
             )
-            assistants[run["assistant_id"]] = assistant
-        else:
-            assistant = assistants[run["assistant_id"]]
-
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_SYSTEM,
-            "openai",
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_REQUEST_MODEL,
-            assistant["model"],
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_RESPONSE_MODEL,
-            assistant["model"],
-        )
-        _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
-        _set_span_attribute(
-            span,
-            f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
-            assistant["instructions"],
-        )
-        i += 1
-    _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
-    _set_span_attribute(
-        span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", run["instructions"]
-    )
-
-    for i, msg in enumerate(messages):
-        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{i}"
-        content = msg.get("content")
-
-        _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
-        _set_span_attribute(
-            span, f"{prefix}.content", content[0].get("text").get("value")
-        )
-
-    if run.get("usage"):
-        usage_dict = model_as_dict(run.get("usage"))
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_REQUEST_MODEL,
+                assistant["model"],
+            )
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_RESPONSE_MODEL,
+                assistant["model"],
+            )
+            if should_emit_events():
+                emit_event(MessageEvent(content=assistant["instructions"], role="system"))
+            else:
+                _set_span_attribute(
+                    span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
+                )
+                _set_span_attribute(
+                    span,
+                    f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
+                    assistant["instructions"],
+                )
+                prompt_index += 1
         _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-            usage_dict.get("completion_tokens"),
+            span, f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role", "system"
         )
         _set_span_attribute(
             span,
-            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-            usage_dict.get("prompt_tokens"),
+            f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
+            run["instructions"],
         )
+        if should_emit_events():
+            emit_event(MessageEvent(content=run["instructions"], role="system"))
+        prompt_index += 1
+
+        completion_index = 0
+        for msg in messages:
+            prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{completion_index}"
+            content = msg.get("content")
+
+            message_content = content[0].get("text").get("value")
+            message_role = msg.get("role")
+            if message_role in ["user", "system"]:
+                if should_emit_events():
+                    emit_event(MessageEvent(content=message_content, role=message_role))
+                else:
+                    _set_span_attribute(
+                        span,
+                        f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role",
+                        message_role,
+                    )
+                    _set_span_attribute(
+                        span,
+                        f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
+                        message_content,
+                    )
+                    prompt_index += 1
+            else:
+                if should_emit_events():
+                    emit_event(
+                        ChoiceEvent(
+                            index=completion_index,
+                            message={"content": message_content, "role": message_role},
+                        )
+                    )
+                else:
+                    _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
+                    _set_span_attribute(span, f"{prefix}.content", message_content)
+                _set_span_attribute(
+                    span, f"gen_ai.response.{completion_index}.id", msg.get("id")
+                )
+                completion_index += 1
+
+        if run.get("usage"):
+            usage_dict = model_as_dict(run.get("usage"))
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
+                usage_dict.get("completion_tokens"),
+            )
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+                usage_dict.get("prompt_tokens"),
+            )
 
     span.end(run.get("end_time"))
 
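
The rewritten messages_list_wrapper now runs under trace.use_span so emitted events carry the span's trace context, and it branches on should_emit_events(): prompts and completions either become MessageEvent/ChoiceEvent log events or indexed gen_ai.prompt.{i}.* / gen_ai.completion.{i}.* span attributes. A condensed sketch of that dual-mode capture (the event class and helper names are illustrative stand-ins for the package's own):

    from dataclasses import dataclass

    @dataclass
    class MessageEvent:  # stand-in for the package's event model
        content: str
        role: str

    def capture_prompt(span, index, role, content, use_events, emit):
        """Emit a log event when enabled; otherwise flatten onto span attributes."""
        if use_events:
            emit(MessageEvent(content=content, role=role))
        else:
            span.set_attribute(f"gen_ai.prompt.{index}.role", role)
            span.set_attribute(f"gen_ai.prompt.{index}.content", content)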
@@ -184,47 +260,70 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
     )
 
-    i = 0
-    if assistants.get(assistant_id) is not None or Config.enrich_assistant:
-        if Config.enrich_assistant:
-            assistant = model_as_dict(
-                instance._client.beta.assistants.retrieve(assistant_id)
+    # Use the span as current context to ensure events get proper trace context
+    with trace.use_span(span, end_on_exit=False):
+        i = 0
+        if assistants.get(assistant_id) is not None or Config.enrich_assistant:
+            if Config.enrich_assistant:
+                assistant = model_as_dict(
+                    instance._client.beta.assistants.retrieve(assistant_id)
+                )
+                assistants[assistant_id] = assistant
+            else:
+                assistant = assistants[assistant_id]
+
+            _set_span_attribute(
+                span, GenAIAttributes.GEN_AI_REQUEST_MODEL, assistants[assistant_id]["model"]
+            )
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_SYSTEM,
+                "openai",
             )
-            assistants[assistant_id] = assistant
+            _set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_RESPONSE_MODEL,
+                assistants[assistant_id]["model"],
+            )
+            if should_emit_events():
+                emit_event(
+                    MessageEvent(
+                        content=assistants[assistant_id]["instructions"], role="system"
+                    )
+                )
+            else:
+                _set_span_attribute(
+                    span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system"
+                )
+                _set_span_attribute(
+                    span,
+                    f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content",
+                    assistants[assistant_id]["instructions"],
+                )
+                i += 1
+        if should_emit_events():
+            emit_event(MessageEvent(content=instructions, role="system"))
         else:
-            assistant = assistants[assistant_id]
+            _set_span_attribute(span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", "system")
+            _set_span_attribute(
+                span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", instructions
+            )
 
-    _set_span_attribute(
-        span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
+        from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
+            EventHandleWrapper,
         )
-    _set_span_attribute(
-        span,
-        SpanAttributes.LLM_SYSTEM,
-        "openai",
-    )
-    _set_span_attribute(
-        span,
-        SpanAttributes.LLM_RESPONSE_MODEL,
-        assistants[assistant_id]["model"],
-    )
-    _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
-    _set_span_attribute(
-        span,
-        f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
-        assistants[assistant_id]["instructions"],
-    )
-    i += 1
-    _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
-    _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions)
 
-    from opentelemetry.instrumentation.openai.v1.event_handler_wrapper import (
-        EventHandleWrapper,
-    )
-
-    kwargs["event_handler"] = EventHandleWrapper(
-        original_handler=kwargs["event_handler"], span=span
-    )
-
-    response = wrapped(*args, **kwargs)
+        kwargs["event_handler"] = EventHandleWrapper(
+            original_handler=kwargs["event_handler"],
+            span=span,
+        )
 
-    return response
+        try:
+            response = wrapped(*args, **kwargs)
+            return response
+        except Exception as e:
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
+            span.set_status(Status(StatusCode.ERROR, str(e)))
+            span.end()
+            raise
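
Since this span is started manually and stays open for the duration of the stream, the new except branch has to mark it failed and end it by hand before re-raising. A compact sketch of that manual error-status pattern (the tracer and span names are illustrative):

    from opentelemetry import trace
    from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
    from opentelemetry.trace import SpanKind, Status, StatusCode

    tracer = trace.get_tracer("example.instrumentation")  # illustrative name

    def call_with_manual_span(fn):
        span = tracer.start_span("assistant.run_stream", kind=SpanKind.CLIENT)
        try:
            return fn()  # on success the span is ended later, by the stream handler
        except Exception as e:
            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            span.end()  # end eagerly only on the failure path
            raise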
opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py

@@ -1,13 +1,18 @@
-from opentelemetry.instrumentation.openai.shared import (
-    _set_span_attribute,
+from opentelemetry.instrumentation.openai.shared import _set_span_attribute
+from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
+from opentelemetry.instrumentation.openai.shared.event_models import ChoiceEvent
+from opentelemetry.instrumentation.openai.utils import should_emit_events
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
 )
-from opentelemetry.semconv_ai import SpanAttributes
-from openai import AssistantEventHandler
+from opentelemetry.trace import Status, StatusCode
 from typing_extensions import override
 
+from openai import AssistantEventHandler
 
-class EventHandleWrapper(AssistantEventHandler):
 
+class EventHandleWrapper(AssistantEventHandler):
     _current_text_index = 0
     _prompt_tokens = 0
     _completion_tokens = 0
@@ -21,12 +26,12 @@ class EventHandleWrapper(AssistantEventHandler):
     def on_end(self):
         _set_span_attribute(
             self._span,
-            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+            GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
             self._prompt_tokens,
         )
         _set_span_attribute(
             self._span,
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+            GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
             self._completion_tokens,
         )
         self._original_handler.on_end()
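
on_end is where the token counts accumulated during streaming get flushed to the span, now under the gen_ai.usage.input_tokens / gen_ai.usage.output_tokens keys. A small sketch of the accumulate-then-flush idea (the class and method names are illustrative, not the package's API):

    class UsageAccumulator:
        """Collects token counts across stream chunks, writing them once at the end."""

        def __init__(self, span):
            self._span = span
            self._prompt_tokens = 0
            self._completion_tokens = 0

        def add(self, usage: dict):
            self._prompt_tokens += usage.get("prompt_tokens", 0)
            self._completion_tokens += usage.get("completion_tokens", 0)

        def flush(self):
            self._span.set_attribute("gen_ai.usage.input_tokens", self._prompt_tokens)
            self._span.set_attribute("gen_ai.usage.output_tokens", self._completion_tokens)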
@@ -65,6 +70,9 @@ class EventHandleWrapper(AssistantEventHandler):
 
     @override
     def on_exception(self, exception: Exception):
+        self._span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
+        self._span.record_exception(exception)
+        self._span.set_status(Status(StatusCode.ERROR, str(exception)))
         self._original_handler.on_exception(exception)
 
     @override
@@ -81,7 +89,22 @@ class EventHandleWrapper(AssistantEventHandler):
 
     @override
     def on_message_done(self, message):
+        _set_span_attribute(
+            self._span,
+            f"gen_ai.response.{self._current_text_index}.id",
+            message.id,
+        )
+        emit_event(
+            ChoiceEvent(
+                index=self._current_text_index,
+                message={
+                    "content": [item.model_dump() for item in message.content],
+                    "role": message.role,
+                },
+            )
+        )
         self._original_handler.on_message_done(message)
+        self._current_text_index += 1
 
     @override
     def on_text_created(self, text):
@@ -94,18 +117,17 @@ class EventHandleWrapper(AssistantEventHandler):
     @override
     def on_text_done(self, text):
         self._original_handler.on_text_done(text)
-        _set_span_attribute(
-            self._span,
-            f"{SpanAttributes.LLM_COMPLETIONS}.{self._current_text_index}.role",
-            "assistant",
-        )
-        _set_span_attribute(
-            self._span,
-            f"{SpanAttributes.LLM_COMPLETIONS}.{self._current_text_index}.content",
-            text.value,
-        )
-
-        self._current_text_index += 1
+        if not should_emit_events():
+            _set_span_attribute(
+                self._span,
+                f"{GenAIAttributes.GEN_AI_COMPLETION}.{self._current_text_index}.role",
+                "assistant",
+            )
+            _set_span_attribute(
+                self._span,
+                f"{GenAIAttributes.GEN_AI_COMPLETION}.{self._current_text_index}.content",
+                text.value,
+            )
 
     @override
     def on_image_file_done(self, image_file):
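
Throughout this file the wrapper observes the stream by delegating every callback to the user's original handler and recording telemetry on the side. A hedged sketch of that delegation pattern (the LoggingHandler subclass and its printout are illustrative, not part of the package):

    from typing_extensions import override
    from openai import AssistantEventHandler

    class LoggingHandler(AssistantEventHandler):
        """Forwards callbacks to the wrapped handler while observing completions."""

        def __init__(self, original_handler):
            super().__init__()
            self._original_handler = original_handler
            self._current_text_index = 0

        @override
        def on_text_done(self, text):
            self._original_handler.on_text_done(text)  # delegate first, observe second
            print(f"completion {self._current_text_index}: {text.value}")

        @override
        def on_message_done(self, message):
            self._original_handler.on_message_done(message)
            self._current_text_index += 1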