opentelemetry-instrumentation-openai 0.40.14__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of opentelemetry-instrumentation-openai has been flagged; see the registry listing for details.
Files changed (22):
  1. opentelemetry/instrumentation/openai/__init__.py +3 -2
  2. opentelemetry/instrumentation/openai/shared/__init__.py +125 -28
  3. opentelemetry/instrumentation/openai/shared/chat_wrappers.py +191 -55
  4. opentelemetry/instrumentation/openai/shared/completion_wrappers.py +93 -36
  5. opentelemetry/instrumentation/openai/shared/config.py +8 -2
  6. opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +79 -28
  7. opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  8. opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  9. opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +1 -1
  10. opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
  11. opentelemetry/instrumentation/openai/utils.py +30 -4
  12. opentelemetry/instrumentation/openai/v0/__init__.py +31 -11
  13. opentelemetry/instrumentation/openai/v1/__init__.py +176 -69
  14. opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +121 -42
  15. opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +31 -15
  16. opentelemetry/instrumentation/openai/v1/responses_wrappers.py +623 -0
  17. opentelemetry/instrumentation/openai/version.py +1 -1
  18. {opentelemetry_instrumentation_openai-0.40.14.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/METADATA +2 -2
  19. opentelemetry_instrumentation_openai-0.41.0.dist-info/RECORD +21 -0
  20. opentelemetry_instrumentation_openai-0.40.14.dist-info/RECORD +0 -17
  21. {opentelemetry_instrumentation_openai-0.40.14.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/WHEEL +0 -0
  22. {opentelemetry_instrumentation_openai-0.40.14.dist-info → opentelemetry_instrumentation_openai-0.41.0.dist-info}/entry_points.txt +0 -0
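
The headline change in 0.41.0 is an opt-in migration from span-attribute prompt capture to the OpenTelemetry GenAI event conventions: new event_emitter and event_models modules, `use_legacy_attributes` and `event_logger` fields on Config, an ERROR_TYPE attribute plus record_exception on every failure path, and a new responses_wrappers module. As a minimal sketch (not taken from this diff, and assuming an opentelemetry-api recent enough to ship the _events API), an application could opt in like this:

from opentelemetry._events import get_event_logger

from opentelemetry.instrumentation.openai.shared.config import Config

# Both conditions checked by should_emit_events() (see utils.py at the end of
# this diff) must hold: legacy attributes off, and a real EventLogger installed.
Config.use_legacy_attributes = False
Config.event_logger = get_event_logger("opentelemetry.instrumentation.openai")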
opentelemetry/instrumentation/openai/shared/completion_wrappers.py
@@ -1,37 +1,42 @@
 import logging
 
 from opentelemetry import context as context_api
-
-from opentelemetry.semconv_ai import (
-    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
-    SpanAttributes,
-    LLMRequestTypeValues,
-)
-
-from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
-from opentelemetry.instrumentation.openai.utils import _with_tracer_wrapper, dont_throw
 from opentelemetry.instrumentation.openai.shared import (
     _set_client_attributes,
-    _set_request_attributes,
-    _set_span_attribute,
     _set_functions_attributes,
+    _set_request_attributes,
     _set_response_attributes,
+    _set_span_attribute,
+    _set_span_stream_usage,
+    get_token_count_from_string,
     is_streaming_response,
-    should_send_prompts,
     model_as_dict,
-    should_record_stream_token_usage,
-    get_token_count_from_string,
-    _set_span_stream_usage,
     propagate_trace_context,
+    should_record_stream_token_usage,
+)
+from opentelemetry.instrumentation.openai.shared.config import Config
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
+from opentelemetry.instrumentation.openai.shared.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from opentelemetry.instrumentation.openai.utils import (
+    _with_tracer_wrapper,
+    dont_throw,
+    is_openai_v1,
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.semconv_ai import (
+    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+    LLMRequestTypeValues,
+    SpanAttributes,
 )
-
-from opentelemetry.instrumentation.openai.utils import is_openai_v1
-
 from opentelemetry.trace import SpanKind
 from opentelemetry.trace.status import Status, StatusCode
 
-from opentelemetry.instrumentation.openai.shared.config import Config
-
 SPAN_NAME = "openai.completion"
 LLM_REQUEST_TYPE = LLMRequestTypeValues.COMPLETION
 
@@ -53,18 +58,21 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
     )
 
     _handle_request(span, kwargs, instance)
+
     try:
         response = wrapped(*args, **kwargs)
     except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
-        raise e
+        raise
 
     if is_streaming_response(response):
         # span will be closed after the generator is done
         return _build_from_streaming_response(span, kwargs, response)
     else:
-        _handle_response(response, span)
+        _handle_response(response, span, instance)
 
     span.end()
     return response
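
Two error-path changes recur in every wrapper in this release: the span now records the exception (an ERROR_TYPE attribute plus record_exception), and `raise e` becomes a bare `raise`. The latter is not cosmetic: in Python 3, `raise e` adds an extra traceback entry pointing at the re-raise line, while a bare `raise` propagates the active exception with its traceback untouched. A standalone illustration:

import traceback

def inner():
    raise ValueError("boom")

def reraise_by_name():
    try:
        inner()
    except ValueError as e:
        raise e  # the traceback gains an entry for this line

def bare_reraise():
    try:
        inner()
    except ValueError:
        raise  # the original traceback is propagated unchanged

for fn in (reraise_by_name, bare_reraise):
    try:
        fn()
    except ValueError:
        traceback.print_exc()  # compare the entries reported for each variant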
@@ -84,18 +92,21 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
     )
 
     _handle_request(span, kwargs, instance)
+
    try:
        response = await wrapped(*args, **kwargs)
    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
-        raise e
+        raise
 
    if is_streaming_response(response):
        # span will be closed after the generator is done
        return _abuild_from_streaming_response(span, kwargs, response)
    else:
-        _handle_response(response, span)
+        _handle_response(response, span, instance)
 
    span.end()
    return response
@@ -103,26 +114,41 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
 
 @dont_throw
 def _handle_request(span, kwargs, instance):
-    _set_request_attributes(span, kwargs)
-    if should_send_prompts():
-        _set_prompts(span, kwargs.get("prompt"))
-        _set_functions_attributes(span, kwargs.get("functions"))
+    _set_request_attributes(span, kwargs, instance)
+    if should_emit_events():
+        _emit_prompts_events(kwargs)
+    else:
+        if should_send_prompts():
+            _set_prompts(span, kwargs.get("prompt"))
+            _set_functions_attributes(span, kwargs.get("functions"))
     _set_client_attributes(span, instance)
     if Config.enable_trace_context_propagation:
         propagate_trace_context(span, kwargs)
 
 
+def _emit_prompts_events(kwargs):
+    prompt = kwargs.get("prompt")
+    if isinstance(prompt, list):
+        for p in prompt:
+            emit_event(MessageEvent(content=p))
+    elif isinstance(prompt, str):
+        emit_event(MessageEvent(content=prompt))
+
+
 @dont_throw
-def _handle_response(response, span):
+def _handle_response(response, span, instance=None):
     if is_openai_v1():
         response_dict = model_as_dict(response)
     else:
         response_dict = response
 
     _set_response_attributes(span, response_dict)
-
-    if should_send_prompts():
-        _set_completions(span, response_dict.get("choices"))
+    if should_emit_events():
+        for choice in response.choices:
+            emit_event(_parse_choice_event(choice))
+    else:
+        if should_send_prompts():
+            _set_completions(span, response_dict.get("choices"))
 
 
 def _set_prompts(span, prompt):
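
The handlers now branch once per call: when should_emit_events() is true, prompts become gen_ai.user.message events and completions become gen_ai.choice events; otherwise the pre-0.41 should_send_prompts() span-attribute path runs unchanged. For example, a batch prompt (values hypothetical) fans out to one event per entry:

# kwargs as passed to client.completions.create(...)
kwargs = {"model": "gpt-3.5-turbo-instruct", "prompt": ["one", "two"]}

# _emit_prompts_events(kwargs) then behaves like:
for p in kwargs["prompt"]:
    emit_event(MessageEvent(content=p))  # role defaults to "user"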
@@ -161,8 +187,11 @@ def _build_from_streaming_response(span, request_kwargs, response):
 
     _set_token_usage(span, request_kwargs, complete_response)
 
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
+    if should_emit_events():
+        _emit_streaming_response_events(complete_response)
+    else:
+        if should_send_prompts():
+            _set_completions(span, complete_response.get("choices"))
 
     span.set_status(Status(StatusCode.OK))
     span.end()
@@ -179,13 +208,27 @@ async def _abuild_from_streaming_response(span, request_kwargs, response):
 
     _set_token_usage(span, request_kwargs, complete_response)
 
-    if should_send_prompts():
-        _set_completions(span, complete_response.get("choices"))
+    if should_emit_events():
+        _emit_streaming_response_events(complete_response)
+    else:
+        if should_send_prompts():
+            _set_completions(span, complete_response.get("choices"))
 
     span.set_status(Status(StatusCode.OK))
     span.end()
 
 
+def _emit_streaming_response_events(complete_response):
+    for i, choice in enumerate(complete_response["choices"]):
+        emit_event(
+            ChoiceEvent(
+                index=choice.get("index", i),
+                message={"content": choice.get("text"), "role": "assistant"},
+                finish_reason=choice.get("finish_reason", "unknown"),
+            )
+        )
+
+
 @dont_throw
 def _set_token_usage(span, request_kwargs, complete_response):
     # use tiktoken calculate token usage
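
Streaming responses are accumulated into plain dicts, so _emit_streaming_response_events reads fields through .get() with fallbacks; a chunked choice that never produced a finish_reason still yields a well-formed event. A hypothetical accumulated response:

complete_response = {"choices": [{"index": 0, "text": "Hi"}]}
_emit_streaming_response_events(complete_response)
# emits ChoiceEvent(index=0, message={"content": "Hi", "role": "assistant"},
#                   finish_reason="unknown")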
@@ -238,3 +281,17 @@ def _accumulate_streaming_response(complete_response, item):
         complete_choice["text"] += choice.get("text")
 
     return complete_response
+
+
+def _parse_choice_event(choice) -> ChoiceEvent:
+    has_message = choice.text is not None
+    has_finish_reason = choice.finish_reason is not None
+
+    content = choice.text if has_message else None
+    finish_reason = choice.finish_reason if has_finish_reason else "unknown"
+
+    return ChoiceEvent(
+        index=choice.index,
+        message={"content": content, "role": "assistant"},
+        finish_reason=finish_reason,
+    )
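
_parse_choice_event covers the non-streaming case, where choice is a typed object rather than a dict. A choice with text="Hello", index=0, finish_reason="stop" (values hypothetical) maps to:

ChoiceEvent(
    index=0,
    message={"content": "Hello", "role": "assistant"},
    finish_reason="stop",
)
# emitted as a gen_ai.choice event; _emit_choice_event() later drops the
# "assistant" role from the body, since the conventions treat it as the default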
opentelemetry/instrumentation/openai/shared/config.py
@@ -1,4 +1,6 @@
-from typing import Callable
+from typing import Callable, Optional
+
+from opentelemetry._events import EventLogger
 
 
 class Config:
@@ -6,5 +8,9 @@ class Config:
     enrich_assistant = False
     exception_logger = None
     get_common_metrics_attributes: Callable[[], dict] = lambda: {}
-    upload_base64_image: Callable[[str, str, str], str] = lambda trace_id, span_id, base64_image_url: str
+    upload_base64_image: Callable[[str, str, str], str] = (
+        lambda trace_id, span_id, base64_image_url: str
+    )
     enable_trace_context_propagation: bool = True
+    use_legacy_attributes = True
+    event_logger: Optional[EventLogger] = None
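
Note that the upload_base64_image reformatting is behavior-preserving, including its quirk: the default lambda returns the built-in type `str` itself rather than a string, so callers are still expected to install a real uploader:

default = lambda trace_id, span_id, base64_image_url: str
print(default("trace", "span", "data:image/png;base64,..."))  # <class 'str'>

Since use_legacy_attributes defaults to True, 0.41.0 keeps the legacy span-attribute behavior unless an application opts in as sketched near the top of this diff.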
opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
@@ -1,40 +1,46 @@
 import logging
 import time
+from collections.abc import Iterable
 
 from opentelemetry import context as context_api
-from opentelemetry.metrics import Counter, Histogram
-from opentelemetry.semconv_ai import (
-    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
-    SpanAttributes,
-    LLMRequestTypeValues,
-)
-
-from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
-from opentelemetry.instrumentation.openai.utils import (
-    dont_throw,
-    start_as_current_span_async,
-    _with_embeddings_telemetry_wrapper,
-)
 from opentelemetry.instrumentation.openai.shared import (
-    metric_shared_attributes,
+    OPENAI_LLM_USAGE_TOKEN_TYPES,
+    _get_openai_base_url,
     _set_client_attributes,
     _set_request_attributes,
-    _set_span_attribute,
     _set_response_attributes,
+    _set_span_attribute,
     _token_type,
-    should_send_prompts,
+    metric_shared_attributes,
     model_as_dict,
-    _get_openai_base_url,
-    OPENAI_LLM_USAGE_TOKEN_TYPES,
     propagate_trace_context,
 )
-
 from opentelemetry.instrumentation.openai.shared.config import Config
+from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event
+from opentelemetry.instrumentation.openai.shared.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from opentelemetry.instrumentation.openai.utils import (
+    _with_embeddings_telemetry_wrapper,
+    dont_throw,
+    is_openai_v1,
+    should_emit_events,
+    should_send_prompts,
+    start_as_current_span_async,
+)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.metrics import Counter, Histogram
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+from opentelemetry.semconv_ai import (
+    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+    LLMRequestTypeValues,
+    SpanAttributes,
+)
+from opentelemetry.trace import SpanKind, Status, StatusCode
 
-from opentelemetry.instrumentation.openai.utils import is_openai_v1
-
-from opentelemetry.trace import SpanKind
-from opentelemetry.trace import Status, StatusCode
+from openai._legacy_response import LegacyAPIResponse
+from openai.types.create_embedding_response import CreateEmbeddingResponse
 
 SPAN_NAME = "openai.embeddings"
 LLM_REQUEST_TYPE = LLMRequestTypeValues.EMBEDDING
@@ -84,10 +90,12 @@ def embeddings_wrapper(
             if exception_counter:
                 exception_counter.add(1, attributes=attributes)
 
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
             span.set_status(Status(StatusCode.ERROR, str(e)))
             span.end()
 
-            raise e
+            raise
 
         duration = end_time - start_time
 
@@ -128,6 +136,7 @@ async def aembeddings_wrapper(
         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
     ) as span:
         _handle_request(span, kwargs, instance)
+
         try:
             # record time for duration
             start_time = time.time()
@@ -146,12 +155,15 @@ async def aembeddings_wrapper(
             if exception_counter:
                 exception_counter.add(1, attributes=attributes)
 
+            span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+            span.record_exception(e)
             span.set_status(Status(StatusCode.ERROR, str(e)))
             span.end()
 
-            raise e
+            raise
 
         duration = end_time - start_time
+
         _handle_response(
             response,
             span,
@@ -167,10 +179,16 @@ async def aembeddings_wrapper(
 
 @dont_throw
 def _handle_request(span, kwargs, instance):
-    _set_request_attributes(span, kwargs)
-    if should_send_prompts():
-        _set_prompts(span, kwargs.get("input"))
+    _set_request_attributes(span, kwargs, instance)
+
+    if should_emit_events():
+        _emit_embeddings_message_event(kwargs.get("input"))
+    else:
+        if should_send_prompts():
+            _set_prompts(span, kwargs.get("input"))
+
     _set_client_attributes(span, instance)
+
     if Config.enable_trace_context_propagation:
         propagate_trace_context(span, kwargs)
 
@@ -201,6 +219,10 @@ def _handle_response(
     # span attributes
     _set_response_attributes(span, response_dict)
 
+    # emit events
+    if should_emit_events():
+        _emit_embeddings_choice_event(response)
+
 
 def _set_embeddings_metrics(
     instance,
@@ -255,3 +277,32 @@ def _set_prompts(span, prompt):
         f"{SpanAttributes.LLM_PROMPTS}.0.content",
         prompt,
     )
+
+
+def _emit_embeddings_message_event(embeddings) -> None:
+    if isinstance(embeddings, str):
+        emit_event(MessageEvent(content=embeddings))
+    elif isinstance(embeddings, Iterable):
+        for i in embeddings:
+            emit_event(MessageEvent(content=i))
+
+
+def _emit_embeddings_choice_event(response) -> None:
+    if isinstance(response, CreateEmbeddingResponse):
+        for embedding in response.data:
+            emit_event(
+                ChoiceEvent(
+                    index=embedding.index,
+                    message={"content": embedding.embedding, "role": "assistant"},
+                )
+            )
+
+    elif isinstance(response, LegacyAPIResponse):
+        parsed_response = response.parse()
+        for embedding in parsed_response.data:
+            emit_event(
+                ChoiceEvent(
+                    index=embedding.index,
+                    message={"content": embedding.embedding, "role": "assistant"},
+                )
+            )
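
In event mode, embeddings emit one gen_ai.user.message per input and one gen_ai.choice per returned vector, with the raw embedding as the message content. The string check must precede the Iterable check, since a str is itself iterable; otherwise a single prompt would be emitted character by character. Hypothetical calls:

_emit_embeddings_message_event("hello world")      # one event for the whole string
_emit_embeddings_message_event(["alpha", "beta"])  # two events, one per item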
opentelemetry/instrumentation/openai/shared/event_emitter.py (new file)
@@ -0,0 +1,100 @@
+from dataclasses import asdict
+from enum import Enum
+from typing import Union
+
+from opentelemetry._events import Event
+from opentelemetry.instrumentation.openai.shared.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from opentelemetry.instrumentation.openai.utils import (
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+from .config import Config
+
+
+class Roles(Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL = "tool"
+
+
+VALID_MESSAGE_ROLES = {role.value for role in Roles}
+"""The valid roles for naming the message event."""
+
+EVENT_ATTRIBUTES = {
+    GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value
+}
+"""The attributes to be used for the event."""
+
+
+def emit_event(event: Union[MessageEvent, ChoiceEvent]) -> None:
+    """
+    Emit an event to the OpenTelemetry SDK.
+
+    Args:
+        event: The event to emit.
+    """
+    if not should_emit_events():
+        return
+
+    if isinstance(event, MessageEvent):
+        _emit_message_event(event)
+    elif isinstance(event, ChoiceEvent):
+        _emit_choice_event(event)
+    else:
+        raise TypeError("Unsupported event type")
+
+
+def _emit_message_event(event: MessageEvent) -> None:
+    body = asdict(event)
+
+    if event.role in VALID_MESSAGE_ROLES:
+        name = "gen_ai.{}.message".format(event.role)
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to the "role" in the message name. So, remove the role from the body if
+        # it is the same as the one in the event name.
+        body.pop("role", None)
+    else:
+        name = "gen_ai.user.message"
+
+    # According to the semantic conventions, only the assistant role has tool calls
+    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+        del body["tool_calls"]
+    elif event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        del body["content"]
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    Config.event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+
+
+def _emit_choice_event(event: ChoiceEvent) -> None:
+    body = asdict(event)
+    if event.message["role"] == Roles.ASSISTANT.value:
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to "assistant", so remove the role from the body if it is "assistant".
+        body["message"].pop("role", None)
+
+    if event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        body["message"].pop("content", None)
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    Config.event_logger.emit(
+        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+    )
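
The net effect of the name selection and body pruning above, for some hypothetical events (assuming events are enabled as sketched earlier):

emit_event(MessageEvent(content="hi"))                       # -> gen_ai.user.message
emit_event(MessageEvent(content="be terse", role="system"))  # -> gen_ai.system.message
emit_event(MessageEvent(content="hi", role="moderator"))     # unknown role -> gen_ai.user.message
emit_event(ChoiceEvent(index=0, message={"content": "ok", "role": "assistant"}))  # -> gen_ai.choice

When TRACELOOP_TRACE_CONTENT disables content capture, the events still flow but their content (and any tool-call arguments) is stripped from the body.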
opentelemetry/instrumentation/openai/shared/event_models.py (new file)
@@ -0,0 +1,41 @@
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TypedDict
+
+
+class _FunctionToolCall(TypedDict):
+    function_name: str
+    arguments: Optional[dict[str, Any]]
+
+
+class ToolCall(TypedDict):
+    """Represents a tool call in the AI model."""
+
+    id: str
+    function: _FunctionToolCall
+    type: Literal["function"]
+
+
+class CompletionMessage(TypedDict):
+    """Represents a message in the AI model."""
+
+    content: Any
+    role: str = "assistant"
+
+
+@dataclass
+class MessageEvent:
+    """Represents an input event for the AI model."""
+
+    content: Any
+    role: str = "user"
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+@dataclass
+class ChoiceEvent:
+    """Represents a completion event for the AI model."""
+
+    index: int
+    message: CompletionMessage
+    finish_reason: str = "unknown"
+    tool_calls: Optional[List[ToolCall]] = None
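
A usage sketch for the new models (values hypothetical):

evt = ChoiceEvent(
    index=0,
    message={"content": "4", "role": "assistant"},
    finish_reason="stop",
)
emit_event(evt)  # -> gen_ai.choice

One caveat: PEP 589 disallows default values in TypedDict bodies, so the `role: str = "assistant"` annotation in CompletionMessage is rejected by type checkers and has no effect on instances, which are plain dicts; the wrappers above therefore always pass the role explicitly.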
opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py
@@ -47,7 +47,7 @@ def image_gen_metrics_wrapper(
         if exception_counter:
             exception_counter.add(1, attributes=attributes)
 
-        raise e
+        raise
 
     if is_openai_v1():
         response_dict = model_as_dict(response)
opentelemetry/instrumentation/openai/utils.py
@@ -1,16 +1,21 @@
 import asyncio
-from importlib.metadata import version
-from contextlib import asynccontextmanager
 import logging
 import os
 import threading
 import traceback
+from contextlib import asynccontextmanager
+from importlib.metadata import version
 
-import openai
+from opentelemetry import context as context_api
+from opentelemetry._events import EventLogger
 from opentelemetry.instrumentation.openai.shared.config import Config
 
+import openai
+
 _OPENAI_VERSION = version("openai")
 
+TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
+
 
 def is_openai_v1():
     return _OPENAI_VERSION >= "1.0.0"
@@ -34,7 +39,12 @@ def _with_image_gen_metric_wrapper(func):
     def _with_metric(duration_histogram, exception_counter):
         def wrapper(wrapped, instance, args, kwargs):
             return func(
-                duration_histogram, exception_counter, wrapped, instance, args, kwargs
+                duration_histogram,
+                exception_counter,
+                wrapped,
+                instance,
+                args,
+                kwargs,
             )
 
         return wrapper
@@ -157,3 +167,19 @@ def run_async(method):
         thread.join()
     else:
         asyncio.run(method)
+
+
+def should_send_prompts():
+    return (
+        os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
+    ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
+
+
+def should_emit_events() -> bool:
+    """
+    Checks if the instrumentation isn't using the legacy attributes
+    and if the event logger is not None.
+    """
+    return not Config.use_legacy_attributes and isinstance(
+        Config.event_logger, EventLogger
+    )
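
should_send_prompts() moved here from shared/__init__.py (hence the import churn in the wrapper modules above) and keeps its default-on behavior; should_emit_events() is the new gate for the event pathway. A behavior sketch (environment values hypothetical):

import os

os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
assert not should_send_prompts()   # content capture off...
del os.environ["TRACELOOP_TRACE_CONTENT"]
assert should_send_prompts()       # ...and on by default when the variable is unset

Config.use_legacy_attributes = False
Config.event_logger = None
assert not should_emit_events()    # a real EventLogger instance is still required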