opentelemetry-instrumentation-openai 0.36.1__py3-none-any.whl → 0.38.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Advisory: this version of opentelemetry-instrumentation-openai has been flagged as potentially problematic; consult the registry's advisory page for details before upgrading.

@@ -11,6 +11,7 @@ from opentelemetry.trace.propagation import set_span_in_context
11
11
  from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
12
12
 
13
13
  from opentelemetry.instrumentation.openai.shared.config import Config
14
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_RESPONSE_ID
14
15
  from opentelemetry.semconv_ai import SpanAttributes
15
16
  from opentelemetry.instrumentation.openai.utils import (
16
17
  dont_throw,
@@ -22,6 +23,8 @@ OPENAI_LLM_USAGE_TOKEN_TYPES = ["prompt_tokens", "completion_tokens"]
22
23
  PROMPT_FILTER_KEY = "prompt_filter_results"
23
24
  PROMPT_ERROR = "prompt_error"
24
25
 
26
+ _PYDANTIC_VERSION = version("pydantic")
27
+
25
28
  # tiktoken encodings map for different model, key is model_name, value is tiktoken encoding
26
29
  tiktoken_encodings = {}
27
30
 
@@ -149,6 +152,7 @@ def _set_response_attributes(span, response):
149
152
  return
150
153
 
151
154
  _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
155
+ _set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))
152
156
 
153
157
  _set_span_attribute(
154
158
  span,
@@ -156,7 +160,6 @@ def _set_response_attributes(span, response):
156
160
  response.get("system_fingerprint"),
157
161
  )
158
162
  _log_prompt_filter(span, response)
159
-
160
163
  usage = response.get("usage")
161
164
  if not usage:
162
165
  return
@@ -235,7 +238,7 @@ def is_streaming_response(response):
235
238
  def model_as_dict(model):
236
239
  if isinstance(model, dict):
237
240
  return model
238
- if version("pydantic") < "2.0.0":
241
+ if _PYDANTIC_VERSION < "2.0.0":
239
242
  return model.dict()
240
243
  if hasattr(model, "model_dump"):
241
244
  return model.model_dump()
@@ -700,7 +700,7 @@ def _build_from_streaming_response(
700
700
  start_time=None,
701
701
  request_kwargs=None,
702
702
  ):
703
- complete_response = {"choices": [], "model": ""}
703
+ complete_response = {"choices": [], "model": "", "id": ""}
704
704
 
705
705
  first_token = True
706
706
  time_of_first_token = start_time # will be updated when first token is received
@@ -767,7 +767,7 @@ async def _abuild_from_streaming_response(
767
767
  start_time=None,
768
768
  request_kwargs=None,
769
769
  ):
770
- complete_response = {"choices": [], "model": ""}
770
+ complete_response = {"choices": [], "model": "", "id": ""}
771
771
 
772
772
  first_token = True
773
773
  time_of_first_token = start_time # will be updated when first token is received
@@ -826,6 +826,7 @@ def _accumulate_stream_items(item, complete_response):
826
826
  item = model_as_dict(item)
827
827
 
828
828
  complete_response["model"] = item.get("model")
829
+ complete_response["id"] = item.get("id")
829
830
 
830
831
  # prompt filter results
831
832
  if item.get("prompt_filter_results"):
@@ -142,7 +142,7 @@ def _set_completions(span, choices):
142
142
 
143
143
  @dont_throw
144
144
  def _build_from_streaming_response(span, request_kwargs, response):
145
- complete_response = {"choices": [], "model": ""}
145
+ complete_response = {"choices": [], "model": "", "id": ""}
146
146
  for item in response:
147
147
  yield item
148
148
  _accumulate_streaming_response(complete_response, item)
@@ -160,7 +160,7 @@ def _build_from_streaming_response(span, request_kwargs, response):
160
160
 
161
161
  @dont_throw
162
162
  async def _abuild_from_streaming_response(span, request_kwargs, response):
163
- complete_response = {"choices": [], "model": ""}
163
+ complete_response = {"choices": [], "model": "", "id": ""}
164
164
  async for item in response:
165
165
  yield item
166
166
  _accumulate_streaming_response(complete_response, item)
@@ -215,7 +215,7 @@ def _accumulate_streaming_response(complete_response, item):
215
215
  item = model_as_dict(item)
216
216
 
217
217
  complete_response["model"] = item.get("model")
218
-
218
+ complete_response["id"] = item.get("id")
219
219
  for choice in item.get("choices"):
220
220
  index = choice.get("index")
221
221
  if len(complete_response.get("choices")) <= index:
@@ -9,9 +9,11 @@ import traceback
9
9
  import openai
10
10
  from opentelemetry.instrumentation.openai.shared.config import Config
11
11
 
12
+ _OPENAI_VERSION = version("openai")
13
+
12
14
 
13
15
  def is_openai_v1():
14
- return version("openai") >= "1.0.0"
16
+ return _OPENAI_VERSION >= "1.0.0"
15
17
 
16
18
 
17
19
  def is_azure_openai(instance):
@@ -151,6 +151,7 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
151
151
  _set_span_attribute(
152
152
  span, f"{prefix}.content", content[0].get("text").get("value")
153
153
  )
154
+ _set_span_attribute(span, f"gen_ai.response.{i}.id", msg.get("id"))
154
155
 
155
156
  if run.get("usage"):
156
157
  usage_dict = model_as_dict(run.get("usage"))
@@ -81,7 +81,13 @@ class EventHandleWrapper(AssistantEventHandler):
81
81
 
82
82
  @override
83
83
  def on_message_done(self, message):
84
+ _set_span_attribute(
85
+ self._span,
86
+ f"gen_ai.response.{self._current_text_index}.id",
87
+ message.id,
88
+ )
84
89
  self._original_handler.on_message_done(message)
90
+ self._current_text_index += 1
85
91
 
86
92
  @override
87
93
  def on_text_created(self, text):
@@ -105,8 +111,6 @@ class EventHandleWrapper(AssistantEventHandler):
105
111
  text.value,
106
112
  )
107
113
 
108
- self._current_text_index += 1
109
-
110
114
  @override
111
115
  def on_image_file_done(self, image_file):
112
116
  self._original_handler.on_image_file_done(image_file)
@@ -1 +1 @@
1
- __version__ = "0.36.1"
1
+ __version__ = "0.38.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: opentelemetry-instrumentation-openai
3
- Version: 0.36.1
3
+ Version: 0.38.0
4
4
  Summary: OpenTelemetry OpenAI instrumentation
5
5
  License: Apache-2.0
6
6
  Author: Gal Kleinman
@@ -1,17 +1,17 @@
1
1
  opentelemetry/instrumentation/openai/__init__.py,sha256=ly0ZPoOTAVRo1f0EeKfyTWj8_HLna3kZfIo9GWgUJk4,1888
2
- opentelemetry/instrumentation/openai/shared/__init__.py,sha256=HQ2qOn8fiinyUbbN55EO3aP8c33cM5IaWJ886bGiJac,9291
3
- opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=FiVCibvVkU1f6602HOaHodU9N95EDFOAKxmND3zhC2s,28241
4
- opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=P066vEEjc3tKNrnIPXi7Inc56mVhmJl5xEz1fG_vW-I,7037
2
+ opentelemetry/instrumentation/openai/shared/__init__.py,sha256=Ha5oEgK0VrdTu10wdxs-Dqwcv2fbqw1mqlXlsngH7kg,9493
3
+ opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=YYEYBSyqcOSlb5VNORs3LL-J749sk9ZZbOpCn8JikIU,28306
4
+ opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=de35aM1Bjsk3GsfNAdu009OF_mdeFB7H_jBsc4LOKY0,7101
5
5
  opentelemetry/instrumentation/openai/shared/config.py,sha256=dCQviJ1a5cpFlrP0HcKgE7lpiXB98ssnumLy_CIMibQ,355
6
6
  opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=ROnTujsXOrO3YVcXiV5Z-IifeuXbPOBrdqa3Ym6IDwI,7263
7
7
  opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py,sha256=A4qdJIeJdA45SfiLaFj3Vo0Ndcqqfuew1BDsGdnJU3E,2122
8
- opentelemetry/instrumentation/openai/utils.py,sha256=KCxCpos2-rmIucUdwzbqbJINhs65I4TKo1DGRN-3kGU,4051
8
+ opentelemetry/instrumentation/openai/utils.py,sha256=B085XRVvCjigZlRewFRwEFTG2hdx5gv5qVakVxgYaVo,4086
9
9
  opentelemetry/instrumentation/openai/v0/__init__.py,sha256=02-bXv0aZbscMYO2W3jsHpjU521vkVK5RzdfSeGXBzg,5475
10
10
  opentelemetry/instrumentation/openai/v1/__init__.py,sha256=B2Ut0X14KQS5in_0jLBMCvXQK73SqBy2rBFIDk5ZRkc,8688
11
- opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=PxMeM5ikB0iedev95nDA5PunNLVme8oFsFhoKu3ipQc,7115
12
- opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=nEdavNRw5mY__PnarGoaszG24BjRC8BqDYNrtLFc6cs,3296
13
- opentelemetry/instrumentation/openai/version.py,sha256=dtFdL58o8_wXO3x70d8jtIt4WHZs03AumzHeW_aRNrI,23
14
- opentelemetry_instrumentation_openai-0.36.1.dist-info/METADATA,sha256=-Aw2kR2CAI7QpIMxhRpGulEFYHSK6gRrpjiCeMEm6W4,2186
15
- opentelemetry_instrumentation_openai-0.36.1.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
16
- opentelemetry_instrumentation_openai-0.36.1.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
17
- opentelemetry_instrumentation_openai-0.36.1.dist-info/RECORD,,
11
+ opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=08ZB03chvpzF-v2dtpHKCToJmm5ztYSwKhBq3pbpPlI,7191
12
+ opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=4HsnPwR450NA5w-KDgMfQjK8U4do_M-sMMPU4LjXyiQ,3444
13
+ opentelemetry/instrumentation/openai/version.py,sha256=GJwyrcH1eJOX_p_9H-LSIMO2ADxtxl7estBuQ7PgzCM,23
14
+ opentelemetry_instrumentation_openai-0.38.0.dist-info/METADATA,sha256=qEvV3MsmEgcjTd9F4I4bk0dNRLrgYkeACMdUTxq9NBY,2186
15
+ opentelemetry_instrumentation_openai-0.38.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
16
+ opentelemetry_instrumentation_openai-0.38.0.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
17
+ opentelemetry_instrumentation_openai-0.38.0.dist-info/RECORD,,