opentelemetry-instrumentation-openai 0.44.0__py3-none-any.whl → 0.44.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of opentelemetry-instrumentation-openai might be problematic.

opentelemetry/instrumentation/openai/shared/chat_wrappers.py
@@ -7,6 +7,7 @@ from functools import singledispatch
 from typing import List, Optional, Union
 
 from opentelemetry import context as context_api
+import pydantic
 from opentelemetry.instrumentation.openai.shared import (
     OPENAI_LLM_USAGE_TOKEN_TYPES,
     _get_openai_base_url,
@@ -50,9 +51,6 @@ from opentelemetry.trace import SpanKind, Tracer
 from opentelemetry.trace.status import Status, StatusCode
 from wrapt import ObjectProxy
 
-from openai.types.chat import ChatCompletionMessageToolCall
-from openai.types.chat.chat_completion_message import FunctionCall
-
 SPAN_NAME = "openai.chat"
 PROMPT_FILTER_KEY = "prompt_filter_results"
 CONTENT_FILTER_KEY = "content_filter_results"
@@ -961,8 +959,10 @@ async def _abuild_from_streaming_response(
     span.end()
 
 
+# pydantic.BaseModel here is ChatCompletionMessageFunctionToolCall (as of openai 1.99.7)
+# but we keep to a parent type to support older versions
 def _parse_tool_calls(
-    tool_calls: Optional[List[Union[dict, ChatCompletionMessageToolCall]]],
+    tool_calls: Optional[List[Union[dict, pydantic.BaseModel]]],
 ) -> Union[List[ToolCall], None]:
     """
     Util to correctly parse the tool calls data from the OpenAI API to this module's
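The widened annotation keeps the rest of the function working because every SDK-generated tool-call object is a pydantic model, so an isinstance check against the parent class still matches and model_dump() stays available. A minimal standalone sketch (assuming pydantic v2; FakeToolCall is an illustrative stand-in, not an openai type):

import pydantic


class FakeToolCall(pydantic.BaseModel):
    # Illustrative stand-in for an SDK-generated tool-call model.
    id: str
    type: str = "function"


call = FakeToolCall(id="call_123")
assert isinstance(call, pydantic.BaseModel)  # parent-type check still matches
print(call.model_dump())  # {'id': 'call_123', 'type': 'function'}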
@@ -976,12 +976,11 @@ def _parse_tool_calls(
     for tool_call in tool_calls:
         tool_call_data = None
 
-        # Handle dict or ChatCompletionMessageToolCall
         if isinstance(tool_call, dict):
             tool_call_data = copy.deepcopy(tool_call)
-        elif isinstance(tool_call, ChatCompletionMessageToolCall):
+        elif _is_chat_message_function_tool_call(tool_call):
            tool_call_data = tool_call.model_dump()
-        elif isinstance(tool_call, FunctionCall):
+        elif _is_function_call(tool_call):
            function_call = tool_call.model_dump()
            tool_call_data = ToolCall(
                id="",
@@ -996,6 +995,34 @@ def _parse_tool_calls(
     return result
 
 
+def _is_chat_message_function_tool_call(model: Union[dict, pydantic.BaseModel]) -> bool:
+    try:
+        from openai.types.chat.chat_completion_message_function_tool_call import (
+            ChatCompletionMessageFunctionToolCall,
+        )
+
+        return isinstance(model, ChatCompletionMessageFunctionToolCall)
+    except Exception:
+        try:
+            # Since OpenAI 1.99.3, ChatCompletionMessageToolCall is a Union,
+            # and the isinstance check will fail. This is fine, because in all
+            # those versions, the check above will succeed.
+            from openai.types.chat.chat_completion_message_tool_call import (
+                ChatCompletionMessageToolCall,
+            )
+            return isinstance(model, ChatCompletionMessageToolCall)
+        except Exception:
+            return False
+
+
+def _is_function_call(model: Union[dict, pydantic.BaseModel]) -> bool:
+    try:
+        from openai.types.chat.chat_completion_message import FunctionCall
+        return isinstance(model, FunctionCall)
+    except Exception:
+        return False
+
+
 @singledispatch
 def _parse_choice_event(choice) -> ChoiceEvent:
     has_message = choice.message is not None
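Both helpers resolve the openai class lazily inside a try/except, so an import that fails on an older or newer SDK simply yields False instead of breaking instrumentation at import time. A generic sketch of the same pattern (the module and class names passed at the bottom are placeholders, not real openai paths):

import importlib
from typing import Any


def _safe_isinstance(obj: Any, module_name: str, class_name: str) -> bool:
    # Resolve the class at call time; any import, lookup, or isinstance
    # failure is treated as "not an instance of that type".
    try:
        cls = getattr(importlib.import_module(module_name), class_name)
        return isinstance(obj, cls)
    except Exception:
        return False


# A missing module (or a name that is no longer a plain class) degrades to False.
print(_safe_isinstance({}, "some_optional_sdk.types", "ToolCall"))  # False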
opentelemetry/instrumentation/openai/v1/responses_wrappers.py
@@ -447,6 +447,14 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
     merged_tools = existing_data.get("tools", []) + request_tools
 
     try:
+        parsed_response_output_text = None
+        if hasattr(parsed_response, "output_text"):
+            parsed_response_output_text = parsed_response.output_text
+        else:
+            try:
+                parsed_response_output_text = parsed_response.output[0].content[0].text
+            except Exception:
+                pass
         traced_data = TracedData(
             start_time=existing_data.get("start_time", start_time),
             response_id=parsed_response.id,
@@ -456,7 +464,7 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
             output_blocks={block.id: block for block in parsed_response.output}
             | existing_data.get("output_blocks", {}),
             usage=existing_data.get("usage", parsed_response.usage),
-            output_text=existing_data.get("output_text", parsed_response.output_text),
+            output_text=existing_data.get("output_text", parsed_response_output_text),
             request_model=existing_data.get("request_model", kwargs.get("model")),
             response_model=existing_data.get("response_model", parsed_response.model),
         )
@@ -541,6 +549,15 @@ async def async_responses_get_or_create_wrapper(
     merged_tools = existing_data.get("tools", []) + request_tools
 
     try:
+        parsed_response_output_text = None
+        if hasattr(parsed_response, "output_text"):
+            parsed_response_output_text = parsed_response.output_text
+        else:
+            try:
+                parsed_response_output_text = parsed_response.output[0].content[0].text
+            except Exception:
+                pass
+
         traced_data = TracedData(
             start_time=existing_data.get("start_time", start_time),
             response_id=parsed_response.id,
@@ -550,7 +567,7 @@ async def async_responses_get_or_create_wrapper(
             output_blocks={block.id: block for block in parsed_response.output}
             | existing_data.get("output_blocks", {}),
             usage=existing_data.get("usage", parsed_response.usage),
-            output_text=existing_data.get("output_text", parsed_response.output_text),
+            output_text=existing_data.get("output_text", parsed_response_output_text),
             request_model=existing_data.get("request_model", kwargs.get("model")),
             response_model=existing_data.get("response_model", parsed_response.model),
         )
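Both the sync and async wrappers now resolve the output text defensively: use the output_text convenience property when the parsed Responses object exposes it, otherwise fall back to the first text block, and record None if neither is available. A standalone sketch of the same fallback (the Fake* classes are illustrative stand-ins, not SDK types):

from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeContent:
    text: str


@dataclass
class FakeOutputBlock:
    content: List[FakeContent] = field(default_factory=list)


@dataclass
class FakeResponse:
    # Stands in for an older Responses object without the output_text property.
    output: List[FakeOutputBlock] = field(default_factory=list)


def extract_output_text(resp) -> Optional[str]:
    if hasattr(resp, "output_text"):
        return resp.output_text
    try:
        return resp.output[0].content[0].text
    except Exception:
        return None


print(extract_output_text(FakeResponse([FakeOutputBlock([FakeContent("hi")])])))  # hi
print(extract_output_text(FakeResponse()))  # None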
opentelemetry/instrumentation/openai/version.py
@@ -1 +1 @@
-__version__ = "0.44.0"
+__version__ = "0.44.2"
opentelemetry_instrumentation_openai-0.44.0.dist-info/METADATA → opentelemetry_instrumentation_openai-0.44.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: opentelemetry-instrumentation-openai
-Version: 0.44.0
+Version: 0.44.2
 Summary: OpenTelemetry OpenAI instrumentation
 License: Apache-2.0
 Author: Gal Kleinman
opentelemetry_instrumentation_openai-0.44.0.dist-info/RECORD → opentelemetry_instrumentation_openai-0.44.2.dist-info/RECORD
@@ -1,6 +1,6 @@
 opentelemetry/instrumentation/openai/__init__.py,sha256=Mx_nwMl0TlhUjrQOR4qdx6MEhBUKp5cuUIIXFzi3mXo,2093
 opentelemetry/instrumentation/openai/shared/__init__.py,sha256=Ba429tv5NPuQN7RoLzaj00K9oj88BaUBdPmUUsZ-7ic,12346
-opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=SH_IJMOIE1uqpn5f2-mlqdBBH_YvJqLG9p5amJSWTm0,37517
+opentelemetry/instrumentation/openai/shared/chat_wrappers.py,sha256=dg71IdSGQKmE4B8mcDziFZlAts7k136MnBF0BUCZcp8,38545
 opentelemetry/instrumentation/openai/shared/completion_wrappers.py,sha256=LH90ZPytXD4_yXAfRkw7N7BimkVCvMzcuMCYu0RdNVo,8824
 opentelemetry/instrumentation/openai/shared/config.py,sha256=nQfVXiznVUIv2_BHSUQpaoCnxysG3XpaYpIZdxi0mxM,477
 opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py,sha256=oRHghd4vSDJ6fNHjL9G8QfKnPnp_NkZfVmTDSgZScVU,9251
@@ -13,9 +13,9 @@ opentelemetry/instrumentation/openai/v0/__init__.py,sha256=FhpVbP8NqjN2We_srppZ_
 opentelemetry/instrumentation/openai/v1/__init__.py,sha256=oLst4xav77tTteZKXo59uyb-2IWqw_xOafaSMzTxq9g,13255
 opentelemetry/instrumentation/openai/v1/assistant_wrappers.py,sha256=ZBbJHgsY3_OuTb9n-WN-XGK4xsH4b2zy6fO6IkElxhk,10318
 opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py,sha256=AT-eDZOFP-K_mm-ecdgQaILoIsEiItZmtwzwAuse86Q,4350
-opentelemetry/instrumentation/openai/v1/responses_wrappers.py,sha256=YQt2fdsdyJXczfQ5rqkOJ6CJtvF_Q4be0yX6hYpfdno,23800
-opentelemetry/instrumentation/openai/version.py,sha256=jLVEw0OXaD-GYhTB6M4c0IS8rVCF3biDLYCxt25hxuA,23
-opentelemetry_instrumentation_openai-0.44.0.dist-info/METADATA,sha256=YiZeIjGsX1HDEjKs-CkP1zDmj3t84HVXzQeCNXTAcoM,2150
-opentelemetry_instrumentation_openai-0.44.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-opentelemetry_instrumentation_openai-0.44.0.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
-opentelemetry_instrumentation_openai-0.44.0.dist-info/RECORD,,
+opentelemetry/instrumentation/openai/v1/responses_wrappers.py,sha256=NSty_lrL5HJVt88d_keV-wQ17-4XGVzc9ukMLaITAug,24471
+opentelemetry/instrumentation/openai/version.py,sha256=dVjDIbVOWUBPw78Cqx0fPr4ECxTNqsIeHQeCpeDryYk,23
+opentelemetry_instrumentation_openai-0.44.2.dist-info/METADATA,sha256=1lYqOYpbq8N5qbssk-OaDSonn5U8IMoW0gXbdIpGvQo,2150
+opentelemetry_instrumentation_openai-0.44.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+opentelemetry_instrumentation_openai-0.44.2.dist-info/entry_points.txt,sha256=vTBfiX5yXji5YHikuJHEOoBZ1TFdPQ1EI4ctd2pZSeE,93
+opentelemetry_instrumentation_openai-0.44.2.dist-info/RECORD,,