opentelemetry-instrumentation-openai 0.44.1__tar.gz → 0.44.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-openai might be problematic. Click here for more details.
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/PKG-INFO +1 -1
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +34 -7
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +19 -2
- opentelemetry_instrumentation_openai-0.44.2/opentelemetry/instrumentation/openai/version.py +1 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/pyproject.toml +2 -2
- opentelemetry_instrumentation_openai-0.44.1/opentelemetry/instrumentation/openai/version.py +0 -1
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/README.md +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/config.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/event_models.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/shared/span_utils.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/utils.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
- {opentelemetry_instrumentation_openai-0.44.1 → opentelemetry_instrumentation_openai-0.44.2}/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
|
@@ -7,6 +7,7 @@ from functools import singledispatch
|
|
|
7
7
|
from typing import List, Optional, Union
|
|
8
8
|
|
|
9
9
|
from opentelemetry import context as context_api
|
|
10
|
+
import pydantic
|
|
10
11
|
from opentelemetry.instrumentation.openai.shared import (
|
|
11
12
|
OPENAI_LLM_USAGE_TOKEN_TYPES,
|
|
12
13
|
_get_openai_base_url,
|
|
@@ -50,9 +51,6 @@ from opentelemetry.trace import SpanKind, Tracer
|
|
|
50
51
|
from opentelemetry.trace.status import Status, StatusCode
|
|
51
52
|
from wrapt import ObjectProxy
|
|
52
53
|
|
|
53
|
-
from openai.types.chat import ChatCompletionMessageToolCall
|
|
54
|
-
from openai.types.chat.chat_completion_message import FunctionCall
|
|
55
|
-
|
|
56
54
|
SPAN_NAME = "openai.chat"
|
|
57
55
|
PROMPT_FILTER_KEY = "prompt_filter_results"
|
|
58
56
|
CONTENT_FILTER_KEY = "content_filter_results"
|
|
@@ -961,8 +959,10 @@ async def _abuild_from_streaming_response(
|
|
|
961
959
|
span.end()
|
|
962
960
|
|
|
963
961
|
|
|
962
|
+
# pydantic.BaseModel here is ChatCompletionMessageFunctionToolCall (as of openai 1.99.7)
|
|
963
|
+
# but we keep to a parent type to support older versions
|
|
964
964
|
def _parse_tool_calls(
|
|
965
|
-
tool_calls: Optional[List[Union[dict, ChatCompletionMessageToolCall, FunctionCall]]],
|
|
965
|
+
tool_calls: Optional[List[Union[dict, pydantic.BaseModel]]],
|
|
966
966
|
) -> Union[List[ToolCall], None]:
|
|
967
967
|
"""
|
|
968
968
|
Util to correctly parse the tool calls data from the OpenAI API to this module's
|
|
@@ -976,12 +976,11 @@ def _parse_tool_calls(
|
|
|
976
976
|
for tool_call in tool_calls:
|
|
977
977
|
tool_call_data = None
|
|
978
978
|
|
|
979
|
-
# Handle dict or ChatCompletionMessageToolCall
|
|
980
979
|
if isinstance(tool_call, dict):
|
|
981
980
|
tool_call_data = copy.deepcopy(tool_call)
|
|
982
|
-
elif isinstance(tool_call, ChatCompletionMessageToolCall):
|
|
981
|
+
elif _is_chat_message_function_tool_call(tool_call):
|
|
983
982
|
tool_call_data = tool_call.model_dump()
|
|
984
|
-
elif isinstance(tool_call, FunctionCall):
|
|
983
|
+
elif _is_function_call(tool_call):
|
|
985
984
|
function_call = tool_call.model_dump()
|
|
986
985
|
tool_call_data = ToolCall(
|
|
987
986
|
id="",
|
|
@@ -996,6 +995,34 @@ def _parse_tool_calls(
|
|
|
996
995
|
return result
|
|
997
996
|
|
|
998
997
|
|
|
998
|
+
def _is_chat_message_function_tool_call(model: Union[dict, pydantic.BaseModel]) -> bool:
|
|
999
|
+
try:
|
|
1000
|
+
from openai.types.chat.chat_completion_message_function_tool_call import (
|
|
1001
|
+
ChatCompletionMessageFunctionToolCall,
|
|
1002
|
+
)
|
|
1003
|
+
|
|
1004
|
+
return isinstance(model, ChatCompletionMessageFunctionToolCall)
|
|
1005
|
+
except Exception:
|
|
1006
|
+
try:
|
|
1007
|
+
# Since OpenAI 1.99.3, ChatCompletionMessageToolCall is a Union,
|
|
1008
|
+
# and the isinstance check will fail. This is fine, because in all
|
|
1009
|
+
# those versions, the check above will succeed.
|
|
1010
|
+
from openai.types.chat.chat_completion_message_tool_call import (
|
|
1011
|
+
ChatCompletionMessageToolCall,
|
|
1012
|
+
)
|
|
1013
|
+
return isinstance(model, ChatCompletionMessageToolCall)
|
|
1014
|
+
except Exception:
|
|
1015
|
+
return False
|
|
1016
|
+
|
|
1017
|
+
|
|
1018
|
+
def _is_function_call(model: Union[dict, pydantic.BaseModel]) -> bool:
|
|
1019
|
+
try:
|
|
1020
|
+
from openai.types.chat.chat_completion_message import FunctionCall
|
|
1021
|
+
return isinstance(model, FunctionCall)
|
|
1022
|
+
except Exception:
|
|
1023
|
+
return False
|
|
1024
|
+
|
|
1025
|
+
|
|
999
1026
|
@singledispatch
|
|
1000
1027
|
def _parse_choice_event(choice) -> ChoiceEvent:
|
|
1001
1028
|
has_message = choice.message is not None
|
|
@@ -447,6 +447,14 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
|
|
|
447
447
|
merged_tools = existing_data.get("tools", []) + request_tools
|
|
448
448
|
|
|
449
449
|
try:
|
|
450
|
+
parsed_response_output_text = None
|
|
451
|
+
if hasattr(parsed_response, "output_text"):
|
|
452
|
+
parsed_response_output_text = parsed_response.output_text
|
|
453
|
+
else:
|
|
454
|
+
try:
|
|
455
|
+
parsed_response_output_text = parsed_response.output[0].content[0].text
|
|
456
|
+
except Exception:
|
|
457
|
+
pass
|
|
450
458
|
traced_data = TracedData(
|
|
451
459
|
start_time=existing_data.get("start_time", start_time),
|
|
452
460
|
response_id=parsed_response.id,
|
|
@@ -456,7 +464,7 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
|
|
|
456
464
|
output_blocks={block.id: block for block in parsed_response.output}
|
|
457
465
|
| existing_data.get("output_blocks", {}),
|
|
458
466
|
usage=existing_data.get("usage", parsed_response.usage),
|
|
459
|
-
output_text=existing_data.get("output_text", parsed_response.output_text),
|
|
467
|
+
output_text=existing_data.get("output_text", parsed_response_output_text),
|
|
460
468
|
request_model=existing_data.get("request_model", kwargs.get("model")),
|
|
461
469
|
response_model=existing_data.get("response_model", parsed_response.model),
|
|
462
470
|
)
|
|
@@ -541,6 +549,15 @@ async def async_responses_get_or_create_wrapper(
|
|
|
541
549
|
merged_tools = existing_data.get("tools", []) + request_tools
|
|
542
550
|
|
|
543
551
|
try:
|
|
552
|
+
parsed_response_output_text = None
|
|
553
|
+
if hasattr(parsed_response, "output_text"):
|
|
554
|
+
parsed_response_output_text = parsed_response.output_text
|
|
555
|
+
else:
|
|
556
|
+
try:
|
|
557
|
+
parsed_response_output_text = parsed_response.output[0].content[0].text
|
|
558
|
+
except Exception:
|
|
559
|
+
pass
|
|
560
|
+
|
|
544
561
|
traced_data = TracedData(
|
|
545
562
|
start_time=existing_data.get("start_time", start_time),
|
|
546
563
|
response_id=parsed_response.id,
|
|
@@ -550,7 +567,7 @@ async def async_responses_get_or_create_wrapper(
|
|
|
550
567
|
output_blocks={block.id: block for block in parsed_response.output}
|
|
551
568
|
| existing_data.get("output_blocks", {}),
|
|
552
569
|
usage=existing_data.get("usage", parsed_response.usage),
|
|
553
|
-
output_text=existing_data.get("output_text", parsed_response.output_text),
|
|
570
|
+
output_text=existing_data.get("output_text", parsed_response_output_text),
|
|
554
571
|
request_model=existing_data.get("request_model", kwargs.get("model")),
|
|
555
572
|
response_model=existing_data.get("response_model", parsed_response.model),
|
|
556
573
|
)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.44.2"
|
|
@@ -8,7 +8,7 @@ show_missing = true
|
|
|
8
8
|
|
|
9
9
|
[tool.poetry]
|
|
10
10
|
name = "opentelemetry-instrumentation-openai"
|
|
11
|
-
version = "0.44.1"
|
|
11
|
+
version = "0.44.2"
|
|
12
12
|
description = "OpenTelemetry OpenAI instrumentation"
|
|
13
13
|
authors = [
|
|
14
14
|
"Gal Kleinman <gal@traceloop.com>",
|
|
@@ -38,7 +38,7 @@ pytest = "^8.2.2"
|
|
|
38
38
|
pytest-sugar = "1.0.0"
|
|
39
39
|
vcrpy = "^6.0.1"
|
|
40
40
|
pytest-recording = "^0.13.1"
|
|
41
|
-
openai = { extras = ["datalib"], version = "
|
|
41
|
+
openai = { extras = ["datalib"], version = "1.99.7" }
|
|
42
42
|
opentelemetry-sdk = "^1.27.0"
|
|
43
43
|
pytest-asyncio = "^0.23.7"
|
|
44
44
|
requests = "^2.31.0"
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
__version__ = "0.44.1"
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|