llama-index-llms-openai 0.3.43__tar.gz → 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/PKG-INFO +1 -1
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/base.py +8 -3
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/responses.py +16 -8
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/utils.py +8 -2
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/pyproject.toml +2 -2
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/.gitignore +0 -0
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/LICENSE +0 -0
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/README.md +0 -0
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/__init__.py +0 -0
- {llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/py.typed +0 -0
{llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/base.py
RENAMED
|
@@ -904,12 +904,15 @@ class OpenAI(FunctionCallingLLM):
|
|
|
904
904
|
chat_history: Optional[List[ChatMessage]] = None,
|
|
905
905
|
verbose: bool = False,
|
|
906
906
|
allow_parallel_tool_calls: bool = False,
|
|
907
|
-
|
|
907
|
+
tool_required: bool = False,
|
|
908
|
+
tool_choice: Optional[Union[str, dict]] = None,
|
|
908
909
|
strict: Optional[bool] = None,
|
|
909
910
|
**kwargs: Any,
|
|
910
911
|
) -> Dict[str, Any]:
|
|
911
912
|
"""Predict and call the tool."""
|
|
912
|
-
tool_specs = [tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools]
|
|
913
|
+
tool_specs = [
|
|
914
|
+
tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
|
|
915
|
+
]
|
|
913
916
|
|
|
914
917
|
# if strict is passed in, use, else default to the class-level attribute, else default to True`
|
|
915
918
|
if strict is not None:
|
|
@@ -934,7 +937,9 @@ class OpenAI(FunctionCallingLLM):
|
|
|
934
937
|
return {
|
|
935
938
|
"messages": messages,
|
|
936
939
|
"tools": tool_specs or None,
|
|
937
|
-
"tool_choice": resolve_tool_choice(tool_choice) if tool_specs else None,
|
|
940
|
+
"tool_choice": resolve_tool_choice(tool_choice, tool_required)
|
|
941
|
+
if tool_specs
|
|
942
|
+
else None,
|
|
938
943
|
**kwargs,
|
|
939
944
|
}
|
|
940
945
|
|
|
@@ -14,7 +14,7 @@ from openai.types.responses import (
|
|
|
14
14
|
ResponseFunctionCallArgumentsDoneEvent,
|
|
15
15
|
ResponseInProgressEvent,
|
|
16
16
|
ResponseOutputItemAddedEvent,
|
|
17
|
-
|
|
17
|
+
ResponseOutputTextAnnotationAddedEvent,
|
|
18
18
|
ResponseTextDeltaEvent,
|
|
19
19
|
ResponseWebSearchCallCompletedEvent,
|
|
20
20
|
ResponseOutputItem,
|
|
@@ -570,7 +570,12 @@ class OpenAIResponses(FunctionCallingLLM):
|
|
|
570
570
|
elif isinstance(event, ResponseImageGenCallPartialImageEvent):
|
|
571
571
|
# Partial image
|
|
572
572
|
if event.partial_image_b64:
|
|
573
|
-
blocks.append(
|
|
573
|
+
blocks.append(
|
|
574
|
+
ImageBlock(
|
|
575
|
+
image=base64.b64decode(event.partial_image_b64),
|
|
576
|
+
detail=f"id_{event.partial_image_index}",
|
|
577
|
+
)
|
|
578
|
+
)
|
|
574
579
|
elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent):
|
|
575
580
|
# Function call arguments are being streamed
|
|
576
581
|
if current_tool_call is not None:
|
|
@@ -588,7 +593,7 @@ class OpenAIResponses(FunctionCallingLLM):
|
|
|
588
593
|
|
|
589
594
|
# clear the current tool call
|
|
590
595
|
current_tool_call = None
|
|
591
|
-
elif isinstance(event, ResponseTextAnnotationDeltaEvent):
|
|
596
|
+
elif isinstance(event, ResponseOutputTextAnnotationAddedEvent):
|
|
592
597
|
# Annotations for the text
|
|
593
598
|
annotations = additional_kwargs.get("annotations", [])
|
|
594
599
|
annotations.append(event.annotation)
|
|
@@ -815,7 +820,8 @@ class OpenAIResponses(FunctionCallingLLM):
|
|
|
815
820
|
user_msg: Optional[Union[str, ChatMessage]] = None,
|
|
816
821
|
chat_history: Optional[List[ChatMessage]] = None,
|
|
817
822
|
allow_parallel_tool_calls: bool = True,
|
|
818
|
-
|
|
823
|
+
tool_required: bool = False,
|
|
824
|
+
tool_choice: Optional[Union[str, dict]] = None,
|
|
819
825
|
verbose: bool = False,
|
|
820
826
|
strict: Optional[bool] = None,
|
|
821
827
|
**kwargs: Any,
|
|
@@ -848,7 +854,9 @@ class OpenAIResponses(FunctionCallingLLM):
|
|
|
848
854
|
return {
|
|
849
855
|
"messages": messages,
|
|
850
856
|
"tools": tool_specs or None,
|
|
851
|
-
"tool_choice": resolve_tool_choice(tool_choice) if tool_specs else None,
|
|
857
|
+
"tool_choice": resolve_tool_choice(tool_choice, tool_required)
|
|
858
|
+
if tool_specs
|
|
859
|
+
else None,
|
|
852
860
|
"parallel_tool_calls": allow_parallel_tool_calls,
|
|
853
861
|
**kwargs,
|
|
854
862
|
}
|
|
@@ -860,9 +868,9 @@ class OpenAIResponses(FunctionCallingLLM):
|
|
|
860
868
|
**kwargs: Any,
|
|
861
869
|
) -> List[ToolSelection]:
|
|
862
870
|
"""Predict and call the tool."""
|
|
863
|
-
tool_calls: List[
|
|
864
|
-
|
|
865
|
-
|
|
871
|
+
tool_calls: List[ResponseFunctionToolCall] = (
|
|
872
|
+
response.message.additional_kwargs.get("tool_calls", [])
|
|
873
|
+
)
|
|
866
874
|
|
|
867
875
|
if len(tool_calls) < 1:
|
|
868
876
|
if error_on_no_tool_call:
|
{llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/utils.py
RENAMED
|
@@ -754,13 +754,19 @@ def validate_openai_api_key(api_key: Optional[str] = None) -> None:
|
|
|
754
754
|
raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
|
|
755
755
|
|
|
756
756
|
|
|
757
|
-
def resolve_tool_choice(tool_choice: Union[str, dict] = "auto") -> Union[str, dict]:
|
|
757
|
+
def resolve_tool_choice(
|
|
758
|
+
tool_choice: Optional[Union[str, dict]], tool_required: bool = False
|
|
759
|
+
) -> Union[str, dict]:
|
|
758
760
|
"""
|
|
759
761
|
Resolve tool choice.
|
|
760
762
|
|
|
761
763
|
If tool_choice is a function name string, return the appropriate dict.
|
|
762
764
|
"""
|
|
763
|
-
if isinstance(tool_choice, str) and tool_choice not in ["none", "auto", "required"]:
|
|
765
|
+
if tool_choice is None:
|
|
766
|
+
tool_choice = "required" if tool_required else "auto"
|
|
767
|
+
if isinstance(tool_choice, dict):
|
|
768
|
+
return tool_choice
|
|
769
|
+
if tool_choice not in ["none", "auto", "required"]:
|
|
764
770
|
return {"type": "function", "function": {"name": tool_choice}}
|
|
765
771
|
|
|
766
772
|
return tool_choice
|
|
@@ -12,7 +12,7 @@ dev = [
|
|
|
12
12
|
"pytest==7.2.1",
|
|
13
13
|
"pytest-asyncio==0.21.0",
|
|
14
14
|
"pytest-mock==3.11.1",
|
|
15
|
-
"ruff==0.
|
|
15
|
+
"ruff==0.11.11",
|
|
16
16
|
"types-Deprecated>=0.1.0",
|
|
17
17
|
"types-PyYAML>=6.0.12.12,<7",
|
|
18
18
|
"types-protobuf>=4.24.0.4,<5",
|
|
@@ -27,7 +27,7 @@ dev = [
|
|
|
27
27
|
|
|
28
28
|
[project]
|
|
29
29
|
name = "llama-index-llms-openai"
|
|
30
|
-
version = "0.3.43"
|
|
30
|
+
version = "0.4.0"
|
|
31
31
|
description = "llama-index llms openai integration"
|
|
32
32
|
authors = [{name = "llama-index"}]
|
|
33
33
|
requires-python = ">=3.9,<4.0"
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/__init__.py
RENAMED
|
File without changes
|
{llama_index_llms_openai-0.3.43 → llama_index_llms_openai-0.4.0}/llama_index/llms/openai/py.typed
RENAMED
|
File without changes
|