llama-index-llms-openai 0.1.26__tar.gz → 0.1.28__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-index-llms-openai
3
- Version: 0.1.26
3
+ Version: 0.1.28
4
4
  Summary: llama-index llms openai integration
5
5
  License: MIT
6
6
  Author: llama-index
@@ -11,7 +11,8 @@ Classifier: Programming Language :: Python :: 3.9
11
11
  Classifier: Programming Language :: Python :: 3.10
12
12
  Classifier: Programming Language :: Python :: 3.11
13
13
  Classifier: Programming Language :: Python :: 3.12
14
- Requires-Dist: llama-index-core (>=0.10.24,<0.11.0)
14
+ Requires-Dist: llama-index-core (>=0.10.57,<0.11.0)
15
+ Requires-Dist: openai (>=1.40.0,<2.0.0)
15
16
  Description-Content-Type: text/markdown
16
17
 
17
18
  # LlamaIndex Llms Integration: Openai
@@ -1,5 +1,4 @@
1
1
  import functools
2
- import json
3
2
  from typing import (
4
3
  TYPE_CHECKING,
5
4
  Any,
@@ -71,9 +70,9 @@ from openai.types.chat.chat_completion_chunk import (
71
70
  ChoiceDelta,
72
71
  ChoiceDeltaToolCall,
73
72
  )
73
+ from llama_index.core.llms.utils import parse_partial_json
74
74
 
75
75
  if TYPE_CHECKING:
76
- from llama_index.core.chat_engine.types import AgentChatResponse
77
76
  from llama_index.core.tools.types import BaseTool
78
77
 
79
78
  DEFAULT_OPENAI_MODEL = "gpt-3.5-turbo"
@@ -202,6 +201,10 @@ class OpenAI(FunctionCallingLLM):
202
201
  api_key: str = Field(default=None, description="The OpenAI API key.")
203
202
  api_base: str = Field(description="The base URL for OpenAI API.")
204
203
  api_version: str = Field(description="The API version for OpenAI API.")
204
+ strict: bool = Field(
205
+ default=False,
206
+ description="Whether to use strict mode for invoking tools/using schemas.",
207
+ )
205
208
 
206
209
  _client: Optional[SyncOpenAI] = PrivateAttr()
207
210
  _aclient: Optional[AsyncOpenAI] = PrivateAttr()
@@ -230,6 +233,7 @@ class OpenAI(FunctionCallingLLM):
230
233
  completion_to_prompt: Optional[Callable[[str], str]] = None,
231
234
  pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
232
235
  output_parser: Optional[BaseOutputParser] = None,
236
+ strict: bool = False,
233
237
  **kwargs: Any,
234
238
  ) -> None:
235
239
  additional_kwargs = additional_kwargs or {}
@@ -258,6 +262,7 @@ class OpenAI(FunctionCallingLLM):
258
262
  completion_to_prompt=completion_to_prompt,
259
263
  pydantic_program_mode=pydantic_program_mode,
260
264
  output_parser=output_parser,
265
+ strict=strict,
261
266
  **kwargs,
262
267
  )
263
268
 
@@ -825,7 +830,7 @@ class OpenAI(FunctionCallingLLM):
825
830
 
826
831
  return gen()
827
832
 
828
- def chat_with_tools(
833
+ def _prepare_chat_with_tools(
829
834
  self,
830
835
  tools: List["BaseTool"],
831
836
  user_msg: Optional[Union[str, ChatMessage]] = None,
@@ -833,14 +838,29 @@ class OpenAI(FunctionCallingLLM):
833
838
  verbose: bool = False,
834
839
  allow_parallel_tool_calls: bool = False,
835
840
  tool_choice: Union[str, dict] = "auto",
841
+ strict: Optional[bool] = None,
836
842
  **kwargs: Any,
837
- ) -> ChatResponse:
843
+ ) -> Dict[str, Any]:
838
844
  """Predict and call the tool."""
839
845
  from llama_index.agent.openai.utils import resolve_tool_choice
840
846
 
841
847
  # mistralai uses the same openai tool format
842
848
  tool_specs = [tool.metadata.to_openai_tool() for tool in tools]
843
849
 
850
+ # if strict is passed in, use it; otherwise fall back to the class-level `strict` attribute
851
+ if strict is not None:
852
+ strict = strict
853
+ else:
854
+ strict = self.strict
855
+
856
+ if self.metadata.is_function_calling_model:
857
+ for tool_spec in tool_specs:
858
+ if tool_spec["type"] == "function":
859
+ tool_spec["function"]["strict"] = strict
860
+ tool_spec["function"]["parameters"][
861
+ "additionalProperties"
862
+ ] = False # in current openai 1.40.0 it is always false.
863
+
844
864
  if isinstance(user_msg, str):
845
865
  user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
846
866
 
@@ -848,52 +868,28 @@ class OpenAI(FunctionCallingLLM):
848
868
  if user_msg:
849
869
  messages.append(user_msg)
850
870
 
851
- response = self.chat(
852
- messages,
853
- tools=tool_specs or None,
854
- tool_choice=resolve_tool_choice(tool_choice) if tool_specs else None,
871
+ return {
872
+ "messages": messages,
873
+ "tools": tool_specs or None,
874
+ "tool_choice": resolve_tool_choice(tool_choice) if tool_specs else None,
855
875
  **kwargs,
856
- )
857
- if not allow_parallel_tool_calls:
858
- force_single_tool_call(response)
859
- return response
876
+ }
860
877
 
861
- async def achat_with_tools(
878
+ def _validate_chat_with_tools_response(
862
879
  self,
880
+ response: ChatResponse,
863
881
  tools: List["BaseTool"],
864
- user_msg: Optional[Union[str, ChatMessage]] = None,
865
- chat_history: Optional[List[ChatMessage]] = None,
866
- verbose: bool = False,
867
882
  allow_parallel_tool_calls: bool = False,
868
- tool_choice: Union[str, dict] = "auto",
869
883
  **kwargs: Any,
870
884
  ) -> ChatResponse:
871
- """Predict and call the tool."""
872
- from llama_index.agent.openai.utils import resolve_tool_choice
873
-
874
- # misralai uses the same openai tool format
875
- tool_specs = [tool.metadata.to_openai_tool() for tool in tools]
876
-
877
- if isinstance(user_msg, str):
878
- user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
879
-
880
- messages = chat_history or []
881
- if user_msg:
882
- messages.append(user_msg)
883
-
884
- response = await self.achat(
885
- messages,
886
- tools=tool_specs or None,
887
- tool_choice=resolve_tool_choice(tool_choice) if tool_specs else None,
888
- **kwargs,
889
- )
885
+ """Validate the response from chat_with_tools."""
890
886
  if not allow_parallel_tool_calls:
891
887
  force_single_tool_call(response)
892
888
  return response
893
889
 
894
890
  def get_tool_calls_from_response(
895
891
  self,
896
- response: "AgentChatResponse",
892
+ response: "ChatResponse",
897
893
  error_on_no_tool_call: bool = True,
898
894
  **kwargs: Any,
899
895
  ) -> List[ToolSelection]:
@@ -914,7 +910,12 @@ class OpenAI(FunctionCallingLLM):
914
910
  raise ValueError("Invalid tool_call object")
915
911
  if tool_call.type != "function":
916
912
  raise ValueError("Invalid tool type. Unsupported by OpenAI")
917
- argument_dict = json.loads(tool_call.function.arguments)
913
+
914
+ # this should handle both complete and partial jsons
915
+ try:
916
+ argument_dict = parse_partial_json(tool_call.function.arguments)
917
+ except ValueError:
918
+ argument_dict = {}
918
919
 
919
920
  tool_selections.append(
920
921
  ToolSelection(
@@ -29,11 +29,12 @@ exclude = ["**/BUILD"]
29
29
  license = "MIT"
30
30
  name = "llama-index-llms-openai"
31
31
  readme = "README.md"
32
- version = "0.1.26"
32
+ version = "0.1.28"
33
33
 
34
34
  [tool.poetry.dependencies]
35
35
  python = ">=3.8.1,<4.0"
36
- llama-index-core = "^0.10.24"
36
+ llama-index-core = "^0.10.57"
37
+ openai = "^1.40.0"
37
38
 
38
39
  [tool.poetry.group.dev.dependencies]
39
40
  ipython = "8.10.0"