llama-index-llms-openai 0.3.26.tar.gz → 0.3.28.tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-llms-openai
-Version: 0.3.26
+Version: 0.3.28
 Summary: llama-index llms openai integration
 License: MIT
 Author: llama-index

@@ -2,6 +2,7 @@ import functools
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Awaitable,
     Callable,
     Dict,
@@ -11,6 +12,7 @@ from typing import (
     Optional,
     Protocol,
     Sequence,
+    Type,
     Union,
     cast,
     get_args,
@@ -43,7 +45,6 @@ from llama_index.core.base.llms.types import (
     MessageRole,
 )
 from llama_index.core.bridge.pydantic import (
-    BaseModel,
     Field,
     PrivateAttr,
 )
@@ -56,9 +57,11 @@ from llama_index.core.llms.callbacks import (
     llm_completion_callback,
 )
 from llama_index.core.llms.function_calling import FunctionCallingLLM
-from llama_index.core.llms.llm import ToolSelection
+from llama_index.core.llms.llm import ToolSelection, Model
 from llama_index.core.llms.utils import parse_partial_json
-from llama_index.core.types import BaseOutputParser, Model, PydanticProgramMode
+from llama_index.core.prompts import PromptTemplate
+from llama_index.core.program.utils import FlexibleModel
+from llama_index.core.types import BaseOutputParser, PydanticProgramMode
 from llama_index.llms.openai.utils import (
     O1_MODELS,
     OpenAIToolCall,
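
The import changes above track the API rework below: `Model` now ships from `llama_index.core.llms.llm` alongside `ToolSelection`, and `FlexibleModel` comes in from `llama_index.core.program.utils` to type the partially-parsed objects the streaming methods can yield. As a rough sketch of what those two names mean (an assumption about the core package, not code from this diff):

from typing import TypeVar
from pydantic import BaseModel, ConfigDict

# Model is, roughly, a TypeVar bound to pydantic's BaseModel, so the
# structured-predict methods can be generic over the caller's schema.
Model = TypeVar("Model", bound=BaseModel)

# FlexibleModel, roughly, relaxes validation so that JSON still being
# streamed can be materialized before every required field has arrived.
class FlexibleModel(BaseModel):
    model_config = ConfigDict(extra="allow")
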
@@ -991,62 +994,80 @@ class OpenAI(FunctionCallingLLM):
 
     @dispatcher.span
     def structured_predict(
-        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
-    ) -> BaseModel:
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Model:
         """Structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in all_kwargs else all_kwargs["tool_choice"]
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
-        return super().structured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+        return super().structured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     async def astructured_predict(
-        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
-    ) -> BaseModel:
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Model:
         """Structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in all_kwargs else all_kwargs["tool_choice"]
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
-        return await super().astructured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+        return await super().astructured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     def stream_structured_predict(
-        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
-    ) -> Generator[Union[Model, List[Model]], None, None]:
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Generator[Union[Model, FlexibleModel], None, None]:
         """Stream structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in all_kwargs else all_kwargs["tool_choice"]
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
-        return super().stream_structured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+        return super().stream_structured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     async def astream_structured_predict(
-        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
-    ) -> Generator[Union[Model, List[Model]], None, None]:
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> AsyncGenerator[Union[Model, FlexibleModel], None]:
         """Stream structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in all_kwargs else all_kwargs["tool_choice"]
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
         return await super().astream_structured_predict(
-            *args, llm_kwargs=llm_kwargs, **kwargs
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
         )
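
The rework above replaces the opaque `*args`/`**kwargs` passthrough with an explicit `output_cls` + `prompt` signature, which lets the return types be stated precisely: `Model` for the blocking variants, a `Generator` of `Model | FlexibleModel` for sync streaming, and a properly-typed `AsyncGenerator` for async streaming. It also means the `tool_choice` default is resolved from `llm_kwargs` alone rather than from a merge with prompt kwargs. A usage sketch against the new signature; the `Song` schema, prompt text, and model name are invented for illustration:

from pydantic import BaseModel
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.openai import OpenAI

class Song(BaseModel):  # hypothetical output schema
    title: str
    year: int

llm = OpenAI(model="gpt-4o-mini")
prompt = PromptTemplate("Name a well-known song by {artist}.")

# Blocking call: returns a validated Song instance.
song = llm.structured_predict(Song, prompt, artist="The Beatles")

# Streaming call: intermediate yields may be FlexibleModel partials,
# so don't rely on required fields until the final object arrives.
for partial in llm.stream_structured_predict(Song, prompt, artist="The Beatles"):
    print(partial)
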
@@ -296,7 +296,13 @@ def to_openai_message_dict(
         elif isinstance(block, ImageBlock):
             if block.url:
                 content.append(
-                    {"type": "image_url", "image_url": {"url": str(block.url)}}
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": str(block.url),
+                            "detail": block.detail or "auto",
+                        },
+                    }
                 )
             else:
                 img_bytes = block.resolve_image(as_base64=True).read()
@@ -306,7 +312,7 @@ def to_openai_message_dict(
                         "type": "image_url",
                         "image_url": {
                             "url": f"data:{block.image_mimetype};base64,{img_str}",
-                            "detail": block.detail or "low",
+                            "detail": block.detail or "auto",
                         },
                     }
                 )
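
With both branches changed, URL-referenced images now carry an explicit `detail` hint (previously omitted) and base64-embedded images default to "auto" instead of "low", letting OpenAI pick the resolution unless the caller sets `ImageBlock.detail`. A sketch of the resulting payload, with the URL invented for illustration:

from llama_index.core.base.llms.types import ChatMessage, ImageBlock, TextBlock

msg = ChatMessage(
    role="user",
    blocks=[
        TextBlock(text="What is in this image?"),
        ImageBlock(url="https://example.com/cat.png"),  # detail unset -> "auto"
    ],
)
# to_openai_message_dict(msg) should now produce, approximately:
# {"role": "user", "content": [
#     {"type": "text", "text": "What is in this image?"},
#     {"type": "image_url",
#      "image_url": {"url": "https://example.com/cat.png", "detail": "auto"}},
# ]}
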
@@ -29,7 +29,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-openai"
 readme = "README.md"
-version = "0.3.26"
+version = "0.3.28"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
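
To pick up the stricter structured-predict signatures and the new image-detail default, upgrade to the release this diff covers:

pip install --upgrade "llama-index-llms-openai==0.3.28"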