llama-index-llms-openai 0.3.26__py3-none-any.whl → 0.3.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_index/llms/openai/base.py +44 -23
- llama_index/llms/openai/utils.py +8 -2
- {llama_index_llms_openai-0.3.26.dist-info → llama_index_llms_openai-0.3.28.dist-info}/METADATA +1 -1
- llama_index_llms_openai-0.3.28.dist-info/RECORD +8 -0
- llama_index_llms_openai-0.3.26.dist-info/RECORD +0 -8
- {llama_index_llms_openai-0.3.26.dist-info → llama_index_llms_openai-0.3.28.dist-info}/LICENSE +0 -0
- {llama_index_llms_openai-0.3.26.dist-info → llama_index_llms_openai-0.3.28.dist-info}/WHEEL +0 -0
llama_index/llms/openai/base.py
CHANGED
@@ -2,6 +2,7 @@ import functools
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Awaitable,
     Callable,
     Dict,
@@ -11,6 +12,7 @@ from typing import (
     Optional,
     Protocol,
     Sequence,
+    Type,
     Union,
     cast,
     get_args,
@@ -43,7 +45,6 @@ from llama_index.core.base.llms.types import (
     MessageRole,
 )
 from llama_index.core.bridge.pydantic import (
-    BaseModel,
     Field,
     PrivateAttr,
 )
@@ -56,9 +57,11 @@ from llama_index.core.llms.callbacks import (
     llm_completion_callback,
 )
 from llama_index.core.llms.function_calling import FunctionCallingLLM
-from llama_index.core.llms.llm import ToolSelection
+from llama_index.core.llms.llm import ToolSelection, Model
 from llama_index.core.llms.utils import parse_partial_json
-from llama_index.core.
+from llama_index.core.prompts import PromptTemplate
+from llama_index.core.program.utils import FlexibleModel
+from llama_index.core.types import BaseOutputParser, PydanticProgramMode
 from llama_index.llms.openai.utils import (
     O1_MODELS,
     OpenAIToolCall,
@@ -991,62 +994,80 @@ class OpenAI(FunctionCallingLLM):
 
     @dispatcher.span
     def structured_predict(
-        self,
-
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Model:
         """Structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
-        return super().structured_predict(
+        return super().structured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     async def astructured_predict(
-        self,
-
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Model:
         """Structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
        # here we force tool_choice to be required
-        return await super().astructured_predict(
+        return await super().astructured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     def stream_structured_predict(
-        self,
-
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> Generator[Union[Model, FlexibleModel], None, None]:
         """Stream structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
        )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
-        return super().stream_structured_predict(
+        return super().stream_structured_predict(
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
+        )
 
     @dispatcher.span
     async def astream_structured_predict(
-        self,
-
+        self,
+        output_cls: Type[Model],
+        prompt: PromptTemplate,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+        **prompt_args: Any,
+    ) -> AsyncGenerator[Union[Model, FlexibleModel], None]:
         """Stream structured predict."""
         llm_kwargs = llm_kwargs or {}
-        all_kwargs = {**llm_kwargs, **kwargs}
 
         llm_kwargs["tool_choice"] = (
-            "required" if "tool_choice" not in
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
         )
         # by default structured prediction uses function calling to extract structured outputs
         # here we force tool_choice to be required
         return await super().astream_structured_predict(
-
+            output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
         )
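The net effect of the base.py change is that `structured_predict`, `astructured_predict`, `stream_structured_predict`, and `astream_structured_predict` now take explicit `output_cls`, `prompt`, and optional `llm_kwargs` parameters (with remaining keyword arguments treated as prompt arguments) instead of the previous catch-all `**kwargs` signature, while still forcing `tool_choice="required"` before delegating to the parent class. A minimal sketch of calling the updated methods; the `Song` schema, prompt text, and model name below are illustrative assumptions, not taken from the diff:

from pydantic import BaseModel
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.openai import OpenAI

# Hypothetical output schema, used only to show the call shape.
class Song(BaseModel):
    title: str
    artist: str

llm = OpenAI(model="gpt-4o-mini")  # assumed model name; requires an OpenAI API key
prompt = PromptTemplate("Name one well-known song by {artist_name}.")

# Blocking call: output_cls and prompt are explicit, llm_kwargs is optional,
# and extra keyword arguments fill in the prompt template.
song = llm.structured_predict(Song, prompt, artist_name="The Beatles")
print(song.title, song.artist)

# Streaming call: yields partially filled objects (FlexibleModel) until the
# final, fully validated Song arrives.
for partial in llm.stream_structured_predict(Song, prompt, artist_name="The Beatles"):
    print(partial)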
llama_index/llms/openai/utils.py
CHANGED
@@ -296,7 +296,13 @@ def to_openai_message_dict(
         elif isinstance(block, ImageBlock):
             if block.url:
                 content.append(
-                    {
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": str(block.url),
+                            "detail": block.detail or "auto",
+                        },
+                    }
                 )
             else:
                 img_bytes = block.resolve_image(as_base64=True).read()
@@ -306,7 +312,7 @@
                         "type": "image_url",
                         "image_url": {
                             "url": f"data:{block.image_mimetype};base64,{img_str}",
-                            "detail": block.detail or "
+                            "detail": block.detail or "auto",
                         },
                     }
                 )
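The utils.py change rewrites the URL branch of `to_openai_message_dict` to emit a complete `image_url` content part and sets the default `detail` to "auto" in both the URL and base64 branches. A rough sketch of what a URL-backed `ImageBlock` would serialize to after this change; the example URL and detail value are assumptions:

from llama_index.core.base.llms.types import ChatMessage, ImageBlock, MessageRole
from llama_index.llms.openai.utils import to_openai_message_dict

# Hypothetical image URL, for illustration only.
message = ChatMessage(
    role=MessageRole.USER,
    blocks=[ImageBlock(url="https://example.com/cat.png", detail="low")],
)

# Expected content part under this change:
#   {"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "low"}}
# with "detail" falling back to "auto" when the block does not set it.
print(to_openai_message_dict(message))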
llama_index_llms_openai-0.3.28.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
+llama_index/llms/openai/base.py,sha256=RjkISrh-RvbrQWOfdNdH4nimDQN0byUFm_n6r703jdM,38609
+llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llama_index/llms/openai/utils.py,sha256=n7GEv864j34idRUR2ouu0McSBqBTVX2Tko9vf1YOl-k,21624
+llama_index_llms_openai-0.3.28.dist-info/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
+llama_index_llms_openai-0.3.28.dist-info/METADATA,sha256=Bmx7FvGOcpdMpwP5gx-Wx2Dlbkp_42N7QC-zVodKXuw,3322
+llama_index_llms_openai-0.3.28.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_index_llms_openai-0.3.28.dist-info/RECORD,,
llama_index_llms_openai-0.3.26.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
-llama_index/llms/openai/base.py,sha256=zQAB6ch5acoKnMmKFk6Ro06-EM3ICnISZ2bhgAlZlCg,38236
-llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llama_index/llms/openai/utils.py,sha256=BH9NqETgEBZp8L_LEi0CyMB8lGeXI5bKmwU9WvrZoAQ,21435
-llama_index_llms_openai-0.3.26.dist-info/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
-llama_index_llms_openai-0.3.26.dist-info/METADATA,sha256=2fhAC1UAH7ARCk0msjYUjMNPZr4fxI-l5HB2c4ep-ZM,3322
-llama_index_llms_openai-0.3.26.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_index_llms_openai-0.3.26.dist-info/RECORD,,
{llama_index_llms_openai-0.3.26.dist-info → llama_index_llms_openai-0.3.28.dist-info}/LICENSE
RENAMED
File without changes
{llama_index_llms_openai-0.3.26.dist-info → llama_index_llms_openai-0.3.28.dist-info}/WHEEL
RENAMED
File without changes