llama-index-llms-openai 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the changes between two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- llama_index/llms/openai/base.py +61 -1
- {llama_index_llms_openai-0.2.2.dist-info → llama_index_llms_openai-0.2.3.dist-info}/METADATA +3 -4
- llama_index_llms_openai-0.2.3.dist-info/RECORD +6 -0
- {llama_index_llms_openai-0.2.2.dist-info → llama_index_llms_openai-0.2.3.dist-info}/WHEEL +1 -1
- llama_index_llms_openai-0.2.2.dist-info/RECORD +0 -6
llama_index/llms/openai/base.py
CHANGED
```diff
@@ -2,6 +2,7 @@ import functools
 from typing import (
     TYPE_CHECKING,
     Any,
+    Generator,
     Awaitable,
     Callable,
     Dict,
```
```diff
@@ -49,7 +50,7 @@ from llama_index.core.llms.callbacks import (
 )
 from llama_index.core.llms.function_calling import FunctionCallingLLM
 from llama_index.core.llms.llm import ToolSelection
-from llama_index.core.types import BaseOutputParser, PydanticProgramMode
+from llama_index.core.types import BaseOutputParser, PydanticProgramMode, Model
 from llama_index.llms.openai.utils import (
     OpenAIToolCall,
     create_retry_decorator,
```
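Context for the `Model` import: in `llama_index.core.types`, `Model` is, to the best of my knowledge, a `TypeVar` bound to pydantic's `BaseModel`; it lets the streaming overrides added further down be typed generically over the caller's output class. A sketch of the assumed definition:

```python
from typing import TypeVar

from pydantic import BaseModel

# Assumed shape of Model in llama_index.core.types (not part of this diff):
# a type variable ranging over any pydantic model subclass.
Model = TypeVar("Model", bound=BaseModel)
```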
```diff
@@ -62,6 +63,9 @@ from llama_index.llms.openai.utils import (
     resolve_openai_credentials,
     to_openai_message_dicts,
 )
+from llama_index.core.bridge.pydantic import (
+    BaseModel,
+)
 
 from openai import AsyncOpenAI, AzureOpenAI
 from openai import OpenAI as SyncOpenAI
```
```diff
@@ -72,6 +76,10 @@ from openai.types.chat.chat_completion_chunk import (
 )
 from llama_index.core.llms.utils import parse_partial_json
 
+import llama_index.core.instrumentation as instrument
+
+dispatcher = instrument.get_dispatcher(__name__)
+
 if TYPE_CHECKING:
     from llama_index.core.tools.types import BaseTool
 
```
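The `dispatcher` added above hooks this module into llama-index's instrumentation framework: `instrument.get_dispatcher(__name__)` returns a module-scoped dispatcher, and the `@dispatcher.span` decorators on the methods below open a span around each call. A minimal sketch of observing those spans, assuming core's `SimpleSpanHandler` (the handler choice and usage here are illustrative, not part of this diff):

```python
import llama_index.core.instrumentation as instrument
from llama_index.core.instrumentation.span_handlers import SimpleSpanHandler

# Handlers registered on the root dispatcher receive spans opened by
# module-level dispatchers such as the one this release adds.
span_handler = SimpleSpanHandler()
instrument.get_dispatcher().add_span_handler(span_handler)

# ...after running some LLM calls:
span_handler.print_trace_trees()  # dump the recorded span trees
```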
```diff
@@ -940,3 +948,55 @@ class OpenAI(FunctionCallingLLM):
         )
 
         return tool_selections
+
+    @dispatcher.span
+    def structured_predict(
+        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
+    ) -> BaseModel:
+        """Structured predict."""
+        llm_kwargs = llm_kwargs or {}
+        llm_kwargs["tool_choice"] = (
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
+        )
+        # by default structured prediction uses function calling to extract structured outputs
+        # here we force tool_choice to be required
+        return super().structured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+
+    @dispatcher.span
+    async def astructured_predict(
+        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
+    ) -> BaseModel:
+        """Structured predict."""
+        llm_kwargs = llm_kwargs or {}
+        llm_kwargs["tool_choice"] = (
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
+        )
+        # by default structured prediction uses function calling to extract structured outputs
+        # here we force tool_choice to be required
+        return await super().astructured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+
+    @dispatcher.span
+    def stream_structured_predict(
+        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
+    ) -> Generator[Union[Model, List[Model]], None, None]:
+        """Stream structured predict."""
+        llm_kwargs = llm_kwargs or {}
+        llm_kwargs["tool_choice"] = (
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
+        )
+        # by default structured prediction uses function calling to extract structured outputs
+        # here we force tool_choice to be required
+        return super().stream_structured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
+
+    @dispatcher.span
+    def stream_structured_predict(
+        self, *args: Any, llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any
+    ) -> Generator[Union[Model, List[Model]], None, None]:
+        """Stream structured predict."""
+        llm_kwargs = llm_kwargs or {}
+        llm_kwargs["tool_choice"] = (
+            "required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
+        )
+        # by default structured prediction uses function calling to extract structured outputs
+        # here we force tool_choice to be required
+        return super().stream_structured_predict(*args, llm_kwargs=llm_kwargs, **kwargs)
```
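The net effect of the overrides above: structured prediction on the OpenAI LLM now injects `tool_choice="required"` unless the caller supplies an explicit `tool_choice` in `llm_kwargs`, so the model cannot skip the function call that carries the structured output. A hedged usage sketch (the model name and `Invoice` schema are illustrative, not from this package):

```python
from pydantic import BaseModel
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.openai import OpenAI


class Invoice(BaseModel):
    """Illustrative output schema."""

    vendor: str
    total: float


llm = OpenAI(model="gpt-4o-mini")  # assumed model; any function-calling model works
prompt = PromptTemplate("Extract the invoice fields from: {text}")

# tool_choice="required" is injected automatically as of 0.2.3.
invoice = llm.structured_predict(
    Invoice, prompt, text="ACME Corp billed a total of $1,200.00"
)

# Passing tool_choice explicitly opts out of the forced default.
relaxed = llm.structured_predict(
    Invoice, prompt, llm_kwargs={"tool_choice": "auto"}, text="..."
)

# The streaming variant yields progressively more complete objects as
# the tool-call arguments stream in.
for partial in llm.stream_structured_predict(
    Invoice, prompt, text="ACME Corp billed a total of $1,200.00"
):
    print(partial)
```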
{llama_index_llms_openai-0.2.2.dist-info → llama_index_llms_openai-0.2.3.dist-info}/METADATA
RENAMED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-llms-openai
-Version: 0.2.2
+Version: 0.2.3
 Summary: llama-index llms openai integration
 License: MIT
 Author: llama-index
```
```diff
@@ -10,9 +10,8 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-
-Requires-Dist: llama-index-
-Requires-Dist: llama-index-core (>=0.11.0,<0.12.0)
+Requires-Dist: llama-index-agent-openai (>=0.3.1,<0.4.0)
+Requires-Dist: llama-index-core (>=0.11.7,<0.12.0)
 Requires-Dist: openai (>=1.40.0,<2.0.0)
 Description-Content-Type: text/markdown
 
```
llama_index_llms_openai-0.2.3.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,6 @@
+llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
+llama_index/llms/openai/base.py,sha256=gn87WGrKkZ2llOos1TishK2XVoWZc05hGK7YrBk8kh0,36757
+llama_index/llms/openai/utils.py,sha256=VuDXkLR_BGVqoZc9IJqiJlVloZwG9Z7s1nGPAhlbvWE,13079
+llama_index_llms_openai-0.2.3.dist-info/METADATA,sha256=ig-bKzvFZSaMSnTtS5NhHVkdr_3HFHRcg2KOXnkLZMo,654
+llama_index_llms_openai-0.2.3.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+llama_index_llms_openai-0.2.3.dist-info/RECORD,,
```
llama_index_llms_openai-0.2.2.dist-info/RECORD
REMOVED

```diff
@@ -1,6 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
-llama_index/llms/openai/base.py,sha256=C_nxCljq-5mV8jcfE3cR7BiRE6UfJ7CxO239lCvRfcI,34064
-llama_index/llms/openai/utils.py,sha256=VuDXkLR_BGVqoZc9IJqiJlVloZwG9Z7s1nGPAhlbvWE,13079
-llama_index_llms_openai-0.2.2.dist-info/METADATA,sha256=dHi8MVPHPI2icQKVEsePMYwYCCyRbl_OXAkuq_1I2JQ,705
-llama_index_llms_openai-0.2.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_index_llms_openai-0.2.2.dist-info/RECORD,,
```