llama-index-llms-openai 0.6.15__py3-none-any.whl → 0.6.16__py3-none-any.whl
This diff reflects the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- llama_index/llms/openai/base.py +7 -3
- {llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/METADATA +1 -1
- {llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/RECORD +5 -5
- {llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/WHEEL +0 -0
- {llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/licenses/LICENSE +0 -0
llama_index/llms/openai/base.py
CHANGED
@@ -236,7 +236,9 @@ class OpenAI(FunctionCallingLLM):
         default=False,
         description="Whether to use strict mode for invoking tools/using schemas.",
     )
-    reasoning_effort: Optional[
+    reasoning_effort: Optional[
+        Literal["none", "minimal", "low", "medium", "high", "xhigh"]
+    ] = Field(
         default=None,
         description="The effort to use for reasoning models.",
     )
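The hunk above narrows the `reasoning_effort` field from a single (truncated in this view) annotation to an explicit set of literals. A minimal pydantic sketch, not the package's own code, of how such a `Literal`-constrained `Optional` field behaves; the `ReasoningConfig` class name and the pydantic v2 error type shown are assumptions for illustration:

```python
# Minimal sketch (not llama-index code): a Literal-constrained Optional field.
from typing import Literal, Optional

from pydantic import BaseModel, Field, ValidationError


class ReasoningConfig(BaseModel):
    # Mirrors the shape of the new annotation in base.py.
    reasoning_effort: Optional[
        Literal["none", "minimal", "low", "medium", "high", "xhigh"]
    ] = Field(
        default=None,
        description="The effort to use for reasoning models.",
    )


print(ReasoningConfig().reasoning_effort)                        # None
print(ReasoningConfig(reasoning_effort="low").reasoning_effort)  # "low"

try:
    ReasoningConfig(reasoning_effort="extreme")  # not in the Literal -> rejected
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "literal_error" on pydantic v2
```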
@@ -279,7 +281,9 @@ class OpenAI(FunctionCallingLLM):
         pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
         output_parser: Optional[BaseOutputParser] = None,
         strict: bool = False,
-        reasoning_effort: Optional[
+        reasoning_effort: Optional[
+            Literal["none", "minimal", "low", "medium", "high", "xhigh"]
+        ] = None,
         modalities: Optional[List[str]] = None,
         audio_config: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
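A hedged usage sketch of the widened constructor parameter. The model name and prompt below are examples only, and which effort values a given model actually accepts is decided by the OpenAI API, not by this type annotation:

```python
# Usage sketch, assuming llama-index-llms-openai >= 0.6.16 is installed and
# OPENAI_API_KEY is set in the environment. The model name is illustrative.
from llama_index.llms.openai import OpenAI

llm = OpenAI(
    model="o3-mini",          # a reasoning-capable model (example only)
    reasoning_effort="low",   # "none", "minimal", and "xhigh" are now also
                              # allowed, for models that support them
)

response = llm.complete("Summarize this change in one sentence.")
print(response.text)
```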
@@ -473,7 +477,7 @@ class OpenAI(FunctionCallingLLM):
         )
         all_kwargs.pop("max_tokens", None)
         if self.model in O1_MODELS and self.reasoning_effort is not None:
-            # O1 models support reasoning_effort of low, medium, high
+            # O1 models support reasoning_effort of none, minimal, low, medium, high, xhigh
             all_kwargs["reasoning_effort"] = self.reasoning_effort

         if self.modalities is not None:
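The hunk above only forwards `reasoning_effort` when the configured model is in the package's `O1_MODELS` table and the value is not `None`; otherwise the kwarg is simply not sent. A standalone sketch of that gating logic, using a hypothetical `REASONING_MODELS` set in place of the package's real lookup table:

```python
# Standalone sketch of the gating logic above. REASONING_MODELS is a
# hypothetical, illustrative subset; the real table lives inside the package.
from typing import Any, Dict, Optional

REASONING_MODELS = {"o1", "o1-mini", "o3-mini"}


def build_kwargs(
    model: str,
    reasoning_effort: Optional[str],
    **extra: Any,
) -> Dict[str, Any]:
    all_kwargs: Dict[str, Any] = dict(extra)
    all_kwargs.pop("max_tokens", None)  # reasoning models reject max_tokens
    if model in REASONING_MODELS and reasoning_effort is not None:
        # Forward the effort only for models that understand it.
        all_kwargs["reasoning_effort"] = reasoning_effort
    return all_kwargs


print(build_kwargs("o3-mini", "high"))      # {'reasoning_effort': 'high'}
print(build_kwargs("gpt-4o-mini", "high"))  # {} -> silently dropped
```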
{llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/RECORD
RENAMED
@@ -1,9 +1,9 @@
 llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhACWCSFs,229
-llama_index/llms/openai/base.py,sha256=
+llama_index/llms/openai/base.py,sha256=iiU76449a66IpS2V9DoX-NbuOLmn1e6QNj01dJ2cFpo,45416
 llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llama_index/llms/openai/responses.py,sha256=hWzBaqaVhbD8t7tx0llMdnBLMgqopZaAxVBmYDVnUpI,38363
 llama_index/llms/openai/utils.py,sha256=b5iEUEC58OY6eopee8-g83bipNSdBA9KaHchEFE6Bno,33954
-llama_index_llms_openai-0.6.
-llama_index_llms_openai-0.6.
-llama_index_llms_openai-0.6.
-llama_index_llms_openai-0.6.
+llama_index_llms_openai-0.6.16.dist-info/METADATA,sha256=57FQrpe6ium9X-08ZloWWHSxLhY-wIYtGQHmfgyY89o,3040
+llama_index_llms_openai-0.6.16.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+llama_index_llms_openai-0.6.16.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
+llama_index_llms_openai-0.6.16.dist-info/RECORD,,
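Each RECORD line follows the standard wheel convention `path,sha256=<urlsafe base64 digest without padding>,<size in bytes>`. A small sketch of how such an entry can be recomputed to verify an installed file; the path in the comment is illustrative:

```python
# Sketch: recompute a wheel RECORD entry for a file, using the standard
# "path,sha256=<urlsafe base64 digest, no padding>,<size>" convention.
import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = hashlib.sha256(data).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{len(data)}"


# Example (point the path at an installed copy of the module to verify):
# print(record_entry("llama_index/llms/openai/base.py"))
```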
{llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/WHEEL
RENAMED
File without changes

{llama_index_llms_openai-0.6.15.dist-info → llama_index_llms_openai-0.6.16.dist-info}/licenses/LICENSE
RENAMED
File without changes