llama-index-llms-openai 0.3.16__py3-none-any.whl → 0.3.18__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
@@ -218,8 +218,8 @@ class OpenAI(FunctionCallingLLM):
218
218
  default=False,
219
219
  description="Whether to use strict mode for invoking tools/using schemas.",
220
220
  )
221
- reasoning_effort: Literal["low", "medium", "high"] = Field(
222
- default="medium",
221
+ reasoning_effort: Optional[Literal["low", "medium", "high"]] = Field(
222
+ default=None,
223
223
  description="The effort to use for reasoning models.",
224
224
  )
225
225
 
@@ -253,7 +253,7 @@ class OpenAI(FunctionCallingLLM):
253
253
  pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
254
254
  output_parser: Optional[BaseOutputParser] = None,
255
255
  strict: bool = False,
256
- reasoning_effort: Literal["low", "medium", "high"] = "medium",
256
+ reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
257
257
  **kwargs: Any,
258
258
  ) -> None:
259
259
  additional_kwargs = additional_kwargs or {}
@@ -424,13 +424,13 @@ class OpenAI(FunctionCallingLLM):
424
424
  all_kwargs = {**base_kwargs, **self.additional_kwargs}
425
425
  if "stream" not in all_kwargs and "stream_options" in all_kwargs:
426
426
  del all_kwargs["stream_options"]
427
- if self.model in O1_MODELS and base_kwargs["max_tokens"] is not None:
427
+ if self.model in O1_MODELS and base_kwargs.get("max_tokens") is not None:
428
428
  # O1 models use max_completion_tokens instead of max_tokens
429
429
  all_kwargs["max_completion_tokens"] = all_kwargs.get(
430
430
  "max_completion_tokens", all_kwargs["max_tokens"]
431
431
  )
432
432
  all_kwargs.pop("max_tokens", None)
433
- if self.model in O1_MODELS:
433
+ if self.model in O1_MODELS and self.reasoning_effort is not None:
434
434
  # O1 models support reasoning_effort of low, medium, high
435
435
  all_kwargs["reasoning_effort"] = self.reasoning_effort
436
436
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-index-llms-openai
3
- Version: 0.3.16
3
+ Version: 0.3.18
4
4
  Summary: llama-index llms openai integration
5
5
  License: MIT
6
6
  Author: llama-index
@@ -0,0 +1,7 @@
1
+ llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
2
+ llama_index/llms/openai/base.py,sha256=Bj7o-NCrUSWK3cES3anFgANMLRbmdLG8AkxC9QrVKqw,36637
3
+ llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ llama_index/llms/openai/utils.py,sha256=se_tHHLsNW4u2Ei_25HRPAm1lmzv-kFp2r2WqqL_jfE,18858
5
+ llama_index_llms_openai-0.3.18.dist-info/METADATA,sha256=4qvbR9QPW-vHG6tsGLEedOUP6Sf15LzW5jESGO64jdk,3321
6
+ llama_index_llms_openai-0.3.18.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
7
+ llama_index_llms_openai-0.3.18.dist-info/RECORD,,
@@ -1,7 +0,0 @@
1
- llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
2
- llama_index/llms/openai/base.py,sha256=zEM5s_xCrv7ePXsx_pn87nh1c4D8L5WdT-4ZcWOPvD4,36583
3
- llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- llama_index/llms/openai/utils.py,sha256=se_tHHLsNW4u2Ei_25HRPAm1lmzv-kFp2r2WqqL_jfE,18858
5
- llama_index_llms_openai-0.3.16.dist-info/METADATA,sha256=YuKy5vdgaNz_9KUObQHAsnp-k3Ii99fJCE1SiZrcbts,3321
6
- llama_index_llms_openai-0.3.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
7
- llama_index_llms_openai-0.3.16.dist-info/RECORD,,