llama-index-llms-openai 0.3.31__tar.gz → 0.3.33__tar.gz
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/PKG-INFO +1 -1
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/responses.py +2 -8
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/pyproject.toml +1 -1
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/LICENSE +0 -0
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/README.md +0 -0
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/__init__.py +0 -0
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/base.py +0 -0
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/py.typed +0 -0
- {llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/utils.py +0 -0
{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/responses.py

@@ -387,6 +387,7 @@ class OpenAIResponses(FunctionCallingLLM):
         }

     def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
+        initial_tools = self.built_in_tools or []
         model_kwargs = {
             "model": self.model,
             "include": self.include,
@@ -396,7 +397,7 @@ class OpenAIResponses(FunctionCallingLLM):
             "previous_response_id": self._previous_response_id,
             "store": self.store,
             "temperature": self.temperature,
-            "tools": self.built_in_tools,
+            "tools": [*initial_tools, *kwargs.pop("tools", [])],
             "top_p": self.top_p,
             "truncation": self.truncation,
             "user": self.user,
@@ -406,13 +407,6 @@ class OpenAIResponses(FunctionCallingLLM):
             # O1 models support reasoning_effort of low, medium, high
             model_kwargs["reasoning_effort"] = {"effort": self.reasoning_effort}

-        # add tools or extend openai tools
-        if "tools" in kwargs:
-            if isinstance(model_kwargs["tools"], list):
-                model_kwargs["tools"].extend(kwargs.pop("tools"))
-            else:
-                model_kwargs["tools"] = kwargs.pop("tools")
-
         # priority is class args > additional_kwargs > runtime args
         model_kwargs.update(self.additional_kwargs)
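The only functional change in this release is how _get_model_kwargs combines the class-level built_in_tools with tools passed at call time. The sketch below isolates that merging step from both versions as standalone functions so the behavioral difference is easier to see. It is a simplified reconstruction rather than the library code itself: the function names and placeholder tool dicts are illustrative only, and it assumes the removed payload entry seeded "tools" directly from built_in_tools (which the removed isinstance branch suggests).

from typing import Any, List, Optional

def merge_tools_0_3_31(built_in_tools: Optional[List[dict]], **kwargs: Any) -> Optional[List[dict]]:
    # 0.3.31 behavior: seed "tools" from built_in_tools as-is (possibly None),
    # then extend it in place, or replace it, only when runtime tools are passed.
    tools = built_in_tools
    if "tools" in kwargs:
        if isinstance(tools, list):
            tools.extend(kwargs.pop("tools"))  # note: mutates the original list
        else:
            tools = kwargs.pop("tools")
    return tools

def merge_tools_0_3_33(built_in_tools: Optional[List[dict]], **kwargs: Any) -> List[dict]:
    # 0.3.33 behavior: always build a fresh list concatenating built-in tools
    # (if any) with runtime tools (if any); neither input is mutated.
    initial_tools = built_in_tools or []
    return [*initial_tools, *kwargs.pop("tools", [])]

# Placeholder tool dicts for illustration only.
built_in = [{"type": "web_search_preview"}]
runtime = [{"type": "function", "name": "lookup_order"}]

print(merge_tools_0_3_31(list(built_in), tools=list(runtime)))
print(merge_tools_0_3_33(built_in, tools=runtime))
# Both calls print the combined two-element list; the versions diverge when no
# runtime tools are given (old: built_in_tools unchanged, possibly None;
# new: always a list) and in the in-place .extend() noted above.

Under those assumptions, the new spread-based form reads more directly, avoids mutating the built_in_tools list in place when runtime tools are appended, and always sends a list (possibly empty) rather than sometimes None.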
{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/LICENSE
RENAMED
File without changes

{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/README.md
RENAMED
File without changes

{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/__init__.py
RENAMED
File without changes

{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/base.py
RENAMED
File without changes

{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/py.typed
RENAMED
File without changes

{llama_index_llms_openai-0.3.31 → llama_index_llms_openai-0.3.33}/llama_index/llms/openai/utils.py
RENAMED
File without changes