llama-index-llms-openai 0.4.5__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_index/llms/openai/utils.py +13 -5
- {llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/METADATA +1 -1
- {llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/RECORD +5 -5
- {llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/WHEEL +0 -0
- {llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/licenses/LICENSE +0 -0
llama_index/llms/openai/utils.py
CHANGED
@@ -525,6 +525,9 @@ def to_openai_responses_message_dict(
             for tool_call in message.additional_kwargs["tool_calls"]
         ]
 
+    if "reasoning" in message.additional_kwargs:  # and if it is reasoning model
+        message_dicts = [message.additional_kwargs["reasoning"]] + message_dicts
+
     return message_dicts
 
 # there are some cases (like image generation or MCP tool call) that only support the string input
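The lines added in the hunk above make to_openai_responses_message_dict prepend a previously returned Responses API reasoning item, carried in the assistant message's additional_kwargs, ahead of the converted message dicts. A minimal standalone sketch of that behavior (prepend_reasoning is a hypothetical helper and the reasoning-item shape is assumed, not taken from the package):

def prepend_reasoning(message_dicts: list, additional_kwargs: dict) -> list:
    # Mirror of the new check: if the message carries a "reasoning" item,
    # place it before the rest of the converted message dicts.
    if "reasoning" in additional_kwargs:
        return [additional_kwargs["reasoning"]] + message_dicts
    return message_dicts

# Example: an assistant turn that kept the reasoning item from a prior response.
assistant_dicts = [{"role": "assistant", "content": "The answer is 42."}]
kwargs = {"reasoning": {"type": "reasoning", "id": "rs_123", "summary": []}}
print(prepend_reasoning(assistant_dicts, kwargs))
# [{'type': 'reasoning', 'id': 'rs_123', 'summary': []}, {'role': 'assistant', 'content': 'The answer is 42.'}]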
@@ -574,7 +577,6 @@ def to_openai_message_dicts(
     """Convert generic messages to OpenAI message dicts."""
     if is_responses_api:
         final_message_dicts = []
-        final_message_txt = ""
         for message in messages:
             message_dicts = to_openai_responses_message_dict(
                 message,
@@ -584,12 +586,18 @@ def to_openai_message_dicts(
             if isinstance(message_dicts, list):
                 final_message_dicts.extend(message_dicts)
             elif isinstance(message_dicts, str):
-
+                final_message_dicts.append({"role": "user", "content": message_dicts})
             else:
                 final_message_dicts.append(message_dicts)
-
-
-
+
+        # If there is only one message, and it is a user message, return the content string directly
+        if (
+            len(final_message_dicts) == 1
+            and final_message_dicts[0]["role"] == "user"
+            and isinstance(final_message_dicts[0]["content"], str)
+        ):
+            return final_message_dicts[0]["content"]
+
         return final_message_dicts
     else:
         return [
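The final hunk above adds a collapse rule to the Responses API path of to_openai_message_dicts: when the conversion yields exactly one user message with plain string content, the string itself is returned instead of a one-element list, matching the earlier comment that some inputs (image generation, MCP tool calls) only accept string input. A sketch of the rule in isolation (collapse_single_user_message is a hypothetical name, not a function in the package):

def collapse_single_user_message(final_message_dicts):
    # New rule: a lone user message with string content collapses to the raw string;
    # anything else is returned unchanged as a list of message dicts.
    if (
        len(final_message_dicts) == 1
        and final_message_dicts[0]["role"] == "user"
        and isinstance(final_message_dicts[0]["content"], str)
    ):
        return final_message_dicts[0]["content"]
    return final_message_dicts

print(collapse_single_user_message([{"role": "user", "content": "Draw a cat"}]))
# Draw a cat
print(collapse_single_user_message(
    [{"role": "system", "content": "Be terse."}, {"role": "user", "content": "Hi"}]
))
# [{'role': 'system', 'content': 'Be terse.'}, {'role': 'user', 'content': 'Hi'}]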
{llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/RECORD
RENAMED
@@ -2,8 +2,8 @@ llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhA
 llama_index/llms/openai/base.py,sha256=DhHPvef7ojecrkk_d7FwA-1Esv2iHymOEkBG0PaE4jg,42282
 llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llama_index/llms/openai/responses.py,sha256=Tvlwhst3W_t7EQQCxtj8k-uW6iHmjQPMJ4dh23f-oWg,36182
-llama_index/llms/openai/utils.py,sha256=
-llama_index_llms_openai-0.4.
-llama_index_llms_openai-0.4.
-llama_index_llms_openai-0.4.
-llama_index_llms_openai-0.4.
+llama_index/llms/openai/utils.py,sha256=0j1chdY2xybeXjTMyFIy9RJkvgSXeT4w3MIbJYeexwA,28998
+llama_index_llms_openai-0.4.7.dist-info/METADATA,sha256=H8FJVRneAK-Q1HlNtybdE3Ng525bxhIrub3G8aeQJ9U,3039
+llama_index_llms_openai-0.4.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+llama_index_llms_openai-0.4.7.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
+llama_index_llms_openai-0.4.7.dist-info/RECORD,,
{llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/WHEEL
RENAMED
File without changes
{llama_index_llms_openai-0.4.5.dist-info → llama_index_llms_openai-0.4.7.dist-info}/licenses/LICENSE
RENAMED
File without changes