llama-index-llms-openai 0.3.12__py3-none-any.whl → 0.3.14__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- llama_index/llms/openai/base.py +1 -1
- llama_index/llms/openai/utils.py +21 -13
- {llama_index_llms_openai-0.3.12.dist-info → llama_index_llms_openai-0.3.14.dist-info}/METADATA +1 -1
- llama_index_llms_openai-0.3.14.dist-info/RECORD +7 -0
- llama_index_llms_openai-0.3.12.dist-info/RECORD +0 -7
- {llama_index_llms_openai-0.3.12.dist-info → llama_index_llms_openai-0.3.14.dist-info}/WHEEL +0 -0
llama_index/llms/openai/base.py
CHANGED
@@ -170,7 +170,7 @@ class OpenAI(FunctionCallingLLM):
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
         ge=0.0,
-        le=1.0,
+        le=2.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate.",
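The only functional change here raises the upper bound on `temperature` from 1.0 to 2.0, matching the 0–2 range documented for the OpenAI chat API. A minimal sketch of the effect on validation (the model name and API key are placeholders; no request is made at construction time):

```python
from llama_index.llms.openai import OpenAI

# With le=2.0, temperatures in (1.0, 2.0] now pass pydantic field
# validation instead of failing at construction time.
llm = OpenAI(model="gpt-4o-mini", temperature=1.5, api_key="sk-placeholder")

# Values outside [0.0, 2.0] are still rejected by the ge/le constraints.
try:
    OpenAI(model="gpt-4o-mini", temperature=2.5, api_key="sk-placeholder")
except ValueError as err:  # pydantic's ValidationError subclasses ValueError
    print(f"rejected: {err}")
```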
llama_index/llms/openai/utils.py
CHANGED
@@ -2,7 +2,13 @@ import logging
 import os
 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union

+import openai
 from deprecated import deprecated
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
+from openai.types.completion_choice import Logprobs
 from tenacity import (
     before_sleep_log,
     retry,
@@ -14,20 +20,15 @@ from tenacity import (
 )
 from tenacity.stop import stop_base

-import openai
 from llama_index.core.base.llms.generic_utils import get_from_param_or_env
 from llama_index.core.base.llms.types import (
     ChatMessage,
     ImageBlock,
     LogProb,
+    MessageRole,
     TextBlock,
 )
 from llama_index.core.bridge.pydantic import BaseModel
-from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
-from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
-from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
-from openai.types.completion_choice import Logprobs

 DEFAULT_OPENAI_API_TYPE = "open_ai"
 DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
@@ -292,9 +293,14 @@ def to_openai_message_dict(
             msg = f"Unsupported content block type: {type(block).__name__}"
             raise ValueError(msg)

-    # NOTE: Sending a
-    #
-
+    # NOTE: Sending a null value (None) for Tool Message to OpenAI will cause error
+    # It's only Allowed to send None if it's an Assistant Message
+    # Reference: https://platform.openai.com/docs/api-reference/chat/create
+    content_txt = (
+        None
+        if content_txt == "" and message.role == MessageRole.ASSISTANT
+        else content_txt
+    )

     # NOTE: Despite what the openai docs say, if the role is ASSISTANT, SYSTEM
     # or TOOL, 'content' cannot be a list and must be string instead.
@@ -302,10 +308,12 @@ def to_openai_message_dict(
     # as the content. This will avoid breaking openai-like APIs.
     message_dict = {
         "role": message.role.value,
-        "content":
-
-
-
+        "content": (
+            content_txt
+            if message.role.value in ("assistant", "tool", "system")
+            or all(isinstance(block, TextBlock) for block in message.blocks)
+            else content
+        ),
     }

     # TODO: O1 models do not support system prompts
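Net effect of the two `to_openai_message_dict` hunks: an empty string is serialized as `content: None` only for assistant messages (e.g. assistant turns that carry nothing but tool calls), while tool and other roles keep the empty string, since the API rejects null content for them. A hedged sketch of the resulting behavior (the returned dict also picks up keys from `additional_kwargs`; the tool_call_id is hypothetical):

```python
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.openai.utils import to_openai_message_dict

# Empty assistant content is serialized as None, which OpenAI accepts
# for assistant turns consisting only of tool calls.
assistant = ChatMessage(role=MessageRole.ASSISTANT, content="")
print(to_openai_message_dict(assistant))
# expected shape: {'role': 'assistant', 'content': None}

# Empty tool content stays "": sending null here would be an API error.
tool = ChatMessage(
    role=MessageRole.TOOL,
    content="",
    additional_kwargs={"tool_call_id": "call_123"},  # hypothetical id
)
print(to_openai_message_dict(tool))
# expected shape: {'role': 'tool', 'content': '', 'tool_call_id': 'call_123'}
```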
llama_index_llms_openai-0.3.14.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
+llama_index/llms/openai/base.py,sha256=uvYlJz3TXZIsptoicHCSv8zIc8wK187RhY1lwU7lOVc,35777
+llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llama_index/llms/openai/utils.py,sha256=nE6XrWRJXSMbMRuP4c0pw39ftoGNYpMNH4m7kzwnsS8,18801
+llama_index_llms_openai-0.3.14.dist-info/METADATA,sha256=H9yu_YIoyDxD2uen-qQwqzwRp4w0JCqpmebOLvQizco,3321
+llama_index_llms_openai-0.3.14.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_index_llms_openai-0.3.14.dist-info/RECORD,,
llama_index_llms_openai-0.3.12.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
-llama_index/llms/openai/base.py,sha256=_kW22tTABTKQ-GFRN00P5OOcJS4IeVAI3vPqbjw1D8o,35777
-llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llama_index/llms/openai/utils.py,sha256=xGmeX1TbJaLjdtryETTGHR3-OSdrBm-79UxC9FlrrdE,18563
-llama_index_llms_openai-0.3.12.dist-info/METADATA,sha256=n4knQjg7oDyUJSS8nL5YUNJrLbmNd9cdG-dMEEmWe7I,3321
-llama_index_llms_openai-0.3.12.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_index_llms_openai-0.3.12.dist-info/RECORD,,
{llama_index_llms_openai-0.3.12.dist-info → llama_index_llms_openai-0.3.14.dist-info}/WHEEL
File without changes