llama-index-llms-openai 0.3.13__tar.gz → 0.3.15__tar.gz
This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/PKG-INFO +1 -1
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/utils.py +23 -13
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/pyproject.toml +1 -1
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/README.md +0 -0
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/__init__.py +0 -0
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/base.py +0 -0
- {llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/py.typed +0 -0
{llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/utils.py
RENAMED
```diff
@@ -2,7 +2,13 @@ import logging
 import os
 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
 
+import openai
 from deprecated import deprecated
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
+from openai.types.completion_choice import Logprobs
 from tenacity import (
     before_sleep_log,
     retry,
```
```diff
@@ -14,20 +20,15 @@ from tenacity import (
 )
 from tenacity.stop import stop_base
 
-import openai
 from llama_index.core.base.llms.generic_utils import get_from_param_or_env
 from llama_index.core.base.llms.types import (
     ChatMessage,
     ImageBlock,
     LogProb,
+    MessageRole,
     TextBlock,
 )
 from llama_index.core.bridge.pydantic import BaseModel
-from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
-from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
-from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
-from openai.types.completion_choice import Logprobs
 
 DEFAULT_OPENAI_API_TYPE = "open_ai"
 DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
```
```diff
@@ -40,6 +41,8 @@ O1_MODELS: Dict[str, int] = {
     "o1-preview-2024-09-12": 128000,
     "o1-mini": 128000,
     "o1-mini-2024-09-12": 128000,
+    "o3-mini": 200000,
+    "o3-mini-2025-01-31": 200000,
 }
 
 O1_MODELS_WITHOUT_FUNCTION_CALLING = {
```
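The model table gains the o3-mini family with a 200,000-token context window. As a quick sanity check (a minimal sketch, assuming the newer package version is installed and that `O1_MODELS` remains importable from `utils.py`):

```python
# Minimal sketch: the o3-mini entries added in this diff should now be
# present in the module-level model table (assumes the newer release).
from llama_index.llms.openai.utils import O1_MODELS

assert O1_MODELS["o3-mini"] == 200_000
assert O1_MODELS["o3-mini-2025-01-31"] == 200_000
```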
```diff
@@ -292,9 +295,14 @@ def to_openai_message_dict(
             msg = f"Unsupported content block type: {type(block).__name__}"
             raise ValueError(msg)
 
-    # NOTE: Sending a null value (None) for Tool Message to OpenAI will cause error
-    # It's only Allowed to send None if it's an Assistant Message
-    content_txt = None if content_txt == "" else content_txt
+    # NOTE: Sending a null value (None) for Tool Message to OpenAI will cause error
+    # It's only Allowed to send None if it's an Assistant Message
+    # Reference: https://platform.openai.com/docs/api-reference/chat/create
+    content_txt = (
+        None
+        if content_txt == "" and message.role == MessageRole.ASSISTANT
+        else content_txt
+    )
 
     # NOTE: Despite what the openai docs say, if the role is ASSISTANT, SYSTEM
     # or TOOL, 'content' cannot be a list and must be string instead.
```
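The rewritten block narrows the empty-string-to-`None` conversion: OpenAI's chat API rejects `content: null` on tool messages but permits it on assistant messages, so only assistant messages are converted. A standalone sketch of the rule (the `normalize_content` helper is hypothetical, not the library's API):

```python
from typing import Optional


def normalize_content(content_txt: str, role: str) -> Optional[str]:
    """Hypothetical helper mirroring the diff's rule: empty text becomes
    None only for assistant messages; every other role keeps the empty
    string, because OpenAI rejects null content on e.g. tool messages."""
    return None if content_txt == "" and role == "assistant" else content_txt


assert normalize_content("", "assistant") is None  # null is allowed here
assert normalize_content("", "tool") == ""  # null here would be rejected
assert normalize_content("hi", "tool") == "hi"  # non-empty text passes through
```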
```diff
@@ -302,10 +310,12 @@ def to_openai_message_dict(
     # as the content. This will avoid breaking openai-like APIs.
     message_dict = {
         "role": message.role.value,
-        "content": content_txt
-        if message.role.value in ("assistant", "tool", "system")
-        or all(isinstance(block, TextBlock) for block in message.blocks)
-        else content,
+        "content": (
+            content_txt
+            if message.role.value in ("assistant", "tool", "system")
+            or all(isinstance(block, TextBlock) for block in message.blocks)
+            else content
+        ),
     }
 
     # TODO: O1 models do not support system prompts
```
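The comments around this block record a wire-format quirk: only user messages may carry list-of-parts content, while assistant, system, and tool messages must send a plain string (or, per the change above, `None` for an empty assistant message). Illustrative payloads, assuming OpenAI's chat/completions message format; the image URL is a placeholder:

```python
# Illustrative request messages in OpenAI's chat/completions format.
messages = [
    # User content may be a list of typed parts (e.g. text plus an image).
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {
                "type": "image_url",
                "image_url": {"url": "https://example.com/cat.png"},
            },
        ],
    },
    # Assistant, system, and tool content must be a plain string, never a list.
    {"role": "assistant", "content": "It is a cat."},
]
```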
{llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/README.md
RENAMED

File without changes

{llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/__init__.py
RENAMED

File without changes
{llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/base.py
RENAMED

File without changes

{llama_index_llms_openai-0.3.13 → llama_index_llms_openai-0.3.15}/llama_index/llms/openai/py.typed
RENAMED

File without changes