llama-index-llms-openai 0.3.13__py3-none-any.whl → 0.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,7 +2,13 @@ import logging
2
2
  import os
3
3
  from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
4
4
 
5
+ import openai
5
6
  from deprecated import deprecated
7
+ from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
8
+ from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
9
+ from openai.types.chat.chat_completion_message import ChatCompletionMessage
10
+ from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
11
+ from openai.types.completion_choice import Logprobs
6
12
  from tenacity import (
7
13
  before_sleep_log,
8
14
  retry,
@@ -14,20 +20,15 @@ from tenacity import (
14
20
  )
15
21
  from tenacity.stop import stop_base
16
22
 
17
- import openai
18
23
  from llama_index.core.base.llms.generic_utils import get_from_param_or_env
19
24
  from llama_index.core.base.llms.types import (
20
25
  ChatMessage,
21
26
  ImageBlock,
22
27
  LogProb,
28
+ MessageRole,
23
29
  TextBlock,
24
30
  )
25
31
  from llama_index.core.bridge.pydantic import BaseModel
26
- from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
27
- from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
28
- from openai.types.chat.chat_completion_message import ChatCompletionMessage
29
- from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
30
- from openai.types.completion_choice import Logprobs
31
32
 
32
33
  DEFAULT_OPENAI_API_TYPE = "open_ai"
33
34
  DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
@@ -292,9 +293,14 @@ def to_openai_message_dict(
292
293
  msg = f"Unsupported content block type: {type(block).__name__}"
293
294
  raise ValueError(msg)
294
295
 
295
- # NOTE: Sending a blank string to openai will cause an error.
296
- # This will commonly happen with tool calls.
297
- content_txt = None if content_txt == "" else content_txt
296
 + # NOTE: Sending a null value (None) for a Tool Message to OpenAI will cause an error
297
 + # It's only allowed to send None if it's an Assistant Message.
298
+ # Reference: https://platform.openai.com/docs/api-reference/chat/create
299
+ content_txt = (
300
+ None
301
+ if content_txt == "" and message.role == MessageRole.ASSISTANT
302
+ else content_txt
303
+ )
298
304
 
299
305
  # NOTE: Despite what the openai docs say, if the role is ASSISTANT, SYSTEM
300
306
  # or TOOL, 'content' cannot be a list and must be string instead.
@@ -302,10 +308,12 @@ def to_openai_message_dict(
302
308
  # as the content. This will avoid breaking openai-like APIs.
303
309
  message_dict = {
304
310
  "role": message.role.value,
305
- "content": content_txt
306
- if message.role.value in ("assistant", "tool", "system")
307
- or all(isinstance(block, TextBlock) for block in message.blocks)
308
- else content,
311
+ "content": (
312
+ content_txt
313
+ if message.role.value in ("assistant", "tool", "system")
314
+ or all(isinstance(block, TextBlock) for block in message.blocks)
315
+ else content
316
+ ),
309
317
  }
310
318
 
311
319
  # TODO: O1 models do not support system prompts
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-index-llms-openai
3
- Version: 0.3.13
3
+ Version: 0.3.14
4
4
  Summary: llama-index llms openai integration
5
5
  License: MIT
6
6
  Author: llama-index
@@ -0,0 +1,7 @@
1
+ llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
2
+ llama_index/llms/openai/base.py,sha256=uvYlJz3TXZIsptoicHCSv8zIc8wK187RhY1lwU7lOVc,35777
3
+ llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ llama_index/llms/openai/utils.py,sha256=nE6XrWRJXSMbMRuP4c0pw39ftoGNYpMNH4m7kzwnsS8,18801
5
+ llama_index_llms_openai-0.3.14.dist-info/METADATA,sha256=H9yu_YIoyDxD2uen-qQwqzwRp4w0JCqpmebOLvQizco,3321
6
+ llama_index_llms_openai-0.3.14.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
7
+ llama_index_llms_openai-0.3.14.dist-info/RECORD,,
@@ -1,7 +0,0 @@
1
- llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
2
- llama_index/llms/openai/base.py,sha256=uvYlJz3TXZIsptoicHCSv8zIc8wK187RhY1lwU7lOVc,35777
3
- llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- llama_index/llms/openai/utils.py,sha256=xGmeX1TbJaLjdtryETTGHR3-OSdrBm-79UxC9FlrrdE,18563
5
- llama_index_llms_openai-0.3.13.dist-info/METADATA,sha256=L9Uc2A1uxhFVX-GNzUo1OFssYWg6HTgSO0ZEeIVNDeo,3321
6
- llama_index_llms_openai-0.3.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
7
- llama_index_llms_openai-0.3.13.dist-info/RECORD,,