llama-index-llms-openai 0.3.3__tar.gz → 0.3.5__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-llms-openai
-Version: 0.3.3
+Version: 0.3.5
 Summary: llama-index llms openai integration
 License: MIT
 Author: llama-index
@@ -12,7 +12,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: llama-index-core (>=0.12.4,<0.13.0)
-Requires-Dist: openai (>=1.40.0,<2.0.0)
+Requires-Dist: openai (>=1.57.1,<2.0.0)
 Description-Content-Type: text/markdown
 
 # LlamaIndex Llms Integration: Openai
llama_index/llms/openai/base.py
@@ -217,6 +217,10 @@ class OpenAI(FunctionCallingLLM):
         default=False,
         description="Whether to use strict mode for invoking tools/using schemas.",
     )
+    supports_content_blocks: bool = Field(
+        default=True,
+        description="Whether the model supports content blocks in chat messages.",
+    )
 
     _client: Optional[SyncOpenAI] = PrivateAttr()
     _aclient: Optional[AsyncOpenAI] = PrivateAttr()
@@ -423,7 +427,11 @@ class OpenAI(FunctionCallingLLM):
     @llm_retry_decorator
     def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
         client = self._get_client()
-        message_dicts = to_openai_message_dicts(messages, model=self.model)
+        message_dicts = to_openai_message_dicts(
+            messages,
+            model=self.model,
+            supports_content_blocks=self.supports_content_blocks,
+        )
 
         if self.reuse_client:
             response = client.chat.completions.create(
@@ -458,7 +466,11 @@ class OpenAI(FunctionCallingLLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponseGen:
         client = self._get_client()
-        message_dicts = to_openai_message_dicts(messages, model=self.model)
+        message_dicts = to_openai_message_dicts(
+            messages,
+            model=self.model,
+            supports_content_blocks=self.supports_content_blocks,
+        )
 
         def gen() -> ChatResponseGen:
             content = ""
@@ -668,7 +680,11 @@ class OpenAI(FunctionCallingLLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponse:
         aclient = self._get_aclient()
-        message_dicts = to_openai_message_dicts(messages, model=self.model)
+        message_dicts = to_openai_message_dicts(
+            messages,
+            model=self.model,
+            supports_content_blocks=self.supports_content_blocks,
+        )
 
         if self.reuse_client:
             response = await aclient.chat.completions.create(
@@ -701,7 +717,11 @@ class OpenAI(FunctionCallingLLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponseAsyncGen:
         aclient = self._get_aclient()
-        message_dicts = to_openai_message_dicts(messages, model=self.model)
+        message_dicts = to_openai_message_dicts(
+            messages,
+            model=self.model,
+            supports_content_blocks=self.supports_content_blocks,
+        )
 
         async def gen() -> ChatResponseAsyncGen:
             content = ""
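
Taken together, the base.py hunks add a single `supports_content_blocks` field (default `True`) and thread it through `_chat` and its streaming and async counterparts. A minimal sketch of setting the flag per instance; the model name is an illustrative assumption, not part of the diff:

from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI

# Default: content blocks enabled, matching the OpenAI chat API.
llm = OpenAI(model="gpt-4o-mini")

# Opt out for OpenAI-compatible backends that reject list-valued content;
# the flag is forwarded to to_openai_message_dicts() on every chat call.
llm_plain = OpenAI(model="gpt-4o-mini", supports_content_blocks=False)

response = llm.chat([ChatMessage(role="user", content="Hello!")])
print(response.message.content)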
llama_index/llms/openai/utils.py
@@ -253,13 +253,22 @@ def is_function_calling_model(model: str) -> bool:
 
 
 def to_openai_message_dict(
-    message: ChatMessage, drop_none: bool = False, model: Optional[str] = None
+    message: ChatMessage,
+    drop_none: bool = False,
+    model: Optional[str] = None,
+    supports_content_blocks: bool = False,
 ) -> ChatCompletionMessageParam:
     """Convert a ChatMessage to an OpenAI message dict."""
     content = []
+    content_txt = ""
     for block in message.blocks:
         if isinstance(block, TextBlock):
-            content.append({"type": "text", "text": block.text})
+            if message.role.value in ("assistant", "tool", "system"):
+                # Despite what the docs say, when the role is ASSISTANT, SYSTEM,
+                # or TOOL, 'content' cannot be a list and must be a string instead.
+                content_txt += block.text
+            else:
+                content.append({"type": "text", "text": block.text})
         elif isinstance(block, ImageBlock):
             if block.url:
                 content.append(
@@ -283,7 +292,10 @@ def to_openai_message_dict(
 
     message_dict = {
         "role": message.role.value,
-        "content": content,
+        "content": content_txt
+        if message.role.value in ("assistant", "tool", "system")
+        or not supports_content_blocks
+        else content,
     }
 
     # TODO: O1 models do not support system prompts
@@ -309,10 +321,16 @@ def to_openai_message_dicts(
     messages: Sequence[ChatMessage],
     drop_none: bool = False,
     model: Optional[str] = None,
+    supports_content_blocks: bool = False,
 ) -> List[ChatCompletionMessageParam]:
     """Convert generic messages to OpenAI message dicts."""
     return [
-        to_openai_message_dict(message, drop_none=drop_none, model=model)
+        to_openai_message_dict(
+            message,
+            drop_none=drop_none,
+            model=model,
+            supports_content_blocks=supports_content_blocks,
+        )
         for message in messages
     ]
 
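The utils.py change is easiest to see at the conversion layer itself. A sketch of the expected behavior, inferred from the hunks above (outputs shown as comments):

from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.openai.utils import to_openai_message_dict

user_msg = ChatMessage(role=MessageRole.USER, content="What is 2 + 2?")
asst_msg = ChatMessage(role=MessageRole.ASSISTANT, content="4")

# User text keeps the block form when content blocks are supported:
print(to_openai_message_dict(user_msg, supports_content_blocks=True))
# {'role': 'user', 'content': [{'type': 'text', 'text': 'What is 2 + 2?'}]}

# Assistant, system, and tool text is always flattened to a plain string:
print(to_openai_message_dict(asst_msg, supports_content_blocks=True))
# {'role': 'assistant', 'content': '4'}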
pyproject.toml
@@ -29,11 +29,11 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-openai"
 readme = "README.md"
-version = "0.3.3"
+version = "0.3.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-openai = "^1.40.0"
+openai = "^1.57.1"
 llama-index-core = "^0.12.4"
 
 [tool.poetry.group.dev.dependencies]
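
To pick up these changes, upgrade the integration package; pip will also pull in openai>=1.57.1 to satisfy the tightened constraint:

pip install -U "llama-index-llms-openai==0.3.5"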