llama-index-llms-openai 0.5.6__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
--- llama_index/llms/openai/base.py
+++ llama_index/llms/openai/base.py
@@ -994,7 +994,9 @@ class OpenAI(FunctionCallingLLM):
     def _prepare_schema(
         self, llm_kwargs: Optional[Dict[str, Any]], output_cls: Type[Model]
     ) -> Dict[str, Any]:
-        from openai.resources.beta.chat.completions import _type_to_response_format
+        from openai.resources.chat.completions.completions import (
+            _type_to_response_format,
+        )
 
         llm_kwargs = llm_kwargs or {}
         llm_kwargs["response_format"] = _type_to_response_format(output_cls)
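
The private helper moved out of the beta namespace alongside the bumped openai requirement. For downstream code that imports it directly, a hedged compatibility sketch (both paths are copied from this hunk; the try/except wrapper itself is not part of the package) could look like:

# Sketch only: resolve the private helper across the openai module rename.
# Neither path is a public API; which one exists depends on the installed openai version.
try:
    # layout expected by this release (openai>=1.108.1 per the METADATA change below)
    from openai.resources.chat.completions.completions import _type_to_response_format
except ImportError:
    # older layout, used by the previous 0.5.6 release
    from openai.resources.beta.chat.completions import _type_to_response_format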
--- llama_index/llms/openai/responses.py
+++ llama_index/llms/openai/responses.py
@@ -26,6 +26,7 @@ from openai.types.responses import (
     ResponseReasoningItem,
     ResponseCodeInterpreterToolCall,
     ResponseImageGenCallPartialImageEvent,
+    ResponseOutputItemDoneEvent,
 )
 from openai.types.responses.response_output_item import ImageGenerationCall, McpCall
 from typing import (
@@ -65,6 +66,7 @@ from llama_index.core.base.llms.types import (
     ContentBlock,
     TextBlock,
     ImageBlock,
+    ThinkingBlock,
 )
 from llama_index.core.bridge.pydantic import (
     Field,
@@ -252,7 +254,7 @@ class OpenAIResponses(FunctionCallingLLM):
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
-    api_key: str = Field(default=None, description="The OpenAI API key.")
+    api_key: Optional[str] = Field(default=None, description="The OpenAI API key.")
     api_base: str = Field(description="The base URL for OpenAI API.")
     api_version: str = Field(description="The API version for OpenAI API.")
     context_window: Optional[int] = Field(
@@ -382,7 +384,7 @@ class OpenAIResponses(FunctionCallingLLM):
         return model_name
 
     def _is_azure_client(self) -> bool:
-        return isinstance(self._get_client(), AzureOpenAI)
+        return isinstance(self._client, AzureOpenAI)
 
     def _get_credential_kwargs(self, is_async: bool = False) -> Dict[str, Any]:
         return {
@@ -484,7 +486,22 @@ class OpenAIResponses(FunctionCallingLLM):
             elif isinstance(item, ResponseComputerToolCall):
                 additional_kwargs["built_in_tool_calls"].append(item)
             elif isinstance(item, ResponseReasoningItem):
-                additional_kwargs["reasoning"] = item
+                content: Optional[str] = None
+                if item.content:
+                    content = "\n".join([i.text for i in item.content])
+                if item.summary:
+                    if content:
+                        content += "\n" + "\n".join([i.text for i in item.summary])
+                    else:
+                        content = "\n".join([i.text for i in item.summary])
+                message.blocks.append(
+                    ThinkingBlock(
+                        content=content,
+                        additional_information=item.model_dump(
+                            exclude={"content", "summary"}
+                        ),
+                    )
+                )
 
         if tool_calls and message:
             message.additional_kwargs["tool_calls"] = tool_calls
@@ -512,6 +529,12 @@
         chat_response = self._parse_response_output(response.output)
         chat_response.raw = response
         chat_response.additional_kwargs["usage"] = response.usage
+        if hasattr(response.usage.output_tokens_details, "reasoning_tokens"):
+            for block in chat_response.message.blocks:
+                if isinstance(block, ThinkingBlock):
+                    block.num_tokens = (
+                        response.usage.output_tokens_details.reasoning_tokens
+                    )
 
         return chat_response
 
@@ -605,9 +628,27 @@
             elif isinstance(event, ResponseWebSearchCallCompletedEvent):
                 # Web search tool call completed
                 built_in_tool_calls.append(event)
-            elif isinstance(event, ResponseReasoningItem):
+            elif isinstance(event, ResponseOutputItemDoneEvent):
                 # Reasoning information
-                additional_kwargs["reasoning"] = event
+                if isinstance(event.item, ResponseReasoningItem):
+                    content: Optional[str] = None
+                    if event.item.content:
+                        content = "\n".join([i.text for i in event.item.content])
+                    if event.item.summary:
+                        if content:
+                            content += "\n" + "\n".join(
+                                [i.text for i in event.item.summary]
+                            )
+                        else:
+                            content = "\n".join([i.text for i in event.item.summary])
+                    blocks.append(
+                        ThinkingBlock(
+                            content=content,
+                            additional_information=event.item.model_dump(
+                                exclude={"content", "summary"}
+                            ),
+                        )
+                    )
             elif isinstance(event, ResponseCompletedEvent):
                 # Response is complete
                 if hasattr(event, "response") and hasattr(event.response, "usage"):
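
Taken together, the hunks above replace the old additional_kwargs["reasoning"] passthrough with ThinkingBlock entries on the chat message, in both the non-streaming and streaming paths. A minimal usage sketch, assuming a reasoning-capable model and a configured OPENAI_API_KEY (the model name and prompt are illustrative, not taken from this diff):

# Hedged sketch: reading the new ThinkingBlock output after this change.
from llama_index.core.base.llms.types import ChatMessage, ThinkingBlock
from llama_index.llms.openai import OpenAIResponses

llm = OpenAIResponses(model="o4-mini")  # illustrative model choice
response = llm.chat([ChatMessage(role="user", content="Why is the sky blue?")])

for block in response.message.blocks:
    if isinstance(block, ThinkingBlock):
        # content joins the reasoning item's text and summary; num_tokens is filled
        # from usage.output_tokens_details.reasoning_tokens when the API reports it
        print(block.content, block.num_tokens)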
--- llama_index/llms/openai/utils.py
+++ llama_index/llms/openai/utils.py
@@ -29,6 +29,7 @@ from llama_index.core.base.llms.types import (
     TextBlock,
     AudioBlock,
     DocumentBlock,
+    ThinkingBlock,
 )
 from llama_index.core.bridge.pydantic import BaseModel
 
@@ -204,7 +205,7 @@ JSON_SCHEMA_MODELS = [
 
 def is_json_schema_supported(model: str) -> bool:
     try:
-        from openai.resources.beta.chat import completions
+        from openai.resources.chat.completions import completions
 
         if not hasattr(completions, "_type_to_response_format"):
             return False
@@ -469,7 +470,10 @@ def to_openai_responses_message_dict(
 
     for block in message.blocks:
         if isinstance(block, TextBlock):
-            content.append({"type": "input_text", "text": block.text})
+            if message.role.value == "user":
+                content.append({"type": "input_text", "text": block.text})
+            else:
+                content.append({"type": "output_text", "text": block.text})
             content_txt += block.text
         elif isinstance(block, DocumentBlock):
             if not block.data:
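
For reference, a hedged sketch of the content parts this branch now produces; the surrounding message-dict shape is assumed from the Responses API input format and is not shown in the hunk:

# Illustrative payloads only: user text keeps "input_text", while text from any
# other role (e.g. assistant) is now serialized as "output_text".
user_message = {
    "role": "user",
    "content": [{"type": "input_text", "text": "What is 2 + 2?"}],
}
assistant_message = {
    "role": "assistant",
    "content": [{"type": "output_text", "text": "4"}],
}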
@@ -505,6 +509,10 @@
                     "detail": block.detail or "auto",
                 }
             )
+        elif isinstance(block, ThinkingBlock):
+            if block.content:
+                content.append({"type": "output_text", "text": block.content})
+                content_txt += block.content
         else:
             msg = f"Unsupported content block type: {type(block).__name__}"
             raise ValueError(msg)
@@ -549,9 +557,6 @@
             for tool_call in message.additional_kwargs["tool_calls"]
         ]
 
-    if "reasoning" in message.additional_kwargs:  # and if it is reasoning model
-        message_dicts = [message.additional_kwargs["reasoning"]] + message_dicts
-
     return message_dicts
 
 # there are some cases (like image generation or MCP tool call) that only support the string input
--- llama_index_llms_openai-0.5.6.dist-info/METADATA
+++ llama_index_llms_openai-0.6.0.dist-info/METADATA
@@ -1,13 +1,13 @@
 Metadata-Version: 2.4
 Name: llama-index-llms-openai
-Version: 0.5.6
+Version: 0.6.0
 Summary: llama-index llms openai integration
 Author: llama-index
 License-Expression: MIT
 License-File: LICENSE
 Requires-Python: <4.0,>=3.9
 Requires-Dist: llama-index-core<0.15,>=0.13.0
-Requires-Dist: openai<2,>=1.81.0
+Requires-Dist: openai<2,>=1.108.1
 Description-Content-Type: text/markdown
 
 # LlamaIndex Llms Integration: Openai
--- /dev/null
+++ llama_index_llms_openai-0.6.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhACWCSFs,229
+llama_index/llms/openai/base.py,sha256=8ckxJJ8LEnmm8_xMTku62HTwAZi8MyjSK5X69PVpums,41900
+llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llama_index/llms/openai/responses.py,sha256=zeJ2fTdgf6qAs3VJ7cOFInHAbh_KYogqcQrMzMX71E8,38024
+llama_index/llms/openai/utils.py,sha256=qRoGGpA1FdW9nWPnPMji_FF1vi--RiSqih8x_F8K1C0,30137
+llama_index_llms_openai-0.6.0.dist-info/METADATA,sha256=Er2gjG7VG2JKmNihpK6YoJXmK62S6GtPAH3OJzoxKP8,3039
+llama_index_llms_openai-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+llama_index_llms_openai-0.6.0.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
+llama_index_llms_openai-0.6.0.dist-info/RECORD,,
--- llama_index_llms_openai-0.5.6.dist-info/RECORD
+++ /dev/null
@@ -1,9 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhACWCSFs,229
-llama_index/llms/openai/base.py,sha256=6EANUt_39Pukk6N6CYYVJx6CpoAG2jJsMau6MvV3JTI,41868
-llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llama_index/llms/openai/responses.py,sha256=Tvlwhst3W_t7EQQCxtj8k-uW6iHmjQPMJ4dh23f-oWg,36182
-llama_index/llms/openai/utils.py,sha256=3F6TN_IvYw35eCSXKI23EtWf1tScEWzslFS-tGB58Hw,29938
-llama_index_llms_openai-0.5.6.dist-info/METADATA,sha256=9PMeDISUlcyJ2pI2Bw44iqxE93ijoigek14se5PfUjE,3038
-llama_index_llms_openai-0.5.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-llama_index_llms_openai-0.5.6.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
-llama_index_llms_openai-0.5.6.dist-info/RECORD,,