llama-index-llms-openai 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
@@ -77,7 +77,7 @@ from llama_index.llms.openai.utils import (
     update_tool_calls,
     is_json_schema_supported,
 )
-from openai import AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
+from openai import AsyncOpenAI
 from openai import OpenAI as SyncOpenAI
 from openai.types.chat.chat_completion_chunk import (
     ChatCompletionChunk,
@@ -532,10 +532,7 @@ class OpenAI(FunctionCallingLLM):
         if len(response.choices) > 0:
             delta = response.choices[0].delta
         else:
-            if isinstance(client, AzureOpenAI):
-                continue
-            else:
-                delta = ChoiceDelta()
+            delta = ChoiceDelta()

         if delta is None:
             continue
@@ -801,10 +798,7 @@ class OpenAI(FunctionCallingLLM):
                 continue
             delta = response.choices[0].delta
         else:
-            if isinstance(aclient, AsyncAzureOpenAI):
-                continue
-            else:
-                delta = ChoiceDelta()
+            delta = ChoiceDelta()
         first_chat_chunk = False

         if delta is None:
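Taken together with the import hunk above, these two hunks drop the Azure-only branch from both the synchronous (line 532) and asynchronous (line 801) streaming paths: a chunk that arrives with no choices now always falls back to an empty ChoiceDelta() instead of being skipped when the client is an Azure client. A minimal sketch of the resulting fallback, assuming a ChatCompletionChunk-shaped chunk as in the diff; the extract_delta helper is illustrative, not part of the package:

    from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta

    def extract_delta(chunk: ChatCompletionChunk) -> ChoiceDelta:
        # 0.5.4 behavior: no isinstance(client, AzureOpenAI) special case;
        # chunks without choices yield an empty delta rather than being dropped.
        if len(chunk.choices) > 0:
            return chunk.choices[0].delta
        return ChoiceDelta()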
@@ -60,6 +60,7 @@ O1_MODELS: Dict[str, int] = {
     "gpt-5-mini-2025-08-07": 400000,
     "gpt-5-nano": 400000,
     "gpt-5-nano-2025-08-07": 400000,
+    "gpt-5-chat-latest": 400000,
 }

 O1_MODELS_WITHOUT_FUNCTION_CALLING = {
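The context-window table gains "gpt-5-chat-latest" at 400,000 tokens, matching the other GPT-5 entries, so the alias can be passed straight to the LLM constructor. A minimal usage sketch, assuming OPENAI_API_KEY is set in the environment; the printed value is the expected lookup result, not output from a live run:

    from llama_index.llms.openai import OpenAI

    # With 0.5.4 the alias resolves in the context-window table extended above.
    llm = OpenAI(model="gpt-5-chat-latest")
    print(llm.metadata.context_window)  # expected: 400000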
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llama-index-llms-openai
-Version: 0.5.2
+Version: 0.5.4
 Summary: llama-index llms openai integration
 Author: llama-index
 License-Expression: MIT
@@ -0,0 +1,9 @@
+llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhACWCSFs,229
+llama_index/llms/openai/base.py,sha256=R8hwgLVWEYMLYnZsgwwPSOwoBaDaxC6xzfYE9pu4jGc,41859
+llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llama_index/llms/openai/responses.py,sha256=Tvlwhst3W_t7EQQCxtj8k-uW6iHmjQPMJ4dh23f-oWg,36182
+llama_index/llms/openai/utils.py,sha256=HQqxqfwsVLStjVU1UlZekXqfeLpSbVxcWo-hD8DOggQ,29276
+llama_index_llms_openai-0.5.4.dist-info/METADATA,sha256=5W94TJDzRjiBSdPQV2RZ57lu92hFhI-lRPnCRfrqSII,3038
+llama_index_llms_openai-0.5.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+llama_index_llms_openai-0.5.4.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
+llama_index_llms_openai-0.5.4.dist-info/RECORD,,
@@ -1,9 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=8nmgixeXifQ4eVSgtCic54WxXqrrpXQPL4rhACWCSFs,229
-llama_index/llms/openai/base.py,sha256=9KA3DD9yvgwu0wOe2hIH5IaHWjsPpru3_z5j0Zh6WlU,42134
-llama_index/llms/openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llama_index/llms/openai/responses.py,sha256=Tvlwhst3W_t7EQQCxtj8k-uW6iHmjQPMJ4dh23f-oWg,36182
-llama_index/llms/openai/utils.py,sha256=IdvbjJ2y5zWDkntoPgBZ2pGbcrYIbGbg1smoju2XkUI,29243
-llama_index_llms_openai-0.5.2.dist-info/METADATA,sha256=0VT5QHRZZ6iXJmunWjWzbPqMNmn-SGHjVuxXAKtGZzY,3038
-llama_index_llms_openai-0.5.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-llama_index_llms_openai-0.5.2.dist-info/licenses/LICENSE,sha256=JPQLUZD9rKvCTdu192Nk0V5PAwklIg6jANii3UmTyMs,1065
-llama_index_llms_openai-0.5.2.dist-info/RECORD,,
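The two RECORD hunks are the wheel manifests: each entry is a path, an unpadded urlsafe-base64 sha256 digest, and the file size in bytes; only the base.py and utils.py digests change between 0.5.2 and 0.5.4, the other files are byte-identical. A minimal sketch for checking an unpacked file against its RECORD line, assuming the wheel has been extracted locally; the record_entry helper is illustrative:

    import base64
    import hashlib
    import pathlib

    def record_entry(path: str) -> str:
        # Reproduce the RECORD format: sha256 digest encoded as unpadded
        # urlsafe base64, followed by the file size in bytes.
        data = pathlib.Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"

    # record_entry("llama_index/llms/openai/base.py") should match the
    # corresponding line in the RECORD of the wheel it was unpacked from.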