llama-index-llms-openai 0.1.30__py3-none-any.whl → 0.1.31__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- llama_index/llms/openai/base.py +12 -10
- {llama_index_llms_openai-0.1.30.dist-info → llama_index_llms_openai-0.1.31.dist-info}/METADATA +1 -1
- llama_index_llms_openai-0.1.31.dist-info/RECORD +6 -0
- llama_index_llms_openai-0.1.30.dist-info/RECORD +0 -6
- {llama_index_llms_openai-0.1.30.dist-info → llama_index_llms_openai-0.1.31.dist-info}/WHEEL +0 -0
llama_index/llms/openai/base.py
CHANGED
@@ -390,7 +390,13 @@ class OpenAI(FunctionCallingLLM):
                 base_kwargs["top_logprobs"] = self.top_logprobs
             else:
                 base_kwargs["logprobs"] = self.top_logprobs  # int in this case
-        return {**base_kwargs, **self.additional_kwargs}
+
+        # can't send stream_options to the API when not streaming
+        all_kwargs = {**base_kwargs, **self.additional_kwargs}
+        if "stream" not in all_kwargs and "stream_options" in all_kwargs:
+            del all_kwargs["stream_options"]
+
+        return all_kwargs
 
     @llm_retry_decorator
     def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
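This hunk is the substance of the release: `_get_model_kwargs` now merges `additional_kwargs` itself and strips `stream_options` whenever `stream` is absent, because the OpenAI API rejects `stream_options` on non-streaming requests. Below is a minimal runnable sketch of just that filtering step; the helper name `filter_stream_options` is ours for illustration, not part of the library.

```python
# Minimal standalone sketch of the new filtering step in _get_model_kwargs.
# filter_stream_options is an illustrative name, not a library function.
from typing import Any, Dict


def filter_stream_options(
    base_kwargs: Dict[str, Any], additional_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
    # can't send stream_options to the API when not streaming
    all_kwargs = {**base_kwargs, **additional_kwargs}
    if "stream" not in all_kwargs and "stream_options" in all_kwargs:
        del all_kwargs["stream_options"]
    return all_kwargs


# Non-streaming request: stream_options is stripped before the API call.
out = filter_stream_options(
    {"model": "gpt-3.5-turbo"}, {"stream_options": {"include_usage": True}}
)
assert "stream_options" not in out

# Streaming request: stream_options is forwarded unchanged.
out = filter_stream_options(
    {"model": "gpt-3.5-turbo", "stream": True},
    {"stream_options": {"include_usage": True}},
)
assert out["stream_options"] == {"include_usage": True}
```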
@@ -486,8 +492,7 @@ class OpenAI(FunctionCallingLLM):
             is_function = False
             for response in client.chat.completions.create(
                 messages=message_dicts,
-                stream=True,
-                **self._get_model_kwargs(**kwargs),
+                **self._get_model_kwargs(stream=True, **kwargs),
             ):
                 response = cast(ChatCompletionChunk, response)
                 if len(response.choices) > 0:
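This and the remaining hunks are the other half of the fix: the streaming call sites stop passing `stream=True` directly to `create(...)` and instead route it through `_get_model_kwargs(stream=True, **kwargs)`, so the new check sees the flag and keeps `stream_options`. A hedged usage sketch, assuming a caller opts into token-usage reporting via `additional_kwargs` (`include_usage` is a real field of the OpenAI `stream_options` parameter):

```python
# Illustrative only: with this fix, stream_options set via additional_kwargs
# reaches the API on streaming calls and is dropped on non-streaming ones.
from llama_index.llms.openai import OpenAI

llm = OpenAI(
    model="gpt-3.5-turbo",
    additional_kwargs={"stream_options": {"include_usage": True}},
)

# Streaming path: _get_model_kwargs(stream=True, ...) keeps stream_options.
for chunk in llm.stream_complete("Say hi"):
    print(chunk.delta, end="")

# Non-streaming path: the same kwargs builder now drops stream_options,
# so this call no longer fails with a 400 from the API.
print(llm.complete("Say hi"))
```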
@@ -561,15 +566,14 @@ class OpenAI(FunctionCallingLLM):
     @llm_retry_decorator
     def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
         client = self._get_client()
-        all_kwargs = self._get_model_kwargs(**kwargs)
+        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
         self._update_max_tokens(all_kwargs, prompt)
 
         def gen() -> CompletionResponseGen:
             text = ""
             for response in client.completions.create(
                 prompt=prompt,
-                stream=True,
-                **all_kwargs,
+                **kwargs,
             ):
                 if len(response.choices) > 0:
                     delta = response.choices[0].text
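One asymmetry worth flagging in the hunk above: the call now spreads `**kwargs` rather than `**all_kwargs`, so the kwargs assembled by `_get_model_kwargs(stream=True, ...)` and adjusted by `_update_max_tokens` are not forwarded to `client.completions.create` on this synchronous path. The async counterpart further down keeps `**all_kwargs`; the diff alone does not show whether the difference is intentional.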
@@ -728,8 +732,7 @@ class OpenAI(FunctionCallingLLM):
             first_chat_chunk = True
             async for response in await aclient.chat.completions.create(
                 messages=message_dicts,
-                stream=True,
-                **self._get_model_kwargs(**kwargs),
+                **self._get_model_kwargs(stream=True, **kwargs),
             ):
                 response = cast(ChatCompletionChunk, response)
                 if len(response.choices) > 0:
@@ -815,14 +818,13 @@ class OpenAI(FunctionCallingLLM):
         self, prompt: str, **kwargs: Any
     ) -> CompletionResponseAsyncGen:
         aclient = self._get_aclient()
-        all_kwargs = self._get_model_kwargs(**kwargs)
+        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
         self._update_max_tokens(all_kwargs, prompt)
 
         async def gen() -> CompletionResponseAsyncGen:
             text = ""
             async for response in await aclient.completions.create(
                 prompt=prompt,
-                stream=True,
                 **all_kwargs,
             ):
                 if len(response.choices) > 0:
llama_index_llms_openai-0.1.31.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
+llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
+llama_index/llms/openai/base.py,sha256=lMDlAJQhPN6Fncn_Wm14Ie7O7kA2xUGyechjsh_8TJM,34027
+llama_index/llms/openai/utils.py,sha256=VuDXkLR_BGVqoZc9IJqiJlVloZwG9Z7s1nGPAhlbvWE,13079
+llama_index_llms_openai-0.1.31.dist-info/METADATA,sha256=JsOKGkOrC6gLiCfRlWLGTPjdm9ICqpMx_2qgIvafEKo,650
+llama_index_llms_openai-0.1.31.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_index_llms_openai-0.1.31.dist-info/RECORD,,
llama_index_llms_openai-0.1.30.dist-info/RECORD
REMOVED
@@ -1,6 +0,0 @@
-llama_index/llms/openai/__init__.py,sha256=vm3cIBSGkBFlE77GyfyN0EhpJcnJZN95QMhPN53EkbE,148
-llama_index/llms/openai/base.py,sha256=j-BiZ9E3pFG2RDSP8Vq75SHfZCId9XU5ymAddBF8x8I,33876
-llama_index/llms/openai/utils.py,sha256=VuDXkLR_BGVqoZc9IJqiJlVloZwG9Z7s1nGPAhlbvWE,13079
-llama_index_llms_openai-0.1.30.dist-info/METADATA,sha256=VnG5MXkR-ZUoxou8H9tUsy1uY8zGoC578GUG0Dx5Eoc,650
-llama_index_llms_openai-0.1.30.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_index_llms_openai-0.1.30.dist-info/RECORD,,
{llama_index_llms_openai-0.1.30.dist-info → llama_index_llms_openai-0.1.31.dist-info}/WHEEL
RENAMED
File without changes