llama-index-llms-openai 0.1.30__tar.gz → 0.2.0__tar.gz
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/PKG-INFO +2 -2
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/base.py +15 -12
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/pyproject.toml +2 -2
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/README.md +0 -0
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/__init__.py +0 -0
- {llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/utils.py +0 -0
{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/PKG-INFO RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-llms-openai
-Version: 0.1.30
+Version: 0.2.0
 Summary: llama-index llms openai integration
 License: MIT
 Author: llama-index
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: llama-index-core (>=0.10.57,<0.11.0)
+Requires-Dist: llama-index-core (>=0.11.0,<0.12.0)
 Requires-Dist: openai (>=1.40.0,<2.0.0)
 Description-Content-Type: text/markdown
{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/base.py RENAMED
@@ -166,7 +166,8 @@ class OpenAI(FunctionCallingLLM):
         gt=0,
     )
     logprobs: Optional[bool] = Field(
-        description="Whether to return logprobs per token."
+        description="Whether to return logprobs per token.",
+        default=None,
     )
     top_logprobs: int = Field(
         description="The number of top token log probs to return.",
@@ -187,7 +188,7 @@ class OpenAI(FunctionCallingLLM):
         description="The timeout, in seconds, for API requests.",
         gte=0,
     )
-    default_headers: Dict[str, str] = Field(
+    default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
     reuse_client: bool = Field(
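These two field changes track the llama-index-core 0.11 bump below, which moves to Pydantic v2: there, a field declared with Field(...) but no default is required even if its annotation is Optional. A minimal standalone sketch (not the llama-index code; class names are illustrative) of the before/after behavior:

# Minimal sketch, assuming Pydantic v2 semantics.
from typing import Dict, Optional

from pydantic import BaseModel, Field, ValidationError


class Before(BaseModel):
    # No default: Pydantic v2 treats this as a *required* field despite
    # the Optional[bool] annotation.
    logprobs: Optional[bool] = Field(
        description="Whether to return logprobs per token."
    )


class After(BaseModel):
    # Explicit default=None makes the field genuinely optional, and the
    # Optional[...] annotation now matches the None default.
    logprobs: Optional[bool] = Field(
        description="Whether to return logprobs per token.",
        default=None,
    )
    default_headers: Optional[Dict[str, str]] = Field(
        default=None, description="The default headers for API requests."
    )


try:
    Before()  # raises: field 'logprobs' is required
except ValidationError as exc:
    print(exc)

print(After())  # logprobs=None default_headers=None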
@@ -390,7 +391,13 @@ class OpenAI(FunctionCallingLLM):
             base_kwargs["top_logprobs"] = self.top_logprobs
         else:
             base_kwargs["logprobs"] = self.top_logprobs  # int in this case
-        return {**base_kwargs, **self.additional_kwargs}
+
+        # can't send stream_options to the API when not streaming
+        all_kwargs = {**base_kwargs, **self.additional_kwargs}
+        if "stream" not in all_kwargs and "stream_options" in all_kwargs:
+            del all_kwargs["stream_options"]
+
+        return all_kwargs

     @llm_retry_decorator
     def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
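The rewritten tail of _get_model_kwargs merges additional_kwargs into the request and then drops stream_options when the call is not streaming, since the OpenAI API rejects stream_options on non-streaming requests. A runnable sketch of just that filtering rule (the helper name is a hypothetical stand-in):

# Standalone sketch of the stream_options guard; build_request_kwargs is a
# hypothetical stand-in for the merged tail of _get_model_kwargs.
from typing import Any, Dict


def build_request_kwargs(
    base_kwargs: Dict[str, Any], additional_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
    all_kwargs = {**base_kwargs, **additional_kwargs}
    # can't send stream_options to the API when not streaming
    if "stream" not in all_kwargs and "stream_options" in all_kwargs:
        del all_kwargs["stream_options"]
    return all_kwargs


# Non-streaming request: stream_options is stripped.
print(build_request_kwargs(
    {"model": "gpt-4o"}, {"stream_options": {"include_usage": True}}
))  # {'model': 'gpt-4o'}

# Streaming request: stream=True is present, so stream_options survives.
print(build_request_kwargs(
    {"model": "gpt-4o", "stream": True}, {"stream_options": {"include_usage": True}}
))  # {'model': 'gpt-4o', 'stream': True, 'stream_options': {'include_usage': True}}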
@@ -486,8 +493,7 @@ class OpenAI(FunctionCallingLLM):
             is_function = False
             for response in client.chat.completions.create(
                 messages=message_dicts,
-                stream=True,
-                **self._get_model_kwargs(**kwargs),
+                **self._get_model_kwargs(stream=True, **kwargs),
             ):
                 response = cast(ChatCompletionChunk, response)
                 if len(response.choices) > 0:
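This call-site change, repeated below for the async chat stream and both completion streams, is the other half of the stream_options fix: stream=True now flows through _get_model_kwargs, so the guard above can tell the request is streaming and leave stream_options in place. A runnable sketch with a stubbed-down helper (names and defaults are illustrative):

# Stubbed sketch of why stream=True moved inside the helper call.
from typing import Any, Dict


def get_model_kwargs(**kwargs: Any) -> Dict[str, Any]:
    # Stand-in for _get_model_kwargs: merge defaults, then apply the guard.
    all_kwargs = {"model": "gpt-4o", "stream_options": {"include_usage": True}, **kwargs}
    if "stream" not in all_kwargs and "stream_options" in all_kwargs:
        del all_kwargs["stream_options"]
    return all_kwargs


# Before (0.1.30 style): stream=True is added outside the helper, so the
# helper strips stream_options and streaming responses lose usage reporting.
before = {"stream": True, **get_model_kwargs()}
assert "stream_options" not in before

# After (0.2.0 style): the helper sees stream=True and keeps stream_options.
after = get_model_kwargs(stream=True)
assert after["stream_options"] == {"include_usage": True}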
@@ -561,15 +567,14 @@ class OpenAI(FunctionCallingLLM):
     @llm_retry_decorator
     def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
         client = self._get_client()
-        all_kwargs = self._get_model_kwargs(**kwargs)
+        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
         self._update_max_tokens(all_kwargs, prompt)

         def gen() -> CompletionResponseGen:
             text = ""
             for response in client.completions.create(
                 prompt=prompt,
-                stream=True,
-                **all_kwargs,
+                **kwargs,
             ):
                 if len(response.choices) > 0:
                     delta = response.choices[0].text
@@ -728,8 +733,7 @@ class OpenAI(FunctionCallingLLM):
             first_chat_chunk = True
             async for response in await aclient.chat.completions.create(
                 messages=message_dicts,
-                stream=True,
-                **self._get_model_kwargs(**kwargs),
+                **self._get_model_kwargs(stream=True, **kwargs),
             ):
                 response = cast(ChatCompletionChunk, response)
                 if len(response.choices) > 0:
@@ -815,14 +819,13 @@ class OpenAI(FunctionCallingLLM):
         self, prompt: str, **kwargs: Any
     ) -> CompletionResponseAsyncGen:
         aclient = self._get_aclient()
-        all_kwargs = self._get_model_kwargs(**kwargs)
+        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
         self._update_max_tokens(all_kwargs, prompt)

         async def gen() -> CompletionResponseAsyncGen:
             text = ""
             async for response in await aclient.completions.create(
                 prompt=prompt,
-                stream=True,
                 **all_kwargs,
             ):
                 if len(response.choices) > 0:
{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/pyproject.toml RENAMED

@@ -29,12 +29,12 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-openai"
 readme = "README.md"
-version = "0.1.30"
+version = "0.2.0"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
-llama-index-core = "^0.10.57"
 openai = "^1.40.0"
+llama-index-core = "^0.11.0"

 [tool.poetry.group.dev.dependencies]
 ipython = "8.10.0"
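The Poetry caret constraints above are what generate the Requires-Dist ranges in PKG-INFO: for a 0.x version the caret pins the minor version, while for 1.x and above it pins the major. A small sketch of the resulting ranges using the packaging library:

# Sketch of how the caret constraints expand, using the `packaging` library.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# "^0.11.0" expands to ">=0.11.0,<0.12.0" (0.x: minor is the breaking boundary).
core_range = SpecifierSet(">=0.11.0,<0.12.0")
assert Version("0.11.5") in core_range
assert Version("0.12.0") not in core_range

# "^1.40.0" expands to ">=1.40.0,<2.0.0" (1.x and up: major is the boundary).
openai_range = SpecifierSet(">=1.40.0,<2.0.0")
assert Version("1.55.0") in openai_range
assert Version("2.0.0") not in openai_range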
{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/README.md RENAMED
File without changes

{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/__init__.py RENAMED
File without changes

{llama_index_llms_openai-0.1.30 → llama_index_llms_openai-0.2.0}/llama_index/llms/openai/utils.py RENAMED
File without changes