llama-index-llms-openai 0.2.11__tar.gz → 0.2.13__tar.gz

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,149 @@
+ Metadata-Version: 2.1
+ Name: llama-index-llms-openai
+ Version: 0.2.13
+ Summary: llama-index llms openai integration
+ License: MIT
+ Author: llama-index
+ Requires-Python: >=3.8.1,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: llama-index-core (>=0.11.7,<0.12.0)
+ Requires-Dist: openai (>=1.40.0,<2.0.0)
+ Description-Content-Type: text/markdown
+
+ # LlamaIndex Llms Integration: Openai
+
+ ## Installation
+
+ To install the required package, run:
+
+ ```bash
+ pip install llama-index-llms-openai
+ ```
+
+ ## Setup
+
+ Set your OpenAI API key as an environment variable, replacing `"sk-..."` with your actual key:
+
+ ```python
+ import os
+
+ os.environ["OPENAI_API_KEY"] = "sk-..."
+ ```
+
+ ## Basic Usage
+
+ ### Generate Completions
+
+ To generate a completion for a prompt, use the `complete` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ resp = OpenAI().complete("Paul Graham is ")
+ print(resp)
+ ```
+
+ ### Chat Responses
+
+ To send a chat message and receive a response, create a list of `ChatMessage` instances and use the `chat` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = OpenAI().chat(messages)
+ print(resp)
+ ```
+
+ ## Streaming Responses
+
+ ### Stream Complete
+
+ To stream responses for a prompt, use the `stream_complete` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ llm = OpenAI()
+ resp = llm.stream_complete("Paul Graham is ")
+ for r in resp:
+     print(r.delta, end="")
+ ```
+
+ ### Stream Chat
+
+ To stream chat responses, use the `stream_chat` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ llm = OpenAI()
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = llm.stream_chat(messages)
+ for r in resp:
+     print(r.delta, end="")
+ ```
+
+ ## Configure Model
+
+ You can specify a particular model when creating the `OpenAI` instance:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ llm = OpenAI(model="gpt-3.5-turbo")
+ resp = llm.complete("Paul Graham is ")
+ print(resp)
+
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = llm.chat(messages)
+ print(resp)
+ ```
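+
+ The constructor also accepts common generation settings such as `temperature` and `max_tokens`. A minimal sketch (the values here are illustrative, not recommendations):
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ # Lower temperature for more deterministic output; cap the response length.
+ llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1, max_tokens=256)
+ print(llm.complete("Paul Graham is "))
+ ```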
+
+ ## Asynchronous Usage
+
+ You can also use asynchronous methods for completion. Note that the `await` below must run inside an async context, such as a Jupyter notebook or an `async def` function:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ llm = OpenAI(model="gpt-3.5-turbo")
+ resp = await llm.acomplete("Paul Graham is ")
+ print(resp)
+ ```
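+
+ In a plain Python script, one way to drive the coroutine is the standard-library `asyncio.run`. A minimal sketch:
+
+ ```python
+ import asyncio
+
+ from llama_index.llms.openai import OpenAI
+
+
+ async def main() -> None:
+     llm = OpenAI(model="gpt-3.5-turbo")
+     # acomplete is the async counterpart of complete
+     resp = await llm.acomplete("Paul Graham is ")
+     print(resp)
+
+
+ asyncio.run(main())
+ ```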
+
+ ## Set API Key at a Per-Instance Level
+
+ If desired, you can have separate LLM instances use different API keys:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ # Each instance can carry its own key; "BAD_KEY" is a placeholder and
+ # will raise an authentication error unless replaced with a real key.
+ llm = OpenAI(model="gpt-3.5-turbo", api_key="BAD_KEY")
+ resp = llm.complete("Paul Graham is ")
+ print(resp)
+ ```
+
+ ### LLM Implementation Example
+
+ https://docs.llamaindex.ai/en/stable/examples/llm/openai/
+
@@ -0,0 +1,131 @@
+ # LlamaIndex Llms Integration: Openai
+
+ ## Installation
+
+ To install the required package, run:
+
+ ```bash
+ pip install llama-index-llms-openai
+ ```
+
+ ## Setup
+
+ Set your OpenAI API key as an environment variable, replacing `"sk-..."` with your actual key:
+
+ ```python
+ import os
+
+ os.environ["OPENAI_API_KEY"] = "sk-..."
+ ```
+
+ ## Basic Usage
+
+ ### Generate Completions
+
+ To generate a completion for a prompt, use the `complete` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ resp = OpenAI().complete("Paul Graham is ")
+ print(resp)
+ ```
+
+ ### Chat Responses
+
+ To send a chat message and receive a response, create a list of `ChatMessage` instances and use the `chat` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = OpenAI().chat(messages)
+ print(resp)
+ ```
+
+ ## Streaming Responses
+
+ ### Stream Complete
+
+ To stream responses for a prompt, use the `stream_complete` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ llm = OpenAI()
+ resp = llm.stream_complete("Paul Graham is ")
+ for r in resp:
+     print(r.delta, end="")
+ ```
+
+ ### Stream Chat
+
+ To stream chat responses, use the `stream_chat` method:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ llm = OpenAI()
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = llm.stream_chat(messages)
+ for r in resp:
+     print(r.delta, end="")
+ ```
+
+ ## Configure Model
+
+ You can specify a particular model when creating the `OpenAI` instance:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.llms import ChatMessage
+
+ llm = OpenAI(model="gpt-3.5-turbo")
+ resp = llm.complete("Paul Graham is ")
+ print(resp)
+
+ messages = [
+     ChatMessage(
+         role="system", content="You are a pirate with a colorful personality."
+     ),
+     ChatMessage(role="user", content="What is your name?"),
+ ]
+ resp = llm.chat(messages)
+ print(resp)
+ ```
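+
+ The constructor also accepts common generation settings such as `temperature` and `max_tokens`. A minimal sketch (the values here are illustrative, not recommendations):
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ # Lower temperature for more deterministic output; cap the response length.
+ llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1, max_tokens=256)
+ print(llm.complete("Paul Graham is "))
+ ```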
+
+ ## Asynchronous Usage
+
+ You can also use asynchronous methods for completion. Note that the `await` below must run inside an async context, such as a Jupyter notebook or an `async def` function:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ llm = OpenAI(model="gpt-3.5-turbo")
+ resp = await llm.acomplete("Paul Graham is ")
+ print(resp)
+ ```
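+
+ In a plain Python script, one way to drive the coroutine is the standard-library `asyncio.run`. A minimal sketch:
+
+ ```python
+ import asyncio
+
+ from llama_index.llms.openai import OpenAI
+
+
+ async def main() -> None:
+     llm = OpenAI(model="gpt-3.5-turbo")
+     # acomplete is the async counterpart of complete
+     resp = await llm.acomplete("Paul Graham is ")
+     print(resp)
+
+
+ asyncio.run(main())
+ ```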
+
+ ## Set API Key at a Per-Instance Level
+
+ If desired, you can have separate LLM instances use different API keys:
+
+ ```python
+ from llama_index.llms.openai import OpenAI
+
+ # Each instance can carry its own key; "BAD_KEY" is a placeholder and
+ # will raise an authentication error unless replaced with a real key.
+ llm = OpenAI(model="gpt-3.5-turbo", api_key="BAD_KEY")
+ resp = llm.complete("Paul Graham is ")
+ print(resp)
+ ```
+
+ ### LLM Implementation Example
+
+ https://docs.llamaindex.ai/en/stable/examples/llm/openai/
@@ -488,7 +488,8 @@ class OpenAI(FunctionCallingLLM):
  additional_kwargs = {}
  if is_function:
      tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
-     additional_kwargs["tool_calls"] = tool_calls
+     if tool_calls:
+         additional_kwargs["tool_calls"] = tool_calls

  yield ChatResponse(
      message=ChatMessage(
@@ -738,7 +739,8 @@ class OpenAI(FunctionCallingLLM):
  additional_kwargs = {}
  if is_function:
      tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
-     additional_kwargs["tool_calls"] = tool_calls
+     if tool_calls:
+         additional_kwargs["tool_calls"] = tool_calls

  yield ChatResponse(
      message=ChatMessage(
@@ -54,6 +54,8 @@ GPT4_MODELS: Dict[str, int] = {
      "gpt-4o": 128000,
      "gpt-4o-2024-05-13": 128000,
      "gpt-4o-2024-08-06": 128000,
+     # Intended for research and evaluation
+     "chatgpt-4o-latest": 128000,
      "gpt-4o-mini": 128000,
      "gpt-4o-mini-2024-07-18": 128000,
      # 0613 models (function calling):
@@ -285,7 +287,7 @@ def from_openai_message(openai_message: ChatCompletionMessage) -> ChatMessage:
      # function_call = None # deprecated in OpenAI v 1.1.0

      additional_kwargs: Dict[str, Any] = {}
-     if openai_message.tool_calls is not None:
+     if openai_message.tool_calls:
          tool_calls: List[ChatCompletionMessageToolCall] = openai_message.tool_calls
          additional_kwargs.update(tool_calls=tool_calls)

@@ -29,7 +29,7 @@ exclude = ["**/BUILD"]
  license = "MIT"
  name = "llama-index-llms-openai"
  readme = "README.md"
- version = "0.2.11"
+ version = "0.2.13"

  [tool.poetry.dependencies]
  python = ">=3.8.1,<4.0"
@@ -1,19 +0,0 @@
- Metadata-Version: 2.1
- Name: llama-index-llms-openai
- Version: 0.2.11
- Summary: llama-index llms openai integration
- License: MIT
- Author: llama-index
- Requires-Python: >=3.8.1,<4.0
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Requires-Dist: llama-index-core (>=0.11.7,<0.12.0)
- Requires-Dist: openai (>=1.40.0,<2.0.0)
- Description-Content-Type: text/markdown
-
- # LlamaIndex Llms Integration: Openai
-
@@ -1 +0,0 @@
- # LlamaIndex Llms Integration: Openai