ruby-openai 5.2.0 → 6.3.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +38 -0
- data/Gemfile +2 -2
- data/Gemfile.lock +13 -11
- data/README.md +127 -33
- data/lib/openai/assistants.rb +27 -0
- data/lib/openai/audio.rb +4 -0
- data/lib/openai/client.rb +29 -6
- data/lib/openai/compatibility.rb +1 -0
- data/lib/openai/finetunes.rb +6 -14
- data/lib/openai/http.rb +57 -57
- data/lib/openai/http_headers.rb +36 -0
- data/lib/openai/messages.rb +23 -0
- data/lib/openai/run_steps.rb +15 -0
- data/lib/openai/runs.rb +32 -0
- data/lib/openai/threads.rb +27 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +22 -1
- data/ruby-openai.gemspec +1 -1
- metadata +10 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2e2bdf19ad32b8eb492faca3e937614c30ab57817c374797362ca27ffff1cf7e
+  data.tar.gz: b3d31aaa13bec5bdeb08718c04afad99b528e9e5c1bfbd279a8111e4fa12739c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bf6f46dbb52890ff1468d727711681ad95bb82e26b77f092cf6a81be25dbdb7ef3b3a58626090160bfc927ec3585723c17a9c47005ff64b035a3af85ba887e51
+  data.tar.gz: cbe3a5d6c57757beee533c3b9c05aa43fb343f7da542af4fc58bf70223f84cc674900d032e824f1c34e19ef17a2ba0d366fe9353dae67db162ce435ef2f1a496
data/CHANGELOG.md
CHANGED
@@ -5,6 +5,44 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [6.3.0] - 2023-11-26
+
+### Added
+
+- Add ability to pass [Faraday middleware](https://lostisland.github.io/faraday/#/middleware/index) to the client in a block, e.g. to enable verbose logging - shout out to [@obie](https://github.com/obie) for pushing for this.
+- Add better error logging to the client by default.
+- Bump Event Source to v1, thank you [@atesgoral](https://github.com/atesgoral) @ Shopify!
+
+## [6.2.0] - 2023-11-15
+
+### Added
+
+- Add text-to-speech! Thank you [@codergeek121](https://github.com/codergeek121)
+
+## [6.1.0] - 2023-11-14
+
+### Added
+
+- Add support for Assistants, Threads, Messages and Runs. Thank you [@Haegin](https://github.com/Haegin) for the excellent work on this PR, and many reviewers for their contributions!
+
+## [6.0.1] - 2023-11-07
+
+### Fix
+
+- Gracefully handle the case where an HTTP error response may not have valid JSON in its body. Thank you [@atesgoral](https://github.com/atesgoral)!
+
+## [6.0.0] - 2023-11-06
+
+### Added
+
+- [BREAKING] HTTP errors will now be raised by ruby-openai as Faraday errors, including when streaming! Implemented by [@atesgoral](https://github.com/atesgoral)
+- [BREAKING] Switch from legacy Finetunes to the new Fine-tuning Jobs endpoints. Implemented by [@lancecarlson](https://github.com/lancecarlson)
+- [BREAKING] Remove deprecated Completions endpoints - use Chat instead.
+
+### Fix
+
+- [BREAKING] Fix issue where :stream parameters were replaced by a boolean in the client application. Thanks to [@martinjaimem](https://github.com/martinjaimem), [@vickymadrid03](https://github.com/vickymadrid03) and [@nicastelo](https://github.com/nicastelo) for spotting and fixing this issue.
+
 ## [5.2.0] - 2023-10-30
 
 ### Fix
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,24 +1,26 @@
 PATH
   remote: .
   specs:
-    ruby-openai (
-      event_stream_parser (>= 0.3.0, <
+    ruby-openai (6.3.0)
+      event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.
+    addressable (2.8.5)
       public_suffix (>= 2.0.2, < 6.0)
     ast (2.4.2)
+    base64 (0.1.1)
     byebug (11.1.3)
     crack (0.4.5)
       rexml
     diff-lcs (1.5.0)
     dotenv (2.8.1)
-    event_stream_parser (0.
-    faraday (2.7.
+    event_stream_parser (1.0.0)
+    faraday (2.7.11)
+      base64
       faraday-net_http (>= 2.0, < 3.1)
       ruby2_keywords (>= 0.0.4)
     faraday-multipart (1.0.4)
@@ -30,11 +32,11 @@ GEM
     parallel (1.22.1)
     parser (3.2.2.0)
       ast (~> 2.4.1)
-    public_suffix (5.0.
+    public_suffix (5.0.3)
     rainbow (3.1.1)
-    rake (13.0
+    rake (13.1.0)
     regexp_parser (2.8.0)
-    rexml (3.2.
+    rexml (3.2.6)
     rspec (3.12.0)
       rspec-core (~> 3.12.0)
       rspec-expectations (~> 3.12.0)
@@ -64,7 +66,7 @@ GEM
     ruby2_keywords (0.0.5)
     unicode-display_width (2.4.2)
     vcr (6.1.0)
-    webmock (3.
+    webmock (3.19.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -75,12 +77,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.
+  rake (~> 13.1)
   rspec (~> 3.12)
   rubocop (~> 1.50.2)
   ruby-openai!
   vcr (~> 6.1.0)
-  webmock (~> 3.
+  webmock (~> 3.19.1)
 
 BUNDLED WITH
    2.4.5
data/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Ruby OpenAI
 
-[![Gem Version](https://
+[![Gem Version](https://img.shields.io/gem/v/ruby-openai.svg)](https://rubygems.org/gems/ruby-openai)
 [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.txt)
 [![CircleCI Build Status](https://circleci.com/gh/alexrudall/ruby-openai.svg?style=shield)](https://circleci.com/gh/alexrudall/ruby-openai)
 
@@ -8,9 +8,7 @@ Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
 Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
 
-🚢
-
-[🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
+[🚢 Hire me](https://peaceterms.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
 
 ### Bundler
 
@@ -110,6 +108,16 @@ OpenAI.configure do |config|
 end
 ```
 
+#### Verbose Logging
+
+You can pass [Faraday middleware](https://lostisland.github.io/faraday/#/middleware/index) to the client in a block, eg. to enable verbose logging with Ruby's [Logger](https://ruby-doc.org/3.2.2/stdlibs/logger/Logger.html):
+
+```ruby
+  client = OpenAI::Client.new do |f|
+    f.response :logger, Logger.new($stdout), bodies: true
+  end
+```
+
 #### Azure
 
 To use the [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) API, you can configure the gem like this:
@@ -149,7 +157,7 @@ client.models.retrieve(id: "text-ada-001")
 #### Examples
 
 - [GPT-4 (limited beta)](https://platform.openai.com/docs/models/gpt-4)
-  - gpt-4
+  - gpt-4 (uses current version)
   - gpt-4-0314
   - gpt-4-32k
 - [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5)
@@ -161,9 +169,9 @@ client.models.retrieve(id: "text-ada-001")
   - text-babbage-001
   - text-curie-001
 
-###
+### Chat
 
-
+GPT is a model that can be used to generate text in a conversational style. You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
 
 ```ruby
 response = client.chat(
@@ -176,11 +184,11 @@ puts response.dig("choices", 0, "message", "content")
 # => "Hello! How may I assist you today?"
 ```
 
-
+#### Streaming Chat
 
-[Quick guide to streaming
+[Quick guide to streaming Chat with Rails 7 and Hotwire](https://gist.github.com/alexrudall/cb5ee1e109353ef358adb4e66631799d)
 
-You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of
+You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of completion chunks as they are generated. Each time one or more chunks is received, the proc will be called once with each chunk, parsed as a Hash. If OpenAI returns an error, `ruby-openai` will raise a Faraday error.
 
 ```ruby
 client.chat(
@@ -195,7 +203,80 @@ client.chat(
 # => "Anna is a young woman in her mid-twenties, with wavy chestnut hair that falls to her shoulders..."
 ```
 
-Note:
+Note: OpenAI currently does not report token usage for streaming responses. To count tokens while streaming, try `OpenAI.rough_token_count` or [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby). We think that each call to the stream proc corresponds to a single token, so you can also try counting the number of calls to the proc to get the completion token count.
+
+#### Vision
+
+You can use the GPT-4 Vision model to generate a description of an image:
+
+```ruby
+messages = [
+  { "type": "text", "text": "What’s in this image?"},
+  { "type": "image_url",
+    "image_url": {
+      "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    },
+  }
+]
+response = client.chat(
+  parameters: {
+    model: "gpt-4-vision-preview", # Required.
+    messages: [{ role: "user", content: messages}], # Required.
+  })
+puts response.dig("choices", 0, "message", "content")
+# => "The image depicts a serene natural landscape featuring a long wooden boardwalk extending straight ahead"
+```
+
+#### JSON Mode
+
+You can set the response_format to ask for responses in JSON (at least for `gpt-3.5-turbo-1106`):
+
+```ruby
+  response = client.chat(
+    parameters: {
+      model: "gpt-3.5-turbo-1106",
+      response_format: { type: "json_object" },
+      messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
+      temperature: 0.7,
+    })
+  puts response.dig("choices", 0, "message", "content")
+    {
+      "name": "John",
+      "age": 30,
+      "city": "New York",
+      "hobbies": ["reading", "traveling", "hiking"],
+      "isStudent": false
+    }
+```
+
+You can stream it as well!
+
+```ruby
+  response = client.chat(
+    parameters: {
+      model: "gpt-3.5-turbo-1106",
+      messages: [{ role: "user", content: "Can I have some JSON please?"}],
+      response_format: { type: "json_object" },
+      stream: proc do |chunk, _bytesize|
+        print chunk.dig("choices", 0, "delta", "content")
+      end
+    })
+    {
+      "message": "Sure, please let me know what specific JSON data you are looking for.",
+      "JSON_data": {
+        "example_1": {
+          "key_1": "value_1",
+          "key_2": "value_2",
+          "key_3": "value_3"
+        },
+        "example_2": {
+          "key_4": "value_4",
+          "key_5": "value_5",
+          "key_6": "value_6"
+        }
+      }
+    }
+```
 
 ### Functions
 
@@ -257,21 +338,6 @@ end
 # => "The weather is nice 🌞"
 ```
 
-### Completions
-
-Hit the OpenAI API for a completion using other GPT-3 models:
-
-```ruby
-response = client.completions(
-  parameters: {
-    model: "text-davinci-001",
-    prompt: "Once upon a time",
-    max_tokens: 5
-  })
-puts response["choices"].map { |c| c["text"] }
-# => [", there lived a great"]
-```
-
 ### Edits
 
 Send a string and some instructions for what to do to the string:
@@ -323,22 +389,22 @@ client.files.content(id: "file-123")
 client.files.delete(id: "file-123")
 ```
 
-###
+### Finetunes
 
 Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
 
 ```ruby
-response = client.files.upload(parameters: { file: "path/to/
+response = client.files.upload(parameters: { file: "path/to/sarcasm.jsonl", purpose: "fine-tune" })
 file_id = JSON.parse(response.body)["id"]
 ```
 
-You can then use this file ID to create a fine
+You can then use this file ID to create a fine tuning job:
 
 ```ruby
 response = client.finetunes.create(
   parameters: {
     training_file: file_id,
-    model: "
+    model: "gpt-3.5-turbo-0613"
   })
 fine_tune_id = response["id"]
 ```
@@ -369,10 +435,10 @@ response = client.completions(
 response.dig("choices", 0, "text")
 ```
 
-You can
+You can also capture the events for a job:
 
-```
-client.finetunes.
+```
+client.finetunes.list_events(id: fine_tune_id)
 ```
 
 ### Image Generation
@@ -455,6 +521,34 @@ puts response["text"]
 # => "Transcription of the text"
 ```
 
+#### Speech
+
+The speech API takes as input the text and a voice and returns the content of an audio file you can listen to.
+
+```ruby
+response = client.audio.speech(
+  parameters: {
+    model: "tts-1",
+    input: "This is a speech test!",
+    voice: "alloy"
+  }
+)
+File.binwrite('demo.mp3', response)
+# => mp3 file that plays: "This is a speech test!"
+```
+
+### Errors
+
+HTTP errors can be caught like this:
+
+```
+  begin
+    OpenAI::Client.new.models.retrieve(id: "text-ada-001")
+  rescue Faraday::Error => e
+    raise "Got a Faraday error: #{e}"
+  end
+```
+
 ## Development
 
 After checking out the repo, run `bin/setup` to install dependencies. You can run `bin/console` for an interactive prompt that will allow you to experiment.
data/lib/openai/assistants.rb
ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class Assistants
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list
+      @client.get(path: "/assistants")
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/assistants/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/assistants", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/assistants/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/assistants/#{id}")
+    end
+  end
+end
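The new `OpenAI::Assistants` class maps each beta Assistants endpoint onto a client call. A minimal usage sketch, assuming a configured client; the `model`, `name` and `instructions` values are illustrative, not taken from this diff:

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# Create an assistant (the OpenAI-Beta: assistants=v1 header is added via Client#beta).
assistant = client.assistants.create(
  parameters: {
    model: "gpt-4",                                   # illustrative
    name: "Maths Tutor",                              # illustrative
    instructions: "Answer maths questions briefly."   # illustrative
  }
)
assistant_id = assistant["id"]

client.assistants.list                                                      # GET /assistants
client.assistants.retrieve(id: assistant_id)                                # GET /assistants/:id
client.assistants.modify(id: assistant_id, parameters: { name: "Tutor" })   # POST /assistants/:id
client.assistants.delete(id: assistant_id)                                  # DELETE /assistants/:id
```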
data/lib/openai/audio.rb
CHANGED
data/lib/openai/client.rb
CHANGED
@@ -11,24 +11,21 @@ module OpenAI
       request_timeout
       extra_headers
     ].freeze
-    attr_reader *CONFIG_KEYS
+    attr_reader *CONFIG_KEYS, :faraday_middleware
 
-    def initialize(config = {})
+    def initialize(config = {}, &faraday_middleware)
       CONFIG_KEYS.each do |key|
         # Set instance variables like api_type & access_token. Fall back to global config
         # if not present.
         instance_variable_set("@#{key}", config[key] || OpenAI.configuration.send(key))
       end
+      @faraday_middleware = faraday_middleware
     end
 
     def chat(parameters: {})
       json_post(path: "/chat/completions", parameters: parameters)
     end
 
-    def completions(parameters: {})
-      json_post(path: "/completions", parameters: parameters)
-    end
-
     def edits(parameters: {})
       json_post(path: "/edits", parameters: parameters)
     end
@@ -57,6 +54,26 @@ module OpenAI
       @models ||= OpenAI::Models.new(client: self)
     end
 
+    def assistants
+      @assistants ||= OpenAI::Assistants.new(client: self)
+    end
+
+    def threads
+      @threads ||= OpenAI::Threads.new(client: self)
+    end
+
+    def messages
+      @messages ||= OpenAI::Messages.new(client: self)
+    end
+
+    def runs
+      @runs ||= OpenAI::Runs.new(client: self)
+    end
+
+    def run_steps
+      @run_steps ||= OpenAI::RunSteps.new(client: self)
+    end
+
     def moderations(parameters: {})
       json_post(path: "/moderations", parameters: parameters)
     end
@@ -64,5 +81,11 @@ module OpenAI
     def azure?
       @api_type&.to_sym == :azure
     end
+
+    def beta(apis)
+      dup.tap do |client|
+        client.add_headers("OpenAI-Beta": apis.map { |k, v| "#{k}=#{v}" }.join(";"))
+      end
+    end
   end
 end
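Two client-level changes to note here: the constructor now accepts an optional block, stored as `@faraday_middleware` and run against every Faraday connection the client builds, and the beta Assistants resources are exposed as memoized accessors. A sketch, assuming a valid access token:

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN")) do |faraday|
  # Optional middleware block, applied in HTTP#conn; verbose logging is one possible use.
  faraday.response :logger, Logger.new($stdout), bodies: true
end

client.assistants  # => OpenAI::Assistants (memoized per client)
client.threads     # => OpenAI::Threads
client.messages    # => OpenAI::Messages
client.runs        # => OpenAI::Runs
client.run_steps   # => OpenAI::RunSteps
```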
data/lib/openai/compatibility.rb
CHANGED
data/lib/openai/finetunes.rb
CHANGED
@@ -5,31 +5,23 @@ module OpenAI
     end
 
     def list
-      @client.get(path: "/
+      @client.get(path: "/fine_tuning/jobs")
     end
 
     def create(parameters: {})
-      @client.json_post(path: "/
+      @client.json_post(path: "/fine_tuning/jobs", parameters: parameters)
     end
 
     def retrieve(id:)
-      @client.get(path: "/
+      @client.get(path: "/fine_tuning/jobs/#{id}")
     end
 
     def cancel(id:)
-      @client.
+      @client.json_post(path: "/fine_tuning/jobs/#{id}/cancel", parameters: {})
     end
 
-    def
-      @client.get(path: "/
-    end
-
-    def delete(fine_tuned_model:)
-      if fine_tuned_model.start_with?("ft-")
-        raise ArgumentError, "Please give a fine_tuned_model name, not a fine-tune ID"
-      end
-
-      @client.delete(path: "/models/#{fine_tuned_model}")
+    def list_events(id:)
+      @client.get(path: "/fine_tuning/jobs/#{id}/events")
     end
   end
 end
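`Finetunes` now targets the `/fine_tuning/jobs` endpoints throughout. A sketch of the corresponding calls, assuming `file_id` was obtained from `client.files.upload` as shown in the README:

```ruby
job = client.finetunes.create(
  parameters: { training_file: file_id, model: "gpt-3.5-turbo-0613" }
)                                          # POST /fine_tuning/jobs
job_id = job["id"]

client.finetunes.list                      # GET  /fine_tuning/jobs
client.finetunes.retrieve(id: job_id)      # GET  /fine_tuning/jobs/:id
client.finetunes.list_events(id: job_id)   # GET  /fine_tuning/jobs/:id/events
client.finetunes.cancel(id: job_id)        # POST /fine_tuning/jobs/:id/cancel
```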
data/lib/openai/http.rb
CHANGED
@@ -1,57 +1,58 @@
 require "event_stream_parser"
 
+require_relative "http_headers"
+
 module OpenAI
   module HTTP
+    include HTTPHeaders
+
     def get(path:)
-
+      parse_jsonl(conn.get(uri(path: path)) do |req|
         req.headers = headers
       end&.body)
     end
 
-    def
-
-      if parameters[:stream].respond_to?(:call)
-        req.options.on_data = to_json_stream(user_proc: parameters[:stream])
-        parameters[:stream] = true # Necessary to tell OpenAI to stream.
-      elsif parameters[:stream]
-        raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
-      end
-
+    def post(path:)
+      parse_jsonl(conn.post(uri(path: path)) do |req|
         req.headers = headers
-        req.body = parameters.to_json
       end&.body)
     end
 
+    def json_post(path:, parameters:)
+      conn.post(uri(path: path)) do |req|
+        configure_json_post_request(req, parameters)
+      end&.body
+    end
+
     def multipart_post(path:, parameters: nil)
-
+      conn(multipart: true).post(uri(path: path)) do |req|
         req.headers = headers.merge({ "Content-Type" => "multipart/form-data" })
         req.body = multipart_parameters(parameters)
-      end&.body
+      end&.body
     end
 
     def delete(path:)
-
+      conn.delete(uri(path: path)) do |req|
         req.headers = headers
-      end&.body
+      end&.body
     end
 
     private
 
-    def
-      return unless
+    def parse_jsonl(response)
+      return unless response
+      return response unless response.is_a?(String)
 
-      JSON.parse(string)
-    rescue JSON::ParserError
       # Convert a multiline string of JSON objects to a JSON array.
-
+      response = response.gsub("}\n{", "},{").prepend("[").concat("]")
+
+      JSON.parse(response)
     end
 
     # Given a proc, returns an outer proc that can be used to iterate over a JSON stream of chunks.
     # For each chunk, the inner user_proc is called giving it the JSON object. The JSON object could
     # be a data object or an error object as described in the OpenAI API documentation.
     #
-    # If the JSON object for a given data or error message is invalid, it is ignored.
-    #
     # @param user_proc [Proc] The inner proc to call for each JSON object in the chunk.
     # @return [Proc] An outer proc that iterates over a raw stream, converting it to JSON.
     def to_json_stream(user_proc:)
@@ -59,26 +60,28 @@ module OpenAI
 
       proc do |chunk, _bytes, env|
         if env && env.status != 200
-
-
-          parser.feed(chunk) do |_type, data|
-            emit_json(json: data, user_proc: user_proc) unless data == "[DONE]"
-          end
+          raise_error = Faraday::Response::RaiseError.new
+          raise_error.on_complete(env.merge(body: try_parse_json(chunk)))
         end
-      end
-    end
 
-
-
-
-
+        parser.feed(chunk) do |_type, data|
+          user_proc.call(JSON.parse(data)) unless data == "[DONE]"
+        end
+      end
     end
 
     def conn(multipart: false)
-      Faraday.new do |f|
+      connection = Faraday.new do |f|
        f.options[:timeout] = @request_timeout
        f.request(:multipart) if multipart
+        f.use MiddlewareErrors
+        f.response :raise_error
+        f.response :json
      end
+
+      @faraday_middleware&.call(connection)
+
+      connection
    end
 
    def uri(path:)
@@ -90,29 +93,6 @@ module OpenAI
      end
    end
 
-    def headers
-      if azure?
-        azure_headers
-      else
-        openai_headers
-      end.merge(@extra_headers || {})
-    end
-
-    def openai_headers
-      {
-        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@access_token}",
-        "OpenAI-Organization" => @organization_id
-      }
-    end
-
-    def azure_headers
-      {
-        "Content-Type" => "application/json",
-        "api-key" => @access_token
-      }
-    end
-
    def multipart_parameters(parameters)
      parameters&.transform_values do |value|
        next value unless value.respond_to?(:close) # File or IO object.
@@ -123,5 +103,25 @@ module OpenAI
        Faraday::UploadIO.new(value, "", value.path)
      end
    end
+
+    def configure_json_post_request(req, parameters)
+      req_parameters = parameters.dup
+
+      if parameters[:stream].respond_to?(:call)
+        req.options.on_data = to_json_stream(user_proc: parameters[:stream])
+        req_parameters[:stream] = true # Necessary to tell OpenAI to stream.
+      elsif parameters[:stream]
+        raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
+      end
+
+      req.headers = headers
+      req.body = req_parameters.to_json
+    end
+
+    def try_parse_json(maybe_json)
+      JSON.parse(maybe_json)
+    rescue JSON::ParserError
+      maybe_json
+    end
  end
end
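The renamed `parse_jsonl` helper leaves already-parsed bodies (produced by the `:json` response middleware) untouched and only rewrites string bodies that contain newline-delimited JSON objects. A standalone sketch of that transformation, with a hypothetical two-object body:

```ruby
require "json"

jsonl = "{\"id\":\"file-1\"}\n{\"id\":\"file-2\"}"   # hypothetical JSONL response body

wrapped = jsonl.gsub("}\n{", "},{").prepend("[").concat("]")
# => "[{\"id\":\"file-1\"},{\"id\":\"file-2\"}]"

JSON.parse(wrapped)
# => [{"id"=>"file-1"}, {"id"=>"file-2"}]
```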
data/lib/openai/http_headers.rb
ADDED
@@ -0,0 +1,36 @@
+module OpenAI
+  module HTTPHeaders
+    def add_headers(headers)
+      @extra_headers = extra_headers.merge(headers.transform_keys(&:to_s))
+    end
+
+    private
+
+    def headers
+      if azure?
+        azure_headers
+      else
+        openai_headers
+      end.merge(extra_headers)
+    end
+
+    def openai_headers
+      {
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@access_token}",
+        "OpenAI-Organization" => @organization_id
+      }
+    end
+
+    def azure_headers
+      {
+        "Content-Type" => "application/json",
+        "api-key" => @access_token
+      }
+    end
+
+    def extra_headers
+      @extra_headers ||= {}
+    end
+  end
+end
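`add_headers` stringifies the keys it is given and merges them over the default OpenAI (or Azure) headers; `Client#beta` uses it to attach the `OpenAI-Beta` header to a duplicated client. A rough sketch of the resulting request headers for a non-Azure client (the token below is a placeholder):

```ruby
client = OpenAI::Client.new(access_token: "sk-placeholder")
beta_client = client.beta(assistants: "v1")   # calls add_headers under the hood

# Requests from beta_client then carry roughly:
#   "Content-Type"        => "application/json"
#   "Authorization"       => "Bearer sk-placeholder"
#   "OpenAI-Organization" => the configured organization_id, if any
#   "OpenAI-Beta"         => "assistants=v1"
```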
data/lib/openai/messages.rb
ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class Messages
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:)
+      @client.get(path: "/threads/#{thread_id}/messages")
+    end
+
+    def retrieve(thread_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/messages/#{id}")
+    end
+
+    def create(thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/messages", parameters: parameters)
+    end
+
+    def modify(id:, thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/messages/#{id}", parameters: parameters)
+    end
+  end
+end
data/lib/openai/run_steps.rb
ADDED
@@ -0,0 +1,15 @@
+module OpenAI
+  class RunSteps
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:, run_id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{run_id}/steps")
+    end
+
+    def retrieve(thread_id:, run_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{run_id}/steps/#{id}")
+    end
+  end
+end
data/lib/openai/runs.rb
ADDED
@@ -0,0 +1,32 @@
+module OpenAI
+  class Runs
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:)
+      @client.get(path: "/threads/#{thread_id}/runs")
+    end
+
+    def retrieve(thread_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{id}")
+    end
+
+    def create(thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs", parameters: parameters)
+    end
+
+    def modify(id:, thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs/#{id}", parameters: parameters)
+    end
+
+    def cancel(id:, thread_id:)
+      @client.post(path: "/threads/#{thread_id}/runs/#{id}/cancel")
+    end
+
+    def submit_tool_outputs(thread_id:, run_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs/#{run_id}/submit_tool_outputs",
+                        parameters: parameters)
+    end
+  end
+end
data/lib/openai/threads.rb
ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class Threads
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list
+      @client.get(path: "/threads")
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/threads/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/threads", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/threads/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/threads/#{id}")
+    end
+  end
+end
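Threads, Messages, Runs and RunSteps cover the rest of the beta Assistants flow. A sketch of one round trip, assuming an existing `assistant_id`; the message text and the list of terminal run statuses are illustrative assumptions, not part of this diff:

```ruby
thread_id = client.threads.create["id"]

client.messages.create(
  thread_id: thread_id,
  parameters: { role: "user", content: "What is the capital of France?" } # illustrative
)

run_id = client.runs.create(
  thread_id: thread_id,
  parameters: { assistant_id: assistant_id }
)["id"]

# Poll until the run reaches a terminal status (status names assumed from the API docs).
loop do
  status = client.runs.retrieve(thread_id: thread_id, id: run_id)["status"]
  break if %w[completed failed cancelled expired].include?(status)
  sleep 1
end

client.messages.list(thread_id: thread_id)                    # read the reply back
client.run_steps.list(thread_id: thread_id, run_id: run_id)   # inspect what the run did
```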
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -7,6 +7,11 @@ require_relative "openai/files"
 require_relative "openai/finetunes"
 require_relative "openai/images"
 require_relative "openai/models"
+require_relative "openai/assistants"
+require_relative "openai/threads"
+require_relative "openai/messages"
+require_relative "openai/runs"
+require_relative "openai/run_steps"
 require_relative "openai/audio"
 require_relative "openai/version"
 
@@ -14,6 +19,22 @@ module OpenAI
   class Error < StandardError; end
   class ConfigurationError < Error; end
 
+  class MiddlewareErrors < Faraday::Middleware
+    def call(env)
+      @app.call(env)
+    rescue Faraday::Error => e
+      raise e unless e.response.is_a?(Hash)
+
+      logger = Logger.new($stdout)
+      logger.formatter = proc do |_severity, _datetime, _progname, msg|
+        "\033[31mOpenAI HTTP Error (spotted in ruby-openai #{VERSION}): #{msg}\n\033[0m"
+      end
+      logger.error(e.response[:body])
+
+      raise e
+    end
+  end
+
   class Configuration
     attr_writer :access_token
     attr_accessor :api_type, :api_version, :organization_id, :uri_base, :request_timeout,
@@ -30,7 +51,7 @@ module OpenAI
       @organization_id = nil
       @uri_base = DEFAULT_URI_BASE
       @request_timeout = DEFAULT_REQUEST_TIMEOUT
-      @extra_headers =
+      @extra_headers = {}
     end
 
     def access_token
data/ruby-openai.gemspec
CHANGED
@@ -25,7 +25,7 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]
 
-  spec.add_dependency "event_stream_parser", ">= 0.3.0", "<
+  spec.add_dependency "event_stream_parser", ">= 0.3.0", "< 2.0.0"
   spec.add_dependency "faraday", ">= 1"
   spec.add_dependency "faraday-multipart", ">= 1"
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version:
+  version: 6.3.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-
+date: 2023-11-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -19,7 +19,7 @@ dependencies:
         version: 0.3.0
     - - "<"
       - !ruby/object:Gem::Version
-        version:
+        version: 2.0.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -29,7 +29,7 @@ dependencies:
         version: 0.3.0
     - - "<"
       - !ruby/object:Gem::Version
-        version:
+        version: 2.0.0
 - !ruby/object:Gem::Dependency
   name: faraday
   requirement: !ruby/object:Gem::Requirement
@@ -87,14 +87,20 @@ files:
 - bin/console
 - bin/setup
 - lib/openai.rb
+- lib/openai/assistants.rb
 - lib/openai/audio.rb
 - lib/openai/client.rb
 - lib/openai/compatibility.rb
 - lib/openai/files.rb
 - lib/openai/finetunes.rb
 - lib/openai/http.rb
+- lib/openai/http_headers.rb
 - lib/openai/images.rb
+- lib/openai/messages.rb
 - lib/openai/models.rb
+- lib/openai/run_steps.rb
+- lib/openai/runs.rb
+- lib/openai/threads.rb
 - lib/openai/version.rb
 - lib/ruby/openai.rb
 - pull_request_template.md