ruby-openai 7.0.1 → 7.2.0
- checksums.yaml +4 -4
- data/.gitignore +0 -5
- data/CHANGELOG.md +22 -0
- data/Gemfile +2 -2
- data/Gemfile.lock +14 -10
- data/README.md +260 -50
- data/lib/openai/batches.rb +2 -2
- data/lib/openai/client.rb +23 -0
- data/lib/openai/files.rb +3 -2
- data/lib/openai/http_headers.rb +1 -1
- data/lib/openai/messages.rb +5 -1
- data/lib/openai/vector_store_file_batches.rb +29 -0
- data/lib/openai/vector_store_files.rb +23 -0
- data/lib/openai/vector_stores.rb +27 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +3 -0
- metadata +6 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7108bf76aa6f30cd7c38b41967b0162d6a4014698c1c8364b8116e5c665c044f
+  data.tar.gz: 736587458668b4608e49fa8ec50e460da14eeda0dfb8a739c68806e3bd5d0a2c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 76cac4818b5941d5732becebc91675c1ebeaba4f27c1710888afbb1893f3f8ff4d18e24a3c90f4d4332eb89730b0f6195507cf99519e764876a614ca3927a0cb
+  data.tar.gz: 4757d8e11b494a75d0ea839ae073610adfa77317ba0e13ddabdd21bf10fd34127a5747138644dba4d9073aa509341bfeaed69a3d00150d1ae90a42ddeadd53e4
data/.gitignore
CHANGED
data/CHANGELOG.md
CHANGED
@@ -5,6 +5,28 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [7.2.0] - 2024-10-10
+
+### Added
+
+- Add ability to pass parameters to Files#list endpoint - thanks to [@parterburn](https://github.com/parterburn)!
+- Add Velvet observability platform to README - thanks to [@philipithomas](https://github.com/philipithomas)
+- Add Assistants::Messages#delete endpoint - thanks to [@mochetts](https://github.com/mochetts)!
+
+## [7.1.0] - 2024-06-10
+
+### Added
+
+- Add new Vector Store endpoints - thanks to [@willywg](https://github.com/willywg) for this PR!
+- Add parameters to batches.list endpoint so you can for example use `after` - thanks to [@marckohlbrugge](https://github.com/marckohlbrugge)!
+- Add vision as permitted purpose for files - thanks again to [@willywg](https://github.com/willywg) for the PR.
+- Add improved README example of tool calling - thanks [@krschacht](https://github.com/krschacht) - check out his project [HostedGPT](https://github.com/AllYourBot/hostedgpt)!
+
+### Fixed
+
+- Fix broken link in README table of contents - thanks to [@garrettgsb](https://github.com/garrettgsb)!
+- Skip sending nil headers - thanks to [@drnic](https://github.com/drnic)!
+
 ## [7.0.1] - 2024-04-30
 
 ### Fixed
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    ruby-openai (7.0.1)
+    ruby-openai (7.2.0)
       event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
@@ -9,12 +9,14 @@ PATH
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.
+    addressable (2.8.6)
       public_suffix (>= 2.0.2, < 6.0)
     ast (2.4.2)
     base64 (0.2.0)
+    bigdecimal (3.1.8)
     byebug (11.1.3)
-    crack (0.
+    crack (1.0.0)
+      bigdecimal
       rexml
     diff-lcs (1.5.1)
     dotenv (2.8.1)
@@ -26,17 +28,18 @@ GEM
     faraday-multipart (1.0.4)
       multipart-post (~> 2)
     faraday-net_http (3.0.2)
-    hashdiff (1.0
+    hashdiff (1.1.0)
     json (2.6.3)
     multipart-post (2.3.0)
     parallel (1.22.1)
     parser (3.2.2.0)
       ast (~> 2.4.1)
-    public_suffix (5.0.
+    public_suffix (5.0.5)
     rainbow (3.1.1)
-    rake (13.1
+    rake (13.2.1)
     regexp_parser (2.8.0)
-    rexml (3.
+    rexml (3.3.6)
+      strscan
     rspec (3.13.0)
       rspec-core (~> 3.13.0)
       rspec-expectations (~> 3.13.0)
@@ -64,9 +67,10 @@ GEM
       parser (>= 3.2.1.0)
     ruby-progressbar (1.13.0)
     ruby2_keywords (0.0.5)
+    strscan (3.1.0)
     unicode-display_width (2.4.2)
     vcr (6.1.0)
-    webmock (3.
+    webmock (3.23.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -77,12 +81,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.
+  rake (~> 13.2)
   rspec (~> 3.13)
   rubocop (~> 1.50.2)
   ruby-openai!
   vcr (~> 6.1.0)
-  webmock (~> 3.
+  webmock (~> 3.23.1)
 
 BUNDLED WITH
    2.4.5
data/README.md
CHANGED
@@ -6,9 +6,9 @@
 
 Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
-Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
+Stream text with GPT-4o, transcribe and translate audio with Whisper, or create images with DALL·E...
 
-[
+[📚 Rails AI (FREE Book)](https://railsai.com) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 X](https://x.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
 
 ## Contents
 
@@ -35,14 +35,20 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
 - [Vision](#vision)
 - [JSON Mode](#json-mode)
 - [Functions](#functions)
-- [
+- [Completions](#completions)
 - [Embeddings](#embeddings)
 - [Batches](#batches)
 - [Files](#files)
+  - [For fine-tuning purposes](#for-fine-tuning-purposes)
+  - [For assistant purposes](#for-assistant-purposes)
 - [Finetunes](#finetunes)
+- [Vector Stores](#vector-stores)
+- [Vector Store Files](#vector-store-files)
+- [Vector Store File Batches](#vector-store-file-batches)
 - [Assistants](#assistants)
 - [Threads and Messages](#threads-and-messages)
 - [Runs](#runs)
+  - [Create and Run](#create-and-run)
 - [Runs involving function tools](#runs-involving-function-tools)
 - [Image Generation](#image-generation)
 - [DALL·E 2](#dalle-2)
@@ -54,7 +60,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
 - [Translate](#translate)
 - [Transcribe](#transcribe)
 - [Speech](#speech)
-- [Errors](#errors)
+- [Errors](#errors-1)
 - [Development](#development)
 - [Release](#release)
 - [Contributing](#contributing)
@@ -103,7 +109,7 @@ For a quick test you can pass your token directly to a new client:
 ```ruby
 client = OpenAI::Client.new(
   access_token: "access_token_goes_here",
-  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
+  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
 )
 ```
 
@@ -114,8 +120,8 @@ For a more robust setup, you can configure the gem with your API keys, for examp
 ```ruby
 OpenAI.configure do |config|
   config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
-  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
-  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
+  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
+  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
 end
 ```
 
@@ -133,7 +139,9 @@ client = OpenAI::Client.new(access_token: "access_token_goes_here")
 
 #### Custom timeout or base URI
 
-The default timeout for any request using this library is 120 seconds. You can change that by passing a number of seconds to the `request_timeout` when initializing the client.
+- The default timeout for any request using this library is 120 seconds. You can change that by passing a number of seconds to the `request_timeout` when initializing the client.
+- You can also change the base URI used for all requests, e.g. to use observability tools like [Helicone](https://docs.helicone.ai/quickstart/integrate-in-one-line-of-code) or [Velvet](https://docs.usevelvet.com/docs/getting-started)
+- You can also add arbitrary other headers, e.g. for [openai-caching-proxy-worker](https://github.com/6/openai-caching-proxy-worker):
 
 ```ruby
 client = OpenAI::Client.new(
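A sketch of the fully configured client those bullets describe, using the options they name (`request_timeout`, `uri_base`, `extra_headers`); the URI and header values here are illustrative placeholders, not part of this diff:

```ruby
client = OpenAI::Client.new(
  access_token: "access_token_goes_here",
  uri_base: "https://oai.hconeai.com/", # e.g. route requests through a proxy such as Helicone
  request_timeout: 240, # Override the 120-second default
  extra_headers: {
    "X-Proxy-TTL" => "43200", # Example headers for openai-caching-proxy-worker
    "X-Proxy-Refresh" => "true"
  }
)
```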
@@ -251,7 +259,7 @@ client.chat(
 ```ruby
 client = OpenAI::Client.new(
   access_token: "groq_access_token_goes_here",
-  uri_base: "https://api.groq.com/"
+  uri_base: "https://api.groq.com/openai"
 )
 
 client.chat(
@@ -273,7 +281,7 @@ To estimate the token-count of your text:
 
 ```ruby
 OpenAI.rough_token_count("Your text")
-
+```
 
 If you need a more accurate count, try [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby).
 
@@ -283,7 +291,7 @@ There are different models that can be used to generate text. For a full list an
 
 ```ruby
 client.models.list
-client.models.retrieve(id: "gpt-
+client.models.retrieve(id: "gpt-4o")
 ```
 
 ### Chat
@@ -293,7 +301,7 @@ GPT is a model that can be used to generate text in a conversational style. You
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Hello!"}], # Required.
     temperature: 0.7,
   })
@@ -310,7 +318,7 @@ You can stream from the API in realtime, which can be much faster and used to cr
 ```ruby
 client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Describe a character called Anna!"}], # Required.
     temperature: 0.7,
     stream: proc do |chunk, _bytesize|
@@ -320,7 +328,28 @@ client.chat(
 # => "Anna is a young woman in her mid-twenties, with wavy chestnut hair that falls to her shoulders..."
 ```
 
-Note:
+Note: In order to get usage information, you can provide the [`stream_options` parameter](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options) and OpenAI will provide a final chunk with the usage. Here is an example:
+
+```ruby
+stream_proc = proc { |chunk, _bytesize| puts "--------------"; puts chunk.inspect; }
+client.chat(
+  parameters: {
+    model: "gpt-4o",
+    stream: stream_proc,
+    stream_options: { include_usage: true },
+    messages: [{ role: "user", content: "Hello!"}],
+  })
+# => --------------
+# => {"id"=>"chatcmpl-7bbq05PiZqlHxjV1j7OHnKKDURKaf", "object"=>"chat.completion.chunk", "created"=>1718750612, "model"=>"gpt-4o-2024-05-13", "system_fingerprint"=>"fp_9cb5d38cf7", "choices"=>[{"index"=>0, "delta"=>{"role"=>"assistant", "content"=>""}, "logprobs"=>nil, "finish_reason"=>nil}], "usage"=>nil}
+# => --------------
+# => {"id"=>"chatcmpl-7bbq05PiZqlHxjV1j7OHnKKDURKaf", "object"=>"chat.completion.chunk", "created"=>1718750612, "model"=>"gpt-4o-2024-05-13", "system_fingerprint"=>"fp_9cb5d38cf7", "choices"=>[{"index"=>0, "delta"=>{"content"=>"Hello"}, "logprobs"=>nil, "finish_reason"=>nil}], "usage"=>nil}
+# => --------------
+# => ... more content chunks
+# => --------------
+# => {"id"=>"chatcmpl-7bbq05PiZqlHxjV1j7OHnKKDURKaf", "object"=>"chat.completion.chunk", "created"=>1718750612, "model"=>"gpt-4o-2024-05-13", "system_fingerprint"=>"fp_9cb5d38cf7", "choices"=>[{"index"=>0, "delta"=>{}, "logprobs"=>nil, "finish_reason"=>"stop"}], "usage"=>nil}
+# => --------------
+# => {"id"=>"chatcmpl-7bbq05PiZqlHxjV1j7OHnKKDURKaf", "object"=>"chat.completion.chunk", "created"=>1718750612, "model"=>"gpt-4o-2024-05-13", "system_fingerprint"=>"fp_9cb5d38cf7", "choices"=>[], "usage"=>{"prompt_tokens"=>9, "completion_tokens"=>9, "total_tokens"=>18}}
+```
 
 #### Vision
 
@@ -351,7 +380,7 @@ You can set the response_format to ask for responses in JSON:
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     response_format: { type: "json_object" },
     messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
     temperature: 0.7,
@@ -371,7 +400,7 @@ You can stream it as well!
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     messages: [{ role: "user", content: "Can I have some JSON please?"}],
     response_format: { type: "json_object" },
     stream: proc do |chunk, _bytesize|
@@ -402,26 +431,29 @@ You can describe and pass in functions and the model will intelligently choose t
 ```ruby
 
 def get_current_weather(location:, unit: "fahrenheit")
-  # use a weather api to fetch weather
+  # Here you could use a weather api to fetch the weather.
+  "The weather in #{location} is nice 🌞 #{unit}"
 end
 
+messages = [
+  {
+    "role": "user",
+    "content": "What is the weather like in San Francisco?",
+  },
+]
+
 response =
   client.chat(
     parameters: {
-      model: "gpt-
-      messages: [
-        {
-          "role": "user",
-          "content": "What is the weather like in San Francisco?",
-        },
-      ],
+      model: "gpt-4o",
+      messages: messages, # Defined above because we'll use it again
       tools: [
         {
           type: "function",
           function: {
             name: "get_current_weather",
             description: "Get the current weather in a given location",
-            parameters: {
+            parameters: { # Format: https://json-schema.org/understanding-json-schema
              type: :object,
              properties: {
                location: {
@@ -438,31 +470,51 @@ response =
             },
           }
         ],
-        tool_choice: {
-          type: "function",
-          function: {
-            name: "get_current_weather"
-          }
-        }
+        tool_choice: "required" # Optional, defaults to "auto"
+        # Can also put "none" or specific functions, see docs
       },
     )
 
 message = response.dig("choices", 0, "message")
 
 if message["role"] == "assistant" && message["tool_calls"]
-
-
-
-
+  message["tool_calls"].each do |tool_call|
+    tool_call_id = tool_call.dig("id")
+    function_name = tool_call.dig("function", "name")
+    function_args = JSON.parse(
+      tool_call.dig("function", "arguments"),
       { symbolize_names: true },
     )
+    function_response = case function_name
+    when "get_current_weather"
+      get_current_weather(**function_args) # => "The weather is nice 🌞"
+    else
+      # decide how to handle
+    end
 
-
-
-
+    # For a subsequent message with the role "tool", OpenAI requires the preceding message to have a tool_calls argument.
+    messages << message
+
+    messages << {
+      tool_call_id: tool_call_id,
+      role: "tool",
+      name: function_name,
+      content: function_response
+    } # Extend the conversation with the results of the functions
   end
+
+  second_response = client.chat(
+    parameters: {
+      model: "gpt-4o",
+      messages: messages
+    })
+
+  puts second_response.dig("choices", 0, "message", "content")
+
+  # At this point, the model has decided to call functions, you've called the functions
+  # and provided the response back, and the model has considered this and responded.
 end
-# => "
+# => "It looks like the weather is nice and sunny in San Francisco! If you're planning to go out, it should be a pleasant day."
 ```
 
 ### Completions
@@ -472,7 +524,7 @@ Hit the OpenAI API for a completion using other GPT-3 models:
 ```ruby
 response = client.completions(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     prompt: "Once upon a time",
     max_tokens: 5
   })
@@ -498,7 +550,7 @@ puts response.dig("data", 0, "embedding")
 
 ### Batches
 
-The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently,
+The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently, the supported endpoints for batches are `/v1/chat/completions` (Chat Completions API) and `/v1/embeddings` (Embeddings API).
 
 To use the Batches endpoint, you need to first upload a JSONL file containing the batch requests using the Files endpoint. The file must be uploaded with the purpose set to `batch`. Each line in the JSONL file represents a single request and should have the following format:
 
@@ -508,7 +560,7 @@ To use the Batches endpoint, you need to first upload a JSONL file containing th
   "method": "POST",
   "url": "/v1/chat/completions",
   "body": {
-    "model": "gpt-
+    "model": "gpt-4o",
     "messages": [
       { "role": "system", "content": "You are a helpful assistant." },
       { "role": "user", "content": "What is 2+2?" }
@@ -568,7 +620,7 @@ These files are in JSONL format, with each line representing the output or error
   "id": "chatcmpl-abc123",
   "object": "chat.completion",
   "created": 1677858242,
-  "model": "gpt-
+  "model": "gpt-4o",
   "choices": [
     {
       "index": 0,
@@ -586,6 +638,8 @@ If a request fails with a non-HTTP error, the error object will contain more inf
 
 ### Files
 
+#### For fine-tuning purposes
+
 Put your data in a `.jsonl` file like this:
 
 ```json
@@ -603,6 +657,23 @@ client.files.content(id: "file-123")
 client.files.delete(id: "file-123")
 ```
 
+#### For assistant purposes
+
+You can send a file path:
+
+```ruby
+client.files.upload(parameters: { file: "path/to/file.pdf", purpose: "assistants" })
+```
+
+or a File object
+
+```ruby
+my_file = File.open("path/to/file.pdf", "rb")
+client.files.upload(parameters: { file: my_file, purpose: "assistants" })
+```
+
+See supported file types on [API documentation](https://platform.openai.com/docs/assistants/tools/file-search/supported-files).
+
 ### Finetunes
 
 Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
@@ -618,7 +689,7 @@ You can then use this file ID to create a fine tuning job:
 response = client.finetunes.create(
   parameters: {
     training_file: file_id,
-    model: "gpt-
+    model: "gpt-4o"
   })
 fine_tune_id = response["id"]
 ```
@@ -655,6 +726,139 @@ You can also capture the events for a job:
 client.finetunes.list_events(id: fine_tune_id)
 ```
 
+### Vector Stores
+
+Vector Store objects give the File Search tool the ability to search your files.
+
+You can create a new vector store:
+
+```ruby
+response = client.vector_stores.create(
+  parameters: {
+    name: "my vector store",
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+vector_store_id = response["id"]
+```
+
+Given a `vector_store_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_stores.retrieve(id: vector_store_id)
+```
+
+You can get a `list` of all vector stores currently available under the organization:
+
+```ruby
+client.vector_stores.list
+```
+
+You can modify an existing vector store, except for the `file_ids`:
+
+```ruby
+response = client.vector_stores.modify(
+  id: vector_store_id,
+  parameters: {
+    name: "Modified Test Vector Store",
+  }
+)
+```
+
+You can delete vector stores:
+
+```ruby
+client.vector_stores.delete(id: vector_store_id)
+```
+
+### Vector Store Files
+
+Vector store files represent files inside a vector store.
+
+You can create a new vector store file by attaching a File to a vector store.
+
+```ruby
+response = client.vector_store_files.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_id: "file-abc123"
+  }
+)
+
+vector_store_file_id = response["id"]
+```
+
+Given a `vector_store_file_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_files.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+
+You can get a `list` of all vector store files currently available under the vector store:
+
+```ruby
+client.vector_store_files.list(vector_store_id: "vector-store-abc123")
+```
+
+You can delete a vector store file:
+
+```ruby
+client.vector_store_files.delete(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+
+Note: This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the delete file endpoint.
+
+### Vector Store File Batches
+
+Vector store file batches represent operations to add multiple files to a vector store.
+
+You can create a new vector store file batch by attaching multiple Files to a vector store.
+
+```ruby
+response = client.vector_store_file_batches.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+file_batch_id = response["id"]
+```
+
+Given a `file_batch_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_file_batches.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can get a `list` of all vector store files in a batch currently available under the vector store:
+
+```ruby
+client.vector_store_file_batches.list(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can cancel a vector store file batch (This attempts to cancel the processing of files in this batch as soon as possible):
+
+```ruby
+client.vector_store_file_batches.cancel(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
 ### Assistants
 
 Assistants are stateful actors that can have many conversations and use tools to perform tasks (see [Assistant Overview](https://platform.openai.com/docs/assistants/overview)).
@@ -664,16 +868,20 @@ To create a new assistant:
 ```ruby
 response = client.assistants.create(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     name: "OpenAI-Ruby test assistant",
     description: nil,
     instructions: "You are a Ruby dev bot. When asked a question, write and run Ruby code to answer the question",
     tools: [
       { type: "code_interpreter" },
+      { type: "file_search" }
     ],
     tool_resources: {
-
-
+      code_interpreter: {
+        file_ids: [] # See Files section above for how to upload files
+      },
+      file_search: {
+        vector_store_ids: [] # See Vector Stores section above for how to add vector stores
       }
     },
     "metadata": { my_internal_version_id: "1.0.0" }
@@ -995,7 +1203,7 @@ response = client.audio.transcribe(
   parameters: {
     model: "whisper-1",
     file: File.open("path_to_file", "rb"),
-    language: "en" # Optional
+    language: "en" # Optional
   })
 puts response["text"]
 # => "Transcription of the text"
@@ -1010,7 +1218,9 @@ response = client.audio.speech(
   parameters: {
     model: "tts-1",
     input: "This is a speech test!",
-    voice: "alloy"
+    voice: "alloy",
+    response_format: "mp3", # Optional
+    speed: 1.0 # Optional
   }
 )
 File.binwrite('demo.mp3', response)
@@ -1023,7 +1233,7 @@ HTTP errors can be caught like this:
 
 ```
 begin
-  OpenAI::Client.new.models.retrieve(id: "gpt-
+  OpenAI::Client.new.models.retrieve(id: "gpt-4o")
 rescue Faraday::Error => e
   raise "Got a Faraday error: #{e}"
 end
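As a hedged aside on the errors example above: a `Faraday::Error` raised by the client generally carries the HTTP details in its `response` hash (assuming standard Faraday behaviour; `response` can be `nil` for pure connection failures):

```ruby
begin
  OpenAI::Client.new.models.retrieve(id: "gpt-4o")
rescue Faraday::Error => e
  # e.response is a Hash with the status code and parsed error body, when present
  status = e.response&.dig(:status) # e.g. 401
  body = e.response&.dig(:body)    # OpenAI's error payload
  puts "OpenAI request failed (#{status}): #{body}"
end
```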
data/lib/openai/batches.rb
CHANGED
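The hunk for this file is not expanded here, but per the 7.1.0 changelog above it threads a `parameters:` argument through `Batches#list` for pagination. A minimal sketch of the resulting call (the `limit` key and cursor value are illustrative assumptions):

```ruby
# Page through batches; `after` is the id of the last batch on the previous page.
client.batches.list(parameters: { limit: 10, after: "batch_abc123" })
```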
data/lib/openai/client.rb
CHANGED
@@ -2,6 +2,7 @@ module OpenAI
   class Client
     include OpenAI::HTTP
 
+    SENSITIVE_ATTRIBUTES = %i[@access_token @organization_id @extra_headers].freeze
     CONFIG_KEYS = %i[
       api_type
       api_version
@@ -78,6 +79,18 @@ module OpenAI
       @run_steps ||= OpenAI::RunSteps.new(client: self)
     end
 
+    def vector_stores
+      @vector_stores ||= OpenAI::VectorStores.new(client: self)
+    end
+
+    def vector_store_files
+      @vector_store_files ||= OpenAI::VectorStoreFiles.new(client: self)
+    end
+
+    def vector_store_file_batches
+      @vector_store_file_batches ||= OpenAI::VectorStoreFileBatches.new(client: self)
+    end
+
     def batches
       @batches ||= OpenAI::Batches.new(client: self)
     end
@@ -95,5 +108,15 @@ module OpenAI
         client.add_headers("OpenAI-Beta": apis.map { |k, v| "#{k}=#{v}" }.join(";"))
       end
     end
+
+    def inspect
+      vars = instance_variables.map do |var|
+        value = instance_variable_get(var)
+
+        SENSITIVE_ATTRIBUTES.include?(var) ? "#{var}=[REDACTED]" : "#{var}=#{value.inspect}"
+      end
+
+      "#<#{self.class}:#{object_id} #{vars.join(', ')}>"
+    end
   end
 end
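A quick illustration of the redaction the new `#inspect` provides (a sketch; the exact instance variables and object id in the output will vary with your configuration):

```ruby
client = OpenAI::Client.new(access_token: "super-secret-token")
puts client.inspect
# => "#<OpenAI::Client:1234 @access_token=[REDACTED], @log_errors=false, ...>"
```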
data/lib/openai/files.rb
CHANGED
@@ -4,14 +4,15 @@ module OpenAI
       assistants
       batch
       fine-tune
+      vision
     ].freeze
 
     def initialize(client:)
       @client = client
     end
 
-    def list
-      @client.get(path: "/files")
+    def list(parameters: {})
+      @client.get(path: "/files", parameters: parameters)
     end
 
     def upload(parameters: {})
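With the new `parameters:` argument, query filters pass straight through to the Files API; a minimal sketch using OpenAI's documented `purpose` filter:

```ruby
# Only list files that were uploaded for fine-tuning
client.files.list(parameters: { purpose: "fine-tune" })
```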
data/lib/openai/http_headers.rb
CHANGED
data/lib/openai/messages.rb
CHANGED
@@ -16,8 +16,12 @@ module OpenAI
       @client.json_post(path: "/threads/#{thread_id}/messages", parameters: parameters)
     end
 
-    def modify(
+    def modify(thread_id:, id:, parameters: {})
       @client.json_post(path: "/threads/#{thread_id}/messages/#{id}", parameters: parameters)
     end
+
+    def delete(thread_id:, id:)
+      @client.delete(path: "/threads/#{thread_id}/messages/#{id}")
+    end
   end
 end
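Usage of the new delete endpoint mirrors the other message methods; the IDs here are placeholders:

```ruby
client.messages.delete(
  thread_id: "thread_abc123",
  id: "msg_abc123"
)
```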
data/lib/openai/vector_store_file_batches.rb ADDED
@@ -0,0 +1,29 @@
+module OpenAI
+  class VectorStoreFileBatches
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, id:, parameters: {})
+      @client.get(
+        path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/files",
+        parameters: parameters
+      )
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(
+        path: "/vector_stores/#{vector_store_id}/file_batches",
+        parameters: parameters
+      )
+    end
+
+    def cancel(vector_store_id:, id:)
+      @client.post(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/cancel")
+    end
+  end
+end
data/lib/openai/vector_store_files.rb ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class VectorStoreFiles
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, parameters: {})
+      @client.get(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def delete(vector_store_id:, id:)
+      @client.delete(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+  end
+end
data/lib/openai/vector_stores.rb ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class VectorStores
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(parameters: {})
+      @client.get(path: "/vector_stores", parameters: parameters)
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/vector_stores/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/vector_stores", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/vector_stores/#{id}")
+    end
+  end
+end
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -12,6 +12,9 @@ require_relative "openai/threads"
 require_relative "openai/messages"
 require_relative "openai/runs"
 require_relative "openai/run_steps"
+require_relative "openai/vector_stores"
+require_relative "openai/vector_store_files"
+require_relative "openai/vector_store_file_batches"
 require_relative "openai/audio"
 require_relative "openai/version"
 require_relative "openai/batches"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version: 7.0.1
+  version: 7.2.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-10-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -102,6 +102,9 @@ files:
 - lib/openai/run_steps.rb
 - lib/openai/runs.rb
 - lib/openai/threads.rb
+- lib/openai/vector_store_file_batches.rb
+- lib/openai/vector_store_files.rb
+- lib/openai/vector_stores.rb
 - lib/openai/version.rb
 - lib/ruby/openai.rb
 - pull_request_template.md
@@ -130,7 +133,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: "OpenAI API + Ruby! \U0001F916❤️"