ruby-openai 7.0.1 → 7.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +0 -5
- data/CHANGELOG.md +14 -0
- data/Gemfile +2 -2
- data/Gemfile.lock +14 -10
- data/README.md +230 -50
- data/lib/openai/batches.rb +2 -2
- data/lib/openai/client.rb +12 -0
- data/lib/openai/files.rb +1 -0
- data/lib/openai/http_headers.rb +1 -1
- data/lib/openai/vector_store_file_batches.rb +29 -0
- data/lib/openai/vector_store_files.rb +23 -0
- data/lib/openai/vector_stores.rb +27 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +3 -0
- metadata +6 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a86dc627f27eeea7cf3eb1bf2eec2b0209d0bb8c11fef0eb6fd6518f7f10cfe9
+  data.tar.gz: 712ab627670853d680c8858a9d27aef5a82be09de8d53e5f7156ee608ba8d939
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 72e14dc39495046b71ca147953582a24f8c9261955f2ca2a8d898ca7f8e136b459c31583620c16db3fa80c39da61c1f3a4cc932c5b3f2e71741fed42719eaeaf
+  data.tar.gz: 82db19d40f9b44fedb73d8f310771af71096d3d7e8e56f96d000a70f4c61abb1f21cbe98187d70fec1c3d639921eabe8cd8e456cac92dbcfa6e2efcb655f865b
data/.gitignore
CHANGED
data/CHANGELOG.md
CHANGED
@@ -5,6 +5,20 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [7.1.0] - 2024-06-10
+
+### Added
+
+- Add new Vector Store endpoints - thanks to [@willywg](https://github.com/willywg) for this PR!
+- Add parameters to batches.list endpoint so you can for example use `after` - thanks to [@marckohlbrugge](https://github.com/marckohlbrugge)!
+- Add vision as permitted purpose for files - thanks again to [@willywg](https://github.com/willywg) for the PR.
+- Add improved README example of tool calling - thanks [@krschacht](https://github.com/krschacht) - check out his project [HostedGPT](https://github.com/AllYourBot/hostedgpt)!
+
+### Fixed
+
+- Fix broken link in README table of contents - thanks to [@garrettgsb](https://github.com/garrettgsb)!
+- Skip sending nil headers - thanks to [@drnic](https://github.com/drnic)!
+
 ## [7.0.1] - 2024-04-30
 
 ### Fixed
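Editor's note: the `batches.list` change above (`data/lib/openai/batches.rb +2 -2`) forwards query parameters to the endpoint, which is what enables the `after` cursor mentioned in the changelog. A minimal sketch, assuming a configured client; `"batch_abc123"` is a placeholder ID:

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# Fetch up to 10 batches created after the given (hypothetical) batch ID.
client.batches.list(parameters: { limit: 10, after: "batch_abc123" })
```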
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    ruby-openai (7.0.1)
+    ruby-openai (7.1.0)
       event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
@@ -9,12 +9,14 @@ PATH
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.
+    addressable (2.8.6)
       public_suffix (>= 2.0.2, < 6.0)
     ast (2.4.2)
     base64 (0.2.0)
+    bigdecimal (3.1.8)
     byebug (11.1.3)
-    crack (0.
+    crack (1.0.0)
+      bigdecimal
       rexml
     diff-lcs (1.5.1)
     dotenv (2.8.1)
@@ -26,17 +28,18 @@ GEM
     faraday-multipart (1.0.4)
       multipart-post (~> 2)
     faraday-net_http (3.0.2)
-    hashdiff (1.0
+    hashdiff (1.1.0)
     json (2.6.3)
     multipart-post (2.3.0)
     parallel (1.22.1)
     parser (3.2.2.0)
       ast (~> 2.4.1)
-    public_suffix (5.0.
+    public_suffix (5.0.5)
     rainbow (3.1.1)
-    rake (13.1
+    rake (13.2.1)
     regexp_parser (2.8.0)
-    rexml (3.2.
+    rexml (3.2.9)
+      strscan
     rspec (3.13.0)
       rspec-core (~> 3.13.0)
       rspec-expectations (~> 3.13.0)
@@ -64,9 +67,10 @@ GEM
       parser (>= 3.2.1.0)
     ruby-progressbar (1.13.0)
     ruby2_keywords (0.0.5)
+    strscan (3.1.0)
     unicode-display_width (2.4.2)
     vcr (6.1.0)
-    webmock (3.
+    webmock (3.23.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -77,12 +81,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.
+  rake (~> 13.2)
   rspec (~> 3.13)
   rubocop (~> 1.50.2)
   ruby-openai!
   vcr (~> 6.1.0)
-  webmock (~> 3.
+  webmock (~> 3.23.1)
 
 BUNDLED WITH
    2.4.5
data/README.md
CHANGED
@@ -6,7 +6,7 @@
 
 Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
-Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
+Stream text with GPT-4o, transcribe and translate audio with Whisper, or create images with DALL·E...
 
 [🚢 Hire me](https://peaceterms.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
 
@@ -35,14 +35,20 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
 - [Vision](#vision)
 - [JSON Mode](#json-mode)
 - [Functions](#functions)
-- [
+- [Completions](#completions)
 - [Embeddings](#embeddings)
 - [Batches](#batches)
 - [Files](#files)
+  - [For fine-tuning purposes](#for-fine-tuning-purposes)
+  - [For assistant purposes](#for-assistant-purposes)
 - [Finetunes](#finetunes)
+- [Vector Stores](#vector-stores)
+- [Vector Store Files](#vector-store-files)
+- [Vector Store File Batches](#vector-store-file-batches)
 - [Assistants](#assistants)
 - [Threads and Messages](#threads-and-messages)
 - [Runs](#runs)
+  - [Create and Run](#create-and-run)
 - [Runs involving function tools](#runs-involving-function-tools)
 - [Image Generation](#image-generation)
 - [DALL·E 2](#dalle-2)
@@ -54,7 +60,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
 - [Translate](#translate)
 - [Transcribe](#transcribe)
 - [Speech](#speech)
-- [Errors](#errors)
+- [Errors](#errors-1)
 - [Development](#development)
 - [Release](#release)
 - [Contributing](#contributing)
@@ -103,7 +109,7 @@ For a quick test you can pass your token directly to a new client:
 ```ruby
 client = OpenAI::Client.new(
   access_token: "access_token_goes_here",
-  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
+  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
 )
 ```
 
@@ -114,8 +120,8 @@ For a more robust setup, you can configure the gem with your API keys, for examp
 ```ruby
 OpenAI.configure do |config|
   config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
-  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
-  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production.
+  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
+  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
 end
 ```
 
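Editor's note: since the diff twice extends the `log_errors` comment to warn about leaking private data in production, one way to honor that advice is to gate the flag on the environment. A sketch assuming a Rails app (an assumption; adapt the predicate to your framework):

```ruby
OpenAI.configure do |config|
  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
  # Verbose error logging outside production only, per the README's warning.
  config.log_errors = !Rails.env.production?
end
```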
@@ -251,7 +257,7 @@ client.chat(
 ```ruby
 client = OpenAI::Client.new(
   access_token: "groq_access_token_goes_here",
-  uri_base: "https://api.groq.com/"
+  uri_base: "https://api.groq.com/openai"
 )
 
 client.chat(
@@ -273,7 +279,7 @@ To estimate the token-count of your text:
 
 ```ruby
 OpenAI.rough_token_count("Your text")
-
+```
 
 If you need a more accurate count, try [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby).
 
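Editor's note: for the tiktoken_ruby gem mentioned in the context line above, a sketch of its API, assuming the gem is installed and supports the model name you pass:

```ruby
require "tiktoken_ruby"

enc = Tiktoken.encoding_for_model("gpt-4") # model support depends on the gem version
enc.encode("Your text").length # => exact token count under that model's encoding
```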
@@ -283,7 +289,7 @@ There are different models that can be used to generate text. For a full list an
 
 ```ruby
 client.models.list
-client.models.retrieve(id: "gpt-
+client.models.retrieve(id: "gpt-4o")
 ```
 
 ### Chat
@@ -293,7 +299,7 @@ GPT is a model that can be used to generate text in a conversational style. You
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Hello!"}], # Required.
     temperature: 0.7,
   })
@@ -310,7 +316,7 @@ You can stream from the API in realtime, which can be much faster and used to cr
 ```ruby
 client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Describe a character called Anna!"}], # Required.
     temperature: 0.7,
     stream: proc do |chunk, _bytesize|
@@ -351,7 +357,7 @@ You can set the response_format to ask for responses in JSON:
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     response_format: { type: "json_object" },
     messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
     temperature: 0.7,
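Editor's note: JSON mode guarantees the message content is valid JSON, but it still arrives as a string inside the usual response structure. A small parsing sketch (not part of the diff):

```ruby
require "json"

raw = response.dig("choices", 0, "message", "content")
data = JSON.parse(raw) # => a Ruby Hash built from the model's JSON reply
```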
@@ -371,7 +377,7 @@ You can stream it as well!
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     messages: [{ role: "user", content: "Can I have some JSON please?"}],
     response_format: { type: "json_object" },
     stream: proc do |chunk, _bytesize|
@@ -402,26 +408,29 @@ You can describe and pass in functions and the model will intelligently choose t
 ```ruby
 
 def get_current_weather(location:, unit: "fahrenheit")
-  # use a weather api to fetch weather
+  # Here you could use a weather api to fetch the weather.
+  "The weather in #{location} is nice 🌞 #{unit}"
 end
 
+messages = [
+  {
+    "role": "user",
+    "content": "What is the weather like in San Francisco?",
+  },
+]
+
 response =
   client.chat(
     parameters: {
-      model: "gpt-
-      messages:
-        {
-          "role": "user",
-          "content": "What is the weather like in San Francisco?",
-        },
-      ],
+      model: "gpt-4o",
+      messages: messages, # Defined above because we'll use it again
       tools: [
         {
           type: "function",
           function: {
             name: "get_current_weather",
             description: "Get the current weather in a given location",
-            parameters: {
+            parameters: { # Format: https://json-schema.org/understanding-json-schema
               type: :object,
               properties: {
                 location: {
@@ -438,31 +447,51 @@ response =
         },
       }
     ],
-    tool_choice:
-
-      function: {
-        name: "get_current_weather"
-      }
-    }
+    tool_choice: "required" # Optional, defaults to "auto"
+    # Can also put "none" or specific functions, see docs
   },
 )
 
 message = response.dig("choices", 0, "message")
 
 if message["role"] == "assistant" && message["tool_calls"]
-
-
-
-
+  message["tool_calls"].each do |tool_call|
+    tool_call_id = tool_call.dig("id")
+    function_name = tool_call.dig("function", "name")
+    function_args = JSON.parse(
+      tool_call.dig("function", "arguments"),
       { symbolize_names: true },
     )
+    function_response = case function_name
+    when "get_current_weather"
+      get_current_weather(**function_args) # => "The weather is nice 🌞"
+    else
+      # decide how to handle
+    end
 
-
-
-
+    # For a subsequent message with the role "tool", OpenAI requires the preceding message to have a tool_calls argument.
+    messages << message
+
+    messages << {
+      tool_call_id: tool_call_id,
+      role: "tool",
+      name: function_name,
+      content: function_response
+    } # Extend the conversation with the results of the functions
   end
+
+  second_response = client.chat(
+    parameters: {
+      model: "gpt-4o",
+      messages: messages
+    })
+
+  puts second_response.dig("choices", 0, "message", "content")
+
+  # At this point, the model has decided to call functions, you've called the functions
+  # and provided the response back, and the model has considered this and responded.
 end
-# => "
+# => "It looks like the weather is nice and sunny in San Francisco! If you're planning to go out, it should be a pleasant day."
 ```
 
 ### Completions
@@ -472,7 +501,7 @@ Hit the OpenAI API for a completion using other GPT-3 models:
 ```ruby
 response = client.completions(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     prompt: "Once upon a time",
     max_tokens: 5
   })
@@ -497,18 +526,16 @@ puts response.dig("data", 0, "embedding")
 ```
 
 ### Batches
-
-The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently, only the `/v1/chat/completions` endpoint is supported for batches.
+The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently, the supported endpoints for batches are `/v1/chat/completions` (Chat Completions API) and `/v1/embeddings` (Embeddings API).
 
 To use the Batches endpoint, you need to first upload a JSONL file containing the batch requests using the Files endpoint. The file must be uploaded with the purpose set to `batch`. Each line in the JSONL file represents a single request and should have the following format:
-
 ```json
 {
   "custom_id": "request-1",
   "method": "POST",
   "url": "/v1/chat/completions",
   "body": {
-    "model": "gpt-
+    "model": "gpt-4o",
     "messages": [
       { "role": "system", "content": "You are a helpful assistant." },
       { "role": "user", "content": "What is 2+2?" }
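Editor's note: the hunk cuts off before the README shows how the JSONL file gets used. A sketch of the flow, based on the gem's Batches API (the file name is a placeholder): upload with purpose `batch`, then create the batch from the file ID.

```ruby
file = client.files.upload(parameters: { file: "batch_requests.jsonl", purpose: "batch" })

batch = client.batches.create(
  parameters: {
    input_file_id: file["id"],
    endpoint: "/v1/chat/completions",
    completion_window: "24h"
  }
)
```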
@@ -568,7 +595,7 @@ These files are in JSONL format, with each line representing the output or error
     "id": "chatcmpl-abc123",
     "object": "chat.completion",
     "created": 1677858242,
-    "model": "gpt-
+    "model": "gpt-4o",
     "choices": [
       {
         "index": 0,
@@ -585,7 +612,7 @@ These files are in JSONL format, with each line representing the output or error
 If a request fails with a non-HTTP error, the error object will contain more information about the cause of the failure.
 
 ### Files
-
+#### For fine-tuning purposes
 Put your data in a `.jsonl` file like this:
 
 ```json
@@ -603,6 +630,24 @@ client.files.content(id: "file-123")
 client.files.delete(id: "file-123")
 ```
 
+#### For assistant purposes
+
+You can send a file path:
+
+```ruby
+client.files.upload(parameters: { file: "path/to/file.pdf", purpose: "assistants" })
+```
+
+or a File object
+
+```ruby
+my_file = File.open("path/to/file.pdf", "rb")
+client.files.upload(parameters: { file: my_file, purpose: "assistants" })
+```
+
+
+See supported file types on [API documentation](https://platform.openai.com/docs/assistants/tools/file-search/supported-files).
+
 ### Finetunes
 
 Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
@@ -618,7 +663,7 @@ You can then use this file ID to create a fine tuning job:
 response = client.finetunes.create(
   parameters: {
     training_file: file_id,
-    model: "gpt-
+    model: "gpt-4o"
   })
 fine_tune_id = response["id"]
 ```
@@ -655,6 +700,135 @@ You can also capture the events for a job:
 client.finetunes.list_events(id: fine_tune_id)
 ```
 
+### Vector Stores
+Vector Store objects give the File Search tool the ability to search your files.
+
+You can create a new vector store:
+
+```ruby
+response = client.vector_stores.create(
+  parameters: {
+    name: "my vector store",
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+vector_store_id = response["id"]
+```
+
+Given a `vector_store_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_stores.retrieve(id: vector_store_id)
+```
+
+You can get a `list` of all vector stores currently available under the organization:
+
+```ruby
+client.vector_stores.list
+```
+
+You can modify an existing vector store, except for the `file_ids`:
+
+```ruby
+response = client.vector_stores.modify(
+  id: vector_store_id,
+  parameters: {
+    name: "Modified Test Vector Store",
+  }
+)
+```
+
+You can delete vector stores:
+
+```ruby
+client.vector_stores.delete(id: vector_store_id)
+```
+
+### Vector Store Files
+Vector store files represent files inside a vector store.
+
+You can create a new vector store file by attaching a File to a vector store.
+
+```ruby
+response = client.vector_store_files.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_id: "file-abc123"
+  }
+)
+
+vector_store_file_id = response["id"]
+```
+
+Given a `vector_store_file_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_files.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+
+You can get a `list` of all vector store files currently available under the vector store:
+
+```ruby
+client.vector_store_files.list(vector_store_id: "vector-store-abc123")
+```
+
+You can delete a vector store file:
+
+```ruby
+client.vector_store_files.delete(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+Note: This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the delete file endpoint.
+
+### Vector Store File Batches
+Vector store file batches represent operations to add multiple files to a vector store.
+
+You can create a new vector store file batch by attaching multiple Files to a vector store.
+
+```ruby
+response = client.vector_store_file_batches.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+file_batch_id = response["id"]
+```
+
+Given a `file_batch_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_file_batches.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can get a `list` of all vector store files in a batch currently available under the vector store:
+
+```ruby
+client.vector_store_file_batches.list(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can cancel a vector store file batch (This attempts to cancel the processing of files in this batch as soon as possible):
+
+```ruby
+client.vector_store_file_batches.cancel(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
 ### Assistants
 
 Assistants are stateful actors that can have many conversations and use tools to perform tasks (see [Assistant Overview](https://platform.openai.com/docs/assistants/overview)).
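Editor's note: tying the three new README sections together, a minimal end-to-end sketch using only the endpoints documented above (the file path and names are placeholders; IDs come from real API responses):

```ruby
# Upload a file for assistant use, create a store, then attach the file to it.
file = client.files.upload(parameters: { file: "path/to/file.pdf", purpose: "assistants" })

store = client.vector_stores.create(parameters: { name: "my vector store" })

client.vector_store_files.create(
  vector_store_id: store["id"],
  parameters: { file_id: file["id"] }
)
```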
@@ -664,16 +838,20 @@ To create a new assistant:
 ```ruby
 response = client.assistants.create(
   parameters: {
-    model: "gpt-
+    model: "gpt-4o",
     name: "OpenAI-Ruby test assistant",
     description: nil,
     instructions: "You are a Ruby dev bot. When asked a question, write and run Ruby code to answer the question",
     tools: [
       { type: "code_interpreter" },
+      { type: "file_search" }
     ],
     tool_resources: {
-
-
+      code_interpreter: {
+        file_ids: [] # See Files section above for how to upload files
+      },
+      file_search: {
+        vector_store_ids: [] # See Vector Stores section above for how to add vector stores
       }
     },
     "metadata": { my_internal_version_id: "1.0.0" }
@@ -995,7 +1173,7 @@ response = client.audio.transcribe(
   parameters: {
     model: "whisper-1",
     file: File.open("path_to_file", "rb"),
-    language: "en" # Optional
+    language: "en" # Optional
   })
 puts response["text"]
 # => "Transcription of the text"
@@ -1010,7 +1188,9 @@ response = client.audio.speech(
   parameters: {
     model: "tts-1",
     input: "This is a speech test!",
-    voice: "alloy"
+    voice: "alloy",
+    response_format: "mp3", # Optional
+    speed: 1.0 # Optional
   }
 )
 File.binwrite('demo.mp3', response)
@@ -1023,7 +1203,7 @@ HTTP errors can be caught like this:
 
 ```
 begin
-  OpenAI::Client.new.models.retrieve(id: "gpt-
+  OpenAI::Client.new.models.retrieve(id: "gpt-4o")
 rescue Faraday::Error => e
   raise "Got a Faraday error: #{e}"
 end
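Editor's note: beyond re-raising, `Faraday::Error` carries the HTTP response when one was received. A hedged sketch of inspecting it:

```ruby
begin
  OpenAI::Client.new.models.retrieve(id: "gpt-4o")
rescue Faraday::Error => e
  # e.response is a Hash like { status:, headers:, body: } when a response was received.
  status = e.response&.dig(:status)
  puts "OpenAI request failed (HTTP #{status || 'n/a'}): #{e.message}"
end
```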
data/lib/openai/batches.rb
CHANGED
data/lib/openai/client.rb
CHANGED
@@ -78,6 +78,18 @@ module OpenAI
       @run_steps ||= OpenAI::RunSteps.new(client: self)
     end
 
+    def vector_stores
+      @vector_stores ||= OpenAI::VectorStores.new(client: self)
+    end
+
+    def vector_store_files
+      @vector_store_files ||= OpenAI::VectorStoreFiles.new(client: self)
+    end
+
+    def vector_store_file_batches
+      @vector_store_file_batches ||= OpenAI::VectorStoreFileBatches.new(client: self)
+    end
+
     def batches
       @batches ||= OpenAI::Batches.new(client: self)
     end
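Editor's note: the new accessors follow the same `||=` memoization pattern as the existing ones, so each client builds one wrapper per endpoint family and reuses it:

```ruby
client = OpenAI::Client.new

# Same object on every call, thanks to the ||= in the accessor.
client.vector_stores.equal?(client.vector_stores) # => true
```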
data/lib/openai/files.rb
CHANGED
data/lib/openai/http_headers.rb
CHANGED
data/lib/openai/vector_store_file_batches.rb
ADDED
@@ -0,0 +1,29 @@
+module OpenAI
+  class VectorStoreFileBatches
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, id:, parameters: {})
+      @client.get(
+        path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/files",
+        parameters: parameters
+      )
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(
+        path: "/vector_stores/#{vector_store_id}/file_batches",
+        parameters: parameters
+      )
+    end
+
+    def cancel(vector_store_id:, id:)
+      @client.post(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/cancel")
+    end
+  end
+end
data/lib/openai/vector_store_files.rb
ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class VectorStoreFiles
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, parameters: {})
+      @client.get(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def delete(vector_store_id:, id:)
+      @client.delete(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+  end
+end
data/lib/openai/vector_stores.rb
ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class VectorStores
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(parameters: {})
+      @client.get(path: "/vector_stores", parameters: parameters)
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/vector_stores/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/vector_stores", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/vector_stores/#{id}")
+    end
+  end
+end
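Editor's note: all three new classes wrap the client with `client.beta(assistants: OpenAI::Assistants::BETA_VERSION)`, so every vector-store request is sent with the assistants beta header. Roughly equivalent, as a sketch (the exact header value depends on `BETA_VERSION`, and `extra_headers` is the gem's general mechanism for custom headers):

```ruby
# Approximation of what the beta() wrapper adds to each request:
client = OpenAI::Client.new(
  extra_headers: { "OpenAI-Beta" => "assistants=#{OpenAI::Assistants::BETA_VERSION}" }
)
```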
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -12,6 +12,9 @@ require_relative "openai/threads"
 require_relative "openai/messages"
 require_relative "openai/runs"
 require_relative "openai/run_steps"
+require_relative "openai/vector_stores"
+require_relative "openai/vector_store_files"
+require_relative "openai/vector_store_file_batches"
 require_relative "openai/audio"
 require_relative "openai/version"
 require_relative "openai/batches"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version: 7.0.1
+  version: 7.1.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-06-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -102,6 +102,9 @@ files:
 - lib/openai/run_steps.rb
 - lib/openai/runs.rb
 - lib/openai/threads.rb
+- lib/openai/vector_store_file_batches.rb
+- lib/openai/vector_store_files.rb
+- lib/openai/vector_stores.rb
 - lib/openai/version.rb
 - lib/ruby/openai.rb
 - pull_request_template.md
@@ -130,7 +133,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: "OpenAI API + Ruby! \U0001F916❤️"