ruby-openai 7.4.0 → 8.1.0
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.circleci/config.yml +1 -1
- data/CHANGELOG.md +29 -1
- data/Gemfile +6 -5
- data/Gemfile.lock +31 -23
- data/README.md +173 -4
- data/lib/openai/batches.rb +1 -1
- data/lib/openai/client.rb +5 -1
- data/lib/openai/files.rb +7 -3
- data/lib/openai/http.rb +16 -11
- data/lib/openai/models.rb +4 -0
- data/lib/openai/responses.rb +23 -0
- data/lib/openai/vector_stores.rb +4 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +32 -24
- data/lib/ruby/openai.rb +0 -1
- metadata +3 -3
- data/lib/openai/compatibility.rb +0 -11
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 25124958f17722e0cc4168772bf02daf283b0d43dd2a2c143575ad23fc5539ac
+  data.tar.gz: cfaa4cb81ee668a4296774d7bce0591a2c23a3f0944794acde1db3e89586d199
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d2ded6da66a157a190c24b0f7748a833d13ae9a4da67b52879eae8510e74cd90d80ffa6faaf573375c3a26d74b091165af6b30ca51c4cf860b1ba6f5c633cbdf
+  data.tar.gz: d31a28fafe6cac7caa99537f97a0fa1b0b918ebea09371d730e6d425b9bf228c726d9207c7a2bd08070103d49c9aa71e34ecf4f7ea888c2321f5fa128abf4fdc
data/.circleci/config.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -5,7 +5,35 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## [
+## [8.1.0] - 2025-03-30
+
+### Added
+
+- Add Vector#search endpoint - thank you [@jaebrownn](https://github.com/jaebrownn) for this PR!
+
+## [8.0.0] - 2025-03-14
+
+### Added
+
+- Add Responses endpoints - thanks to my excellent colleague [@simonx1](https://github.com/simonx1) for your work on this!
+- Add docs for the Deepseek chat API.
+- Add Models#delete - thanks to [bennysghost](https://github.com/bennysghost).
+
+### Fixed
+
+- [BREAKING] Try to JSON parse everything. If it fails, fall back gracefully to returning the raw response. Thank you to [@gregszero](https://github.com/gregszero) and the many others who raised this issue.
+- [BREAKING] An unknown file type will no longer prevent file upload, but instead raise a warning.
+- [BREAKING] ruby-openai no longer requires "faraday/multipart" for Faraday 1 users (Faraday 1 already includes it and it was causing a warning). Thanks to [ajGingrich](https://github.com/ajGingrich) for raising this!
+- Add `user_data` and `evals` as options for known File types - thank you to [jontec](https://github.com/jontec) for this fix!
+- Fix a syntax ambiguity in Client.rb - thank you to [viralpraxis](https://github.com/viralpraxis).
+
+### Removed
+
+- [BREAKING] Backwards compatibility for `require "ruby/openai"` is removed - from v8 on you MUST use `require "openai"`. This fixes a deprecation warning with Ruby 3.4. Thanks to [@ndemianc](https://github.com/ndemianc) for this PR.
+- [BREAKING] Removed support for Ruby 2.6. ruby-openai may still work with this version but it's no longer supported.
+- Removed the 'OpenAI-Beta' header from Batches API requests.
+
+## [7.4.0] - 2025-02-10
 
 ### Added
 
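The v8 entrypoint removal in practice: apps upgrading from 7.x must switch their require. A minimal before/after sketch (the client setup line is illustrative, not from the changelog):

```ruby
# Before (v7 and earlier - removed in v8):
# require "ruby/openai"

# From v8 on:
require "openai"

client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))
```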
data/Gemfile
CHANGED
@@ -3,10 +3,11 @@ source "https://rubygems.org"
 # Include gem dependencies from ruby-openai.gemspec
 gemspec
 
+# Development dependencies. Not included in the published gem.
 gem "byebug", "~> 11.1.3"
-gem "dotenv", "~> 2.8.1"
-gem "rake", "~> 13.2"
+gem "dotenv", "~> 2.8.1" # >= v3 will require removing support for Ruby 2.7 from CI.
+gem "rake", "~> 13.2.1"
 gem "rspec", "~> 3.13"
-gem "rubocop", "~> 1.
-gem "vcr", "~> 6.1
-gem "webmock", "~> 3.
+gem "rubocop", "~> 1.74.0"
+gem "vcr", "~> 6.3.1"
+gem "webmock", "~> 3.25.1"
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    ruby-openai (
+    ruby-openai (8.1.0)
       event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
@@ -13,7 +13,7 @@ GEM
       public_suffix (>= 2.0.2, < 7.0)
     ast (2.4.2)
     base64 (0.2.0)
-    bigdecimal (3.1.
+    bigdecimal (3.1.9)
     byebug (11.1.3)
     crack (1.0.0)
       bigdecimal
@@ -28,16 +28,20 @@ GEM
     faraday-multipart (1.0.4)
       multipart-post (~> 2)
     faraday-net_http (3.0.2)
-    hashdiff (1.1.
-    json (2.
+    hashdiff (1.1.2)
+    json (2.10.2)
+    language_server-protocol (3.17.0.4)
+    lint_roller (1.1.0)
     multipart-post (2.3.0)
-    parallel (1.
-    parser (3.
+    parallel (1.26.3)
+    parser (3.3.7.1)
       ast (~> 2.4.1)
-
+      racc
+    public_suffix (6.0.1)
+    racc (1.8.1)
     rainbow (3.1.1)
     rake (13.2.1)
-    regexp_parser (2.
+    regexp_parser (2.10.0)
     rexml (3.3.9)
     rspec (3.13.0)
       rspec-core (~> 3.13.0)
@@ -52,23 +56,27 @@ GEM
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.13.0)
     rspec-support (3.13.1)
-    rubocop (1.
+    rubocop (1.74.0)
       json (~> 2.3)
+      language_server-protocol (~> 3.17.0.2)
+      lint_roller (~> 1.1.0)
       parallel (~> 1.10)
-      parser (>= 3.
+      parser (>= 3.3.0.2)
       rainbow (>= 2.2.2, < 4.0)
-      regexp_parser (>=
-
-      rubocop-ast (>= 1.28.0, < 2.0)
+      regexp_parser (>= 2.9.3, < 3.0)
+      rubocop-ast (>= 1.38.0, < 2.0)
       ruby-progressbar (~> 1.7)
-      unicode-display_width (>= 2.4.0, <
-    rubocop-ast (1.
-      parser (>= 3.
+      unicode-display_width (>= 2.4.0, < 4.0)
+    rubocop-ast (1.38.1)
+      parser (>= 3.3.1.0)
     ruby-progressbar (1.13.0)
     ruby2_keywords (0.0.5)
-    unicode-display_width (
-
-
+    unicode-display_width (3.1.4)
+      unicode-emoji (~> 4.0, >= 4.0.4)
+    unicode-emoji (4.0.4)
+    vcr (6.3.1)
+      base64
+    webmock (3.25.1)
     addressable (>= 2.8.0)
     crack (>= 0.3.2)
     hashdiff (>= 0.4.0, < 2.0.0)
@@ -79,12 +87,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.2)
+  rake (~> 13.2.1)
   rspec (~> 3.13)
-  rubocop (~> 1.
+  rubocop (~> 1.74.0)
   ruby-openai!
-  vcr (~> 6.1
-  webmock (~> 3.
+  vcr (~> 6.3.1)
+  webmock (~> 3.25.1)
 
 BUNDLED WITH
    2.4.5
data/README.md
CHANGED
@@ -1,11 +1,12 @@
 # Ruby OpenAI
+
 [](https://rubygems.org/gems/ruby-openai)
 [](https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.txt)
 [](https://circleci.com/gh/alexrudall/ruby-openai)
 
 Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
-Stream
+Stream chats with the Responses API, transcribe and translate audio with Whisper, create images with DALL·E, and much more...
 
 💥 Click [subscribe now](https://mailchi.mp/8c7b574726a9/ruby-openai) to hear first about new releases in the Rails AI newsletter!
 
@@ -16,7 +17,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
 ## Contents
 
 - [Ruby OpenAI](#ruby-openai)
-  - [
+  - [Contents](#contents)
   - [Installation](#installation)
     - [Bundler](#bundler)
     - [Gem install](#gem-install)
@@ -29,6 +30,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
     - [Errors](#errors)
     - [Faraday middleware](#faraday-middleware)
     - [Azure](#azure)
+    - [Deepseek](#deepseek)
     - [Ollama](#ollama)
     - [Groq](#groq)
    - [Counting Tokens](#counting-tokens)
@@ -37,6 +39,14 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
    - [Streaming Chat](#streaming-chat)
    - [Vision](#vision)
    - [JSON Mode](#json-mode)
+  - [Responses API](#responses-api)
+    - [Create a Response](#create-a-response)
+    - [Follow-up Messages](#follow-up-messages)
+    - [Tool Calls](#tool-calls)
+    - [Streaming](#streaming)
+    - [Retrieve a Response](#retrieve-a-response)
+    - [Delete a Response](#delete-a-response)
+    - [List Input Items](#list-input-items)
   - [Functions](#functions)
   - [Completions](#completions)
   - [Embeddings](#embeddings)
@@ -68,6 +78,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
   - [Usage](#usage)
   - [Errors](#errors-1)
 - [Development](#development)
+  - [To check for deprecations](#to-check-for-deprecations)
 - [Release](#release)
 - [Contributing](#contributing)
 - [License](#license)
@@ -86,7 +97,7 @@ gem "ruby-openai"
 And then execute:
 
 ```bash
-
+bundle install
 ```
 
 ### Gem install
@@ -94,7 +105,7 @@ $ bundle install
 Or install with:
 
 ```bash
-
+gem install ruby-openai
 ```
 
 and require with:
@@ -228,6 +239,28 @@ end
 
 where `AZURE_OPENAI_URI` is e.g. `https://custom-domain.openai.azure.com/openai/deployments/gpt-35-turbo`
 
+#### Deepseek
+
+[Deepseek](https://api-docs.deepseek.com/) is compatible with the OpenAI chat API. Get an access token from [here](https://platform.deepseek.com/api_keys), then:
+
+```ruby
+client = OpenAI::Client.new(
+  access_token: "deepseek_access_token_goes_here",
+  uri_base: "https://api.deepseek.com/"
+)
+
+client.chat(
+  parameters: {
+    model: "deepseek-chat", # Required.
+    messages: [{ role: "user", content: "Hello!"}], # Required.
+    temperature: 0.7,
+    stream: proc do |chunk, _bytesize|
+      print chunk.dig("choices", 0, "delta", "content")
+    end
+  }
+)
+```
+
 #### Ollama
 
 Ollama allows you to run open-source LLMs, such as Llama 3, locally. It [offers chat compatibility](https://github.com/ollama/ollama/blob/main/docs/openai.md) with the OpenAI API.
@@ -304,6 +337,12 @@ client.models.list
 client.models.retrieve(id: "gpt-4o")
 ```
 
+You can also delete any fine-tuned model you generated, if you're an account Owner on your OpenAI organization:
+
+```ruby
+client.models.delete(id: "ft:gpt-4o-mini:acemeco:suffix:abc123")
+```
+
 ### Chat
 
 GPT is a model that can be used to generate text in a conversational style. You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
@@ -441,6 +480,102 @@ You can stream it as well!
 # }
 ```
 
+### Responses API
+
+[OpenAI's most advanced interface for generating model responses](https://platform.openai.com/docs/api-reference/responses). Supports text and image inputs, and text outputs. Create stateful interactions with the model, using the output of previous responses as input. Extend the model's capabilities with built-in tools for file search, web search, computer use, and more. Allow the model access to external systems and data using function calling.
+
+#### Create a Response
+
+```ruby
+response = client.responses.create(parameters: {
+  model: "gpt-4o",
+  input: "Hello! I'm Szymon!"
+})
+puts response.dig("output", 0, "content", 0, "text")
+# => Hello Szymon! How can I assist you today?
+```
+
+#### Follow-up Messages
+
+```ruby
+followup = client.responses.create(parameters: {
+  model: "gpt-4o",
+  input: "Remind me, what is my name?",
+  previous_response_id: response["id"]
+})
+puts followup.dig("output", 0, "content", 0, "text")
+# => Your name is Szymon! How can I help you today?
+```
+
+#### Tool Calls
+
+```ruby
+response = client.responses.create(parameters: {
+  model: "gpt-4o",
+  input: "What's the weather in Paris?",
+  tools: [
+    {
+      "type" => "function",
+      "name" => "get_current_weather",
+      "description" => "Get the current weather in a given location",
+      "parameters" => {
+        "type" => "object",
+        "properties" => {
+          "location" => {
+            "type" => "string",
+            "description" => "The geographic location to get the weather for"
+          }
+        },
+        "required" => ["location"]
+      }
+    }
+  ]
+})
+puts response.dig("output", 0, "name")
+# => "get_current_weather"
+```
+
+#### Streaming
+
+```ruby
+client.responses.create(
+  parameters: {
+    model: "gpt-4o", # Required.
+    input: "Hello!", # Required.
+    stream: proc do |chunk, _bytesize|
+      if chunk["type"] == "response.output_text.delta"
+        print chunk["delta"]
+        $stdout.flush # Ensure output is displayed immediately
+      end
+    end
+  }
+)
+# => "Hi there! How can I assist you today?..."
+```
+
+#### Retrieve a Response
+
+```ruby
+retrieved_response = client.responses.retrieve(response_id: response["id"])
+puts retrieved_response["object"]
+# => "response"
+```
+
+#### Delete a Response
+
+```ruby
+deletion = client.responses.delete(response_id: response["id"])
+puts deletion["deleted"]
+# => true
+```
+
+#### List Input Items
+
+```ruby
+input_items = client.responses.input_items(response_id: response["id"])
+puts input_items["object"] # => "list"
+```
+
 ### Functions
 
 You can describe and pass in functions and the model will intelligently choose to output a JSON object containing arguments to call them - eg., to use your method `get_current_weather` to get the weather in a given location. Note that tool_choice is optional, but if you exclude it, the model will choose whether to use the function or not ([see here](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice)).
@@ -746,6 +881,12 @@ You can also capture the events for a job:
 client.finetunes.list_events(id: fine_tune_id)
 ```
 
+You can also delete any fine-tuned model you generated, if you're an account Owner on your OpenAI organization:
+
+```ruby
+client.models.delete(id: fine_tune_id)
+```
+
 ### Vector Stores
 
 Vector Store objects give the File Search tool the ability to search your files.
@@ -786,6 +927,27 @@ response = client.vector_stores.modify(
 )
 ```
 
+You can search a vector store for relevant chunks based on a query:
+
+```ruby
+response = client.vector_stores.search(
+  id: vector_store_id,
+  parameters: {
+    query: "What is the return policy?",
+    max_num_results: 20,
+    ranking_options: {
+      # Add any ranking options here in line with the API documentation
+    },
+    rewrite_query: true,
+    filters: {
+      type: "eq",
+      property: "region",
+      value: "us"
+    }
+  }
+)
+```
+
 You can delete vector stores:
 
 ```ruby
@@ -1464,6 +1626,7 @@ File.binwrite('demo.mp3', response)
 ```
 
 ### Usage
+
 The Usage API provides information about the cost of various OpenAI services within your organization.
 To use Admin APIs like Usage, you need to set an OPENAI_ADMIN_TOKEN, which can be generated [here](https://platform.openai.com/settings/organization/admin-keys).
 
@@ -1544,6 +1707,12 @@ To run all tests, execute the command `bundle exec rake`, which will also run th
 > [!WARNING]
 > If you have an `OPENAI_ACCESS_TOKEN` and `OPENAI_ADMIN_TOKEN` in your `ENV`, running the specs will hit the actual API, which will be slow and cost you money - 2 cents or more! Remove them from your environment with `unset` or similar if you just want to run the specs against the stored VCR responses.
 
+### To check for deprecations
+
+```
+bundle exec ruby -e "Warning[:deprecated] = true; require 'rspec'; exit RSpec::Core::Runner.run(['spec/openai/client/http_spec.rb:25'])"
+```
+
 ## Release
 
 First run the specs without VCR so they actually hit the API. This will cost 2 cents or more. Set OPENAI_ACCESS_TOKEN and OPENAI_ADMIN_TOKEN in your environment.
data/lib/openai/batches.rb
CHANGED
data/lib/openai/client.rb
CHANGED
@@ -5,7 +5,7 @@ module OpenAI
     SENSITIVE_ATTRIBUTES = %i[@access_token @admin_token @organization_id @extra_headers].freeze
     CONFIG_KEYS = %i[access_token admin_token api_type api_version extra_headers
                      log_errors organization_id request_timeout uri_base].freeze
-    attr_reader
+    attr_reader(*CONFIG_KEYS, :faraday_middleware)
     attr_writer :access_token
 
     def initialize(config = {}, &faraday_middleware)
@@ -52,6 +52,10 @@ module OpenAI
       @models ||= OpenAI::Models.new(client: self)
     end
 
+    def responses
+      @responses ||= OpenAI::Responses.new(client: self)
+    end
+
     def assistants
       @assistants ||= OpenAI::Assistants.new(client: self)
     end
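The `attr_reader` change above is the "syntax ambiguity" fix from the changelog: under `ruby -w`, a bare splat argument such as `attr_reader *CONFIG_KEYS` produces "warning: `*' interpreted as argument prefix", and parenthesizing the arguments silences it. A toy illustration (names are hypothetical, not gem code):

```ruby
KEYS = %i[name size].freeze

class Example
  # A bare splat (`attr_reader *KEYS, :extra`) warns under `ruby -w`;
  # parenthesized, the call parses unambiguously:
  attr_reader(*KEYS, :extra)
end

Example.new.respond_to?(:name) # => true
```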
data/lib/openai/files.rb
CHANGED
@@ -5,6 +5,8 @@ module OpenAI
       batch
       fine-tune
       vision
+      user_data
+      evals
     ].freeze
 
     def initialize(client:)
@@ -18,9 +20,7 @@ module OpenAI
     def upload(parameters: {})
       file_input = parameters[:file]
       file = prepare_file_input(file_input: file_input)
-
       validate(file: file, purpose: parameters[:purpose], file_input: file_input)
-
       @client.multipart_post(
         path: "/files",
         parameters: parameters.merge(file: file)
@@ -55,8 +55,12 @@ module OpenAI
 
     def validate(file:, purpose:, file_input:)
       raise ArgumentError, "`file` is required" if file.nil?
+
       unless PURPOSES.include?(purpose)
-
+        filename = file_input.is_a?(String) ? File.basename(file_input) : "uploaded file"
+        message = "The purpose '#{purpose}' for file '#{filename}' is not in the known purpose "
+        message += "list: #{PURPOSES.join(', ')}."
+        OpenAI.log_message("Warning", message, :warn)
       end
 
       validate_jsonl(file: file) if file_input.is_a?(String) && file_input.end_with?(".jsonl")
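The upshot of this change: an unrecognized `purpose` no longer aborts the upload; it logs a warning and proceeds. A hedged sketch of the caller-side effect (the path and purpose below are made up):

```ruby
# Previously an unrecognized purpose raised; now it logs a warning via
# OpenAI.log_message and the upload still goes through.
client.files.upload(
  parameters: {
    file: "data/notes.txt",        # hypothetical local path
    purpose: "my-custom-purpose"   # not in the known PURPOSES list
  }
)
# Warning (spotted in ruby-openai 8.1.0): The purpose 'my-custom-purpose' for
# file 'notes.txt' is not in the known purpose list: ...
```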
data/lib/openai/http.rb
CHANGED
@@ -7,47 +7,52 @@ module OpenAI
     include HTTPHeaders
 
     def get(path:, parameters: nil)
-
+      parse_json(conn.get(uri(path: path), parameters) do |req|
         req.headers = headers
       end&.body)
     end
 
     def post(path:)
-
+      parse_json(conn.post(uri(path: path)) do |req|
         req.headers = headers
       end&.body)
     end
 
     def json_post(path:, parameters:, query_parameters: {})
-      conn.post(uri(path: path)) do |req|
+      parse_json(conn.post(uri(path: path)) do |req|
        configure_json_post_request(req, parameters)
        req.params = req.params.merge(query_parameters)
-      end&.body
+      end&.body)
     end
 
     def multipart_post(path:, parameters: nil)
-      conn(multipart: true).post(uri(path: path)) do |req|
+      parse_json(conn(multipart: true).post(uri(path: path)) do |req|
        req.headers = headers.merge({ "Content-Type" => "multipart/form-data" })
        req.body = multipart_parameters(parameters)
-      end&.body
+      end&.body)
    end
 
     def delete(path:)
-      conn.delete(uri(path: path)) do |req|
+      parse_json(conn.delete(uri(path: path)) do |req|
        req.headers = headers
-      end&.body
+      end&.body)
    end
 
     private
 
-    def
+    def parse_json(response)
       return unless response
       return response unless response.is_a?(String)
 
-
-
+      original_response = response.dup
+      if response.include?("}\n{")
+        # Attempt to convert what looks like a multiline string of JSON objects to a JSON array.
+        response = response.gsub("}\n{", "},{").prepend("[").concat("]")
+      end
 
       JSON.parse(response)
+    rescue JSON::ParserError
+      original_response
     end
 
     # Given a proc, returns an outer proc that can be used to iterate over a JSON stream of chunks.
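The net effect of the `parse_json` change: the multiline-JSON rewrite is now guarded by a `"}\n{"` check, and `JSON::ParserError` is rescued, so non-JSON bodies come back verbatim instead of raising. A self-contained restatement of that logic with illustrative inputs (not the gem's test suite):

```ruby
require "json"

# Standalone copy of the fallback behavior, for illustration only.
def parse_json(response)
  return unless response
  return response unless response.is_a?(String)

  original_response = response.dup
  if response.include?("}\n{")
    # A multiline run of JSON objects becomes a JSON array.
    response = response.gsub("}\n{", "},{").prepend("[").concat("]")
  end
  JSON.parse(response)
rescue JSON::ParserError
  original_response
end

parse_json(%({"a": 1}))            # => {"a"=>1}
parse_json(%({"a": 1}\n{"b": 2}))  # => [{"a"=>1}, {"b"=>2}]
parse_json("plain text body")      # => "plain text body" (previously this raised)
```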
data/lib/openai/models.rb
CHANGED
data/lib/openai/responses.rb
ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class Responses
+    def initialize(client:)
+      @client = client
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/responses", parameters: parameters)
+    end
+
+    def retrieve(response_id:)
+      @client.get(path: "/responses/#{response_id}")
+    end
+
+    def delete(response_id:)
+      @client.delete(path: "/responses/#{response_id}")
+    end
+
+    def input_items(response_id:, parameters: {})
+      @client.get(path: "/responses/#{response_id}/input_items", parameters: parameters)
+    end
+  end
+end
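Note that `input_items` forwards its `parameters` hash as query parameters via `@client.get`, so list options can be passed through. A hedged usage sketch (`limit` and `order` assumed per OpenAI's standard list-endpoint conventions; verify against the API docs):

```ruby
input_items = client.responses.input_items(
  response_id: response["id"],
  parameters: { limit: 2, order: "desc" } # assumed list params
)
puts input_items["data"].map { |item| item["type"] }
```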
data/lib/openai/vector_stores.rb
CHANGED
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -1,12 +1,12 @@
 require "faraday"
-require "faraday/multipart"
-
+require "faraday/multipart" if Gem::Version.new(Faraday::VERSION) >= Gem::Version.new("2.0")
 require_relative "openai/http"
 require_relative "openai/client"
 require_relative "openai/files"
 require_relative "openai/finetunes"
 require_relative "openai/images"
 require_relative "openai/models"
+require_relative "openai/responses"
 require_relative "openai/assistants"
 require_relative "openai/threads"
 require_relative "openai/messages"
@@ -31,12 +31,7 @@ module OpenAI
     rescue Faraday::Error => e
       raise e unless e.response.is_a?(Hash)
 
-
-      logger.formatter = proc do |_severity, _datetime, _progname, msg|
-        "\033[31mOpenAI HTTP Error (spotted in ruby-openai #{VERSION}): #{msg}\n\033[0m"
-      end
-      logger.error(e.response[:body])
-
+      OpenAI.log_message("OpenAI HTTP Error", e.response[:body], :error)
       raise e
     end
   end
@@ -72,25 +67,38 @@ module OpenAI
 
   class << self
     attr_writer :configuration
-  end
 
-
-
-
+    def configuration
+      @configuration ||= OpenAI::Configuration.new
+    end
 
-
-
-
+    def configure
+      yield(configuration)
+    end
 
-
-
-
-
-
+    # Estimate the number of tokens in a string, using the rules of thumb from OpenAI:
+    # https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
+    def rough_token_count(content = "")
+      raise ArgumentError, "rough_token_count requires a string" unless content.is_a? String
+      return 0 if content.empty?
+
+      count_by_chars = content.size / 4.0
+      count_by_words = content.split.size * 4.0 / 3
+      estimate = ((count_by_chars + count_by_words) / 2.0).round
+      [1, estimate].max
+    end
 
-
-
-
-    [
+    # Log a message with appropriate formatting
+    # @param prefix [String] Prefix to add to the message
+    # @param message [String] The message to log
+    # @param level [Symbol] The log level (:error, :warn, etc.)
+    def log_message(prefix, message, level = :warn)
+      color = level == :error ? "\033[31m" : "\033[33m"
+      logger = Logger.new($stdout)
+      logger.formatter = proc do |_severity, _datetime, _progname, msg|
+        "#{color}#{prefix} (spotted in ruby-openai #{VERSION}): #{msg}\n\033[0m"
+      end
+      logger.send(level, message)
+    end
   end
 end
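A quick worked check of `rough_token_count`'s heuristic (the average of characters/4 and words*4/3):

```ruby
# "Hello token counting" => 20 characters, 3 words
# count_by_chars = 20 / 4.0           # => 5.0
# count_by_words = 3 * 4.0 / 3        # => 4.0
# estimate = ((5.0 + 4.0) / 2).round  # => 5 (4.5 rounds up)
OpenAI.rough_token_count("Hello token counting") # => 5
```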
data/lib/ruby/openai.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version:
+  version: 8.1.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2025-
+date: 2025-03-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -91,7 +91,6 @@ files:
 - lib/openai/audio.rb
 - lib/openai/batches.rb
 - lib/openai/client.rb
-- lib/openai/compatibility.rb
 - lib/openai/files.rb
 - lib/openai/finetunes.rb
 - lib/openai/http.rb
@@ -99,6 +98,7 @@ files:
 - lib/openai/images.rb
 - lib/openai/messages.rb
 - lib/openai/models.rb
+- lib/openai/responses.rb
 - lib/openai/run_steps.rb
 - lib/openai/runs.rb
 - lib/openai/threads.rb
data/lib/openai/compatibility.rb
DELETED
@@ -1,11 +0,0 @@
-module Ruby
-  module OpenAI
-    VERSION = ::OpenAI::VERSION
-
-    Error = ::OpenAI::Error
-    AuthenticationError = ::OpenAI::AuthenticationError
-    ConfigurationError = ::OpenAI::ConfigurationError
-    Configuration = ::OpenAI::Configuration
-    MiddlewareErrors = ::OpenAI::MiddlewareErrors
-  end
-end