ruby-openai 5.1.0 → 6.3.0
- checksums.yaml +4 -4
- data/.devcontainer/Dockerfile +16 -0
- data/.devcontainer/devcontainer.json +36 -0
- data/.devcontainer/docker-compose.yml +19 -0
- data/.github/FUNDING.yml +13 -0
- data/CHANGELOG.md +46 -0
- data/Gemfile +2 -2
- data/Gemfile.lock +13 -9
- data/README.md +127 -35
- data/lib/openai/assistants.rb +27 -0
- data/lib/openai/audio.rb +4 -0
- data/lib/openai/client.rb +29 -6
- data/lib/openai/compatibility.rb +1 -0
- data/lib/openai/finetunes.rb +6 -14
- data/lib/openai/http.rb +63 -51
- data/lib/openai/http_headers.rb +36 -0
- data/lib/openai/messages.rb +23 -0
- data/lib/openai/run_steps.rb +15 -0
- data/lib/openai/runs.rb +32 -0
- data/lib/openai/threads.rb +27 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +22 -1
- data/ruby-openai.gemspec +1 -0
- metadata +37 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2e2bdf19ad32b8eb492faca3e937614c30ab57817c374797362ca27ffff1cf7e
+  data.tar.gz: b3d31aaa13bec5bdeb08718c04afad99b528e9e5c1bfbd279a8111e4fa12739c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bf6f46dbb52890ff1468d727711681ad95bb82e26b77f092cf6a81be25dbdb7ef3b3a58626090160bfc927ec3585723c17a9c47005ff64b035a3af85ba887e51
+  data.tar.gz: cbe3a5d6c57757beee533c3b9c05aa43fb343f7da542af4fc58bf70223f84cc674900d032e824f1c34e19ef17a2ba0d366fe9353dae67db162ce435ef2f1a496
data/.devcontainer/Dockerfile
ADDED
@@ -0,0 +1,16 @@
+FROM ruby:3.2.2-slim-bullseye
+
+ENV TZ="Europe/London"
+
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+  && apt-get -y install --no-install-recommends \
+  apt-utils \
+  build-essential \
+  curl \
+  git \
+  vim \
+  zsh
+
+RUN gem install bundler
+
+WORKDIR /workspace
data/.devcontainer/devcontainer.json
ADDED
@@ -0,0 +1,36 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
+// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/ruby-rails-postgres
+// Update the VARIANT arg in docker-compose.yml to pick a Ruby version
+{
+  "name": "ruby-openai",
+  "dockerComposeFile": "docker-compose.yml",
+  "service": "app",
+  "workspaceFolder": "/workspace",
+  "containerEnv": {
+    "GITHUB_TOKEN": "${localEnv:GITHUB_TOKEN}",
+    "GITHUB_USER": "${localEnv:GITHUB_USER}"
+  },
+  // Configure tool-specific properties.
+  "customizations": {
+    // Configure properties specific to VS Code.
+    "vscode": {
+      // Add the IDs of extensions you want installed when the container is created.
+      "extensions": [
+        "rebornix.Ruby",
+        "sleistner.vscode-fileutils",
+        "ms-azuretools.vscode-docker",
+        "samverschueren.final-newline",
+        "GitHub.copilot",
+        "usernamehw.remove-empty-lines",
+        "wingrunr21.vscode-ruby",
+      ]
+    }
+  },
+  // Use 'postCreateCommand' to run commands after the container is created.
+  "postCreateCommand": "bundle install",
+  // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
+  "features": {
+    "git": "os-provided",
+    "github-cli": "latest"
+  }
+}
data/.devcontainer/docker-compose.yml
ADDED
@@ -0,0 +1,19 @@
+version: "3"
+
+services:
+  app:
+    build:
+      context: ..
+      dockerfile: .devcontainer/Dockerfile
+
+    volumes:
+      - ..:/workspace:cached
+      - bundle_cache:/bundle
+
+    command: sleep infinity
+
+    environment:
+      TZ: Europe/London
+
+volumes:
+  bundle_cache:
data/.github/FUNDING.yml
ADDED
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: alexrudall
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
data/CHANGELOG.md
CHANGED
@@ -5,6 +5,52 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [6.3.0] - 2023-11-26
+
+### Added
+
+- Add ability to pass [Faraday middleware](https://lostisland.github.io/faraday/#/middleware/index) to the client in a block, eg. to enable verbose logging - shout out to [@obie](https://github.com/obie) for pushing for this.
+- Add better error logging to the client by default.
+- Bump Event Source to v1, thank you [@atesgoral](https://github.com/atesgoral) @ Shopify!
+
+## [6.2.0] - 2023-11-15
+
+### Added
+
+- Add text-to-speech! Thank you [@codergeek121](https://github.com/codergeek121)
+
+## [6.1.0] - 2023-11-14
+
+### Added
+
+- Add support for Assistants, Threads, Messages and Runs. Thank you [@Haegin](https://github.com/Haegin) for the excellent work on this PR, and many reviewers for their contributions!
+
+## [6.0.1] - 2023-11-07
+
+### Fix
+
+- Gracefully handle the case where an HTTP error response may not have valid JSON in its body. Thank you [@atesgoral](https://github.com/atesgoral)!
+
+## [6.0.0] - 2023-11-06
+
+### Added
+
+- [BREAKING] HTTP errors will now be raised by ruby-openai as Faraday:Errors, including when streaming! Implemented by [@atesgoral](https://github.com/atesgoral)
+- [BREAKING] Switch from legacy Finetunes to the new Fine-tune-jobs endpoints. Implemented by [@lancecarlson](https://github.com/lancecarlson)
+- [BREAKING] Remove deprecated Completions endpoints - use Chat instead.
+
+### Fix
+
+- [BREAKING] Fix issue where :stream parameters were replaced by a boolean in the client application. Thanks to [@martinjaimem](https://github.com/martinjaimem), [@vickymadrid03](https://github.com/vickymadrid03) and [@nicastelo](https://github.com/nicastelo) for spotting and fixing this issue.
+
+## [5.2.0] - 2023-10-30
+
+### Fix
+
+- Added more spec-compliant SSE parsing: see here https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation
+- Fixes issue where OpenAI or an intermediary returns only partial JSON per chunk of streamed data
+- Huge thanks to [@atesgoral](https://github.com/atesgoral) for this important fix!
+
 ## [5.1.0] - 2023-08-20
 
 ### Added
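Taken together, the 6.x entries above change how the Faraday connection is customised and how HTTP errors surface. A minimal sketch of the new usage — the `OPENAI_ACCESS_TOKEN` environment variable and the chat prompt are placeholders, not values from this diff:

```ruby
require "openai"
require "logger"

# Pass Faraday middleware in a block (new in 6.3.0) to get verbose request logging.
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN")) do |f|
  f.response :logger, Logger.new($stdout), bodies: true
end

begin
  client.chat(parameters: { model: "gpt-3.5-turbo", messages: [{ role: "user", content: "Hi!" }] })
rescue Faraday::Error => e
  # Since 6.0.0, HTTP errors are raised as Faraday errors, including while streaming.
  warn "OpenAI request failed: #{e.message}"
end
```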
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,22 +1,26 @@
 PATH
   remote: .
   specs:
-    ruby-openai (
+    ruby-openai (6.3.0)
+      event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.
+    addressable (2.8.5)
       public_suffix (>= 2.0.2, < 6.0)
     ast (2.4.2)
+    base64 (0.1.1)
     byebug (11.1.3)
     crack (0.4.5)
       rexml
     diff-lcs (1.5.0)
     dotenv (2.8.1)
-
+    event_stream_parser (1.0.0)
+    faraday (2.7.11)
+      base64
       faraday-net_http (>= 2.0, < 3.1)
       ruby2_keywords (>= 0.0.4)
     faraday-multipart (1.0.4)
@@ -28,11 +32,11 @@ GEM
     parallel (1.22.1)
     parser (3.2.2.0)
       ast (~> 2.4.1)
-    public_suffix (5.0.
+    public_suffix (5.0.3)
     rainbow (3.1.1)
-    rake (13.0
+    rake (13.1.0)
     regexp_parser (2.8.0)
-    rexml (3.2.
+    rexml (3.2.6)
     rspec (3.12.0)
       rspec-core (~> 3.12.0)
       rspec-expectations (~> 3.12.0)
@@ -62,7 +66,7 @@ GEM
     ruby2_keywords (0.0.5)
     unicode-display_width (2.4.2)
     vcr (6.1.0)
-    webmock (3.
+    webmock (3.19.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -73,12 +77,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.
+  rake (~> 13.1)
   rspec (~> 3.12)
   rubocop (~> 1.50.2)
   ruby-openai!
   vcr (~> 6.1.0)
-  webmock (~> 3.
+  webmock (~> 3.19.1)
 
 BUNDLED WITH
    2.4.5
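The new `event_stream_parser` dependency in the lock file above is what the streaming code in `lib/openai/http.rb` now builds on. A rough, self-contained sketch of how a single SSE chunk is parsed — the chunk literal is an invented example, not taken from this diff:

```ruby
require "event_stream_parser"
require "json"

parser = EventStreamParser::Parser.new
chunk = "data: {\"choices\":[{\"delta\":{\"content\":\"Hi\"}}]}\n\n" # one server-sent event

# Same pattern as to_json_stream in lib/openai/http.rb: feed each chunk and hand
# every data payload (except the "[DONE]" sentinel) to the user's proc.
parser.feed(chunk) do |_type, data|
  puts JSON.parse(data).dig("choices", 0, "delta", "content") unless data == "[DONE]"
end
# => Hi
```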
data/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Ruby OpenAI
 
-[![Gem Version](https://
+[![Gem Version](https://img.shields.io/gem/v/ruby-openai.svg)](https://rubygems.org/gems/ruby-openai)
 [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.txt)
 [![CircleCI Build Status](https://circleci.com/gh/alexrudall/ruby-openai.svg?style=shield)](https://circleci.com/gh/alexrudall/ruby-openai)
 
@@ -8,11 +8,7 @@ Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
 Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
 
-[Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD)
-
-[Quick guide to streaming ChatGPT with Rails 7 and Hotwire](https://gist.github.com/alexrudall/cb5ee1e109353ef358adb4e66631799d)
-
-Follow me on [Twitter](https://twitter.com/alexrudall) for more Ruby / AI content
+[🚢 Hire me](https://peaceterms.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
 
 ### Bundler
 
@@ -112,6 +108,16 @@ OpenAI.configure do |config|
 end
 ```
 
+#### Verbose Logging
+
+You can pass [Faraday middleware](https://lostisland.github.io/faraday/#/middleware/index) to the client in a block, eg. to enable verbose logging with Ruby's [Logger](https://ruby-doc.org/3.2.2/stdlibs/logger/Logger.html):
+
+```ruby
+client = OpenAI::Client.new do |f|
+  f.response :logger, Logger.new($stdout), bodies: true
+end
+```
+
 #### Azure
 
 To use the [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) API, you can configure the gem like this:
@@ -151,7 +157,7 @@ client.models.retrieve(id: "text-ada-001")
 #### Examples
 
 - [GPT-4 (limited beta)](https://platform.openai.com/docs/models/gpt-4)
-  - gpt-4
+  - gpt-4 (uses current version)
   - gpt-4-0314
   - gpt-4-32k
 - [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5)
@@ -163,9 +169,9 @@ client.models.retrieve(id: "text-ada-001")
   - text-babbage-001
   - text-curie-001
 
-###
+### Chat
 
-
+GPT is a model that can be used to generate text in a conversational style. You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
 
 ```ruby
 response = client.chat(
@@ -178,11 +184,11 @@ puts response.dig("choices", 0, "message", "content")
 # => "Hello! How may I assist you today?"
 ```
 
-
+#### Streaming Chat
 
-[Quick guide to streaming
+[Quick guide to streaming Chat with Rails 7 and Hotwire](https://gist.github.com/alexrudall/cb5ee1e109353ef358adb4e66631799d)
 
-You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of
+You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of completion chunks as they are generated. Each time one or more chunks is received, the proc will be called once with each chunk, parsed as a Hash. If OpenAI returns an error, `ruby-openai` will raise a Faraday error.
 
 ```ruby
 client.chat(
@@ -197,7 +203,80 @@ client.chat(
 # => "Anna is a young woman in her mid-twenties, with wavy chestnut hair that falls to her shoulders..."
 ```
 
-Note:
+Note: OpenAPI currently does not report token usage for streaming responses. To count tokens while streaming, try `OpenAI.rough_token_count` or [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby). We think that each call to the stream proc corresponds to a single token, so you can also try counting the number of calls to the proc to get the completion token count.
+
+#### Vision
+
+You can use the GPT-4 Vision model to generate a description of an image:
+
+```ruby
+messages = [
+  { "type": "text", "text": "What’s in this image?"},
+  { "type": "image_url",
+    "image_url": {
+      "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    },
+  }
+]
+response = client.chat(
+  parameters: {
+    model: "gpt-4-vision-preview", # Required.
+    messages: [{ role: "user", content: messages}], # Required.
+  })
+puts response.dig("choices", 0, "message", "content")
+# => "The image depicts a serene natural landscape featuring a long wooden boardwalk extending straight ahead"
+```
+
+#### JSON Mode
+
+You can set the response_format to ask for responses in JSON (at least for `gpt-3.5-turbo-1106`):
+
+```ruby
+  response = client.chat(
+    parameters: {
+        model: "gpt-3.5-turbo-1106",
+        response_format: { type: "json_object" },
+        messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
+        temperature: 0.7,
+    })
+  puts response.dig("choices", 0, "message", "content")
+  {
+    "name": "John",
+    "age": 30,
+    "city": "New York",
+    "hobbies": ["reading", "traveling", "hiking"],
+    "isStudent": false
+  }
+```
+
+You can stream it as well!
+
+```ruby
+  response = client.chat(
+    parameters: {
+      model: "gpt-3.5-turbo-1106",
+      messages: [{ role: "user", content: "Can I have some JSON please?"}],
+        response_format: { type: "json_object" },
+        stream: proc do |chunk, _bytesize|
+          print chunk.dig("choices", 0, "delta", "content")
+        end
+      })
+  {
+    "message": "Sure, please let me know what specific JSON data you are looking for.",
+    "JSON_data": {
+      "example_1": {
+        "key_1": "value_1",
+        "key_2": "value_2",
+        "key_3": "value_3"
+      },
+      "example_2": {
+        "key_4": "value_4",
+        "key_5": "value_5",
+        "key_6": "value_6"
+      }
+    }
+  }
+```
 
 ### Functions
 
@@ -259,21 +338,6 @@ end
 # => "The weather is nice 🌞"
 ```
 
-### Completions
-
-Hit the OpenAI API for a completion using other GPT-3 models:
-
-```ruby
-response = client.completions(
-    parameters: {
-        model: "text-davinci-001",
-        prompt: "Once upon a time",
-        max_tokens: 5
-    })
-puts response["choices"].map { |c| c["text"] }
-# => [", there lived a great"]
-```
-
 ### Edits
 
 Send a string and some instructions for what to do to the string:
@@ -325,22 +389,22 @@ client.files.content(id: "file-123")
 client.files.delete(id: "file-123")
 ```
 
-###
+### Finetunes
 
 Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
 
 ```ruby
-response = client.files.upload(parameters: { file: "path/to/
+response = client.files.upload(parameters: { file: "path/to/sarcasm.jsonl", purpose: "fine-tune" })
 file_id = JSON.parse(response.body)["id"]
 ```
 
-You can then use this file ID to create a fine
+You can then use this file ID to create a fine tuning job:
 
 ```ruby
 response = client.finetunes.create(
     parameters: {
     training_file: file_id,
-    model: "
+    model: "gpt-3.5-turbo-0613"
 })
 fine_tune_id = response["id"]
 ```
@@ -371,10 +435,10 @@ response = client.completions(
 response.dig("choices", 0, "text")
 ```
 
-You can
+You can also capture the events for a job:
 
-```
-client.finetunes.
+```
+client.finetunes.list_events(id: fine_tune_id)
 ```
 
 ### Image Generation
@@ -457,6 +521,34 @@ puts response["text"]
 # => "Transcription of the text"
 ```
 
+#### Speech
+
+The speech API takes as input the text and a voice and returns the content of an audio file you can listen to.
+
+```ruby
+response = client.audio.speech(
+  parameters: {
+    model: "tts-1",
+    input: "This is a speech test!",
+    voice: "alloy"
+  }
+)
+File.binwrite('demo.mp3', response)
+# => mp3 file that plays: "This is a speech test!"
+```
+
+### Errors
+
+HTTP errors can be caught like this:
+
+```
+begin
+  OpenAI::Client.new.models.retrieve(id: "text-ada-001")
+rescue Faraday::Error => e
+  raise "Got a Faraday error: #{e}"
+end
+```
+
 ## Development
 
 After checking out the repo, run `bin/setup` to install dependencies. You can run `bin/console` for an interactive prompt that will allow you to experiment.
data/lib/openai/assistants.rb
ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class Assistants
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list
+      @client.get(path: "/assistants")
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/assistants/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/assistants", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/assistants/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/assistants/#{id}")
+    end
+  end
+end
data/lib/openai/audio.rb
CHANGED
data/lib/openai/client.rb
CHANGED
@@ -11,24 +11,21 @@ module OpenAI
       request_timeout
       extra_headers
     ].freeze
-    attr_reader *CONFIG_KEYS
+    attr_reader *CONFIG_KEYS, :faraday_middleware
 
-    def initialize(config = {})
+    def initialize(config = {}, &faraday_middleware)
       CONFIG_KEYS.each do |key|
         # Set instance variables like api_type & access_token. Fall back to global config
         # if not present.
         instance_variable_set("@#{key}", config[key] || OpenAI.configuration.send(key))
       end
+      @faraday_middleware = faraday_middleware
     end
 
     def chat(parameters: {})
       json_post(path: "/chat/completions", parameters: parameters)
     end
 
-    def completions(parameters: {})
-      json_post(path: "/completions", parameters: parameters)
-    end
-
     def edits(parameters: {})
       json_post(path: "/edits", parameters: parameters)
     end
@@ -57,6 +54,26 @@ module OpenAI
       @models ||= OpenAI::Models.new(client: self)
     end
 
+    def assistants
+      @assistants ||= OpenAI::Assistants.new(client: self)
+    end
+
+    def threads
+      @threads ||= OpenAI::Threads.new(client: self)
+    end
+
+    def messages
+      @messages ||= OpenAI::Messages.new(client: self)
+    end
+
+    def runs
+      @runs ||= OpenAI::Runs.new(client: self)
+    end
+
+    def run_steps
+      @run_steps ||= OpenAI::RunSteps.new(client: self)
+    end
+
     def moderations(parameters: {})
       json_post(path: "/moderations", parameters: parameters)
     end
@@ -64,5 +81,11 @@ module OpenAI
     def azure?
       @api_type&.to_sym == :azure
     end
+
+    def beta(apis)
+      dup.tap do |client|
+        client.add_headers("OpenAI-Beta": apis.map { |k, v| "#{k}=#{v}" }.join(";"))
+      end
+    end
   end
 end
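The new `Client#beta` helper above is how the Assistants-related classes obtain their beta header. A rough sketch of what it does, assuming a configured client (the access token handling is an assumption, not shown in this diff):

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# beta(...) duplicates the client and adds an "OpenAI-Beta" header, so one
# configured client can serve both stable and beta endpoints.
beta_client = client.beta(assistants: "v1") # sends "OpenAI-Beta: assistants=v1"
beta_client.get(path: "/assistants")

# The new resource accessors wrap the same mechanism:
client.assistants.list
client.threads.create
```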
data/lib/openai/compatibility.rb
CHANGED
data/lib/openai/finetunes.rb
CHANGED
@@ -5,31 +5,23 @@ module OpenAI
     end
 
     def list
-      @client.get(path: "/
+      @client.get(path: "/fine_tuning/jobs")
     end
 
     def create(parameters: {})
-      @client.json_post(path: "/
+      @client.json_post(path: "/fine_tuning/jobs", parameters: parameters)
     end
 
     def retrieve(id:)
-      @client.get(path: "/
+      @client.get(path: "/fine_tuning/jobs/#{id}")
     end
 
     def cancel(id:)
-      @client.
+      @client.json_post(path: "/fine_tuning/jobs/#{id}/cancel", parameters: {})
     end
 
-    def
-      @client.get(path: "/
-    end
-
-    def delete(fine_tuned_model:)
-      if fine_tuned_model.start_with?("ft-")
-        raise ArgumentError, "Please give a fine_tuned_model name, not a fine-tune ID"
-      end
-
-      @client.delete(path: "/models/#{fine_tuned_model}")
+    def list_events(id:)
+      @client.get(path: "/fine_tuning/jobs/#{id}/events")
     end
   end
 end
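For reference, the renamed endpoints above map onto calls like the following; the client setup and `file_id` are placeholders (the id would come from `client.files.upload`, as in the README section earlier in this diff):

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))
file_id = "file-abc123" # placeholder id returned by client.files.upload

job = client.finetunes.create(
  parameters: { training_file: file_id, model: "gpt-3.5-turbo-0613" }
)
client.finetunes.retrieve(id: job["id"])      # GET  /fine_tuning/jobs/{id}
client.finetunes.list_events(id: job["id"])   # GET  /fine_tuning/jobs/{id}/events
client.finetunes.cancel(id: job["id"])        # POST /fine_tuning/jobs/{id}/cancel
```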
data/lib/openai/http.rb
CHANGED
@@ -1,72 +1,87 @@
+require "event_stream_parser"
+
+require_relative "http_headers"
+
 module OpenAI
   module HTTP
+    include HTTPHeaders
+
     def get(path:)
-
+      parse_jsonl(conn.get(uri(path: path)) do |req|
         req.headers = headers
       end&.body)
     end
 
-    def
-
-      if parameters[:stream].respond_to?(:call)
-        req.options.on_data = to_json_stream(user_proc: parameters[:stream])
-        parameters[:stream] = true # Necessary to tell OpenAI to stream.
-      elsif parameters[:stream]
-        raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
-      end
-
+    def post(path:)
+      parse_jsonl(conn.post(uri(path: path)) do |req|
         req.headers = headers
-        req.body = parameters.to_json
       end&.body)
     end
 
+    def json_post(path:, parameters:)
+      conn.post(uri(path: path)) do |req|
+        configure_json_post_request(req, parameters)
+      end&.body
+    end
+
     def multipart_post(path:, parameters: nil)
-
+      conn(multipart: true).post(uri(path: path)) do |req|
         req.headers = headers.merge({ "Content-Type" => "multipart/form-data" })
         req.body = multipart_parameters(parameters)
-      end&.body
+      end&.body
     end
 
     def delete(path:)
-
+      conn.delete(uri(path: path)) do |req|
         req.headers = headers
-      end&.body
+      end&.body
     end
 
     private
 
-    def
-      return unless
+    def parse_jsonl(response)
+      return unless response
+      return response unless response.is_a?(String)
 
-      JSON.parse(string)
-    rescue JSON::ParserError
       # Convert a multiline string of JSON objects to a JSON array.
-
+      response = response.gsub("}\n{", "},{").prepend("[").concat("]")
+
+      JSON.parse(response)
     end
 
     # Given a proc, returns an outer proc that can be used to iterate over a JSON stream of chunks.
     # For each chunk, the inner user_proc is called giving it the JSON object. The JSON object could
     # be a data object or an error object as described in the OpenAI API documentation.
     #
-    # If the JSON object for a given data or error message is invalid, it is ignored.
-    #
     # @param user_proc [Proc] The inner proc to call for each JSON object in the chunk.
     # @return [Proc] An outer proc that iterates over a raw stream, converting it to JSON.
     def to_json_stream(user_proc:)
-
-
-
-
-
+      parser = EventStreamParser::Parser.new
+
+      proc do |chunk, _bytes, env|
+        if env && env.status != 200
+          raise_error = Faraday::Response::RaiseError.new
+          raise_error.on_complete(env.merge(body: try_parse_json(chunk)))
+        end
+
+        parser.feed(chunk) do |_type, data|
+          user_proc.call(JSON.parse(data)) unless data == "[DONE]"
         end
       end
     end
 
     def conn(multipart: false)
-      Faraday.new do |f|
+      connection = Faraday.new do |f|
         f.options[:timeout] = @request_timeout
         f.request(:multipart) if multipart
+        f.use MiddlewareErrors
+        f.response :raise_error
+        f.response :json
       end
+
+      @faraday_middleware&.call(connection)
+
+      connection
     end
 
     def uri(path:)
@@ -78,29 +93,6 @@ module OpenAI
       end
     end
 
-    def headers
-      if azure?
-        azure_headers
-      else
-        openai_headers
-      end.merge(@extra_headers || {})
-    end
-
-    def openai_headers
-      {
-        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@access_token}",
-        "OpenAI-Organization" => @organization_id
-      }
-    end
-
-    def azure_headers
-      {
-        "Content-Type" => "application/json",
-        "api-key" => @access_token
-      }
-    end
-
     def multipart_parameters(parameters)
       parameters&.transform_values do |value|
         next value unless value.respond_to?(:close) # File or IO object.
@@ -111,5 +103,25 @@ module OpenAI
         Faraday::UploadIO.new(value, "", value.path)
       end
     end
+
+    def configure_json_post_request(req, parameters)
+      req_parameters = parameters.dup
+
+      if parameters[:stream].respond_to?(:call)
+        req.options.on_data = to_json_stream(user_proc: parameters[:stream])
+        req_parameters[:stream] = true # Necessary to tell OpenAI to stream.
+      elsif parameters[:stream]
+        raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
+      end
+
+      req.headers = headers
+      req.body = req_parameters.to_json
+    end
+
+    def try_parse_json(maybe_json)
+      JSON.parse(maybe_json)
+    rescue JSON::ParserError
+      maybe_json
+    end
   end
 end
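A quick illustration of what the new `parse_jsonl` helper above does with a JSONL response body (the input string is a made-up example, not data from this diff):

```ruby
require "json"

body = "{\"id\": 1}\n{\"id\": 2}"                           # JSONL, e.g. an exported file's content
wrapped = body.gsub("}\n{", "},{").prepend("[").concat("]")  # => "[{\"id\": 1},{\"id\": 2}]"
JSON.parse(wrapped)                                           # => [{"id"=>1}, {"id"=>2}]
```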
data/lib/openai/http_headers.rb
ADDED
@@ -0,0 +1,36 @@
+module OpenAI
+  module HTTPHeaders
+    def add_headers(headers)
+      @extra_headers = extra_headers.merge(headers.transform_keys(&:to_s))
+    end
+
+    private
+
+    def headers
+      if azure?
+        azure_headers
+      else
+        openai_headers
+      end.merge(extra_headers)
+    end
+
+    def openai_headers
+      {
+        "Content-Type" => "application/json",
+        "Authorization" => "Bearer #{@access_token}",
+        "OpenAI-Organization" => @organization_id
+      }
+    end
+
+    def azure_headers
+      {
+        "Content-Type" => "application/json",
+        "api-key" => @access_token
+      }
+    end
+
+    def extra_headers
+      @extra_headers ||= {}
+    end
+  end
+end
data/lib/openai/messages.rb
ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class Messages
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:)
+      @client.get(path: "/threads/#{thread_id}/messages")
+    end
+
+    def retrieve(thread_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/messages/#{id}")
+    end
+
+    def create(thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/messages", parameters: parameters)
+    end
+
+    def modify(id:, thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/messages/#{id}", parameters: parameters)
+    end
+  end
+end
data/lib/openai/run_steps.rb
ADDED
@@ -0,0 +1,15 @@
+module OpenAI
+  class RunSteps
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:, run_id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{run_id}/steps")
+    end
+
+    def retrieve(thread_id:, run_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{run_id}/steps/#{id}")
+    end
+  end
+end
data/lib/openai/runs.rb
ADDED
@@ -0,0 +1,32 @@
+module OpenAI
+  class Runs
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list(thread_id:)
+      @client.get(path: "/threads/#{thread_id}/runs")
+    end
+
+    def retrieve(thread_id:, id:)
+      @client.get(path: "/threads/#{thread_id}/runs/#{id}")
+    end
+
+    def create(thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs", parameters: parameters)
+    end
+
+    def modify(id:, thread_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs/#{id}", parameters: parameters)
+    end
+
+    def cancel(id:, thread_id:)
+      @client.post(path: "/threads/#{thread_id}/runs/#{id}/cancel")
+    end
+
+    def submit_tool_outputs(thread_id:, run_id:, parameters: {})
+      @client.json_post(path: "/threads/#{thread_id}/runs/#{run_id}/submit_tool_outputs",
+                        parameters: parameters)
+    end
+  end
+end
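Putting the new Assistants, Threads, Messages and Runs classes together, an end-to-end flow would look roughly like the sketch below; the model name, prompt text and naive polling loop are placeholders and assumptions, not values taken from this diff:

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

assistant = client.assistants.create(
  parameters: {
    model: "gpt-4-1106-preview", # placeholder model name
    name: "Maths Tutor",
    instructions: "Answer maths questions briefly."
  }
)

thread = client.threads.create
client.messages.create(
  thread_id: thread["id"],
  parameters: { role: "user", content: "What is 2 + 2?" }
)

run = client.runs.create(
  thread_id: thread["id"],
  parameters: { assistant_id: assistant["id"] }
)

# Naive polling: wait for the run to complete, then read the reply.
sleep(1) until client.runs.retrieve(thread_id: thread["id"], id: run["id"])["status"] == "completed"
client.messages.list(thread_id: thread["id"])
```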
data/lib/openai/threads.rb
ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class Threads
+    def initialize(client:)
+      @client = client.beta(assistants: "v1")
+    end
+
+    def list
+      @client.get(path: "/threads")
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/threads/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/threads", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/threads/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/threads/#{id}")
+    end
+  end
+end
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -7,6 +7,11 @@ require_relative "openai/files"
 require_relative "openai/finetunes"
 require_relative "openai/images"
 require_relative "openai/models"
+require_relative "openai/assistants"
+require_relative "openai/threads"
+require_relative "openai/messages"
+require_relative "openai/runs"
+require_relative "openai/run_steps"
 require_relative "openai/audio"
 require_relative "openai/version"
 
@@ -14,6 +19,22 @@ module OpenAI
   class Error < StandardError; end
   class ConfigurationError < Error; end
 
+  class MiddlewareErrors < Faraday::Middleware
+    def call(env)
+      @app.call(env)
+    rescue Faraday::Error => e
+      raise e unless e.response.is_a?(Hash)
+
+      logger = Logger.new($stdout)
+      logger.formatter = proc do |_severity, _datetime, _progname, msg|
+        "\033[31mOpenAI HTTP Error (spotted in ruby-openai #{VERSION}): #{msg}\n\033[0m"
+      end
+      logger.error(e.response[:body])
+
+      raise e
+    end
+  end
+
   class Configuration
     attr_writer :access_token
     attr_accessor :api_type, :api_version, :organization_id, :uri_base, :request_timeout,
@@ -30,7 +51,7 @@ module OpenAI
       @organization_id = nil
      @uri_base = DEFAULT_URI_BASE
       @request_timeout = DEFAULT_REQUEST_TIMEOUT
-      @extra_headers =
+      @extra_headers = {}
     end
 
     def access_token
data/ruby-openai.gemspec
CHANGED
@@ -25,6 +25,7 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]
 
+  spec.add_dependency "event_stream_parser", ">= 0.3.0", "< 2.0.0"
   spec.add_dependency "faraday", ">= 1"
   spec.add_dependency "faraday-multipart", ">= 1"
 end
metadata
CHANGED
@@ -1,15 +1,35 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version:
+  version: 6.3.0
 platform: ruby
 authors:
 - Alex
-autorequire:
+autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-
+date: 2023-11-26 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: event_stream_parser
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.3.0
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: 2.0.0
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.3.0
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: 2.0.0
 - !ruby/object:Gem::Dependency
   name: faraday
   requirement: !ruby/object:Gem::Requirement
@@ -38,7 +58,7 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '1'
-description:
+description:
 email:
 - alexrudall@users.noreply.github.com
 executables: []
@@ -46,6 +66,10 @@ extensions: []
 extra_rdoc_files: []
 files:
 - ".circleci/config.yml"
+- ".devcontainer/Dockerfile"
+- ".devcontainer/devcontainer.json"
+- ".devcontainer/docker-compose.yml"
+- ".github/FUNDING.yml"
 - ".github/ISSUE_TEMPLATE/bug_report.md"
 - ".github/ISSUE_TEMPLATE/feature_request.md"
 - ".github/dependabot.yml"
@@ -63,14 +87,20 @@ files:
 - bin/console
 - bin/setup
 - lib/openai.rb
+- lib/openai/assistants.rb
 - lib/openai/audio.rb
 - lib/openai/client.rb
 - lib/openai/compatibility.rb
 - lib/openai/files.rb
 - lib/openai/finetunes.rb
 - lib/openai/http.rb
+- lib/openai/http_headers.rb
 - lib/openai/images.rb
+- lib/openai/messages.rb
 - lib/openai/models.rb
+- lib/openai/run_steps.rb
+- lib/openai/runs.rb
+- lib/openai/threads.rb
 - lib/openai/version.rb
 - lib/ruby/openai.rb
 - pull_request_template.md
@@ -83,7 +113,7 @@ metadata:
   source_code_uri: https://github.com/alexrudall/ruby-openai
   changelog_uri: https://github.com/alexrudall/ruby-openai/blob/main/CHANGELOG.md
   rubygems_mfa_required: 'true'
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -98,8 +128,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.4.
-signing_key:
+rubygems_version: 3.4.10
+signing_key:
 specification_version: 4
 summary: "OpenAI API + Ruby! \U0001F916❤️"
 test_files: []