ruby-openai 1.4.0 → 1.5.0
- checksums.yaml +4 -4
- data/.circleci/config.yml +7 -2
- data/CHANGELOG.md +7 -0
- data/Gemfile +3 -3
- data/Gemfile.lock +31 -31
- data/README.md +122 -101
- data/lib/ruby/openai/client.rb +9 -1
- data/lib/ruby/openai/engines.rb +1 -1
- data/lib/ruby/openai/files.rb +1 -1
- data/lib/ruby/openai/finetunes.rb +1 -1
- data/lib/ruby/openai/version.rb +1 -1
- data/ruby-openai.gemspec +1 -1
- metadata +11 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: eae2966d67581585125aac11431d33db3af2f472d0970dad242316783ded514e
+  data.tar.gz: 53f357b92d77a79c48539b69216c4b06c2f0fd0584be17dfbf54576ea02ffed4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 67c875aa8dde9a199aead346a6b45af8aed00ddf81b2db667c055c1ef9e86dea2d43e5b3d23e1e659dbfb0468a40ab9c419b52115a61491217c8c1b0d65aff3c
+  data.tar.gz: 4d55769b42b9b295ff4c065c9f4abdbaaf5e96b998eb707e3a64f4161355d20c5e8ee0b417c80029b376e70fe9c69e0521c397ac82523aa05874e8ecbeaac7ce
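To check a downloaded copy against these digests: a `.gem` file is a plain tar archive, and checksums.yaml records digests of its `metadata.gz` and `data.tar.gz` members rather than of the `.gem` file itself. A minimal verification sketch (the local filename is an assumption):

```ruby
require "digest"
require "rubygems/package"

# Hash the two tar members that checksums.yaml covers and compare
# the output against the SHA256 values above.
File.open("ruby-openai-1.5.0.gem", "rb") do |gem_io|
  Gem::Package::TarReader.new(gem_io) do |tar|
    tar.each do |entry|
      next unless ["metadata.gz", "data.tar.gz"].include?(entry.full_name)
      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end
```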
data/.circleci/config.yml
CHANGED
@@ -8,7 +8,7 @@ jobs:
   rubocop:
     parallelism: 1
     docker:
-      - image: cimg/ruby:3.
+      - image: cimg/ruby:3.1-node
     steps:
       - checkout
      - ruby/install-deps
@@ -37,4 +37,9 @@ workflows:
   - test:
       matrix:
         parameters:
-          ruby-image:
+          ruby-image:
+            - cimg/ruby:2.5-node
+            - cimg/ruby:2.6-node
+            - cimg/ruby:2.7-node
+            - cimg/ruby:3.0-node
+            - cimg/ruby:3.1-node
data/CHANGELOG.md
CHANGED
@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.5.0] - 2022-09-18
+
+### Added
+
+- Add Client#moderations endpoint to check OpenAI's Content Policy.
+- Add Client#edits endpoint to transform inputs according to instructions.
+
 ## [1.4.0] - 2021-12-11
 
 ### Added
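Taken together, the two new calls look like this in use (mirroring the README examples added in this release; a configured OPENAI_ACCESS_TOKEN is assumed):

```ruby
client = OpenAI::Client.new

# New in 1.5.0: transform an input according to an instruction.
response = client.edits(
  parameters: {
    model: "text-davinci-edit-001",
    input: "What day of the wek is it?",
    instruction: "Fix the spelling mistakes"
  }
)
puts response.dig("choices", 0, "text")

# New in 1.5.0: score an input against OpenAI's Content Policy.
response = client.moderations(parameters: { input: "I'm worried about that." })
puts response.dig("results", 0, "category_scores", "hate")
```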
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,8 +1,8 @@
 PATH
   remote: .
   specs:
-    ruby-openai (1.
-      dotenv (
+    ruby-openai (1.5.0)
+      dotenv (>= 2.7.6, < 2.9.0)
       httparty (>= 0.18.1, < 0.21.0)
 
 GEM
@@ -14,52 +14,52 @@ GEM
     byebug (11.1.3)
     crack (0.4.5)
       rexml
-    diff-lcs (1.
-    dotenv (2.
+    diff-lcs (1.5.0)
+    dotenv (2.8.1)
     hashdiff (1.0.1)
     httparty (0.20.0)
       mime-types (~> 3.0)
       multi_xml (>= 0.5.2)
-    mime-types (3.
+    mime-types (3.4.1)
       mime-types-data (~> 3.2015)
-    mime-types-data (3.
+    mime-types-data (3.2022.0105)
     multi_xml (0.6.0)
-    parallel (1.
-    parser (3.
+    parallel (1.22.1)
+    parser (3.1.2.0)
       ast (~> 2.4.1)
-    public_suffix (4.0.
-    rainbow (3.
+    public_suffix (4.0.7)
+    rainbow (3.1.1)
     rake (13.0.6)
-    regexp_parser (2.
+    regexp_parser (2.3.1)
     rexml (3.2.5)
-    rspec (3.
-      rspec-core (~> 3.
-      rspec-expectations (~> 3.
-      rspec-mocks (~> 3.
-    rspec-core (3.
-      rspec-support (~> 3.
-    rspec-expectations (3.
+    rspec (3.11.0)
+      rspec-core (~> 3.11.0)
+      rspec-expectations (~> 3.11.0)
+      rspec-mocks (~> 3.11.0)
+    rspec-core (3.11.0)
+      rspec-support (~> 3.11.0)
+    rspec-expectations (3.11.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.
-    rspec-mocks (3.
+      rspec-support (~> 3.11.0)
+    rspec-mocks (3.11.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.
-    rspec-support (3.
-    rubocop (1.
+      rspec-support (~> 3.11.0)
+    rspec-support (3.11.0)
+    rubocop (1.28.2)
       parallel (~> 1.10)
-      parser (>= 3.
+      parser (>= 3.1.0.0)
       rainbow (>= 2.2.2, < 4.0)
       regexp_parser (>= 1.8, < 3.0)
       rexml
-      rubocop-ast (>= 1.
+      rubocop-ast (>= 1.17.0, < 2.0)
       ruby-progressbar (~> 1.7)
       unicode-display_width (>= 1.4.0, < 3.0)
-    rubocop-ast (1.
-      parser (>= 3.
+    rubocop-ast (1.17.0)
+      parser (>= 3.1.1.0)
     ruby-progressbar (1.11.0)
     unicode-display_width (2.1.0)
     vcr (6.0.0)
-    webmock (3.
+    webmock (3.18.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -70,11 +70,11 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   rake (~> 13.0)
-  rspec (~> 3.
-  rubocop (~> 1.
+  rspec (~> 3.11)
+  rubocop (~> 1.28.2)
   ruby-openai!
   vcr (~> 6.0.0)
-  webmock (~> 3.
+  webmock (~> 3.18.1)
 
 BUNDLED WITH
    2.2.20
data/README.md
CHANGED
@@ -5,7 +5,7 @@
 [![CircleCI Build Status](https://circleci.com/gh/alexrudall/ruby-openai.svg?style=shield)](https://circleci.com/gh/alexrudall/ruby-openai)
 [![Maintainability](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability)
 
-
+Use the [OpenAI GPT-3 API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
 ## Installation
 
@@ -35,7 +35,7 @@ and require with:
 
 ## Usage
 
-Get your API key from [https://beta.openai.com/
+Get your API key from [https://beta.openai.com/account/api-keys](https://beta.openai.com/account/api-keys)
 
 ### With dotenv
 
@@ -59,30 +59,16 @@ Alternatively you can pass your key directly to a new client:
 client = OpenAI::Client.new(access_token: "access_token_goes_here")
 ```
 
-### Engines
-
-There are different engines that can be used to generate text. For a full list and to retrieve information about a single engine:
-
-```ruby
-client.engines.list
-client.engines.retrieve(id: 'ada')
-```
-
 #### Examples
 
-- [
-  - ada
-  - babbage
-  - curie
-  - davinci
-- [Instruct](https://beta.openai.com/docs/engines/instruct-series-beta)
-  - ada-instruct-beta
-  - babbage-instruct-beta
-  - curie-instruct-beta-v2
-  - davinci-instruct-beta-v3
+- [GPT-3](https://beta.openai.com/docs/engines/gpt-3)
+  - text-ada-001
+  - text-babbage-001
+  - text-curie-001
+  - text-davinci-001
 - [Codex (private beta)](https://beta.openai.com/docs/engines/codex-series-private-beta)
-  - davinci-
-  - cushman-
+  - code-davinci-002
+  - code-cushman-001
 - [Content Filter](https://beta.openai.com/docs/engines/content-filter)
   - content-filter-alpha
 
@@ -91,95 +77,56 @@ There are different engines that can be used to generate text. For a full list a
 Hit the OpenAI API for a completion:
 
 ```ruby
-response = client.completions(engine: "davinci", parameters: { prompt: "Once upon a time", max_tokens: 5 })
+response = client.completions(engine: "text-davinci-001", parameters: { prompt: "Once upon a time", max_tokens: 5 })
 puts response.parsed_response['choices'].map{ |c| c["text"] }
 => [", there lived a great"]
 ```
 
-###
-
-Put your data in a `.jsonl` file like this:
-
-```json
-{"text": "puppy A is happy", "metadata": "emotional state of puppy A"}
-{"text": "puppy B is sad", "metadata": "emotional state of puppy B"}
-```
-
-and pass the path to `client.files.upload` to upload it to OpenAI, and then interact with it:
-
-```ruby
-client.files.upload(parameters: { file: 'path/to/puppy.jsonl', purpose: 'search' })
-client.files.list
-client.files.retrieve(id: 123)
-client.files.delete(id: 123)
-```
-
-### Search
+### Edits
 
-
+Send a string and some instructions for what to do to the string:
 
 ```ruby
-response = client.
-
-
-
-
-
-
-
-
+response = client.edits(
+  parameters: {
+    model: "text-davinci-edit-001",
+    input: "What day of the wek is it?",
+    instruction: "Fix the spelling mistakes"
+  }
+)
+puts response.dig("choices", 0, "text")
+=> What day of the week is it?
 ```
 
-###
-
-Pass documents, a question string, and an example question/response to get an answer to a question:
-
-```ruby
-response = client.answers(parameters: {
-  documents: ["Puppy A is happy.", "Puppy B is sad."],
-  question: "which puppy is happy?",
-  model: "curie",
-  examples_context: "In 2017, U.S. life expectancy was 78.6 years.",
-  examples: [["What is human life expectancy in the United States?","78 years."]],
-})
-```
+### Embeddings
 
-
+You can use the embeddings endpoint to get a vector of numbers representing an input. You can then compare these vectors for different inputs to efficiently check how similar the inputs are.
 
 ```ruby
-
-
-
-
-
-
-})
+client.embeddings(
+  engine: "babbage-similarity",
+  parameters: {
+    input: "The food was delicious and the waiter..."
+  }
+)
 ```
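The README leaves the comparison step to the reader; cosine similarity is the usual choice. A hand-rolled sketch (not part of the gem, and assuming the vector sits at `data[0].embedding` in the response):

```ruby
# Cosine similarity between two embedding vectors: 1.0 for identical
# direction, near 0.0 for unrelated inputs.
def cosine_similarity(a, b)
  dot = a.zip(b).sum { |x, y| x * y }
  dot / (Math.sqrt(a.sum { |x| x * x }) * Math.sqrt(b.sum { |x| x * x }))
end

embedding_for = lambda do |text|
  client.embeddings(engine: "babbage-similarity", parameters: { input: text })
        .dig("data", 0, "embedding")
end

puts cosine_similarity(embedding_for.call("The food was delicious"),
                       embedding_for.call("A tasty meal"))
```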
 
-###
+### Files
 
-
+Put your data in a `.jsonl` file like this:
 
-```
-
-
-  ["A happy moment", "Positive"],
-  ["I am sad.", "Negative"],
-  ["I am feeling awesome", "Positive"]
-],
-query: "It is a raining day :(",
-model: "ada"
-})
+```json
+{"text": "puppy A is happy", "metadata": "emotional state of puppy A"}
+{"text": "puppy B is sad", "metadata": "emotional state of puppy B"}
 ```
 
-
+and pass the path to `client.files.upload` to upload it to OpenAI, and then interact with it:
 
 ```ruby
-
-
-
-
-})
+client.files.upload(parameters: { file: 'path/to/puppy.jsonl', purpose: 'search' })
+client.files.list
+client.files.retrieve(id: 123)
+client.files.delete(id: 123)
 ```
 
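Producing that file from Ruby is one JSON object per line, with no enclosing array; a minimal sketch:

```ruby
require "json"

records = [
  { text: "puppy A is happy", metadata: "emotional state of puppy A" },
  { text: "puppy B is sad", metadata: "emotional state of puppy B" }
]

# JSONL: serialize each record onto its own line.
File.open("puppy.jsonl", "w") do |file|
  records.each { |record| file.puts(record.to_json) }
end
```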
 ### Fine-tunes
@@ -204,7 +151,7 @@ You can then use this file ID to create a fine-tune model:
 response = client.finetunes.create(
   parameters: {
     training_file: file_id,
-    model: "ada"
+    model: "text-ada-001"
 })
 fine_tune_id = JSON.parse(response.body)["id"]
 ```
@@ -237,17 +184,91 @@ This fine-tuned model name can then be used in classifications:
 
 Do not pass the engine parameter when using a fine-tuned model.
 
-###
+### Moderations
 
-
+Pass a string to check if it violates OpenAI's Content Policy:
 
 ```ruby
-client.
-
-
-
-
-
+response = client.moderations(parameters: { input: "I'm worried about that." })
+puts response.dig("results", 0, "category_scores", "hate")
+=> 5.505014632944949e-05
+```
+
+### Searches
+
+Pass documents and a query string to get semantic search scores against each document:
+
+```ruby
+response = client.search(engine: "text-ada-001", parameters: { documents: %w[washington hospital school], query: "president" })
+puts response["data"].map { |d| d["score"] }
+=> [202.0, 48.052, 19.247]
+```
+
+You can alternatively search using the ID of a file you've uploaded:
+
+```ruby
+client.search(engine: "text-ada-001", parameters: { file: "abc123", query: "happy" })
+```
+
+### Classifications
+
+Pass examples and a query to predict the most likely labels:
+
+```ruby
+response = client.classifications(parameters: {
+  examples: [
+    ["A happy moment", "Positive"],
+    ["I am sad.", "Negative"],
+    ["I am feeling awesome", "Positive"]
+  ],
+  query: "It is a raining day :(",
+  model: "text-ada-001"
+})
+```
+
+Or use the ID of a file you've uploaded:
+
+```ruby
+response = client.classifications(parameters: {
+  file: "123abc",
+  query: "It is a raining day :(",
+  model: "text-ada-001"
+})
+```
+
+### Answers
+
+Pass documents, a question string, and an example question/response to get an answer to a question:
+
+```ruby
+response = client.answers(parameters: {
+  documents: ["Puppy A is happy.", "Puppy B is sad."],
+  question: "which puppy is happy?",
+  model: "text-curie-001",
+  examples_context: "In 2017, U.S. life expectancy was 78.6 years.",
+  examples: [["What is human life expectancy in the United States?","78 years."]],
+})
+```
+
+Or use the ID of a file you've uploaded:
+
+```ruby
+response = client.answers(parameters: {
+  file: "123abc",
+  question: "which puppy is happy?",
+  model: "text-curie-001",
+  examples_context: "In 2017, U.S. life expectancy was 78.6 years.",
+  examples: [["What is human life expectancy in the United States?","78 years."]],
+})
+```
+
+### Engines
+
+There are different engines that can be used to generate text. For a full list and to retrieve information about a single engine:
+
+```ruby
+client.engines.list
+client.engines.retrieve(id: 'text-ada-001')
 ```
 
 ## Development
data/lib/ruby/openai/client.rb
CHANGED
@@ -4,7 +4,7 @@ module OpenAI
   base_uri "https://api.openai.com"
 
   def initialize(access_token: nil)
-    @access_token = access_token || ENV
+    @access_token = access_token || ENV.fetch("OPENAI_ACCESS_TOKEN")
   end
 
   def answers(version: default_version, parameters: {})
@@ -23,6 +23,10 @@ module OpenAI
     end
   end
 
+  def edits(version: default_version, parameters: {})
+    post(url: "/#{version}/edits", parameters: parameters)
+  end
+
   def embeddings(engine:, version: default_version, parameters: {})
     post(url: "/#{version}/engines/#{engine}/embeddings", parameters: parameters)
   end
@@ -39,6 +43,10 @@ module OpenAI
     @finetunes ||= OpenAI::Finetunes.new(access_token: @access_token)
   end
 
+  def moderations(version: default_version, parameters: {})
+    post(url: "/#{version}/moderations", parameters: parameters)
+  end
+
   # rubocop:disable Layout/LineLength
   # rubocop:disable Metrics/ParameterLists
   def search(engine:, query: nil, documents: nil, file: nil, version: default_version, parameters: {})
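Note the behavioural edge of the `ENV.fetch` change: a missing variable now raises `KeyError` at client construction, where the old `ENV[...]` lookup would have produced a client with a nil token that only failed later, at request time. Plain Ruby semantics, for illustration:

```ruby
# ENV["..."] returns nil for a missing key; ENV.fetch raises KeyError.
begin
  OpenAI::Client.new # no access_token given, OPENAI_ACCESS_TOKEN unset
rescue KeyError
  warn "Set OPENAI_ACCESS_TOKEN or pass access_token: explicitly"
end
```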
data/lib/ruby/openai/engines.rb
CHANGED
data/lib/ruby/openai/files.rb
CHANGED
data/lib/ruby/openai/version.rb
CHANGED
data/ruby-openai.gemspec
CHANGED
@@ -25,6 +25,6 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]
 
-  spec.add_dependency "dotenv", "
+  spec.add_dependency "dotenv", ">= 2.7.6", "< 2.9.0"
   spec.add_dependency "httparty", ">= 0.18.1", "< 0.21.0"
 end
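The dotenv constraint is now an explicit range rather than a single pin; `Gem::Requirement` shows what it admits:

```ruby
require "rubygems"

req = Gem::Requirement.new(">= 2.7.6", "< 2.9.0")
req.satisfied_by?(Gem::Version.new("2.7.6")) # => true
req.satisfied_by?(Gem::Version.new("2.8.1")) # => true (what Gemfile.lock resolved to)
req.satisfied_by?(Gem::Version.new("2.9.0")) # => false
```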
metadata
CHANGED
@@ -1,29 +1,35 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.5.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2022-09-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: dotenv
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
         version: 2.7.6
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: 2.9.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
         version: 2.7.6
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: 2.9.0
 - !ruby/object:Gem::Dependency
   name: httparty
   requirement: !ruby/object:Gem::Requirement
@@ -100,7 +106,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.2.33
 signing_key:
 specification_version: 4
 summary: A Ruby gem for the OpenAI GPT-3 API