ruby-openai 5.2.0 → 6.0.1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 996d39cd32c3c05c73efea0177c12d0751b5dda208b2855aaac440af7b2702d8
4
- data.tar.gz: 65471a670e34f537fe4878322c87978f1c2beaf93336a7f2104baaa86b018c60
3
+ metadata.gz: 492acab028ee10ea62f7f95814d674299f0fb83de535322bfc935c48b41b74f3
4
+ data.tar.gz: 290cd1cf80ac93bf880434e8c5969b22a077582bf07fc93f6718ad186ebd0d66
5
5
  SHA512:
6
- metadata.gz: deab41c7c7f4ee21b4ed1a17f289b147b2e4960b33fd12ce863d5bdb8c835a955215d01438890c1ab8d9a1c7026faba0e5b8359c1fe3d9139082f8de58dce616
7
- data.tar.gz: 3309d1c3a68736816c4f3bd1d465021ee3f162b5f5c3dbb7915ed5ce6f3a8d7014f9f1c4b07cf630f3f90201bdbe0ec308f1dc00fb6b075f45546fe519afb553
6
+ metadata.gz: 6641fa5f1ecfccc6945ef479d4d59ae400f9f9032cee74a1b8580988c4c7c11aa363789fd1fec981bc32c3119748e813319f018c0d7bb0c7744c6fb8c12d7544
7
+ data.tar.gz: bfc6ddbde1d9fc342551c49c3f6ab26fd23e165fb98e037cb9731c2cd300bfe11dc89fd2c20e5e1c3ad8564958fcaffabedc38611ca7eec3c9ea0b1ba9730fd0
data/CHANGELOG.md CHANGED
@@ -5,6 +5,24 @@ All notable changes to this project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [6.0.1] - 2023-11-07
9
+
10
+ ### Fix
11
+
12
+ - Gracefully handle the case where an HTTP error response may not have valid JSON in its body. Thank you [@atesgoral](https://github.com/atesgoral)!
13
+
14
+ ## [6.0.0] - 2023-11-06
15
+
16
+ ### Added
17
+
18
+ - [BREAKING] HTTP errors will now be raised by ruby-openai as Faraday::Errors, including when streaming! Implemented by [@atesgoral](https://github.com/atesgoral)
19
+ - [BREAKING] Switch from legacy Finetunes to the new Fine-tune-jobs endpoints. Implemented by [@lancecarlson](https://github.com/lancecarlson)
20
+ - [BREAKING] Remove deprecated Completions endpoints - use Chat instead.
21
+
22
+ ### Fix
23
+
24
+ - [BREAKING] Fix issue where :stream parameters were replaced by a boolean in the client application. Thanks to [@martinjaimem](https://github.com/martinjaimem), [@vickymadrid03](https://github.com/vickymadrid03) and [@nicastelo](https://github.com/nicastelo) for spotting and fixing this issue.
25
+
8
26
  ## [5.2.0] - 2023-10-30
9
27
 
10
28
  ### Fix
data/Gemfile CHANGED
@@ -5,8 +5,8 @@ gemspec
5
5
 
6
6
  gem "byebug", "~> 11.1.3"
7
7
  gem "dotenv", "~> 2.8.1"
8
- gem "rake", "~> 13.0"
8
+ gem "rake", "~> 13.1"
9
9
  gem "rspec", "~> 3.12"
10
10
  gem "rubocop", "~> 1.50.2"
11
11
  gem "vcr", "~> 6.1.0"
12
- gem "webmock", "~> 3.18.1"
12
+ gem "webmock", "~> 3.19.1"
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- ruby-openai (5.2.0)
4
+ ruby-openai (6.0.1)
5
5
  event_stream_parser (>= 0.3.0, < 1.0.0)
6
6
  faraday (>= 1)
7
7
  faraday-multipart (>= 1)
@@ -9,16 +9,18 @@ PATH
9
9
  GEM
10
10
  remote: https://rubygems.org/
11
11
  specs:
12
- addressable (2.8.1)
12
+ addressable (2.8.5)
13
13
  public_suffix (>= 2.0.2, < 6.0)
14
14
  ast (2.4.2)
15
+ base64 (0.1.1)
15
16
  byebug (11.1.3)
16
17
  crack (0.4.5)
17
18
  rexml
18
19
  diff-lcs (1.5.0)
19
20
  dotenv (2.8.1)
20
21
  event_stream_parser (0.3.0)
21
- faraday (2.7.10)
22
+ faraday (2.7.11)
23
+ base64
22
24
  faraday-net_http (>= 2.0, < 3.1)
23
25
  ruby2_keywords (>= 0.0.4)
24
26
  faraday-multipart (1.0.4)
@@ -30,11 +32,11 @@ GEM
30
32
  parallel (1.22.1)
31
33
  parser (3.2.2.0)
32
34
  ast (~> 2.4.1)
33
- public_suffix (5.0.1)
35
+ public_suffix (5.0.3)
34
36
  rainbow (3.1.1)
35
- rake (13.0.6)
37
+ rake (13.1.0)
36
38
  regexp_parser (2.8.0)
37
- rexml (3.2.5)
39
+ rexml (3.2.6)
38
40
  rspec (3.12.0)
39
41
  rspec-core (~> 3.12.0)
40
42
  rspec-expectations (~> 3.12.0)
@@ -64,7 +66,7 @@ GEM
64
66
  ruby2_keywords (0.0.5)
65
67
  unicode-display_width (2.4.2)
66
68
  vcr (6.1.0)
67
- webmock (3.18.1)
69
+ webmock (3.19.1)
68
70
  addressable (>= 2.8.0)
69
71
  crack (>= 0.3.2)
70
72
  hashdiff (>= 0.4.0, < 2.0.0)
@@ -75,12 +77,12 @@ PLATFORMS
75
77
  DEPENDENCIES
76
78
  byebug (~> 11.1.3)
77
79
  dotenv (~> 2.8.1)
78
- rake (~> 13.0)
80
+ rake (~> 13.1)
79
81
  rspec (~> 3.12)
80
82
  rubocop (~> 1.50.2)
81
83
  ruby-openai!
82
84
  vcr (~> 6.1.0)
83
- webmock (~> 3.18.1)
85
+ webmock (~> 3.19.1)
84
86
 
85
87
  BUNDLED WITH
86
88
  2.4.5
data/README.md CHANGED
@@ -8,9 +8,7 @@ Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
8
8
 
9
9
  Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
10
10
 
11
- 🚢 Based in the UK and want to hire me? Now you can! [railsai.com](https://railsai.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023)
12
-
13
- [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
11
+ [🚢 Hire me](https://peaceterms.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
14
12
 
15
13
  ### Bundler
16
14
 
@@ -149,7 +147,7 @@ client.models.retrieve(id: "text-ada-001")
149
147
  #### Examples
150
148
 
151
149
  - [GPT-4 (limited beta)](https://platform.openai.com/docs/models/gpt-4)
152
- - gpt-4
150
+ - gpt-4 (uses current version)
153
151
  - gpt-4-0314
154
152
  - gpt-4-32k
155
153
  - [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5)
@@ -161,9 +159,9 @@ client.models.retrieve(id: "text-ada-001")
161
159
  - text-babbage-001
162
160
  - text-curie-001
163
161
 
164
- ### ChatGPT
162
+ ### Chat
165
163
 
166
- ChatGPT is a model that can be used to generate text in a conversational style. You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
164
+ GPT is a model that can be used to generate text in a conversational style. You can use it to [generate a response](https://platform.openai.com/docs/api-reference/chat/create) to a sequence of [messages](https://platform.openai.com/docs/guides/chat/introduction):
167
165
 
168
166
  ```ruby
169
167
  response = client.chat(
@@ -176,11 +174,11 @@ puts response.dig("choices", 0, "message", "content")
176
174
  # => "Hello! How may I assist you today?"
177
175
  ```
178
176
 
179
- ### Streaming ChatGPT
177
+ ### Streaming Chat
180
178
 
181
- [Quick guide to streaming ChatGPT with Rails 7 and Hotwire](https://gist.github.com/alexrudall/cb5ee1e109353ef358adb4e66631799d)
179
+ [Quick guide to streaming Chat with Rails 7 and Hotwire](https://gist.github.com/alexrudall/cb5ee1e109353ef358adb4e66631799d)
182
180
 
183
- You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of text chunks as they are generated. Each time one or more chunks is received, the proc will be called once with each chunk, parsed as a Hash. If OpenAI returns an error, `ruby-openai` will pass that to your proc as a Hash.
181
+ You can stream from the API in realtime, which can be much faster and used to create a more engaging user experience. Pass a [Proc](https://ruby-doc.org/core-2.6/Proc.html) (or any object with a `#call` method) to the `stream` parameter to receive the stream of completion chunks as they are generated. Each time one or more chunks is received, the proc will be called once with each chunk, parsed as a Hash. If OpenAI returns an error, `ruby-openai` will raise a Faraday error.
184
182
 
185
183
  ```ruby
186
184
  client.chat(
@@ -195,7 +193,7 @@ client.chat(
195
193
  # => "Anna is a young woman in her mid-twenties, with wavy chestnut hair that falls to her shoulders..."
196
194
  ```
197
195
 
198
- Note: the API docs state that token usage is included in the streamed chat chunk objects, but this doesn't currently appear to be the case. To count tokens while streaming, try `OpenAI.rough_token_count` or [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby).
196
+ Note: OpenAI currently does not report token usage for streaming responses. To count tokens while streaming, try `OpenAI.rough_token_count` or [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby). We think that each call to the stream proc corresponds to a single token, so you can also try counting the number of calls to the proc to get the completion token count.
199
197
 
200
198
  ### Functions
201
199
 
@@ -257,21 +255,6 @@ end
257
255
  # => "The weather is nice 🌞"
258
256
  ```
259
257
 
260
- ### Completions
261
-
262
- Hit the OpenAI API for a completion using other GPT-3 models:
263
-
264
- ```ruby
265
- response = client.completions(
266
- parameters: {
267
- model: "text-davinci-001",
268
- prompt: "Once upon a time",
269
- max_tokens: 5
270
- })
271
- puts response["choices"].map { |c| c["text"] }
272
- # => [", there lived a great"]
273
- ```
274
-
275
258
  ### Edits
276
259
 
277
260
  Send a string and some instructions for what to do to the string:
@@ -323,22 +306,22 @@ client.files.content(id: "file-123")
323
306
  client.files.delete(id: "file-123")
324
307
  ```
325
308
 
326
- ### Fine-tunes
309
+ ### Finetunes
327
310
 
328
311
  Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
329
312
 
330
313
  ```ruby
331
- response = client.files.upload(parameters: { file: "path/to/sentiment.jsonl", purpose: "fine-tune" })
314
+ response = client.files.upload(parameters: { file: "path/to/sarcasm.jsonl", purpose: "fine-tune" })
332
315
  file_id = JSON.parse(response.body)["id"]
333
316
  ```
334
317
 
335
- You can then use this file ID to create a fine-tune model:
318
+ You can then use this file ID to create a fine tuning job:
336
319
 
337
320
  ```ruby
338
321
  response = client.finetunes.create(
339
322
  parameters: {
340
323
  training_file: file_id,
341
- model: "ada"
324
+ model: "gpt-3.5-turbo-0613"
342
325
  })
343
326
  fine_tune_id = response["id"]
344
327
  ```
@@ -369,10 +352,10 @@ response = client.completions(
369
352
  response.dig("choices", 0, "text")
370
353
  ```
371
354
 
372
- You can delete the fine-tuned model when you are done with it:
355
+ You can also capture the events for a job:
373
356
 
374
- ```ruby
375
- client.finetunes.delete(fine_tuned_model: fine_tuned_model)
357
+ ```
358
+ client.finetunes.list_events(id: fine_tune_id)
376
359
  ```
377
360
 
378
361
  ### Image Generation
@@ -455,6 +438,18 @@ puts response["text"]
455
438
  # => "Transcription of the text"
456
439
  ```
457
440
 
441
+ #### Errors
442
+
443
+ HTTP errors can be caught like this:
444
+
445
+ ```
446
+ begin
447
+ OpenAI::Client.new.models.retrieve(id: "text-ada-001")
448
+ rescue Faraday::Error => e
449
+ raise "Got a Faraday error: #{e}"
450
+ end
451
+ ```
452
+
458
453
  ## Development
459
454
 
460
455
  After checking out the repo, run `bin/setup` to install dependencies. You can run `bin/console` for an interactive prompt that will allow you to experiment.
data/lib/openai/client.rb CHANGED
@@ -25,10 +25,6 @@ module OpenAI
25
25
  json_post(path: "/chat/completions", parameters: parameters)
26
26
  end
27
27
 
28
- def completions(parameters: {})
29
- json_post(path: "/completions", parameters: parameters)
30
- end
31
-
32
28
  def edits(parameters: {})
33
29
  json_post(path: "/edits", parameters: parameters)
34
30
  end
@@ -5,31 +5,23 @@ module OpenAI
5
5
  end
6
6
 
7
7
  def list
8
- @client.get(path: "/fine-tunes")
8
+ @client.get(path: "/fine_tuning/jobs")
9
9
  end
10
10
 
11
11
  def create(parameters: {})
12
- @client.json_post(path: "/fine-tunes", parameters: parameters)
12
+ @client.json_post(path: "/fine_tuning/jobs", parameters: parameters)
13
13
  end
14
14
 
15
15
  def retrieve(id:)
16
- @client.get(path: "/fine-tunes/#{id}")
16
+ @client.get(path: "/fine_tuning/jobs/#{id}")
17
17
  end
18
18
 
19
19
  def cancel(id:)
20
- @client.multipart_post(path: "/fine-tunes/#{id}/cancel")
20
+ @client.json_post(path: "/fine_tuning/jobs/#{id}/cancel", parameters: {})
21
21
  end
22
22
 
23
- def events(id:)
24
- @client.get(path: "/fine-tunes/#{id}/events")
25
- end
26
-
27
- def delete(fine_tuned_model:)
28
- if fine_tuned_model.start_with?("ft-")
29
- raise ArgumentError, "Please give a fine_tuned_model name, not a fine-tune ID"
30
- end
31
-
32
- @client.delete(path: "/models/#{fine_tuned_model}")
23
+ def list_events(id:)
24
+ @client.get(path: "/fine_tuning/jobs/#{id}/events")
33
25
  end
34
26
  end
35
27
  end
data/lib/openai/http.rb CHANGED
@@ -3,55 +3,46 @@ require "event_stream_parser"
3
3
  module OpenAI
4
4
  module HTTP
5
5
  def get(path:)
6
- to_json(conn.get(uri(path: path)) do |req|
6
+ parse_jsonl(conn.get(uri(path: path)) do |req|
7
7
  req.headers = headers
8
8
  end&.body)
9
9
  end
10
10
 
11
11
  def json_post(path:, parameters:)
12
- to_json(conn.post(uri(path: path)) do |req|
13
- if parameters[:stream].respond_to?(:call)
14
- req.options.on_data = to_json_stream(user_proc: parameters[:stream])
15
- parameters[:stream] = true # Necessary to tell OpenAI to stream.
16
- elsif parameters[:stream]
17
- raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
18
- end
19
-
20
- req.headers = headers
21
- req.body = parameters.to_json
22
- end&.body)
12
+ conn.post(uri(path: path)) do |req|
13
+ configure_json_post_request(req, parameters)
14
+ end&.body
23
15
  end
24
16
 
25
17
  def multipart_post(path:, parameters: nil)
26
- to_json(conn(multipart: true).post(uri(path: path)) do |req|
18
+ conn(multipart: true).post(uri(path: path)) do |req|
27
19
  req.headers = headers.merge({ "Content-Type" => "multipart/form-data" })
28
20
  req.body = multipart_parameters(parameters)
29
- end&.body)
21
+ end&.body
30
22
  end
31
23
 
32
24
  def delete(path:)
33
- to_json(conn.delete(uri(path: path)) do |req|
25
+ conn.delete(uri(path: path)) do |req|
34
26
  req.headers = headers
35
- end&.body)
27
+ end&.body
36
28
  end
37
29
 
38
30
  private
39
31
 
40
- def to_json(string)
41
- return unless string
32
+ def parse_jsonl(response)
33
+ return unless response
34
+ return response unless response.is_a?(String)
42
35
 
43
- JSON.parse(string)
44
- rescue JSON::ParserError
45
36
  # Convert a multiline string of JSON objects to a JSON array.
46
- JSON.parse(string.gsub("}\n{", "},{").prepend("[").concat("]"))
37
+ response = response.gsub("}\n{", "},{").prepend("[").concat("]")
38
+
39
+ JSON.parse(response)
47
40
  end
48
41
 
49
42
  # Given a proc, returns an outer proc that can be used to iterate over a JSON stream of chunks.
50
43
  # For each chunk, the inner user_proc is called giving it the JSON object. The JSON object could
51
44
  # be a data object or an error object as described in the OpenAI API documentation.
52
45
  #
53
- # If the JSON object for a given data or error message is invalid, it is ignored.
54
- #
55
46
  # @param user_proc [Proc] The inner proc to call for each JSON object in the chunk.
56
47
  # @return [Proc] An outer proc that iterates over a raw stream, converting it to JSON.
57
48
  def to_json_stream(user_proc:)
@@ -59,25 +50,22 @@ module OpenAI
59
50
 
60
51
  proc do |chunk, _bytes, env|
61
52
  if env && env.status != 200
62
- emit_json(json: chunk, user_proc: user_proc)
63
- else
64
- parser.feed(chunk) do |_type, data|
65
- emit_json(json: data, user_proc: user_proc) unless data == "[DONE]"
66
- end
53
+ raise_error = Faraday::Response::RaiseError.new
54
+ raise_error.on_complete(env.merge(body: try_parse_json(chunk)))
67
55
  end
68
- end
69
- end
70
56
 
71
- def emit_json(json:, user_proc:)
72
- user_proc.call(JSON.parse(json))
73
- rescue JSON::ParserError
74
- # Ignore invalid JSON.
57
+ parser.feed(chunk) do |_type, data|
58
+ user_proc.call(JSON.parse(data)) unless data == "[DONE]"
59
+ end
60
+ end
75
61
  end
76
62
 
77
63
  def conn(multipart: false)
78
64
  Faraday.new do |f|
79
65
  f.options[:timeout] = @request_timeout
80
66
  f.request(:multipart) if multipart
67
+ f.response :raise_error
68
+ f.response :json
81
69
  end
82
70
  end
83
71
 
@@ -123,5 +111,25 @@ module OpenAI
123
111
  Faraday::UploadIO.new(value, "", value.path)
124
112
  end
125
113
  end
114
+
115
+ def configure_json_post_request(req, parameters)
116
+ req_parameters = parameters.dup
117
+
118
+ if parameters[:stream].respond_to?(:call)
119
+ req.options.on_data = to_json_stream(user_proc: parameters[:stream])
120
+ req_parameters[:stream] = true # Necessary to tell OpenAI to stream.
121
+ elsif parameters[:stream]
122
+ raise ArgumentError, "The stream parameter must be a Proc or have a #call method"
123
+ end
124
+
125
+ req.headers = headers
126
+ req.body = req_parameters.to_json
127
+ end
128
+
129
+ def try_parse_json(maybe_json)
130
+ JSON.parse(maybe_json)
131
+ rescue JSON::ParserError
132
+ maybe_json
133
+ end
126
134
  end
127
135
  end
@@ -1,3 +1,3 @@
1
1
  module OpenAI
2
- VERSION = "5.2.0".freeze
2
+ VERSION = "6.0.1".freeze
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ruby-openai
3
3
  version: !ruby/object:Gem::Version
4
- version: 5.2.0
4
+ version: 6.0.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Alex
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2023-10-30 00:00:00.000000000 Z
11
+ date: 2023-11-07 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: event_stream_parser