openai.rb 0.0.1 → 0.0.4

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: e4cc9dca4191bdbae2964d7389c67e7d43ce756481c66c5059721f0ff2404c16
4
- data.tar.gz: 727d12a574d750d050073547699aa0b837e96982e3c660ded4be9434d0bacc80
3
+ metadata.gz: 96f82f97fa18d7fc75b7efd8e01fe015b50c0d4e33c06894b37b2f807315138b
4
+ data.tar.gz: b563a01a6b737282477a7e47c29f1d514b0bcecd806f0b3d9b0ad3f9394fb605
5
5
  SHA512:
6
- metadata.gz: 2d5415f3d67872d7b57bed8e1f8101873aee5b80559c8079c6ee7b450517a913b1d6f637bc5bae70ecb5ad9207d065f22459db699d65bf3d9217e124079dc32d
7
- data.tar.gz: d4d6364fbcaf8534e2743f3f6056285731642474d8006a436c43cac0bb011c3c5fe25d75410649d8fe18ed85a634dca5fcf41bfba8dfefe9da595d1d44dd1e79
6
+ metadata.gz: a51cc63b3023d1bd0ca33e3ab6325ea8bd9d8a64fbff301b2e044b113e656c0245fdcb891a2633fb0d9edd18a59073e02ff4ca14fd5d32a571f71e43948402c8
7
+ data.tar.gz: 8eea70ae9c26df48a0b0651354bf6aa4cf7996e348bc6a6e5714aaa239a6815e1831a063c9c3efe912f283aeacaf794908189709f4ebd34209192ab42942af55
data/.github/workflows/main.yml ADDED
@@ -0,0 +1,27 @@
1
+ name: Ruby Gem CI
2
+
3
+ on: [push, pull_request]
4
+
5
+ jobs:
6
+ test:
7
+ runs-on: ubuntu-latest
8
+ strategy:
9
+ matrix:
10
+ http_version: ['~> 4.4', '~> 5.1']
11
+ ruby_version: ['2.7.8']
12
+
13
+ steps:
14
+ - uses: actions/checkout@v2
15
+ - name: Update .ruby-version
16
+ run: echo "${{ matrix.ruby_version }}" > .ruby-version
17
+ - name: Set up Ruby
18
+ uses: ruby/setup-ruby@v1
19
+ with:
20
+ ruby-version: ${{ matrix.ruby_version }}
21
+ bundler-cache: true
22
+ - name: Install dependencies
23
+ run: |
24
+ echo "gem 'http', '${{ matrix.http_version }}'" >> Gemfile.local
25
+ bundle install
26
+ - name: Run tests
27
+ run: bundle exec rspec
data/.rubocop.yml ADDED
@@ -0,0 +1,18 @@
1
+ Metrics/AbcSize:
2
+ Enabled: false
3
+
4
+ Metrics/BlockLength:
5
+ Enabled: false
6
+
7
+ Metrics/MethodLength:
8
+ Enabled: false
9
+
10
+ Style/Documentation:
11
+ Enabled: false
12
+
13
+ Layout/LineLength:
14
+ Enabled: false
15
+
16
+ Style/FrozenStringLiteralComment:
17
+ SafeAutoCorrect: true
18
+ Enabled: true
data/CHANGELOG.md ADDED
@@ -0,0 +1,3 @@
1
+ # v0.0.4
2
+
3
+ * Added support for specifying `OpenAI.create(api_key, organization: 'org-123')`
data/Gemfile CHANGED
@@ -16,8 +16,12 @@ group :lint do
16
16
  end
17
17
 
18
18
  gem 'pry', '~> 0.13.1'
19
- gem 'pry-byebug', '~> 3.9'
19
+ gem 'pry-byebug', '3.9.0'
20
20
 
21
21
  gem 'dotenv', '~> 2.8'
22
22
 
23
23
  gem 'slop', '~> 4.10'
24
+
25
+ gem 'http', '~> 4.4' # For testing the older version of HTTP.rb
26
+
27
+ gem 'rb_sys', '~> 0.9.70'
data/Gemfile.lock CHANGED
@@ -1,11 +1,11 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- openai.rb (0.0.1)
4
+ openai.rb (0.0.4)
5
5
  abstract_type (~> 0.0.7)
6
6
  anima (~> 0.3)
7
7
  concord (~> 0.1)
8
- http (~> 5.1)
8
+ http (>= 4.4, < 6.0)
9
9
  ice_nine (~> 0.11.x)
10
10
  memoizable (~> 0.4.2)
11
11
  tiktoken_ruby (~> 0.0.3)
@@ -17,7 +17,7 @@ GEM
17
17
  adamantium (0.2.0)
18
18
  ice_nine (~> 0.11.0)
19
19
  memoizable (~> 0.4.0)
20
- addressable (2.8.1)
20
+ addressable (2.8.2)
21
21
  public_suffix (>= 2.0.2, < 6.0)
22
22
  anima (0.3.2)
23
23
  abstract_type (~> 0.0.7)
@@ -38,24 +38,23 @@ GEM
38
38
  ffi-compiler (1.0.1)
39
39
  ffi (>= 1.0.0)
40
40
  rake
41
- http (5.1.1)
42
- addressable (~> 2.8)
41
+ http (4.4.1)
42
+ addressable (~> 2.3)
43
43
  http-cookie (~> 1.0)
44
44
  http-form_data (~> 2.2)
45
- llhttp-ffi (~> 0.4.0)
45
+ http-parser (~> 1.2.0)
46
46
  http-cookie (1.0.5)
47
47
  domain_name (~> 0.5)
48
48
  http-form_data (2.3.0)
49
+ http-parser (1.2.3)
50
+ ffi-compiler (>= 1.0, < 2.0)
49
51
  ice_nine (0.11.2)
50
52
  json (2.6.3)
51
- llhttp-ffi (0.4.0)
52
- ffi-compiler (~> 1.0)
53
- rake (~> 13.0)
54
53
  memoizable (0.4.2)
55
54
  thread_safe (~> 0.3, >= 0.3.1)
56
55
  method_source (1.0.0)
57
56
  parallel (1.22.1)
58
- parser (3.2.1.1)
57
+ parser (3.2.2.0)
59
58
  ast (~> 2.4.1)
60
59
  pry (0.13.1)
61
60
  coderay (~> 1.1)
@@ -66,7 +65,8 @@ GEM
66
65
  public_suffix (5.0.1)
67
66
  rainbow (3.1.1)
68
67
  rake (13.0.6)
69
- regexp_parser (2.6.2)
68
+ rb_sys (0.9.70)
69
+ regexp_parser (2.7.0)
70
70
  rexml (3.2.5)
71
71
  rspec (3.12.0)
72
72
  rspec-core (~> 3.12.0)
@@ -77,7 +77,7 @@ GEM
77
77
  rspec-expectations (3.12.2)
78
78
  diff-lcs (>= 1.2.0, < 2.0)
79
79
  rspec-support (~> 3.12.0)
80
- rspec-mocks (3.12.4)
80
+ rspec-mocks (3.12.5)
81
81
  diff-lcs (>= 1.2.0, < 2.0)
82
82
  rspec-support (~> 3.12.0)
83
83
  rspec-support (3.12.0)
@@ -98,7 +98,7 @@ GEM
98
98
  ruby-progressbar (1.13.0)
99
99
  slop (4.10.1)
100
100
  thread_safe (0.3.6)
101
- tiktoken_ruby (0.0.3-arm64-darwin)
101
+ tiktoken_ruby (0.0.4)
102
102
  unf (0.1.4)
103
103
  unf_ext
104
104
  unf_ext (0.0.8.2)
@@ -110,9 +110,11 @@ PLATFORMS
110
110
 
111
111
  DEPENDENCIES
112
112
  dotenv (~> 2.8)
113
+ http (~> 4.4)
113
114
  openai.rb!
114
115
  pry (~> 0.13.1)
115
- pry-byebug (~> 3.9)
116
+ pry-byebug (= 3.9.0)
117
+ rb_sys (~> 0.9.70)
116
118
  rspec (~> 3.12)
117
119
  rubocop (~> 1.31.1)
118
120
  rubocop-rspec (~> 2.11.1)
data/bin/console CHANGED
@@ -9,14 +9,11 @@ require 'pry-byebug'
9
9
 
10
10
  Dotenv.load
11
11
 
12
- def start_repl
13
- cache_dir = Pathname.new(__dir__).parent.join('tmp/console_cache')
14
- cache_dir.mkpath unless cache_dir.exist?
15
- openai = OpenAI.create(
16
- ENV.fetch('OPENAI_API_KEY'),
17
- cache: cache_dir
18
- )
19
- binding.pry
20
- end
12
+ cache_dir = Pathname.new(__dir__).parent.join('tmp/console_cache')
13
+ cache_dir.mkpath unless cache_dir.exist?
14
+ openai = OpenAI.create(
15
+ ENV.fetch('OPENAI_API_KEY'),
16
+ cache: cache_dir
17
+ )
21
18
 
22
- start_repl
19
+ Pry.start_without_pry_byebug(binding, quiet: true)
@@ -3,18 +3,18 @@
3
3
  class OpenAI
4
4
  class API
5
5
  class Client
6
- include Concord.new(:api_key, :http)
6
+ include Concord.new(:api_key, :organization_id, :http)
7
7
 
8
8
  public :api_key
9
9
 
10
10
  HOST = Addressable::URI.parse('https://api.openai.com/v1')
11
11
 
12
- def initialize(api_key, http: HTTP)
13
- super(api_key, http)
12
+ def initialize(api_key, organization_id: nil, http: HTTP)
13
+ super(api_key, organization_id, http)
14
14
  end
15
15
 
16
16
  def inspect
17
- "#<#{self.class}>"
17
+ "#<#{self.class} organization_id=#{organization_id.inspect}>"
18
18
  end
19
19
 
20
20
  def get(route)
@@ -69,7 +69,7 @@ class OpenAI
69
69
  end
70
70
 
71
71
  def unwrap_response(response)
72
- raise API::Error, response unless response.status.success?
72
+ raise API::Error.parse(response) unless response.status.success?
73
73
 
74
74
  response.body.to_str
75
75
  end
@@ -79,7 +79,9 @@ class OpenAI
79
79
  end
80
80
 
81
81
  def http_client
82
- http.headers('Authorization' => "Bearer #{api_key}")
82
+ headers = { 'Authorization' => "Bearer #{api_key}" }
83
+ headers['OpenAI-Organization'] = organization_id if organization_id
84
+ http.headers(headers)
83
85
  end
84
86
  end
85
87
  end
@@ -33,11 +33,8 @@ class OpenAI
33
33
  )
34
34
  payload = kwargs.merge(stream: stream)
35
35
 
36
- if stream && !block_given?
37
- raise 'Streaming responses require a block'
38
- elsif !stream && block_given?
39
- raise 'Non-streaming responses do not support blocks'
40
- end
36
+ raise 'Streaming responses require a block' if stream && !block_given?
37
+ raise 'Non-streaming responses do not support blocks' if !stream && block_given?
41
38
 
42
39
  if stream
43
40
  post(endpoint, **payload) do |chunk|
@@ -106,6 +106,12 @@ class OpenAI
106
106
  end
107
107
  end
108
108
 
109
+ class Usage < Response
110
+ field :prompt_tokens
111
+ field :completion_tokens
112
+ field :total_tokens
113
+ end
114
+
109
115
  class Completion < Response
110
116
  class Choice < Response
111
117
  field :text
@@ -114,18 +120,24 @@ class OpenAI
114
120
  field :finish_reason
115
121
  end
116
122
 
117
- class Usage < Response
118
- field :prompt_tokens
119
- field :completion_tokens
120
- field :total_tokens
121
- end
122
-
123
123
  field :id
124
124
  field :object
125
125
  field :created
126
126
  field :model
127
127
  field :choices, wrapper: Choice
128
128
  optional_field :usage, wrapper: Usage
129
+
130
+ def choice
131
+ Util.one(choices)
132
+ end
133
+
134
+ # This is a convenience method for getting the response text when there is exactly
135
+ # one choice.
136
+ #
137
+ # @see #response
138
+ def response_text
139
+ choice.text
140
+ end
129
141
  end
130
142
 
131
143
  class ChatCompletion < Response
@@ -140,17 +152,27 @@ class OpenAI
140
152
  field :finish_reason
141
153
  end
142
154
 
143
- class Usage < Response
144
- field :prompt_tokens
145
- field :completion_tokens
146
- field :total_tokens
147
- end
148
-
149
155
  field :id
150
156
  field :object
151
157
  field :created
152
158
  field :choices, wrapper: Choice
153
159
  field :usage, wrapper: Usage
160
+
161
+ # This is a convenience method for the common use case where you have exactly
162
+ # one choice and you want to get the message out.
163
+ #
164
+ # @see #response_text
165
+ def response
166
+ Util.one(choices).message
167
+ end
168
+
169
+ # This is a convenience method for getting the response text when there is exactly
170
+ # one choice.
171
+ #
172
+ # @see #response
173
+ def response_text
174
+ response.content
175
+ end
154
176
  end
155
177
 
156
178
  class ChatCompletionChunk < Response
@@ -172,6 +194,22 @@ class OpenAI
172
194
  field :created
173
195
  field :model
174
196
  field :choices, wrapper: Choice
197
+
198
+ # This is a convenience method for the common use case where you have exactly
199
+ # one choice and you want to get the message out.
200
+ #
201
+ # @see #response_text
202
+ def response
203
+ Util.one(choices).delta
204
+ end
205
+
206
+ # This is a convenience method for getting the response text when there is exactly
207
+ # one choice.
208
+ #
209
+ # @see #response
210
+ def response_text
211
+ response.content
212
+ end
175
213
  end
176
214
 
177
215
  class Embedding < Response
@@ -241,12 +279,6 @@ class OpenAI
241
279
  field :index
242
280
  end
243
281
 
244
- class Usage < Response
245
- field :prompt_tokens
246
- field :completion_tokens
247
- field :total_tokens
248
- end
249
-
250
282
  field :object
251
283
  field :created
252
284
  field :choices, wrapper: Choice
data/lib/openai/api.rb CHANGED
@@ -7,6 +7,17 @@ class OpenAI
7
7
  class Error < StandardError
8
8
  include Concord::Public.new(:http_response)
9
9
 
10
+ def self.parse(http_response)
11
+ data = JSON.parse(http_response.body.to_s, symbolize_names: true)
12
+ if data.dig(:error, :code) == 'context_length_exceeded'
13
+ Error::ContextLengthExceeded.new(http_response)
14
+ else
15
+ new(http_response)
16
+ end
17
+ rescue JSON::ParserError
18
+ new(http_response)
19
+ end
20
+
10
21
  def message
11
22
  <<~ERROR
12
23
  Unexpected response status! Expected 2xx but got: #{http_response.status}
@@ -16,6 +27,9 @@ class OpenAI
16
27
  #{http_response.body}
17
28
  ERROR
18
29
  end
30
+
31
+ class ContextLengthExceeded < self
32
+ end
19
33
  end
20
34
 
21
35
  def completions
data/lib/openai/chat.rb CHANGED
@@ -2,9 +2,10 @@
2
2
 
3
3
  class OpenAI
4
4
  class Chat
5
- include Anima.new(:messages, :settings, :api)
5
+ include Anima.new(:messages, :api_settings, :openai, :config)
6
+ using Util::Colorize
6
7
 
7
- def initialize(messages:, **kwargs)
8
+ def initialize(messages:, settings: {}, config: Config.create, **kwargs)
8
9
  messages = messages.map do |msg|
9
10
  if msg.is_a?(Hash)
10
11
  Message.new(msg)
@@ -13,7 +14,16 @@ class OpenAI
13
14
  end
14
15
  end
15
16
 
16
- super(messages: messages, **kwargs)
17
+ super(
18
+ messages: messages,
19
+ api_settings: settings,
20
+ config: config,
21
+ **kwargs
22
+ )
23
+ end
24
+
25
+ def configure(**configuration)
26
+ with(config: config.with(configuration))
17
27
  end
18
28
 
19
29
  def add_user_message(message)
@@ -32,26 +42,49 @@ class OpenAI
32
42
  alias assistant add_assistant_message
33
43
 
34
44
  def submit
35
- response = api.chat_completions.create(
36
- **settings,
37
- messages: raw_messages
38
- )
45
+ openai.logger.info("[Chat] [tokens=#{total_tokens}] Submitting messages:\n\n#{to_log_format}")
46
+
47
+ begin
48
+ response = openai.api.chat_completions.create(
49
+ **api_settings,
50
+ messages: raw_messages
51
+ )
52
+ rescue OpenAI::API::Error::ContextLengthExceeded
53
+ raise 'Context length exceeded.'
54
+ openai.logger.warn('[Chat] Context length exceeded. Shifting chat')
55
+ return shift_history.submit
56
+ end
39
57
 
40
58
  msg = response.choices.first.message
41
59
 
42
- add_message(msg.role, msg.content)
60
+ add_message(msg.role, msg.content).tap do |new_chat|
61
+ openai.logger.info("[Chat] Response:\n\n#{new_chat.last_message.to_log_format(config)}")
62
+ end
43
63
  end
44
64
 
45
65
  def last_message
46
- API::Response::ChatCompletion::Choice::Message.new(messages.last)
66
+ messages.last
47
67
  end
48
68
 
49
69
  def to_log_format
50
- messages.map(&:to_log_format).join("\n\n")
70
+ messages.map do |msg|
71
+ msg.to_log_format(config)
72
+ end.join("\n\n")
51
73
  end
52
74
 
53
75
  private
54
76
 
77
+ def shift_history
78
+ drop_index = messages.index { |msg| msg.role != 'system' }
79
+ new_messages = messages.slice(0...drop_index) + messages.slice((drop_index + 1)..)
80
+
81
+ with(messages: new_messages)
82
+ end
83
+
84
+ def total_tokens
85
+ openai.tokens.for_model(api_settings.fetch(:model)).num_tokens(messages.map(&:content).join(' '))
86
+ end
87
+
55
88
  def raw_messages
56
89
  messages.map(&:to_h)
57
90
  end
@@ -64,11 +97,28 @@ class OpenAI
64
97
  with(messages: messages + [message])
65
98
  end
66
99
 
100
+ class Config
101
+ include Anima.new(:assistant_name)
102
+
103
+ def self.create
104
+ new(assistant_name: 'assistant')
105
+ end
106
+ end
107
+
67
108
  class Message
68
109
  include Anima.new(:role, :content)
69
110
 
70
- def to_log_format
71
- "#{role.upcase}: #{content}"
111
+ def to_log_format(config)
112
+ prefix =
113
+ case role
114
+ when 'user' then "#{role}:".upcase.green
115
+ when 'system' then "#{role}:".upcase.yellow
116
+ when 'assistant' then "#{config.assistant_name}:".upcase.red
117
+ else
118
+ raise "Unknown role: #{role}"
119
+ end
120
+
121
+ "#{prefix} #{content}"
72
122
  end
73
123
  end
74
124
  end
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
+ class OpenAI
4
+ module Util
5
+ OneError = Class.new(ArgumentError)
6
+
7
+ def self.one(list)
8
+ raise OneError, "Expected exactly one element, got #{list.size}" unless list.size == 1
9
+
10
+ list.first
11
+ end
12
+
13
+ module Colorize
14
+ refine String do
15
+ def red
16
+ colorize(31)
17
+ end
18
+
19
+ def green
20
+ colorize(32)
21
+ end
22
+
23
+ def yellow
24
+ colorize(33)
25
+ end
26
+
27
+ def blue
28
+ colorize(34)
29
+ end
30
+
31
+ def magenta
32
+ colorize(35)
33
+ end
34
+
35
+ def cyan
36
+ colorize(36)
37
+ end
38
+
39
+ private
40
+
41
+ def colorize(color_code)
42
+ "\e[#{color_code}m#{self}\e[0m"
43
+ end
44
+ end
45
+ end
46
+ end
47
+ end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  class OpenAI
4
- VERSION = '0.0.1'
4
+ VERSION = '0.0.4'
5
5
  end
data/lib/openai.rb CHANGED
@@ -1,5 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require 'pathname'
4
+ require 'logger'
5
+
3
6
  require 'concord'
4
7
  require 'anima'
5
8
  require 'abstract_type'
@@ -8,6 +11,7 @@ require 'addressable'
8
11
  require 'ice_nine'
9
12
  require 'tiktoken_ruby'
10
13
 
14
+ require 'openai/util'
11
15
  require 'openai/tokenizer'
12
16
  require 'openai/chat'
13
17
  require 'openai/api'
@@ -18,12 +22,14 @@ require 'openai/api/response'
18
22
  require 'openai/version'
19
23
 
20
24
  class OpenAI
21
- include Concord.new(:api_client)
25
+ include Concord.new(:api_client, :logger)
26
+
27
+ public :logger
22
28
 
23
29
  ROOT = Pathname.new(__dir__).parent.expand_path.freeze
24
30
 
25
- def self.create(api_key, cache: nil)
26
- client = API::Client.new(api_key)
31
+ def self.create(api_key, cache: nil, organization: nil, logger: Logger.new('/dev/null'))
32
+ client = API::Client.new(api_key, organization_id: organization)
27
33
 
28
34
  if cache.is_a?(Pathname) && cache.directory?
29
35
  client = API::Cache.new(
@@ -32,7 +38,7 @@ class OpenAI
32
38
  )
33
39
  end
34
40
 
35
- new(client)
41
+ new(client, logger)
36
42
  end
37
43
 
38
44
  private_class_method :new
@@ -47,6 +53,10 @@ class OpenAI
47
53
  alias tokens tokenizer
48
54
 
49
55
  def chat(model:, history: [], **kwargs)
50
- Chat.new(api: api, settings: kwargs.merge(model: model), messages: history)
56
+ Chat.new(
57
+ openai: self,
58
+ settings: kwargs.merge(model: model),
59
+ messages: history
60
+ )
51
61
  end
52
62
  end
data/openai.gemspec CHANGED
@@ -16,11 +16,13 @@ Gem::Specification.new do |spec|
16
16
  spec.require_paths = %w[lib]
17
17
  spec.executables = []
18
18
 
19
+ spec.required_ruby_version = '>= 2.7'
20
+
21
+ spec.add_dependency 'abstract_type', '~> 0.0.7'
19
22
  spec.add_dependency 'anima', '~> 0.3'
20
23
  spec.add_dependency 'concord', '~> 0.1'
21
- spec.add_dependency 'http', '~> 5.1'
22
- spec.add_dependency 'memoizable', '~> 0.4.2'
23
- spec.add_dependency 'abstract_type', '~> 0.0.7'
24
+ spec.add_dependency 'http', '>= 4.4', '< 6.0'
24
25
  spec.add_dependency 'ice_nine', '~> 0.11.x'
26
+ spec.add_dependency 'memoizable', '~> 0.4.2'
25
27
  spec.add_dependency 'tiktoken_ruby', '~> 0.0.3'
26
28
  end
@@ -27,14 +27,16 @@ RSpec.describe OpenAI::API, '#chat_completions' do
27
27
  }
28
28
  end
29
29
 
30
- it 'can create a chat completion' do
30
+ let(:completion) do
31
31
  messages = [
32
32
  { "text": 'Hello there!', "user": 'customer' },
33
33
  { "text": 'Can you help me with my order?', "user": 'customer' },
34
34
  { "text": 'Sure, what would you like to do?', "user": 'assistant' }
35
35
  ]
36
- completion = resource.create(model: 'text-davinci-002', messages: messages)
36
+ resource.create(model: 'text-davinci-002', messages: messages)
37
+ end
37
38
 
39
+ it 'can create a chat completion' do
38
40
  expect(completion.id).to eql('chatcmpl-123')
39
41
  expect(completion.choices.first.index).to eql(0)
40
42
  expect(completion.choices.first.message.role).to eql('assistant')
@@ -45,6 +47,15 @@ RSpec.describe OpenAI::API, '#chat_completions' do
45
47
  expect(completion.usage.total_tokens).to eql(21)
46
48
  end
47
49
 
50
+ it 'exposes a #response_text helper method' do
51
+ expect(completion.response_text).to eql("\n\nHello there, how may I assist you today?")
52
+ end
53
+
54
+ it 'exposes a #response helper method' do
55
+ expect(completion.response.content).to eql("\n\nHello there, how may I assist you today?")
56
+ expect(completion.response.role).to eql('assistant')
57
+ end
58
+
48
59
  it 'raises when a block is given for a non-streaming request' do
49
60
  expect { resource.create(model: 'text-davinci-002', messages: []) { print 'noop' } }
50
61
  .to raise_error('Non-streaming responses do not support blocks')
@@ -106,6 +117,9 @@ RSpec.describe OpenAI::API, '#chat_completions' do
106
117
  expect(chunks).to all(be_an_instance_of(OpenAI::API::Response::ChatCompletionChunk))
107
118
  texts = chunks.map { |chunk| chunk.choices.first.delta.content }
108
119
  expect(texts.join('')).to eql('Hello, world!')
120
+
121
+ expect(chunks[0].response.role).to eql('assistant')
122
+ expect(chunks[1].response_text).to eql('He')
109
123
  end
110
124
 
111
125
  it 'raises when a block is not given' do
@@ -27,8 +27,10 @@ RSpec.describe OpenAI::API, '#completions' do
27
27
  }
28
28
  end
29
29
 
30
+ let(:completion) { resource.create(model: 'text-davinci-002', prompt: 'Hello, world!') }
31
+
30
32
  it 'can create a completion' do
31
- completion = resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
33
+ completion
32
34
 
33
35
  expect(http)
34
36
  .to have_received(:post)
@@ -50,6 +52,10 @@ RSpec.describe OpenAI::API, '#completions' do
50
52
  .to raise_error('Non-streaming responses do not support blocks')
51
53
  end
52
54
 
55
+ it 'exposes a #response_text helper method' do
56
+ expect(completion.response_text).to eql("\n\nThis is indeed a test")
57
+ end
58
+
53
59
  context 'when streaming is enabled' do
54
60
  let(:response_chunks) do
55
61
  [
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  RSpec.describe OpenAI::API, '#moderations' do
2
4
  include_context 'an API Resource'
3
5
 
@@ -41,7 +41,7 @@ RSpec.describe OpenAI::API::Response do
41
41
  {
42
42
  meta: {
43
43
  birth: {
44
- created: Time.new(2023).to_i
44
+ created: Time.utc(2023).to_i
45
45
  }
46
46
  },
47
47
  text: 'This is a post',
@@ -126,7 +126,7 @@ RSpec.describe OpenAI::API::Response do
126
126
  describe '.field' do
127
127
  it 'exposes the field' do
128
128
  expect(sample_response.text).to eql('This is a post')
129
- expect(sample_response.created_at).to eql(1_672_549_200)
129
+ expect(sample_response.created_at).to eql(1_672_531_200)
130
130
  end
131
131
 
132
132
  it 'can expose fields under a different name than the key path' do
@@ -0,0 +1,32 @@
1
+ # frozen_string_literal: true
2
+
3
+ RSpec.describe OpenAI::Chat do
4
+ let(:messages) { [{ role: 'user', content: 'Hello' }] }
5
+ let(:settings) { { model: 'gpt-3' } }
6
+ let(:openai) { double('OpenAI') }
7
+
8
+ describe 'initialization and adding messages' do
9
+ it 'initializes with messages and adds user, system, and assistant messages' do
10
+ chat = OpenAI::Chat.new(messages: messages, settings: settings, openai: openai)
11
+
12
+ expect(chat.messages.count).to eq(1)
13
+ expect(chat.messages.first.role).to eq('user')
14
+ expect(chat.messages.first.content).to eq('Hello')
15
+
16
+ chat = chat.add_user_message('How are you?')
17
+ expect(chat.messages.count).to eq(2)
18
+ expect(chat.messages.last.role).to eq('user')
19
+ expect(chat.messages.last.content).to eq('How are you?')
20
+
21
+ chat = chat.add_system_message('System message')
22
+ expect(chat.messages.count).to eq(3)
23
+ expect(chat.messages.last.role).to eq('system')
24
+ expect(chat.messages.last.content).to eq('System message')
25
+
26
+ chat = chat.add_assistant_message('I am fine, thank you.')
27
+ expect(chat.messages.count).to eq(4)
28
+ expect(chat.messages.last.role).to eq('assistant')
29
+ expect(chat.messages.last.content).to eq('I am fine, thank you.')
30
+ end
31
+ end
32
+ end
@@ -37,6 +37,25 @@ RSpec.describe OpenAI do
37
37
  )
38
38
  end
39
39
 
40
+ context 'when the organization ID is given to the client' do
41
+ let(:api_client) do
42
+ OpenAI::API::Client.new(
43
+ 'sk-123',
44
+ organization_id: 'org-123',
45
+ http: http
46
+ )
47
+ end
48
+
49
+ it 'authenticates the request and includes the organization id' do
50
+ resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
51
+
52
+ expect(http).to have_received(:headers).with(
53
+ 'OpenAI-Organization' => 'org-123',
54
+ 'Authorization' => 'Bearer sk-123'
55
+ )
56
+ end
57
+ end
58
+
40
59
  context 'when the request is not 2xx' do
41
60
  let(:response_body) do
42
61
  {
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: openai.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.1
4
+ version: 0.0.4
5
5
  platform: ruby
6
6
  authors:
7
7
  - John
@@ -9,92 +9,98 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2023-03-25 00:00:00.000000000 Z
12
+ date: 2023-04-03 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
- name: anima
15
+ name: abstract_type
16
16
  requirement: !ruby/object:Gem::Requirement
17
17
  requirements:
18
18
  - - "~>"
19
19
  - !ruby/object:Gem::Version
20
- version: '0.3'
20
+ version: 0.0.7
21
21
  type: :runtime
22
22
  prerelease: false
23
23
  version_requirements: !ruby/object:Gem::Requirement
24
24
  requirements:
25
25
  - - "~>"
26
26
  - !ruby/object:Gem::Version
27
- version: '0.3'
27
+ version: 0.0.7
28
28
  - !ruby/object:Gem::Dependency
29
- name: concord
29
+ name: anima
30
30
  requirement: !ruby/object:Gem::Requirement
31
31
  requirements:
32
32
  - - "~>"
33
33
  - !ruby/object:Gem::Version
34
- version: '0.1'
34
+ version: '0.3'
35
35
  type: :runtime
36
36
  prerelease: false
37
37
  version_requirements: !ruby/object:Gem::Requirement
38
38
  requirements:
39
39
  - - "~>"
40
40
  - !ruby/object:Gem::Version
41
- version: '0.1'
41
+ version: '0.3'
42
42
  - !ruby/object:Gem::Dependency
43
- name: http
43
+ name: concord
44
44
  requirement: !ruby/object:Gem::Requirement
45
45
  requirements:
46
46
  - - "~>"
47
47
  - !ruby/object:Gem::Version
48
- version: '5.1'
48
+ version: '0.1'
49
49
  type: :runtime
50
50
  prerelease: false
51
51
  version_requirements: !ruby/object:Gem::Requirement
52
52
  requirements:
53
53
  - - "~>"
54
54
  - !ruby/object:Gem::Version
55
- version: '5.1'
55
+ version: '0.1'
56
56
  - !ruby/object:Gem::Dependency
57
- name: memoizable
57
+ name: http
58
58
  requirement: !ruby/object:Gem::Requirement
59
59
  requirements:
60
- - - "~>"
60
+ - - ">="
61
61
  - !ruby/object:Gem::Version
62
- version: 0.4.2
62
+ version: '4.4'
63
+ - - "<"
64
+ - !ruby/object:Gem::Version
65
+ version: '6.0'
63
66
  type: :runtime
64
67
  prerelease: false
65
68
  version_requirements: !ruby/object:Gem::Requirement
66
69
  requirements:
67
- - - "~>"
70
+ - - ">="
68
71
  - !ruby/object:Gem::Version
69
- version: 0.4.2
72
+ version: '4.4'
73
+ - - "<"
74
+ - !ruby/object:Gem::Version
75
+ version: '6.0'
70
76
  - !ruby/object:Gem::Dependency
71
- name: abstract_type
77
+ name: ice_nine
72
78
  requirement: !ruby/object:Gem::Requirement
73
79
  requirements:
74
80
  - - "~>"
75
81
  - !ruby/object:Gem::Version
76
- version: 0.0.7
82
+ version: 0.11.x
77
83
  type: :runtime
78
84
  prerelease: false
79
85
  version_requirements: !ruby/object:Gem::Requirement
80
86
  requirements:
81
87
  - - "~>"
82
88
  - !ruby/object:Gem::Version
83
- version: 0.0.7
89
+ version: 0.11.x
84
90
  - !ruby/object:Gem::Dependency
85
- name: ice_nine
91
+ name: memoizable
86
92
  requirement: !ruby/object:Gem::Requirement
87
93
  requirements:
88
94
  - - "~>"
89
95
  - !ruby/object:Gem::Version
90
- version: 0.11.x
96
+ version: 0.4.2
91
97
  type: :runtime
92
98
  prerelease: false
93
99
  version_requirements: !ruby/object:Gem::Requirement
94
100
  requirements:
95
101
  - - "~>"
96
102
  - !ruby/object:Gem::Version
97
- version: 0.11.x
103
+ version: 0.4.2
98
104
  - !ruby/object:Gem::Dependency
99
105
  name: tiktoken_ruby
100
106
  requirement: !ruby/object:Gem::Requirement
@@ -116,13 +122,15 @@ executables: []
116
122
  extensions: []
117
123
  extra_rdoc_files: []
118
124
  files:
125
+ - ".github/workflows/main.yml"
119
126
  - ".gitignore"
120
127
  - ".rspec"
128
+ - ".rubocop.yml"
121
129
  - ".ruby-version"
130
+ - CHANGELOG.md
122
131
  - Gemfile
123
132
  - Gemfile.lock
124
133
  - README.md
125
- - bin/codegen
126
134
  - bin/console
127
135
  - lib/openai.rb
128
136
  - lib/openai/api.rb
@@ -132,6 +140,7 @@ files:
132
140
  - lib/openai/api/response.rb
133
141
  - lib/openai/chat.rb
134
142
  - lib/openai/tokenizer.rb
143
+ - lib/openai/util.rb
135
144
  - lib/openai/version.rb
136
145
  - openai.gemspec
137
146
  - spec/data/sample.jsonl
@@ -153,6 +162,7 @@ files:
153
162
  - spec/unit/openai/api/models_spec.rb
154
163
  - spec/unit/openai/api/moderations_spec.rb
155
164
  - spec/unit/openai/api/response_spec.rb
165
+ - spec/unit/openai/chat_spec.rb
156
166
  - spec/unit/openai/tokenizer_spec.rb
157
167
  - spec/unit/openai_spec.rb
158
168
  homepage: https://github.com/backus/openai-ruby
@@ -166,7 +176,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
166
176
  requirements:
167
177
  - - ">="
168
178
  - !ruby/object:Gem::Version
169
- version: '0'
179
+ version: '2.7'
170
180
  required_rubygems_version: !ruby/object:Gem::Requirement
171
181
  requirements:
172
182
  - - ">="
data/bin/codegen DELETED
@@ -1,380 +0,0 @@
1
- #!/usr/bin/env ruby
2
- # frozen_string_literal: true
3
-
4
- require 'bundler/setup'
5
- require 'openai'
6
- require 'dotenv'
7
- require 'pry'
8
- require 'pry-byebug'
9
- require 'slop'
10
- require 'yaml'
11
-
12
- Dotenv.load
13
- api = OpenAI.new(ENV.fetch('OPENAI_API_KEY'))
14
-
15
- class Codegen
16
- include Anima.new(:openapi_file, :route, :verb, :mime)
17
- include Memoizable
18
-
19
- def self.parse_cli(argv)
20
- opts = Slop.parse(argv) do |o|
21
- o.string '--openapi-file', 'Path to OpenAPI file', required: true
22
- o.string '--route', 'API route', required: true
23
- o.string '--verb', 'HTTP verb', required: true
24
- o.string '--mime', 'Mime type', default: 'application/json'
25
- end
26
-
27
- openapi_file = Pathname.new(opts[:openapi_file])
28
- raise ArgumentError, "OpenAPI file #{openapi_file} does not exist" unless openapi_file.exist?
29
-
30
- route = opts[:route]
31
- verb = opts[:verb]
32
- mime = opts[:mime]
33
-
34
- new(openapi_file: openapi_file, route: route, verb: verb, mime: mime)
35
- end
36
-
37
- def validate!
38
- paths = doc.fetch('paths')
39
- unless paths.key?(route)
40
-
41
- raise <<~ERR
42
- Invalid route!
43
-
44
- Given: #{route}
45
-
46
- Valid routes:
47
-
48
- #{paths.keys.sort.join("\n")}
49
- ERR
50
-
51
- end
52
-
53
- path_def = paths.fetch(route)
54
-
55
- return if path_def.key?(verb)
56
-
57
- raise <<~ERR
58
- Invalid verb!
59
-
60
- Given: #{verb}
61
-
62
- Valid verbs: #{path_def.keys.sort.join(', ')}
63
- ERR
64
- end
65
-
66
- def get?
67
- verb == 'get'
68
- end
69
-
70
- def no_request_body?
71
- !action.key?('requestBody')
72
- end
73
-
74
- def sample_response
75
- action.fetch('x-oaiMeta').fetch('response')
76
- end
77
-
78
- def request_body_summary
79
- fields = request_body.fetch('properties').keys
80
- required = request_body.fetch('required')
81
-
82
- { field: fields, required: required }
83
- end
84
-
85
- def request_body
86
- ref =
87
- if mime == 'application/json'
88
- deep_fetch(action, ['requestBody', 'content', 'application/json', 'schema', '$ref'])
89
- elsif mime == 'multipart/form-data'
90
- deep_fetch(action, ['requestBody', 'content', 'multipart/form-data', 'schema', '$ref'])
91
- else
92
- raise "Unknown mime type #{mime}"
93
- end
94
- get_ref(ref)
95
- end
96
- memoize :request_body
97
-
98
- def response_body
99
- response = action.fetch('responses').first.last
100
- ref = deep_fetch(response, %w[content application/json schema $ref])
101
- get_ref(ref)
102
- end
103
-
104
- def get_ref(ref)
105
- ref_path = ref.delete_prefix('#/').split('/')
106
- deep_fetch(doc, ref_path)
107
- end
108
-
109
- def action
110
- doc.fetch('paths').fetch(route).fetch(verb)
111
- end
112
-
113
- def doc
114
- @doc ||= YAML.load_file(openapi_file)
115
- end
116
-
117
- def deep_fetch(obj, path)
118
- path.reduce(obj) do |acc, key|
119
- acc.fetch(key) do
120
- raise "No key #{key} in #{acc.inspect}"
121
- end
122
- end
123
- end
124
- end
125
-
126
- codegen = Codegen.parse_cli(ARGV)
127
- codegen.validate!
128
-
129
- user_message_template = <<~MSG
130
- Please create an API call, a response wrapper, and a test for this request.
131
- OpenAPI context in JSON:
132
-
133
- ACTION: %<verb>s %<route>s
134
- REQUEST MIME TYPE: %<mime>s
135
-
136
- REQUEST SUMMARY: %<summary>s
137
-
138
- SAMPLE RESPONSE: %<response>s
139
- MSG
140
-
141
- assistant_response_json_example = <<~RUBY
142
- # api call
143
- class Completion < self
144
- def create(model:, **kwargs)
145
- Response::Completion.from_json(
146
- post('/v1/completions', model: model, **kwargs)
147
- )
148
- end
149
- end
150
-
151
- # wrapper
152
- class Completion < JSONPayload
153
- class Choice < JSONPayload
154
- field :text
155
- field :index
156
- field :logprobs
157
- field :finish_reason
158
- end
159
-
160
- class Usage < JSONPayload
161
- field :prompt_tokens
162
- field :completion_tokens
163
- field :total_tokens
164
- end
165
-
166
- field :id
167
- field :object
168
- field :created
169
- field :model
170
- field :choices, wrapper: Choice
171
- field :usage, wrapper: Usage
172
- end
173
-
174
- # test
175
- RSpec.describe OpenAI::API, '#completions' do
176
- include_context 'an API Resource'
177
-
178
- let(:resource) { api.completions }
179
-
180
- let(:response_body) do
181
- {
182
- "id": 'cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7',
183
- "object": 'text_completion',
184
- "created": 1_589_478_378,
185
- "model": 'text-davinci-003',
186
- "choices": [
187
- {
188
- "text": "\n\nThis is indeed a test",
189
- "index": 0,
190
- "logprobs": nil,
191
- "finish_reason": 'length'
192
- }
193
- ],
194
- "usage": {
195
- "prompt_tokens": 5,
196
- "completion_tokens": 7,
197
- "total_tokens": 12
198
- }
199
- }
200
- end
201
-
202
- it 'can create a completion' do
203
- completion = resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
204
-
205
- expect(http)
206
- .to have_received(:post)
207
- .with('https://api.openai.com/v1/completions', hash_including(:json))
208
-
209
- expect(completion.id).to eql('cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7')
210
- expect(completion.model).to eql('text-davinci-003')
211
- expect(completion.choices.first.text).to eql("\n\nThis is indeed a test")
212
- expect(completion.choices.first.index).to eql(0)
213
- expect(completion.choices.first.logprobs).to be_nil
214
- expect(completion.choices.first.finish_reason).to eql('length')
215
- expect(completion.usage.prompt_tokens).to eql(5)
216
- expect(completion.usage.completion_tokens).to eql(7)
217
- expect(completion.usage.total_tokens).to eql(12)
218
- end
219
- end
220
- RUBY
221
-
222
- assistant_response_form_example = <<~RUBY
223
- # api call
224
- class File < self
225
- def create(file:, purpose:)
226
- Response::File.from_json(
227
- post_form_multipart('/v1/files', file: form_file(file), purpose: purpose)
228
- )
229
- end
230
- end
231
-
232
- # wrapper
233
- class File < JSONPayload
234
- field :id
235
- field :object
236
- field :bytes
237
- field :created_at
238
- field :filename
239
- field :purpose
240
- optional_field :deleted?, path: :deleted
241
- end
242
-
243
- class FileList < JSONPayload
244
- field :data, wrapper: File
245
- field :object
246
- end
247
-
248
- # test
249
- RSpec.describe OpenAI::API, '#files' do
250
- include_context 'an API Resource'
251
-
252
- let(:resource) { api.files }
253
- let(:sample_file) { OpenAISpec::SPEC_ROOT.join('data/sample.jsonl') }
254
-
255
- context 'when creating a file' do
256
- let(:response_body) do
257
- {
258
- "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
259
- "object": 'file',
260
- "bytes": 140,
261
- "created_at": 1_613_779_121,
262
- "filename": 'sample.jsonl',
263
- "purpose": 'fine-tune'
264
- }
265
- end
266
-
267
- it 'can create a file' do
268
- file = resource.create(
269
- file: sample_file,
270
- purpose: 'fine-tune'
271
- )
272
-
273
- expect(http)
274
- .to have_received(:post)
275
- .with(
276
- 'https://api.openai.com/v1/files',
277
- hash_including(
278
- form: hash_including(
279
- {
280
- file: instance_of(HTTP::FormData::File),
281
- purpose: 'fine-tune'
282
- }
283
- )
284
- )
285
- )
286
-
287
- expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
288
- expect(file.object).to eql('file')
289
- expect(file.bytes).to eql(140)
290
- expect(file.created_at).to eql(1_613_779_121)
291
- expect(file.filename).to eql('sample.jsonl')
292
- expect(file.purpose).to eql('fine-tune')
293
- expect(file.deleted?).to be(nil)
294
- end
295
- end
296
- end
297
- RUBY
298
-
299
- create_completion_example =
300
- codegen.with(
301
- route: '/completions',
302
- verb: 'post',
303
- mime: 'application/json'
304
- )
305
-
306
- history_json_example = [
307
- {
308
- role: 'user',
309
- content: format(
310
- user_message_template,
311
- mime: create_completion_example.mime,
312
- verb: create_completion_example.verb,
313
- route: create_completion_example.route,
314
- summary: create_completion_example.request_body_summary,
315
- response: create_completion_example.sample_response
316
- )
317
- },
318
- {
319
- role: 'assistant',
320
- content: assistant_response_json_example
321
- }
322
- ]
323
-
324
- create_file_example =
325
- codegen.with(
326
- route: '/files',
327
- verb: 'post',
328
- mime: 'multipart/form-data'
329
- )
330
-
331
- history_form_example = [
332
- {
333
- role: 'user',
334
- content: format(
335
- user_message_template,
336
- mime: create_file_example.mime,
337
- verb: create_file_example.verb,
338
- route: create_file_example.route,
339
- summary: create_file_example.request_body_summary,
340
- response: create_file_example.sample_response
341
- )
342
- },
343
- {
344
- role: 'assistant',
345
- content: assistant_response_form_example
346
- }
347
- ]
348
-
349
- history = [
350
- *(codegen.mime == 'application/json' ? history_json_example : history_form_example),
351
- {
352
- role: 'user',
353
- content: format(
354
- user_message_template,
355
- mime: codegen.mime,
356
- verb: codegen.verb,
357
- route: codegen.route,
358
- summary: codegen.no_request_body? ? '(none)' : codegen.request_body_summary,
359
- response: codegen.sample_response
360
- )
361
- }
362
- ]
363
-
364
- cache_dir = Pathname.new(__dir__).parent.join('tmp/codegen')
365
- cache_dir.mkpath unless cache_dir.directory?
366
- cache_file = cache_dir.join("#{codegen.verb}_#{codegen.route.gsub('/', '_')}.txt")
367
-
368
- if cache_file.file?
369
- puts cache_file.read
370
- else
371
- completion = api.chat_completions.create(
372
- model: 'gpt-3.5-turbo',
373
- messages: history,
374
- max_tokens: 2000,
375
- temperature: 0
376
- )
377
- output = completion.choices[0].message.content
378
- cache_file.write(output)
379
- puts output
380
- end