omniai-openai 0.2.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 046a2d4be43b4a9537a18b90fc69a9a7bfb191d76471d5a9374cc731c5e8b8a3
4
- data.tar.gz: e1f2914a05ae5a527c09126d015df285e6efaa86d37fca2ac16ec694799f186f
3
+ metadata.gz: 667d8318e96d3e6611a3ca20ced5dcefefc0896a4c84c48427ce441756cb9e60
4
+ data.tar.gz: 13161c5726cc33fcc7934a4164f751244b3c577fb52a1cdf30301dc10f733c84
5
5
  SHA512:
6
- metadata.gz: cace4852fc0a5ce7b59cb0a74728324396384a3c7793611bc48c61081bb910ec2cfc3dd09e0b3d4bb14fe16767913663cc2621b964ac0e87f93dcb1fab10873c
7
- data.tar.gz: 16fbad4c8b63260e069f07314811ad8cbbbb4bdbbcd2b412b63902372abf7271d9ccc87bbdc06cf529530f633408d80ab3d9f05d0032d1fc7a85d82ecd7f8d2c
6
+ metadata.gz: d44485bdcebe5ee67a846c4517ea62fdb4fd98dfbab464f64fd10316561a396369ce5579367c8f073d79edfee0a790baa46c2fc31dfd2a4522221f0d16679a23
7
+ data.tar.gz: 2f1054f58e35f37012bc6b95f7605b0eeee01010718c2446a14b623b37cde0028d08ed56330d10bf1145fe5dfa69c3c138f54f5ec7d2bf65e55eb4727a21c664
data/Gemfile CHANGED
@@ -11,5 +11,4 @@ gem 'rspec_junit_formatter'
11
11
  gem 'rubocop'
12
12
  gem 'rubocop-rake'
13
13
  gem 'rubocop-rspec'
14
- gem 'vcr'
15
14
  gem 'webmock'
data/README.md CHANGED
@@ -49,7 +49,7 @@ completion.choice.message.content # 'Why did the chicken cross the road? To get
49
49
 
50
50
  ```ruby
51
51
  completion = client.chat.completion({
52
- role: OmniAI::OpenAI::Chat::Role::USER,
52
+ role: OmniAI::Chat::Role::USER,
53
53
  content: 'Is it wise to jump off a bridge?'
54
54
  })
55
55
  completion.choice.message.content # 'No.'
@@ -58,7 +58,7 @@ completion.choice.message.content # 'No.'
58
58
  ```ruby
59
59
  completion = client.chat.completion([
60
60
  {
61
- role: OmniAI::OpenAI::Chat::Role::SYSTEM,
61
+ role: OmniAI::Chat::Role::SYSTEM,
62
62
  content: 'You are a helpful assistant.'
63
63
  },
64
64
  'What is the capital of Canada?',
@@ -75,6 +75,8 @@ completion = client.chat.completion('How fast is a cheetah?', model: OmniAI::Ope
75
75
  completion.choice.message.content # 'A cheetah can reach speeds over 100 km/h.'
76
76
  ```
77
77
 
78
+ [OpenAI API Reference `model`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-model)
79
+
78
80
  #### Temperature
79
81
 
80
82
  `temperature` takes an optional float between `0.0` and `2.0` (default is `0.7`):
@@ -84,6 +86,8 @@ completion = client.chat.completion('Pick a number between 1 and 5', temperature
84
86
  completion.choice.message.content # '3'
85
87
  ```
86
88
 
89
+ [OpenAI API Reference `temperature`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature)
90
+
87
91
  #### Stream
88
92
 
89
93
  `stream` takes an optional proc to stream responses in real-time chunks instead of waiting for a complete response:
@@ -95,11 +99,20 @@ end
95
99
  client.chat.completion('Be poetic.', stream:)
96
100
  ```
97
101
 
102
+ [OpenAI API Reference `stream`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream)
103
+
98
104
  #### Format
99
105
 
100
- `format` takes an optional symbol (i.e. `:json`) and switches the client to parsing message content as `json`:
106
+ `format` takes an optional symbol (`:json`) that sets the `response_format` to `json_object`:
101
107
 
102
108
  ```ruby
103
- completion = client.chat.completion('Please provide a color name / hex / hsl as JSON.', format: :json)
104
- completion.choice.message.content # { "name": "Black", "hex": "#000", "hsl": { "h": 0, "s": 0, "l": 0 } }
109
+ completion = client.chat.completion([
110
+ { role: OmniAI::Chat::Role::SYSTEM, content: OmniAI::Chat::JSON_PROMPT },
111
+ { role: OmniAI::Chat::Role::USER, content: 'What is the name of the drummer for the Beatles?' }
112
+ ], format: :json)
113
+ JSON.parse(completion.choice.message.content) # { "name": "Ringo" }
105
114
  ```
115
+
116
+ [OpenAI API Reference `response_format`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format)
117
+
118
+ > When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
@@ -3,15 +3,9 @@
3
3
  module OmniAI
4
4
  module OpenAI
5
5
  # An OpenAI chat implementation.
6
- #
7
- # Usage:
8
- #
9
- # chat = OmniAI::OpenAI::Chat.new(client: client)
10
- # chat.completion('Tell me a joke.')
11
- # chat.completion(['Tell me a joke.'])
12
- # chat.completion({ role: 'user', content: 'Tell me a joke.' })
13
- # chat.completion([{ role: 'system', content: 'Tell me a joke.' }])
14
6
  class Chat < OmniAI::Chat
7
+ JSON_RESPONSE_FORMAT = { type: 'json_object' }.freeze
8
+
15
9
  module Model
16
10
  GPT_4O = 'gpt-4o'
17
11
  GPT_4 = 'gpt-4'
@@ -19,24 +13,22 @@ module OmniAI
19
13
  GPT_3_5_TURBO = 'gpt-3.5-turbo'
20
14
  end
21
15
 
22
- module Role
23
- ASSISTANT = 'assistant'
24
- USER = 'user'
25
- SYSTEM = 'system'
16
+ protected
17
+
18
+ # @return [Hash]
19
+ def payload
20
+ OmniAI::OpenAI.config.chat_options.merge({
21
+ messages:,
22
+ model: @model,
23
+ stream: @stream.nil? ? nil : !@stream.nil?,
24
+ temperature: @temperature,
25
+ response_format: (JSON_RESPONSE_FORMAT if @format.eql?(:json)),
26
+ }).compact
26
27
  end
27
28
 
28
- # @raise [OmniAI::Error]
29
- #
30
- # @param prompt [String]
31
- # @param model [String] optional
32
- # @param format [Symbol] optional :text or :json
33
- # @param temperature [Float, nil] optional
34
- # @param stream [Proc, nil] optional
35
- #
36
- # @return [OmniAI::OpenAi::Chat::Response]
37
- def completion(messages, model: Model::GPT_4O, temperature: nil, format: nil, stream: nil)
38
- request = Request.new(client: @client, messages:, model:, temperature:, format:, stream:)
39
- request.process!
29
+ # @return [String]
30
+ def path
31
+ "/#{OmniAI::OpenAI::Client::VERSION}/chat/completions"
40
32
  end
41
33
  end
42
34
  end
@@ -52,9 +52,17 @@ module OmniAI
52
52
  end
53
53
  end
54
54
 
55
- # @return [OmniAI::OpenAI::Chat]
56
- def chat
57
- Chat.new(client: self)
55
+ # @raise [OmniAI::Error]
56
+ #
57
+ # @param messages [String, Array, Hash]
58
+ # @param model [String] optional
59
+ # @param format [Symbol] optional :text or :json
60
+ # @param temperature [Float, nil] optional
61
+ # @param stream [Proc, nil] optional
62
+ #
63
+ # @return [OmniAI::Chat::Completion]
64
+ def chat(messages, model: Chat::Model::GPT_4O, temperature: nil, format: nil, stream: nil)
65
+ Chat.process!(messages, model:, temperature:, format:, stream:, client: self)
58
66
  end
59
67
  end
60
68
  end
@@ -4,7 +4,7 @@ module OmniAI
4
4
  module OpenAI
5
5
  # Configuration for managing the OpenAI `api_key` / `organization` / `project` / `logger`.
6
6
  class Config < OmniAI::Config
7
- attr_accessor :organization, :project
7
+ attr_accessor :organization, :project, :chat_options
8
8
 
9
9
  def initialize
10
10
  super
@@ -12,6 +12,7 @@ module OmniAI
12
12
  @organization = ENV.fetch('OPENAI_ORGANIZATION', nil)
13
13
  @project = ENV.fetch('OPENAI_PROJECT', nil)
14
14
  @host = ENV.fetch('OPENAI_HOST', 'https://api.openai.com')
15
+ @chat_options = {}
15
16
  end
16
17
  end
17
18
  end
@@ -2,6 +2,6 @@
2
2
 
3
3
  module OmniAI
4
4
  module OpenAI
5
- VERSION = '0.2.0'
5
+ VERSION = '1.0.2'
6
6
  end
7
7
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: omniai-openai
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.2.0
4
+ version: 1.0.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Kevin Sylvestre
8
- autorequire:
8
+ autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-06-14 00:00:00.000000000 Z
11
+ date: 2024-06-15 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: event_stream_parser
@@ -63,7 +63,6 @@ files:
63
63
  - README.md
64
64
  - lib/omniai/openai.rb
65
65
  - lib/omniai/openai/chat.rb
66
- - lib/omniai/openai/chat/request.rb
67
66
  - lib/omniai/openai/client.rb
68
67
  - lib/omniai/openai/config.rb
69
68
  - lib/omniai/openai/version.rb
@@ -73,7 +72,7 @@ metadata:
73
72
  homepage_uri: https://github.com/ksylvest/omniai-openai
74
73
  changelog_uri: https://github.com/ksylvest/omniai-openai/releases
75
74
  rubygems_mfa_required: 'true'
76
- post_install_message:
75
+ post_install_message:
77
76
  rdoc_options: []
78
77
  require_paths:
79
78
  - lib
@@ -88,8 +87,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
88
87
  - !ruby/object:Gem::Version
89
88
  version: '0'
90
89
  requirements: []
91
- rubygems_version: 3.5.11
92
- signing_key:
90
+ rubygems_version: 3.5.3
91
+ signing_key:
93
92
  specification_version: 4
94
93
  summary: A generalized framework for interacting with OpenAI
95
94
  test_files: []
@@ -1,46 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module OmniAI
4
- module OpenAI
5
- class Chat
6
- # An implementation of OmniAI::Chat::Request for OpenAI.
7
- class Request < OmniAI::Chat::Request
8
- protected
9
-
10
- # @return [Hash]
11
- def payload
12
- { messages: }.tap do |payload|
13
- payload[:model] = @model
14
- payload[:stream] = !@stream.nil? unless @stream.nil?
15
- payload[:temperature] = @temperature if @temperature
16
- payload[:response_format] = { type: 'json_object' } if @format.eql?(:json)
17
- end
18
- end
19
-
20
- # @return [Array<Hash>]
21
- def messages
22
- arrayify(@messages).map do |content|
23
- case content
24
- when String then { role: OmniAI::OpenAI::Chat::Role::USER, content: }
25
- when Hash then content
26
- else raise Error, "Unsupported content=#{content.inspect}"
27
- end
28
- end
29
- end
30
-
31
- # @return [String]
32
- def path
33
- "/#{OmniAI::OpenAI::Client::VERSION}/chat/completions"
34
- end
35
-
36
- private
37
-
38
- # @param value [Object, Array<Object>]
39
- # @return [Array<Object>]
40
- def arrayify(value)
41
- value.is_a?(Array) ? value : [value]
42
- end
43
- end
44
- end
45
- end
46
- end