omniai-openai 0.2.0 → 1.0.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/Gemfile +0 -1
- data/README.md +16 -3
- data/lib/omniai/openai/chat.rb +16 -24
- data/lib/omniai/openai/client.rb +11 -3
- data/lib/omniai/openai/config.rb +2 -1
- data/lib/omniai/openai/version.rb +1 -1
- metadata +1 -2
- data/lib/omniai/openai/chat/request.rb +0 -46
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 17b76524bf985fda37fec8e7cea18aece3380a1831be7de6abe2652767aafc92
|
4
|
+
data.tar.gz: e32a791f07f2697f56f2dfa15f11e8b05abf4416610925880d12cc7b5d0dda90
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 9aa657f83ece505e056597a36c73a77f24abc45af4094c5e982b949bc9631ba6a401ff410220855c43073232578bda7fe37ed7003e006259caaf8d1333825dab
|
7
|
+
data.tar.gz: a96fe7cb53c737220b21c43013af12fab828476336827c446bfb12e64b02340ef55cac0a762facd0d5103db8afad078bf72b94fe760bbda2d36437eeee68468b
|
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -75,6 +75,8 @@ completion = client.chat.completion('How fast is a cheetah?', model: OmniAI::Ope
|
|
75
75
|
completion.choice.message.content # 'A cheetah can reach speeds over 100 km/h.'
|
76
76
|
```
|
77
77
|
|
78
|
+
[OpenAI API Reference `model`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-model)
|
79
|
+
|
78
80
|
#### Temperature
|
79
81
|
|
80
82
|
`temperature` takes an optional float between `0.0` and `2.0` (default is `0.7`):
|
@@ -84,6 +86,8 @@ completion = client.chat.completion('Pick a number between 1 and 5', temperature
|
|
84
86
|
completion.choice.message.content # '3'
|
85
87
|
```
|
86
88
|
|
89
|
+
[OpenAI API Reference `temperature`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-temperature)
|
90
|
+
|
87
91
|
#### Stream
|
88
92
|
|
89
93
|
`stream` takes an optional proc to stream responses in real-time chunks instead of waiting for a complete response:
|
@@ -95,11 +99,20 @@ end
|
|
95
99
|
client.chat.completion('Be poetic.', stream:)
|
96
100
|
```
|
97
101
|
|
102
|
+
[OpenAI API Reference `stream`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream)
|
103
|
+
|
98
104
|
#### Format
|
99
105
|
|
100
|
-
`format` takes an optional symbol (
|
106
|
+
`format` takes an optional symbol (`:json`) that sets the `response_format` to `json_object`:
|
101
107
|
|
102
108
|
```ruby
|
103
|
-
completion = client.chat.completion(
|
104
|
-
|
109
|
+
completion = client.chat.completion([
|
110
|
+
{ role: OmniAI::Chat::Role::SYSTEM, content: OmniAI::Chat::JSON_PROMPT },
|
111
|
+
{ role: OmniAI::Chat::Role::USER, content: 'What is the name of the drummer for the Beatles?' }
|
112
|
+
], format: :json)
|
113
|
+
JSON.parse(completion.choice.message.content) # { "name": "Ringo" }
|
105
114
|
```
|
115
|
+
|
116
|
+
[OpenAI API Reference `response_format`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format)
|
117
|
+
|
118
|
+
> When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
|
data/lib/omniai/openai/chat.rb
CHANGED
@@ -3,15 +3,9 @@
|
|
3
3
|
module OmniAI
|
4
4
|
module OpenAI
|
5
5
|
# An OpenAI chat implementation.
|
6
|
-
#
|
7
|
-
# Usage:
|
8
|
-
#
|
9
|
-
# chat = OmniAI::OpenAI::Chat.new(client: client)
|
10
|
-
# chat.completion('Tell me a joke.')
|
11
|
-
# chat.completion(['Tell me a joke.'])
|
12
|
-
# chat.completion({ role: 'user', content: 'Tell me a joke.' })
|
13
|
-
# chat.completion([{ role: 'system', content: 'Tell me a joke.' }])
|
14
6
|
class Chat < OmniAI::Chat
|
7
|
+
JSON_RESPONSE_FORMAT = { type: 'json_object' }.freeze
|
8
|
+
|
15
9
|
module Model
|
16
10
|
GPT_4O = 'gpt-4o'
|
17
11
|
GPT_4 = 'gpt-4'
|
@@ -19,24 +13,22 @@ module OmniAI
|
|
19
13
|
GPT_3_5_TURBO = 'gpt-3.5-turbo'
|
20
14
|
end
|
21
15
|
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
16
|
+
protected
|
17
|
+
|
18
|
+
# @return [Hash]
|
19
|
+
def payload
|
20
|
+
OmniAI::OpenAI.config.chat_options.merge({
|
21
|
+
messages:,
|
22
|
+
model: @model,
|
23
|
+
stream: @stream.nil? ? nil : !@stream.nil?,
|
24
|
+
temperature: @temperature,
|
25
|
+
response_format: (JSON_RESPONSE_FORMAT if @format.eql?(:json)),
|
26
|
+
}).compact
|
26
27
|
end
|
27
28
|
|
28
|
-
# @
|
29
|
-
|
30
|
-
|
31
|
-
# @param model [String] optional
|
32
|
-
# @param format [Symbol] optional :text or :json
|
33
|
-
# @param temperature [Float, nil] optional
|
34
|
-
# @param stream [Proc, nil] optional
|
35
|
-
#
|
36
|
-
# @return [OmniAI::OpenAi::Chat::Response]
|
37
|
-
def completion(messages, model: Model::GPT_4O, temperature: nil, format: nil, stream: nil)
|
38
|
-
request = Request.new(client: @client, messages:, model:, temperature:, format:, stream:)
|
39
|
-
request.process!
|
29
|
+
# @return [String]
|
30
|
+
def path
|
31
|
+
"/#{OmniAI::OpenAI::Client::VERSION}/chat/completions"
|
40
32
|
end
|
41
33
|
end
|
42
34
|
end
|
data/lib/omniai/openai/client.rb
CHANGED
@@ -52,9 +52,17 @@ module OmniAI
|
|
52
52
|
end
|
53
53
|
end
|
54
54
|
|
55
|
-
# @
|
56
|
-
|
57
|
-
|
55
|
+
# @raise [OmniAI::Error]
|
56
|
+
#
|
57
|
+
# @param messages [String, Array, Hash]
|
58
|
+
# @param model [String] optional
|
59
|
+
# @param format [Symbol] optional :text or :json
|
60
|
+
# @param temperature [Float, nil] optional
|
61
|
+
# @param stream [Proc, nil] optional
|
62
|
+
#
|
63
|
+
# @return [OmniAI::Chat::Completion]
|
64
|
+
def chat(messages, model: Chat::Model::GPT_4O, temperature: nil, format: nil, stream: nil)
|
65
|
+
Chat.process!(messages, model:, temperature:, format:, stream:, client: self)
|
58
66
|
end
|
59
67
|
end
|
60
68
|
end
|
data/lib/omniai/openai/config.rb
CHANGED
@@ -4,7 +4,7 @@ module OmniAI
|
|
4
4
|
module OpenAI
|
5
5
|
# Configuration for managing the OpenAI `api_key` / `organization` / `project` / `logger`.
|
6
6
|
class Config < OmniAI::Config
|
7
|
-
attr_accessor :organization, :project
|
7
|
+
attr_accessor :organization, :project, :chat_options
|
8
8
|
|
9
9
|
def initialize
|
10
10
|
super
|
@@ -12,6 +12,7 @@ module OmniAI
|
|
12
12
|
@organization = ENV.fetch('OPENAI_ORGANIZATION', nil)
|
13
13
|
@project = ENV.fetch('OPENAI_PROJECT', nil)
|
14
14
|
@host = ENV.fetch('OPENAI_HOST', 'https://api.openai.com')
|
15
|
+
@chat_options = {}
|
15
16
|
end
|
16
17
|
end
|
17
18
|
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: omniai-openai
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 1.0.1
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Kevin Sylvestre
|
@@ -63,7 +63,6 @@ files:
|
|
63
63
|
- README.md
|
64
64
|
- lib/omniai/openai.rb
|
65
65
|
- lib/omniai/openai/chat.rb
|
66
|
-
- lib/omniai/openai/chat/request.rb
|
67
66
|
- lib/omniai/openai/client.rb
|
68
67
|
- lib/omniai/openai/config.rb
|
69
68
|
- lib/omniai/openai/version.rb
|
@@ -1,46 +0,0 @@
|
|
1
|
-
# frozen_string_literal: true
|
2
|
-
|
3
|
-
module OmniAI
|
4
|
-
module OpenAI
|
5
|
-
class Chat
|
6
|
-
# An implementation of OmniAI::Chat::Request for OpenAI.
|
7
|
-
class Request < OmniAI::Chat::Request
|
8
|
-
protected
|
9
|
-
|
10
|
-
# @return [Hash]
|
11
|
-
def payload
|
12
|
-
{ messages: }.tap do |payload|
|
13
|
-
payload[:model] = @model
|
14
|
-
payload[:stream] = !@stream.nil? unless @stream.nil?
|
15
|
-
payload[:temperature] = @temperature if @temperature
|
16
|
-
payload[:response_format] = { type: 'json_object' } if @format.eql?(:json)
|
17
|
-
end
|
18
|
-
end
|
19
|
-
|
20
|
-
# @return [Array<Hash>]
|
21
|
-
def messages
|
22
|
-
arrayify(@messages).map do |content|
|
23
|
-
case content
|
24
|
-
when String then { role: OmniAI::OpenAI::Chat::Role::USER, content: }
|
25
|
-
when Hash then content
|
26
|
-
else raise Error, "Unsupported content=#{content.inspect}"
|
27
|
-
end
|
28
|
-
end
|
29
|
-
end
|
30
|
-
|
31
|
-
# @return [String]
|
32
|
-
def path
|
33
|
-
"/#{OmniAI::OpenAI::Client::VERSION}/chat/completions"
|
34
|
-
end
|
35
|
-
|
36
|
-
private
|
37
|
-
|
38
|
-
# @param value [Object, Array<Object>]
|
39
|
-
# @return [Array<Object>]
|
40
|
-
def arrayify(value)
|
41
|
-
value.is_a?(Array) ? value : [value]
|
42
|
-
end
|
43
|
-
end
|
44
|
-
end
|
45
|
-
end
|
46
|
-
end
|