roseflow-openai 0.1.0
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/.standard.yml +3 -0
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +7 -0
- data/Gemfile +10 -0
- data/LICENSE.txt +21 -0
- data/README.md +47 -0
- data/Rakefile +10 -0
- data/config/openai.yml +14 -0
- data/lib/roseflow/openai/client.rb +185 -0
- data/lib/roseflow/openai/config.rb +38 -0
- data/lib/roseflow/openai/embedding.rb +18 -0
- data/lib/roseflow/openai/model.rb +152 -0
- data/lib/roseflow/openai/model_repository.rb +45 -0
- data/lib/roseflow/openai/provider.rb +110 -0
- data/lib/roseflow/openai/response.rb +181 -0
- data/lib/roseflow/openai/structs.rb +21 -0
- data/lib/roseflow/openai/version.rb +18 -0
- data/lib/roseflow/openai.rb +9 -0
- data/roseflow-openai.gemspec +45 -0
- data/sig/roseflow/openai.rbs +6 -0
- metadata +207 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 022b3ba6d3486eb3a7ed6abce48815272b2e4d5a52502dc814ebcc26c8c44712
+  data.tar.gz: eac633fb4aab864322916a3c5aa4e71cf15fe937d4c34aec84cbf8b444b44778
+SHA512:
+  metadata.gz: 4fb415158051794661293c7ec227c9384ac8edd3882e209a70cb559f95710d99248f01bba3c91633d6398594132cb1fcb9c5780477074a1aec1f86a95ba43555
+  data.tar.gz: c8d660546cbc37cb46e577f32e51ddfc71c5454f0dfc62fa8fbbbdfa90ba3ae0d40018fd71de856755dab4ed4350ddb30592d82185e0f29d6710b2105441f29d
data/.rspec
ADDED
data/.standard.yml
ADDED
data/CHANGELOG.md
ADDED
data/CODE_OF_CONDUCT.md
ADDED
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2023 Lauri Jutila
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,47 @@
+# Roseflow OpenAI integration
+
+This gem adds OpenAI support and integration for Roseflow, a framework for interacting with AI in Ruby.
+
+## Prerequisites
+
+To use this gem effectively, you need the [core Roseflow gem](https://github.com/roseflow-ai/roseflow).
+
+## Installation
+
+Install the gem and add it to the application's Gemfile by executing:
+
+    $ bundle add roseflow-openai
+
+If bundler is not being used to manage dependencies, install the gem by executing:
+
+    $ gem install roseflow-openai
+
+## Usage
+
+See the full documentation on how to configure and use Roseflow with OpenAI at [docs.roseflow.ai](https://docs.roseflow.ai/openai).
+
+## Contributing
+
+Bug reports and pull requests are welcome on GitHub at https://github.com/roseflow-ai/roseflow-openai.
+
+## Community
+
+### Discord
+
+Join us in our [Discord](https://discord.gg/roseflow).
+
+### Twitter
+
+Connect with the core team on Twitter.
+
+<a href="https://twitter.com/ljuti" target="_blank">
+  <img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/ljuti?logo=twitter&style=social">
+</a>
+
+## License
+
+The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
+
+## Code of Conduct
+
+Everyone interacting in the Roseflow OpenAI project's codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/roseflow-ai/roseflow-openai/blob/main/CODE_OF_CONDUCT.md).
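For quick orientation before the full docs, a minimal usage sketch of the `Provider` API defined later in this gem (assumes credentials are configured as in `config/openai.yml` below; the `Message` struct is illustrative — `Provider#chat` only needs objects that respond to `#to_h`):

```ruby
require "roseflow/openai/provider"

# Illustrative message type; any object responding to #to_h works here.
Message = Struct.new(:role, :content, keyword_init: true)

provider = Roseflow::OpenAI::Provider.new
model = provider.models.find("gpt-3.5-turbo")

response = provider.chat(
  model: model,
  messages: [Message.new(role: "user", content: "Hello!")]
)
puts response.response.to_s # content of the first choice
```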
data/Rakefile
ADDED
data/config/openai.yml
ADDED
@@ -0,0 +1,14 @@
+default: &default
+  organization_id: <YOUR OPENAI ORGANIZATION ID>
+
+development:
+  <<: *default
+  api_key: <YOUR OPENAI API KEY FOR DEVELOPMENT>
+
+test:
+  <<: *default
+  api_key: <YOUR OPENAI API KEY FOR TEST>
+
+production:
+  <<: *default
+  api_key: <YOUR OPENAI API KEY FOR PRODUCTION>
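The `Config` class shipped in this gem (see `lib/roseflow/openai/config.rb` below) is built on anyway_config with `config_name :openai`, so instead of editing this YAML the values can also come from environment variables. A sketch, assuming anyway_config's default `OPENAI_`-prefixed env mapping:

```ruby
require "roseflow/openai/config"

# Assumed anyway_config convention: config_name :openai maps to
# OPENAI_API_KEY and OPENAI_ORGANIZATION_ID environment variables.
ENV["OPENAI_API_KEY"] = "sk-..."
ENV["OPENAI_ORGANIZATION_ID"] = "org-..."

config = Roseflow::OpenAI::Config.new
config.api_key # => "sk-..."
```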
data/lib/roseflow/openai/client.rb
ADDED
@@ -0,0 +1,185 @@
+# frozen_string_literal: true
+
+require "faraday"
+require "faraday/retry"
+require "roseflow/openai/config"
+require "roseflow/openai/model"
+require "roseflow/openai/response"
+
+FARADAY_RETRY_OPTIONS = {
+  max: 3,
+  interval: 0.05,
+  interval_randomness: 0.5,
+  backoff_factor: 2,
+}
+
+module Roseflow
+  module OpenAI
+    class Client
+      def initialize(config = Config.new, provider = nil)
+        @config = config
+        @provider = provider
+      end
+
+      # Returns the available models from the API.
+      #
+      # @return [Array<OpenAI::Model>] the available models
+      def models
+        response = connection.get("/v1/models")
+        body = JSON.parse(response.body)
+        body.fetch("data", []).map do |model|
+          OpenAI::Model.new(model, self)
+        end
+      end
+
+      # Creates a chat completion.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param messages [Array<String>] the messages to use
+      # @param options [Hash] the options to use
+      # @return [OpenAI::ChatResponse] the API response object
+      def create_chat_completion(model:, messages:, **options)
+        response = connection.post("/v1/chat/completions") do |request|
+          request.body = options.merge({
+            model: model.name,
+            messages: messages
+          })
+        end
+        ChatResponse.new(response)
+      end
+
+      # Creates a chat completion and streams the response.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param messages [Array<String>] the messages to use
+      # @param options [Hash] the options to use
+      # @yield [String] the streamed API response
+      # @return [Array<String>] the streamed API response if no block is given
+      def streaming_chat_completion(model:, messages:, **options, &block)
+        streamed = []
+        connection.post("/v1/chat/completions") do |request|
+          request.body = options.merge({
+            model: model.name,
+            messages: messages,
+            stream: true
+          })
+          request.options.on_data = Proc.new do |chunk|
+            yield streaming_chunk(chunk) if block_given?
+            streamed << chunk unless block_given?
+          end
+        end
+        streamed unless block_given?
+      end
+
+      # Creates a text completion for the provided prompt and parameters.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param prompt [String] the prompt to use
+      # @param options [Hash] the options to use
+      # @return [OpenAI::CompletionResponse] the API response object
+      def create_completion(model:, prompt:, **options)
+        response = connection.post("/v1/completions") do |request|
+          request.body = options.merge({
+            model: model.name,
+            prompt: prompt
+          })
+        end
+        CompletionResponse.new(response)
+      end
+
+      # Creates a text completion for the provided prompt and parameters and streams the response.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param prompt [String] the prompt to use
+      # @param options [Hash] the options to use
+      # @yield [String] the streamed API response
+      # @return [Array<String>] the streamed API response if no block is given
+      def streaming_completion(model:, prompt:, **options, &block)
+        streamed = []
+        connection.post("/v1/completions") do |request|
+          request.body = options.merge({
+            model: model.name,
+            prompt: prompt,
+            stream: true
+          })
+          request.options.on_data = Proc.new do |chunk|
+            yield streaming_chunk(chunk) if block_given?
+            streamed << chunk unless block_given?
+          end
+        end
+        streamed unless block_given?
+      end
+
+      # Given a prompt and an instruction, the model will return an edited version of the prompt.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param instruction [String] the instruction to use
+      # @param options [Hash] the options to use
+      # @return [OpenAI::EditResponse] the API response object
+      def create_edit(model:, instruction:, **options)
+        response = connection.post("/v1/edits") do |request|
+          request.body = options.merge({
+            model: model.name,
+            instruction: instruction
+          })
+        end
+        EditResponse.new(response)
+      end
+
+      def create_image(prompt:, **options)
+        ImageApiResponse.new(
+          connection.post("/v1/images/generations") do |request|
+            request.body = options.merge(prompt: prompt)
+          end
+        )
+      end
+
+      # Creates an embedding vector representing the input text.
+      #
+      # @param model [Roseflow::OpenAI::Model] the model to use
+      # @param input [String] the input text to use
+      # @return [OpenAI::EmbeddingApiResponse] the API response object
+      def create_embedding(model:, input:, **options)
+        EmbeddingApiResponse.new(
+          connection.post("/v1/embeddings") do |request|
+            request.body = options.merge({
+              model: model.name,
+              input: input
+            })
+          end
+        )
+      end
+
+      private
+
+      attr_reader :config, :provider
+
+      # The connection object used to make requests to the API.
+      def connection
+        @connection ||= Faraday.new(
+          url: Config::OPENAI_API_URL,
+          headers: {
+            # "Content-Type" => "application/json",
+            "OpenAI-Organization" => config.organization_id
+          }
+        ) do |faraday|
+          faraday.request :authorization, "Bearer", -> { config.api_key }
+          faraday.request :json
+          faraday.request :retry, FARADAY_RETRY_OPTIONS
+          faraday.adapter Faraday.default_adapter
+        end
+      end
+
+      # Parses streaming chunks from the API response.
+      #
+      # @param chunk [String] the chunk to parse
+      # @return [String] the parsed chunk
+      def streaming_chunk(chunk)
+        return chunk unless chunk.match(/{.*}/)
+        chunk.scan(/{.*}/).map do |json|
+          JSON.parse(json).dig("choices", 0, "delta", "content")
+        end.join("")
+      end
+    end # Client
+  end # OpenAI
+end # Roseflow
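Because `streaming_chat_completion` runs each server-sent chunk through `streaming_chunk` before yielding, a block receives plain content deltas rather than raw SSE lines. A minimal consumption sketch (model name and messages are illustrative):

```ruby
client = Roseflow::OpenAI::Client.new
model = client.models.find { |m| m.name == "gpt-3.5-turbo" }

client.streaming_chat_completion(
  model: model,
  messages: [{ role: "user", content: "Write a haiku about Ruby" }]
) do |delta|
  print delta # each yield is the parsed "delta" content fragment
end
```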
data/lib/roseflow/openai/config.rb
ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+require "anyway_config"
+
+module Roseflow
+  module OpenAI
+    # Configuration class for the OpenAI provider.
+    class Config < Anyway::Config
+      config_name :openai
+
+      attr_config :api_key, :organization_id
+
+      required :api_key
+      required :organization_id
+
+      OPENAI_API_URL = "https://api.openai.com"
+      CHAT_MODELS = %w(gpt-4 gpt-4-0314 gpt-4-32k gpt-4-32k-0314 gpt-3.5-turbo gpt-3.5-turbo-0301).freeze
+      COMPLETION_MODELS = %w(text-davinci-003 text-davinci-002 text-curie-001 text-babbage-001 text-ada-001 davinci curie babbage ada).freeze
+      EDIT_MODELS = %w(text-davinci-edit-001 code-davinci-edit-001).freeze
+      TRANSCRIPTION_MODELS = %w(whisper-1).freeze
+      TRANSLATION_MODELS = %w(whisper-1).freeze
+      FINE_TUNE_MODELS = %w(davinci curie babbage ada).freeze
+      EMBEDDING_MODELS = %w(text-embedding-ada-002 text-search-ada-doc-001).freeze
+      MODERATION_MODELS = %w(text-moderation-stable text-moderation-latest).freeze
+      MAX_TOKENS = {
+        "gpt-4": 8192,
+        "gpt-4-0314": 8192,
+        "gpt-4-32k": 32_768,
+        "gpt-4-32k-0314": 32_768,
+        "gpt-3.5-turbo": 4096,
+        "gpt-3.5-turbo-0301": 4096,
+        "text-davinci-003": 4097,
+        "text-davinci-002": 4097,
+        "code-davinci-002": 8001
+      }
+    end # Config
+  end # OpenAI
+end # Roseflow
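Note that the `"gpt-4": 8192` shorthand creates Symbol keys in `MAX_TOKENS`, which matters when looking entries up by a model name held as a String:

```ruby
Roseflow::OpenAI::Config::MAX_TOKENS["gpt-4"]        # => nil (keys are Symbols)
Roseflow::OpenAI::Config::MAX_TOKENS["gpt-4".to_sym] # => 8192
Roseflow::OpenAI::Config::MAX_TOKENS.fetch(:"text-ada-001", 2049) # => 2049 (fallback)
```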
data/lib/roseflow/openai/embedding.rb
ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+require "roseflow/types"
+require "roseflow/embeddings/embedding"
+
+module Roseflow
+  module OpenAI
+    class Embedding < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :embedding, Types::Array.of(Types::Float)
+
+      def to_embedding
+        Roseflow::Embeddings::Embedding.new(vector: embedding, length: embedding.length)
+      end
+    end # Embedding
+  end # OpenAI
+end # Roseflow
data/lib/roseflow/openai/model.rb
ADDED
@@ -0,0 +1,152 @@
+# frozen_string_literal: true
+
+require "dry-struct"
+require "roseflow/tokenizer"
+require "active_support/core_ext/module/delegation"
+
+module Types
+  include Dry.Types()
+end
+
+module Roseflow
+  module OpenAI
+    class Model
+      attr_reader :name
+
+      # Initializes a new model instance.
+      #
+      # @param model [Hash] Model data from the API
+      # @param provider [Roseflow::OpenAI::Provider] Provider instance
+      def initialize(model, provider)
+        @model_ = model
+        @provider_ = provider
+        assign_attributes
+      end
+
+      # Tokenizer instance for the model.
+      def tokenizer
+        @tokenizer_ ||= Tokenizer.new(model: name)
+      end
+
+      # Handles the model call.
+      # FIXME: Operations should be rewritten to match the client API.
+      #
+      # @param operation [Symbol] Operation to perform
+      # @param input [String] Input to use
+      def call(operation, input, **options)
+        token_count = tokenizer.count_tokens(transform_chat_messages(input))
+        if token_count < max_tokens
+          case operation
+          when :chat
+            @provider_.create_chat_completion(model: name, messages: transform_chat_messages(input), **options)
+          when :completion
+            @provider_.create_completion(input)
+          when :image
+            @provider_.create_image_completion(input)
+          when :embed
+            @provider_.create_embedding(input)
+          else
+            raise ArgumentError, "Invalid operation: #{operation}"
+          end
+        else
+          raise TokenLimitExceededError, "Token limit for model #{name} exceeded: #{token_count} is more than #{max_tokens}"
+        end
+      end
+
+      # Indicates if the model is chattable.
+      def chattable?
+        OpenAI::Config::CHAT_MODELS.include?(name)
+      end
+
+      # Indicates if the model can do completions.
+      def completionable?
+        OpenAI::Config::COMPLETION_MODELS.include?(name)
+      end
+
+      # Indicates if the model can do image completions.
+      def imageable?
+        OpenAI::Config::IMAGE_MODELS.include?(name)
+      end
+
+      # Indicates if the model can do embeddings.
+      def embeddable?
+        OpenAI::Config::EMBEDDING_MODELS.include?(name)
+      end
+
+      # Indicates if the model is fine-tunable.
+      def finetuneable?
+        @permissions_.fetch("allow_fine_tuning")
+      end
+
+      # Indicates if the model has searchable indices.
+      def searchable_indices?
+        @permissions_.fetch("allow_search_indices")
+      end
+
+      # Indicates if the model can be sampled.
+      def sampleable?
+        @permissions_.fetch("allow_sampling")
+      end
+
+      def blocking?
+        @permissions_.fetch("is_blocking")
+      end
+
+      # Returns the maximum number of tokens for the model.
+      def max_tokens
+        OpenAI::Config::MAX_TOKENS.fetch(name.to_sym, 2049)
+      end
+
+      private
+
+      def assign_attributes
+        @name = @model_.fetch("id")
+        @created_at = Time.at(@model_.fetch("created"))
+        @permissions_ = @model_.fetch("permission").first
+      end
+
+      def transform_chat_messages(input)
+        input.map(&:to_h)
+      end
+    end # Model
+
+    # Represents a model permission.
+    class ModelPermission < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :id, Types::String
+      attribute :object, Types::String
+      attribute :created, Types::Integer
+      attribute :allow_create_engine, Types::Bool
+      attribute :allow_sampling, Types::Bool
+      attribute :allow_logprobs, Types::Bool
+      attribute :allow_search_indices, Types::Bool
+      attribute :allow_view, Types::Bool
+      attribute :allow_fine_tuning, Types::Bool
+      attribute :organization, Types::String
+      attribute :is_blocking, Types::Bool
+
+      alias_method :finetuneable?, :allow_fine_tuning
+      alias_method :is_blocking?, :is_blocking
+    end # ModelPermission
+
+    # Represents a model configuration.
+    class ModelConfiguration < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :id, Types::String
+      attribute :created, Types::Integer
+      attribute :permission, Types::Array.of(ModelPermission)
+      attribute :root, Types::String
+      attribute :parent, Types::String | Types::Nil
+
+      alias_method :name, :id
+
+      def permissions
+        permission.first
+      end
+
+      delegate :finetuneable?, :is_blocking?, to: :permissions
+    end # ModelConfiguration
+  end # OpenAI
+end # Roseflow
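The FIXME above flags that the operation dispatch still needs aligning with the client API, but the token guard is useful on its own: `call` counts tokens first and raises before any request when the input exceeds `max_tokens`. A sketch (the error class is expected to come from the core roseflow gem):

```ruby
model = provider.models.find("gpt-3.5-turbo")
messages = [{ role: "user", content: "words " * 10_000 }]

begin
  model.call(:chat, messages)
rescue => e
  # Raised before any HTTP request once the tokenized input exceeds
  # model.max_tokens (4096 for gpt-3.5-turbo).
  warn e.message
end
```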
data/lib/roseflow/openai/model_repository.rb
ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "active_support/core_ext/module/delegation"
+
+module Roseflow
+  module OpenAI
+    class ModelRepository
+      attr_reader :models
+
+      delegate :each, :all, to: :models
+
+      def initialize(provider)
+        @provider = provider
+        @models = provider.client.models
+      end
+
+      # Finds a model by name.
+      #
+      # @param name [String] Name of the model
+      def find(name)
+        @models.select { |model| model.name == name }.first
+      end
+
+      # Returns all models that are chattable.
+      def chattable
+        @models.select(&:chattable?)
+      end
+
+      # Returns all models that are completionable.
+      def completionable
+        @models.select(&:completionable?)
+      end
+
+      # Returns all models that support edits.
+      def editable
+        @models.select(&:editable?)
+      end
+
+      # Returns all models that support embeddings.
+      def embeddable
+        @models.select(&:embeddable?)
+      end
+    end # ModelRepository
+  end # OpenAI
+end # Roseflow
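A short sketch of how the repository reads in practice once a `Provider` is wired up (return values are illustrative):

```ruby
provider = Roseflow::OpenAI::Provider.new
repo = provider.models

repo.find("gpt-4")          # => a Roseflow::OpenAI::Model or nil
repo.chattable.map(&:name)  # => e.g. ["gpt-4", "gpt-3.5-turbo", ...]
repo.embeddable.map(&:name) # => e.g. ["text-embedding-ada-002", ...]
```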
data/lib/roseflow/openai/provider.rb
ADDED
@@ -0,0 +1,110 @@
+# frozen_string_literal: true
+
+require "roseflow/openai/client"
+require "roseflow/openai/model_repository"
+
+module Roseflow
+  module OpenAI
+    class Provider
+      def initialize(config = Roseflow::OpenAI::Config.new)
+        @config = config
+      end
+
+      # Returns the client for the provider
+      def client
+        @client ||= Client.new(config, self)
+      end
+
+      # Returns the model repository for the provider
+      def models
+        @models ||= ModelRepository.new(self)
+      end
+
+      # Chat with a model
+      #
+      # @param model [Roseflow::OpenAI::Model] The model object to use
+      # @param messages [Array<String>] The messages to send to the model
+      # @param options [Hash] Additional options to pass to the API
+      # @option options [Integer] :max_tokens The maximum number of tokens to generate in the completion.
+      # @option options [Float] :temperature Sampling temperature to use, between 0 and 2
+      # @option options [Float] :top_p The cumulative probability of tokens to use.
+      # @option options [Integer] :n The number of completions to generate.
+      # @option options [Integer] :logprobs Include the log probabilities on the logprobs most likely tokens.
+      # @option options [Boolean] :echo Whether to echo the question as part of the completion.
+      # @option options [String | Array] :stop Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+      # @option options [Float] :presence_penalty Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+      # @option options [Float] :frequency_penalty Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+      # @option options [Integer] :best_of Generates `best_of` completions server-side and returns the "best" (the one with the lowest log probability per token)
+      # @option options [Boolean] :streaming Whether to stream back partial progress
+      # @option options [String] :user A unique identifier representing your end-user
+      # @return [Roseflow::OpenAI::ChatResponse] The response object from the API.
+      def chat(model:, messages:, **options)
+        streaming = options.fetch(:streaming, false)
+
+        if streaming
+          client.streaming_chat_completion(model: model, messages: messages.map(&:to_h), **options)
+        else
+          client.create_chat_completion(model: model, messages: messages.map(&:to_h), **options)
+        end
+      end
+
+      # Create a completion.
+      #
+      # @param model [Roseflow::OpenAI::Model] The model object to use
+      # @param prompt [String] The prompt to use for completion
+      # @param options [Hash] Additional options to pass to the API
+      # @option options [Integer] :max_tokens The maximum number of tokens to generate in the completion.
+      # @option options [Float] :temperature Sampling temperature to use, between 0 and 2
+      # @option options [Float] :top_p The cumulative probability of tokens to use.
+      # @option options [Integer] :n The number of completions to generate.
+      # @option options [Integer] :logprobs Include the log probabilities on the logprobs most likely tokens.
+      # @option options [Boolean] :echo Whether to echo the question as part of the completion.
+      # @option options [String | Array] :stop Up to 4 sequences where the API will stop generating further tokens.
+      # @option options [Float] :presence_penalty Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+      # @option options [Float] :frequency_penalty Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+      # @option options [Integer] :best_of Generates `best_of` completions server-side and returns the "best" (the one with the lowest log probability per token)
+      # @option options [Boolean] :streaming Whether to stream back partial progress
+      # @option options [String] :user A unique identifier representing your end-user
+      # @return [Roseflow::OpenAI::CompletionResponse] The response object from the API.
+      def completion(model:, prompt:, **options)
+        streaming = options.fetch(:streaming, false)
+
+        if streaming
+          client.streaming_completion(model: model, prompt: prompt, **options)
+        else
+          client.create_completion(model: model, prompt: prompt, **options)
+        end
+      end
+
+      # Creates a new edit for the provided input, instruction, and parameters.
+      #
+      # @param model [Roseflow::OpenAI::Model] The model object to use
+      # @param instruction [String] The instruction to use for editing
+      # @param options [Hash] Additional options to pass to the API
+      # @option options [String] :input The input text to use as a starting point for the edit.
+      # @option options [Integer] :n The number of edits to generate.
+      # @option options [Float] :temperature Sampling temperature to use, between 0 and 2
+      # @option options [Float] :top_p The cumulative probability of tokens to use.
+      # @return [Roseflow::OpenAI::EditResponse] The response object from the API.
+      def edit(model:, instruction:, **options)
+        client.create_edit(model: model, instruction: instruction, **options)
+      end
+
+      # Creates an embedding vector representing the input text.
+      #
+      # @param model [Roseflow::OpenAI::Model] The model object to use
+      # @param input [String] The input text to use for embedding
+      # @param options [Hash] Additional options to pass to the API
+      # @option options [String] :user A unique identifier representing your end-user
+      def embedding(model:, input:, **options)
+        client.create_embedding(model: model, input: input, **options).embedding.to_embedding
+      end
+
+      def image(prompt:, **options)
+        client.create_image(prompt: prompt, **options)
+      end
+
+      attr_reader :config
+    end # Provider
+  end # OpenAI
+end # Roseflow
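Tying `Provider#embedding` back to the `Embedding` struct from earlier: the provider unwraps the API response and hands back a core Roseflow embedding. A minimal sketch (field values are illustrative):

```ruby
provider = Roseflow::OpenAI::Provider.new
ada = provider.models.find("text-embedding-ada-002")

embedding = provider.embedding(model: ada, input: "Roseflow meets OpenAI")
embedding.vector # => [0.0023, -0.0091, ...]
embedding.length # => dimensionality of the vector
```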
data/lib/roseflow/openai/response.rb
ADDED
@@ -0,0 +1,181 @@
+# frozen_string_literal: true
+
+require "dry-struct"
+require "roseflow/types"
+require "roseflow/openai/embedding"
+
+module Types
+  include Dry.Types()
+  Number = Types::Integer | Types::Float
+end
+
+module Roseflow
+  module OpenAI
+    FailedToCreateEmbeddingError = Class.new(StandardError)
+
+    class ApiResponse
+      def initialize(response)
+        @response = response
+      end
+
+      def success?
+        @response.success?
+      end
+
+      def status
+        @response.status
+      end
+
+      def body
+        raise NotImplementedError, "Subclasses must implement this method."
+      end
+    end # ApiResponse
+
+    class TextApiResponse < ApiResponse
+      def body
+        @body ||= ApiResponseBody.new(JSON.parse(@response.body))
+      end
+
+      def choices
+        body.choices.map { |choice| Choice.new(choice) }
+      end
+    end # TextApiResponse
+
+    class ChatResponse < TextApiResponse
+      def response
+        choices.first
+      end
+    end
+
+    class CompletionResponse < TextApiResponse
+      def response
+        choices.first
+      end
+
+      def responses
+        choices
+      end
+    end
+
+    class EditResponse < TextApiResponse
+      def response
+        choices.first
+      end
+
+      def responses
+        choices
+      end
+    end
+
+    class ImageApiResponse < ApiResponse
+      def body
+        @body ||= ImageApiResponseBody.new(JSON.parse(@response.body))
+      end
+
+      def images
+        body.data.map { |image| Image.new(image) }
+      end
+    end # ImageApiResponse
+
+    class EmbeddingApiResponse < ApiResponse
+      def body
+        @body ||= begin
+          case @response.status
+          when 200
+            EmbeddingApiResponseBody.new(JSON.parse(@response.body))
+          else
+            EmbeddingApiResponseErrorBody.new(JSON.parse(@response.body))
+          end
+        end
+      end
+
+      def embedding
+        case @response.status
+        when 200
+          body.data.map { |embedding| Embedding.new(embedding) }.first
+        else
+          raise FailedToCreateEmbeddingError, body.error.message
+        end
+      end
+    end # EmbeddingApiResponse
+
+    class Image < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :url, Types::String
+    end # Image
+
+    class Choice < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute? :text, Types::String
+      attribute? :message do
+        attribute :role, Types::String
+        attribute :content, Types::String
+      end
+
+      attribute? :finish_reason, Types::String
+      attribute :index, Types::Integer
+
+      def to_s
+        return message.content if message
+        return text if text
+      end
+    end # Choice
+
+    class ApiUsage < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :prompt_tokens, Types::Integer
+      attribute? :completion_tokens, Types::Integer
+      attribute :total_tokens, Types::Integer
+    end # ApiUsage
+
+    class ImageApiResponseBody < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :created, Types::Integer
+      attribute :data, Types::Array(Types::Hash)
+    end # ImageApiResponseBody
+
+    class OpenAIEmbedding < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :object, Types::String.default("embedding")
+      attribute :embedding, Types::Array(Types::Number)
+      attribute :index, Types::Integer
+    end # OpenAIEmbedding
+
+    class EmbeddingApiResponseBody < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :object, Types::String
+      attribute :data, Types::Array(OpenAIEmbedding)
+      attribute :model, Types::String
+      attribute :usage, ApiUsage
+    end # EmbeddingApiResponseBody
+
+    class EmbeddingApiResponseErrorBody < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute :error do
+        attribute :message, Types::String
+      end
+    end # EmbeddingApiResponseErrorBody
+
+    class ApiResponseBody < Dry::Struct
+      transform_keys(&:to_sym)
+
+      attribute? :id, Types::String
+      attribute :object, Types::String
+      attribute :created, Types::Integer
+      attribute? :model, Types::String
+      attribute :usage, ApiUsage
+      attribute :choices, Types::Array
+
+      def success?
+        true
+      end
+    end # ApiResponseBody
+  end # OpenAI
+end # Roseflow
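For orientation, a sketch of walking a parsed chat response through the structs above (field values are illustrative):

```ruby
response = provider.chat(model: model, messages: messages)

response.success?                # => true on a 2xx Faraday response
response.response.to_s           # => content of the first Choice
response.body.usage.total_tokens # => e.g. 42, via the ApiUsage struct
```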
data/lib/roseflow/openai/structs.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+require "dry-struct"
+
+module Roseflow
+  module OpenAI
+    # A model instruction struct. Used to pass instructions to the model.
+    # @param instruction [String] The instruction that tells the model how to edit the prompt.
+    # @param input [String] The input text to use as a starting point for the edit.
+    # @param n [Integer] Number of results to be returned by the model.
+    # @param temperature [Float] Sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+    # @param top_p [Float] An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    class EditModelInstruction < Dry::Struct
+      attribute :instruction, Types::String
+      attribute :input, Types::String.default("")
+      attribute :n, Types::Integer.default(1)
+      attribute :temperature, Types::Float.default(1.0)
+      attribute :top_p, Types::Float.default(1.0)
+    end
+  end
+end
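A sketch of feeding an `EditModelInstruction` into `Provider#edit`; splatting the struct through `to_h` this way is an assumption, not something the gem itself demonstrates:

```ruby
instruction = Roseflow::OpenAI::EditModelInstruction.new(
  instruction: "Fix the spelling mistakes",
  input: "What day of the wek is it?"
)

# edit_model is assumed to be one of Config::EDIT_MODELS, e.g. text-davinci-edit-001.
provider.edit(model: edit_model, **instruction.to_h)
```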
data/lib/roseflow/openai/version.rb
ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module Roseflow
+  module OpenAI
+    def self.gem_version
+      Gem::Version.new VERSION::STRING
+    end
+
+    module VERSION
+      MAJOR = 0
+      MINOR = 1
+      PATCH = 0
+      PRE = nil
+
+      STRING = [MAJOR, MINOR, PATCH, PRE].compact.join(".")
+    end
+  end
+end
data/roseflow-openai.gemspec
ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require_relative "lib/roseflow/openai/version"
+
+Gem::Specification.new do |spec|
+  spec.name = "roseflow-openai"
+  spec.version = Roseflow::OpenAI.gem_version
+  spec.authors = ["Lauri Jutila"]
+  spec.email = ["git@laurijutila.com"]
+
+  spec.summary = "Roseflow meets OpenAI"
+  spec.description = "OpenAI integration and models for Roseflow."
+  spec.homepage = "https://github.com/roseflow-ai/roseflow-openai"
+  spec.license = "MIT"
+  spec.required_ruby_version = ">= 3.2.0"
+
+  spec.metadata["homepage_uri"] = spec.homepage
+  spec.metadata["source_code_uri"] = "https://github.com/roseflow-ai/roseflow-openai"
+  spec.metadata["changelog_uri"] = "https://github.com/roseflow-ai/roseflow-openai/blob/master/CHANGELOG.md"
+
+  # Specify which files should be added to the gem when it is released.
+  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
+  spec.files = Dir.chdir(__dir__) do
+    `git ls-files -z`.split("\x0").reject do |f|
+      (File.expand_path(f) == __FILE__) || f.start_with?(*%w[bin/ test/ spec/ features/ .git .circleci appveyor])
+    end
+  end
+  spec.bindir = "exe"
+  spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
+  spec.require_paths = ["lib"]
+
+  spec.add_dependency "activesupport"
+  spec.add_dependency "anyway_config", "~> 2.0"
+  spec.add_dependency "dry-struct"
+  spec.add_dependency "faraday"
+  spec.add_dependency "faraday-retry"
+
+  spec.add_development_dependency "awesome_print"
+  spec.add_development_dependency "pry"
+  spec.add_development_dependency "roseflow"
+  spec.add_development_dependency "webmock"
+  spec.add_development_dependency "vcr"
+  # For more information and examples about making a new gem, check out our
+  # guide at: https://bundler.io/guides/creating_gem.html
+end
metadata
ADDED
@@ -0,0 +1,207 @@
+--- !ruby/object:Gem::Specification
+name: roseflow-openai
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- Lauri Jutila
+autorequire:
+bindir: exe
+cert_chain: []
+date: 2023-05-10 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: activesupport
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: anyway_config
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
+- !ruby/object:Gem::Dependency
+  name: dry-struct
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: faraday
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: faraday-retry
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: awesome_print
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: pry
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: roseflow
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: webmock
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: vcr
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+description: OpenAI integration and models for Roseflow.
+email:
+- git@laurijutila.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- ".rspec"
+- ".standard.yml"
+- CHANGELOG.md
+- CODE_OF_CONDUCT.md
+- Gemfile
+- LICENSE.txt
+- README.md
+- Rakefile
+- config/openai.yml
+- lib/roseflow/openai.rb
+- lib/roseflow/openai/client.rb
+- lib/roseflow/openai/config.rb
+- lib/roseflow/openai/embedding.rb
+- lib/roseflow/openai/model.rb
+- lib/roseflow/openai/model_repository.rb
+- lib/roseflow/openai/provider.rb
+- lib/roseflow/openai/response.rb
+- lib/roseflow/openai/structs.rb
+- lib/roseflow/openai/version.rb
+- roseflow-openai.gemspec
+- sig/roseflow/openai.rbs
+homepage: https://github.com/roseflow-ai/roseflow-openai
+licenses:
+- MIT
+metadata:
+  homepage_uri: https://github.com/roseflow-ai/roseflow-openai
+  source_code_uri: https://github.com/roseflow-ai/roseflow-openai
+  changelog_uri: https://github.com/roseflow-ai/roseflow-openai/blob/master/CHANGELOG.md
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: 3.2.0
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.4.1
+signing_key:
+specification_version: 4
+summary: Roseflow meets OpenAI
+test_files: []