raix-openai-eight 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/raix/mcp.rb ADDED
@@ -0,0 +1,255 @@
+ # Simple integration layer that lets Raix classes declare an MCP server
+ # with a single DSL call:
+ #
+ #   mcp "https://my-server.example.com/sse"
+ #
+ # The concern fetches the remote server's tool list (via JSON-RPC 2.0
+ # `tools/list`) and exposes each remote tool as if it were an inline
+ # `function` declared with Raix::FunctionDispatch. When the tool is
+ # invoked by the model, the generated instance method forwards the
+ # request to the remote server using `tools/call`, captures the result,
+ # and appends the appropriate messages to the transcript so that the
+ # conversation history stays consistent.
+
+ require "active_support/concern"
+ require "active_support/inflector"
+ require "securerandom"
+ require "uri"
+
+ require_relative "../mcp/sse_client"
+ require_relative "../mcp/stdio_client"
+
+ module Raix
+   # Model Context Protocol integration for Raix
+   #
+   # Allows declaring MCP servers with a simple DSL that automatically:
+   # - Queries tools from the remote server
+   # - Exposes each tool as a function callable by LLMs
+   # - Handles transcript recording and response processing
+   module MCP
+     extend ActiveSupport::Concern
+
+     # Error raised when there's a protocol-level error in MCP communication
+     class ProtocolError < StandardError; end
+
+     JSONRPC_VERSION = "2.0".freeze
+
+     class_methods do
+       # Declare an MCP server by URL, using the SSE transport.
+       #
+       #   sse_mcp "https://server.example.com/sse",
+       #           headers: { "Authorization" => "Bearer <token>" },
+       #           only: [:get_issue]
+       #
+       def sse_mcp(url, headers: {}, only: nil, except: nil)
+         mcp(only:, except:, client: MCP::SseClient.new(url, headers:))
+       end
+
+       # Declare an MCP server by command-line arguments and environment variables,
+       # using the stdio transport.
+       #
+       #   stdio_mcp "docker", "run", "-i", "--rm",
+       #             "-e", "GITHUB_PERSONAL_ACCESS_TOKEN",
+       #             "ghcr.io/github/github-mcp-server",
+       #             env: { GITHUB_PERSONAL_ACCESS_TOKEN: "${input:github_token}" },
+       #             only: [:github_search]
+       #
+       def stdio_mcp(*args, env: {}, only: nil, except: nil)
+         mcp(only:, except:, client: MCP::StdioClient.new(*args, env))
+       end
+
+       # Declare an MCP server, using the given client.
+       #
+       #   mcp client: MCP::SseClient.new("https://server.example.com/sse")
+       #
+       # This will automatically:
+       # • query `tools/list` on the server
+       # • register each remote tool with FunctionDispatch so that the
+       #   OpenAI / OpenRouter request body includes its JSON-Schema
+       # • define an instance method for each tool that forwards the
+       #   call to the server and appends the proper messages to the
+       #   transcript.
+       # NOTE TO SELF: NEVER MOCK SERVER RESPONSES! THIS MUST WORK WITH REAL SERVERS!
+       def mcp(client:, only: nil, except: nil)
+         @mcp_servers ||= {}
+
+         return if @mcp_servers.key?(client.unique_key) # avoid duplicate definitions
+
+         # Fetch tools
+         tools = client.tools
+
+         if tools.empty?
+           # puts "[MCP DEBUG] No tools found from MCP server at #{url}"
+           client.close
+           return nil
+         end
+
+         # Apply filters
+         filtered_tools = if only.present?
+                            only_symbols = Array(only).map(&:to_sym)
+                            tools.select { |tool| only_symbols.include?(tool.name.to_sym) }
+                          elsif except.present?
+                            except_symbols = Array(except).map(&:to_sym)
+                            tools.reject { |tool| except_symbols.include?(tool.name.to_sym) }
+                          else
+                            tools
+                          end
+
+         # Ensure FunctionDispatch is included in the class
+         include FunctionDispatch unless included_modules.include?(FunctionDispatch)
+         # puts "[MCP DEBUG] FunctionDispatch included in #{name}"
+
+         filtered_tools.each do |tool|
+           remote_name = tool.name
+           # TODO: Revisit later whether this much context is needed in the function name
+           local_name = :"#{remote_name}_#{client.unique_key}"
+
+           description = tool.description
+           input_schema = tool.input_schema || {}
+
+           # --- register with FunctionDispatch (adds to .functions)
+           function(local_name, description, **{}) # placeholder parameters replaced next
+           latest_definition = functions.last
+           latest_definition[:parameters] = input_schema.deep_symbolize_keys || {}
+
+           # Required by OpenAI
+           latest_definition[:parameters][:properties] ||= {}
+
+           # Store the schema for type coercion
+           tool_schemas = @tool_schemas ||= {}
+           tool_schemas[local_name] = input_schema
+
+           # --- define an instance method that proxies to the server
+           define_method(local_name) do |arguments, _cache|
+             arguments ||= {}
+
+             # Coerce argument types based on the input schema
+             stored_schema = self.class.instance_variable_get(:@tool_schemas)&.dig(local_name)
+             coerced_arguments = coerce_arguments(arguments, stored_schema)
+
+             content_text = client.call_tool(remote_name, **coerced_arguments)
+             call_id = SecureRandom.uuid
+
+             # Mirror FunctionDispatch transcript behaviour
+             transcript << [
+               {
+                 role: "assistant",
+                 content: nil,
+                 tool_calls: [
+                   {
+                     id: call_id,
+                     type: "function",
+                     function: {
+                       name: local_name.to_s,
+                       arguments: arguments.to_json
+                     }
+                   }
+                 ]
+               },
+               {
+                 role: "tool",
+                 tool_call_id: call_id,
+                 name: local_name.to_s,
+                 content: content_text
+               }
+             ]
+
+             # Return the content - ChatCompletion will automatically continue
+             # the conversation after tool execution
+             content_text
+           end
+         end
+
+         # Store the tools and client for future use
+         @mcp_servers[client.unique_key] = { tools: filtered_tools, client: }
+       end
+     end
+
+     private
+
+     # Coerce argument types based on the JSON schema
+     def coerce_arguments(arguments, schema)
+       return arguments unless schema.is_a?(Hash) && schema["properties"].is_a?(Hash)
+
+       coerced = {}
+       schema["properties"].each do |key, prop_schema|
+         value = if arguments.key?(key)
+                   arguments[key]
+                 elsif arguments.key?(key.to_sym)
+                   arguments[key.to_sym]
+                 end
+         next if value.nil?
+
+         coerced[key] = coerce_value(value, prop_schema)
+       end
+
+       # Include any additional arguments not in the schema
+       arguments.each do |key, value|
+         key_str = key.to_s
+         coerced[key_str] = value unless coerced.key?(key_str)
+       end
+
+       coerced.with_indifferent_access
+     end
+
+     # Coerce a single value based on its schema
+     def coerce_value(value, schema)
+       return value unless schema.is_a?(Hash)
+
+       case schema["type"]
+       when "number", "integer"
+         if value.is_a?(String) && value.match?(/\A-?\d+(\.\d+)?\z/)
+           schema["type"] == "integer" ? value.to_i : value.to_f
+         else
+           value
+         end
+       when "boolean"
+         case value
+         when "true", true then true
+         when "false", false then false
+         else value
+         end
+       when "array"
+         array_value = begin
+           value.is_a?(String) ? JSON.parse(value) : value
+         rescue JSON::ParserError
+           value
+         end
+
+         # If there's an items schema, coerce each element
+         if array_value.is_a?(Array) && schema["items"]
+           array_value.map { |item| coerce_value(item, schema["items"]) }
+         else
+           array_value
+         end
+       when "object"
+         object_value = begin
+           value.is_a?(String) ? JSON.parse(value) : value
+         rescue JSON::ParserError
+           value
+         end
+
+         # If there are properties defined, coerce them recursively
+         if object_value.is_a?(Hash) && schema["properties"]
+           coerced_object = {}
+           schema["properties"].each do |prop_key, prop_schema|
+             prop_value = object_value[prop_key] || object_value[prop_key.to_sym]
+             coerced_object[prop_key] = coerce_value(prop_value, prop_schema) unless prop_value.nil?
+           end
+
+           # Include any additional properties not in the schema
+           object_value.each do |obj_key, obj_value|
+             obj_key_str = obj_key.to_s
+             coerced_object[obj_key_str] = obj_value unless coerced_object.key?(obj_key_str)
+           end
+
+           coerced_object
+         else
+           object_value
+         end
+       else
+         value
+       end
+     end
+   end
+ end
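
For orientation, here is a minimal usage sketch, not part of the package: the class name, server URL, token, and :get_issue tool are invented, and it assumes Raix::ChatCompletion (required via lib/raix.rb) provides `transcript` and `chat_completion`.

class IssueBot
  include Raix::ChatCompletion
  include Raix::MCP

  # Registers the server's tools; with `only:` just :get_issue survives
  # filtering. It is exposed to the model as a function named
  # "get_issue_<client.unique_key>".
  sse_mcp "https://mcp.example.com/sse",
          headers: { "Authorization" => "Bearer #{ENV["MCP_TOKEN"]}" },
          only: [:get_issue]
end

bot = IssueBot.new
bot.transcript << { user: "Summarize issue #42" }
bot.chat_completion # if the model calls the tool, the generated method
                    # forwards it via tools/call and records both messages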
data/lib/raix/message_adapters/base.rb ADDED
@@ -0,0 +1,50 @@
+ # frozen_string_literal: true
+
+ require "active_support/core_ext/module/delegation"
+
+ module Raix
+   module MessageAdapters
+     # Transforms messages into the format expected by the OpenAI API
+     class Base
+       attr_accessor :context
+
+       delegate :cache_at, :model, to: :context
+
+       def initialize(context)
+         @context = context
+       end
+
+       def transform(message)
+         return message if message[:role].present?
+
+         if message[:function].present?
+           { role: "assistant", name: message.dig(:function, :name), content: message.dig(:function, :arguments).to_json }
+         elsif message[:result].present?
+           { role: "function", name: message[:name], content: message[:result] }
+         else
+           content(message)
+         end
+       end
+
+       protected
+
+       def content(message)
+         case message
+         in { system: content }
+           { role: "system", content: }
+         in { user: content }
+           { role: "user", content: }
+         in { assistant: content }
+           { role: "assistant", content: }
+         else
+           raise ArgumentError, "Invalid message format: #{message.inspect}"
+         end.tap do |msg|
+           # convert to anthropic multipart format if model is claude-3 and cache_at is set
+           if model.to_s.include?("anthropic/claude-3") && cache_at && msg[:content].to_s.length > cache_at.to_i
+             msg[:content] = [{ type: "text", text: msg[:content], cache_control: { type: "ephemeral" } }]
+           end
+         end
+       end
+     end
+   end
+ end
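
To illustrate the adapter's three shorthand forms, a hypothetical session (return values traced from the code above; `context` is any object responding to `model` and `cache_at`):

adapter = Raix::MessageAdapters::Base.new(context)

adapter.transform({ user: "Hello" })
#=> { role: "user", content: "Hello" }

adapter.transform({ function: { name: "lookup", arguments: { id: 1 } } })
#=> { role: "assistant", name: "lookup", content: "{\"id\":1}" }

adapter.transform({ result: "42", name: "lookup" })
#=> { role: "function", name: "lookup", content: "42" }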
data/lib/raix/predicate.rb ADDED
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ module Raix
+   # A module for handling yes/no questions using AI chat completion.
+   # When included in a class, it provides methods to define handlers for
+   # yes and no responses. All handlers are optional. Any response that
+   # does not begin with "yes, " or "no, " will be considered a maybe.
+   #
+   # @example
+   #   class Question
+   #     include Raix::Predicate
+   #
+   #     yes? do |explanation|
+   #       puts "Yes: #{explanation}"
+   #     end
+   #
+   #     no? do |explanation|
+   #       puts "No: #{explanation}"
+   #     end
+   #
+   #     maybe? do |explanation|
+   #       puts "Maybe: #{explanation}"
+   #     end
+   #   end
+   #
+   #   question = Question.new
+   #   question.ask("Is Ruby a programming language?")
+   module Predicate
+     extend ActiveSupport::Concern
+     include ChatCompletion
+
+     def ask(question, openai: false)
+       raise "Please define a yes and/or no block" if self.class.yes_block.nil? && self.class.no_block.nil?
+
+       transcript << { system: "Always answer 'Yes, ', 'No, ', or 'Maybe, ' followed by a concise explanation!" }
+       transcript << { user: question }
+
+       chat_completion(openai:).tap do |response|
+         if response.downcase.start_with?("yes,")
+           instance_exec(response, &self.class.yes_block) if self.class.yes_block
+         elsif response.downcase.start_with?("no,")
+           instance_exec(response, &self.class.no_block) if self.class.no_block
+         elsif self.class.maybe_block
+           instance_exec(response, &self.class.maybe_block)
+         else
+           puts "[Raix::Predicate] Unhandled response: #{response}"
+         end
+       end
+     end
+
+     # Class methods added to the including class
+     module ClassMethods
+       attr_reader :yes_block, :no_block, :maybe_block
+
+       def yes?(&block)
+         @yes_block = block
+       end
+
+       def no?(&block)
+         @no_block = block
+       end
+
+       def maybe?(&block)
+         @maybe_block = block
+       end
+     end
+   end
+ end
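
Tracing `ask` with the Question class from the doc comment above: the system directive forces a "Yes, ", "No, ", or "Maybe, " prefix, and that prefix alone selects the handler.

question = Question.new
question.ask("Is Ruby a programming language?")
# "Yes, Ruby is a general-purpose language."  => yes? block runs
# "No, ..."                                   => no? block runs
# any other prefix                            => maybe? block, or the
#                                                unhandled-response warning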
data/lib/raix/prompt_declarations.rb ADDED
@@ -0,0 +1,167 @@
+ # frozen_string_literal: true
+
+ require "digest"
+ require "ostruct"
+
+ # This module provides a way to chain prompts and handle
+ # user responses in a serialized manner, with support for
+ # functions if the FunctionDispatch module is also included.
+ module Raix
+   # The PromptDeclarations module provides a way to chain prompts and handle
+   # user responses in a serialized manner, with support for
+   # functions if the FunctionDispatch module is also included.
+   module PromptDeclarations
+     extend ActiveSupport::Concern
+
+     module ClassMethods # rubocop:disable Style/Documentation
+       # Adds a prompt to the list of prompts. At minimum, provide a `text` or `call` parameter.
+       #
+       # @param system [Proc] A lambda that generates the system message.
+       # @param call [ChatCompletion] A callable class that includes ChatCompletion. Will be passed a context object when initialized.
+       # @param text Accepts 1) a lambda that returns the prompt text, 2) a string, or 3) a symbol that references a method.
+       # @param stream [Proc] A lambda stream handler
+       # @param success [Proc] The block of code to execute when the prompt is answered.
+       # @param params [Hash] Additional parameters for the completion API call
+       # @param if [Proc] A lambda that determines if the prompt should be executed.
+       def prompt(system: nil, call: nil, text: nil, stream: nil, success: nil, params: {}, if: nil, unless: nil, until: nil)
+         name = Digest::SHA256.hexdigest(text.inspect)[0..7]
+         prompts << OpenStruct.new({ name:, system:, call:, text:, stream:, success:, if:, unless:, until:, params: })
+
+         define_method(name) do |response|
+           return response if success.nil?
+           return send(success, response) if success.is_a?(Symbol)
+
+           instance_exec(response, &success)
+         end
+       end
+
+       def prompts
+         @prompts ||= []
+       end
+     end
+
+     attr_reader :current_prompt, :last_response
+
+     MAX_LOOP_COUNT = 5
+
+     # Executes the chat completion process based on the class-level declared prompts.
+     # The response to each prompt is added to the transcript automatically and returned.
+     #
+     # Raises an error if no prompts are defined.
+     #
+     # Uses the system prompt in the following order of priority:
+     # - system lambda specified in the prompt declaration
+     # - system_prompt instance method if defined
+     # - system_prompt class-level declaration if defined
+     #
+     # Prompts require a text lambda to be defined at minimum.
+     # TODO: shortcut syntax passes just a string prompt if no other options are needed.
+     #
+     # @raise [RuntimeError] If no prompts are defined.
+     #
+     # @param prompt [String] The prompt to use for the chat completion.
+     # @param params [Hash] Parameters for the chat completion.
+     # @param raw [Boolean] Whether to return the raw response.
+     #
+     # TODO: SHOULD NOT HAVE A DIFFERENT INTERFACE THAN PARENT
+     def chat_completion(prompt = nil, params: {}, raw: false, openai: false)
+       raise "No prompts defined" unless self.class.prompts.present?
+
+       loop_count = 0
+
+       current_prompts = self.class.prompts.clone
+
+       while (@current_prompt = current_prompts.shift)
+         next if @current_prompt.if.present? && !instance_exec(&@current_prompt.if)
+         next if @current_prompt.unless.present? && instance_exec(&@current_prompt.unless)
+
+         input = case current_prompt.text
+                 when Proc
+                   instance_exec(&current_prompt.text)
+                 when String
+                   current_prompt.text
+                 when Symbol
+                   send(current_prompt.text)
+                 else
+                   last_response.presence || prompt
+                 end
+
+         if current_prompt.call.present?
+           current_prompt.call.new(self).call(input).tap do |response|
+             if response.present?
+               transcript << { assistant: response }
+               @last_response = send(current_prompt.name, response)
+             end
+           end
+         else
+           __system_prompt = instance_exec(&current_prompt.system) if current_prompt.system.present? # rubocop:disable Lint/UnderscorePrefixedVariableName
+           __system_prompt ||= system_prompt if respond_to?(:system_prompt)
+           __system_prompt ||= self.class.system_prompt.presence
+           transcript << { system: __system_prompt } if __system_prompt
+           transcript << { user: instance_exec(&current_prompt.text) } # text is required
+
+           params = current_prompt.params.merge(params)
+
+           # set the stream if necessary
+           self.stream = instance_exec(&current_prompt.stream) if current_prompt.stream.present?
+
+           execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+         end
+
+         next unless current_prompt.until.present? && !instance_exec(&current_prompt.until)
+
+         if loop_count >= MAX_LOOP_COUNT
+           warn "Max loop count reached in chat_completion. Forcing return."
+
+           return last_response
+         else
+           current_prompts.unshift(@current_prompt) # put it back at the front
+           loop_count += 1
+         end
+       end
+
+       last_response
+     end
+
+     def execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+       chat_completion_from_superclass(params:, raw:, openai:).then do |response|
+         transcript << { assistant: response }
+         @last_response = send(current_prompt.name, response)
+         self.stream = nil # clear it again so it's not used for the next prompt
+       end
+     rescue StandardError => e
+       # Bubbles the error up the stack if no loops remain
+       raise e if loop_count >= MAX_LOOP_COUNT
+
+       sleep 1 # Wait before continuing
+     end
+
+     # Returns the model parameter of the current prompt or the default model.
+     #
+     # @return [Object] The model parameter of the current prompt or the default model.
+     def model
+       @current_prompt.params[:model] || super
+     end
+
+     # Returns the temperature parameter of the current prompt or the default temperature.
+     #
+     # @return [Float] The temperature parameter of the current prompt or the default temperature.
+     def temperature
+       @current_prompt.params[:temperature] || super
+     end
+
+     # Returns the max_tokens parameter of the current prompt or the default max_tokens.
+     #
+     # @return [Integer] The max_tokens parameter of the current prompt or the default max_tokens.
+     def max_tokens
+       @current_prompt.params[:max_tokens] || super
+     end
+
+     protected
+
+     # workaround for super.chat_completion, which is not available in ruby
+     def chat_completion_from_superclass(*, **kargs)
+       method(:chat_completion).super_method.call(*, **kargs)
+     end
+   end
+ end
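
A minimal sketch of the declaration side (class and prompts invented; assumes Raix::ChatCompletion supplies the underlying completion call):

class StoryPipeline
  include Raix::ChatCompletion
  include Raix::PromptDeclarations

  # Prompts run in order; each response lands in the transcript and is
  # available as `last_response` to the next prompt.
  prompt text: -> { "Draft a one-paragraph story about #{@topic}" }

  prompt text: -> { "Critique the draft above in one sentence" },
         success: ->(critique) { @critique = critique }

  def initialize(topic)
    @topic = topic
  end
end

StoryPipeline.new("lighthouses").chat_completion # returns the final response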
data/lib/raix/response_format.rb ADDED
@@ -0,0 +1,81 @@
+ # frozen_string_literal: true
+
+ require "active_support/core_ext/object/deep_dup"
+ require "active_support/core_ext/string/filters"
+
+ module Raix
+   # Handles the formatting of responses for AI interactions.
+   #
+   # This class is responsible for converting input data into a JSON schema
+   # that can be used to structure and validate AI responses. It supports
+   # nested structures and arrays, ensuring that the output conforms to
+   # the expected format for AI model interactions.
+   #
+   # @example
+   #   input = { name: { type: "string" }, age: { type: "integer" } }
+   #   format = ResponseFormat.new("PersonInfo", input)
+   #   schema = format.to_schema
+   #
+   # @attr_reader [String] name The name of the response format
+   # @attr_reader [Hash] input The input data to be formatted
+   class ResponseFormat
+     def initialize(name, input)
+       @name = name
+       @input = input
+     end
+
+     def to_json(*)
+       JSON.pretty_generate(to_schema)
+     end
+
+     def to_schema
+       {
+         type: "json_schema",
+         json_schema: {
+           name: @name,
+           schema: {
+             type: "object",
+             properties: decode(@input.deep_dup),
+             required: @input.keys,
+             additionalProperties: false
+           },
+           strict: true
+         }
+       }
+     end
+
+     private
+
+     def decode(input)
+       {}.tap do |response|
+         case input
+         when Array
+           response[:type] = "array"
+
+           if input.size == 1 && input.first.is_a?(String)
+             response[:items] = { type: input.first }
+           else
+             properties = {}
+             input.each { |item| properties.merge!(decode(item)) }
+             response[:items] = {
+               type: "object",
+               properties:,
+               required: properties.keys.select { |key| properties[key].delete(:required) },
+               additionalProperties: false
+             }
+           end
+         when Hash
+           input.each do |key, value|
+             response[key] = if value.is_a?(Hash) && value.key?(:type)
+                               value
+                             else
+                               decode(value)
+                             end
+           end
+         else
+           raise "Invalid input"
+         end
+       end
+     end
+   end
+ end
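
Running the doc comment's own example through `to_schema` (output traced from `decode`, which passes hashes that already contain a :type key straight through):

format = Raix::ResponseFormat.new(
  "PersonInfo",
  { name: { type: "string" }, age: { type: "integer" } }
)
format.to_schema
#=> {
#     type: "json_schema",
#     json_schema: {
#       name: "PersonInfo",
#       schema: {
#         type: "object",
#         properties: { name: { type: "string" }, age: { type: "integer" } },
#         required: [:name, :age],
#         additionalProperties: false
#       },
#       strict: true
#     }
#   }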
data/lib/raix/version.rb ADDED
@@ -0,0 +1,5 @@
+ # frozen_string_literal: true
+
+ module Raix
+   VERSION = "1.0.1"
+ end
data/lib/raix.rb ADDED
@@ -0,0 +1,27 @@
+ # frozen_string_literal: true
+
+ require_relative "raix/version"
+ require_relative "raix/configuration"
+ require_relative "raix/chat_completion"
+ require_relative "raix/function_dispatch"
+ require_relative "raix/prompt_declarations"
+ require_relative "raix/predicate"
+ require_relative "raix/response_format"
+ require_relative "raix/mcp"
+
+ # The Raix module provides configuration options for the Raix gem.
+ module Raix
+   class << self
+     attr_writer :configuration
+   end
+
+   # Returns the current configuration instance.
+   def self.configuration
+     @configuration ||= Configuration.new
+   end
+
+   # Configures the Raix gem using a block.
+   def self.configure
+     yield(configuration)
+   end
+ end
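
Typical bootstrapping, for context. Raix::Configuration is not included in this diff, so the attribute names below (openai_client, model) are assumptions carried over from the upstream raix gem:

require "openai"
require "raix"

Raix.configure do |config|
  # Assumed attributes; see lib/raix/configuration.rb in the full source.
  config.openai_client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])
  config.model = "gpt-4o"
end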
data/raix-openai-eight.gemspec ADDED
@@ -0,0 +1,36 @@
+ # frozen_string_literal: true
+
+ require_relative "lib/raix/version"
+
+ Gem::Specification.new do |spec|
+   spec.name = "raix-openai-eight"
+   spec.version = Raix::VERSION
+   spec.authors = ["Paulo Arruda"]
+   spec.email = ["parrudaj@gmail.com"]
+
+   spec.summary = "Ruby AI eXtensions"
+   spec.homepage = "https://github.com/parrudaj/raix-openai-eight"
+   spec.license = "MIT"
+   spec.required_ruby_version = ">= 3.2.2"
+
+   spec.metadata["homepage_uri"] = spec.homepage
+   spec.metadata["source_code_uri"] = "https://github.com/parrudaj/raix-openai-eight"
+   spec.metadata["changelog_uri"] = "https://github.com/parrudaj/raix-openai-eight/blob/main/CHANGELOG.md"
+
+   # Specify which files should be added to the gem when it is released.
+   # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
+   spec.files = Dir.chdir(__dir__) do
+     `git ls-files -z`.split("\x0").reject do |f|
+       (File.expand_path(f) == __FILE__) || f.start_with?(*%w[bin/ test/ spec/ features/ .git .circleci appveyor])
+     end
+   end
+   spec.bindir = "exe"
+   spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
+   spec.require_paths = ["lib"]
+
+   spec.add_dependency "activesupport", ">= 6.0"
+   spec.add_dependency "faraday-retry", "~> 2.0"
+   spec.add_dependency "open_router", "~> 0.2"
+   spec.add_dependency "ostruct"
+   spec.add_dependency "ruby-openai", "~> 8.1"
+ end
data/sig/raix.rbs ADDED
@@ -0,0 +1,4 @@
+ module Raix
+   VERSION: String
+   # See the writing guide of rbs: https://github.com/ruby/rbs#guides
+ end