raix 1.0.0 → 1.0.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 56a4f5695ac0b5685715aa48e1bbb926efd05c5bd15b2f2198df3acda3ef30e4
-  data.tar.gz: e46d6284e3d4201fa6a8bc5b59403b41434d9e92ce131944d5d20d896925402f
+  metadata.gz: 9ae2cafb0dfef15674cc95ed9067309f158d88c24a2ea23581f6331f20a49063
+  data.tar.gz: 214e5257dc699b26f4fa529278b367cf9fa30f6b27e81de8225b3eecfb33a297
 SHA512:
-  metadata.gz: 07224f3ff3b8ea20b01d77bd149f7aa3afc597c145db34cf65ecf188a4e2fb2f366729cd8b8c671783346ce743106a234bbf8e551b2243f5a5f8fb9f652ed28b
-  data.tar.gz: ed03422c1168de36a21729ed75281ff6dcfabead31232cefc22201fb670a18bae57877fc4e440aa8bffaf0705c1288511369f0e040757e92198e702922c407be
+  metadata.gz: d6f319bb682d8ad3134aee647e1052dd63c46e5a899d370b6a25f846b02ca52de5b03008e006dcc9e9496788a68c3e19a3c05676715c9f5324b3a47e8faf11e6
+  data.tar.gz: e00be61584c497bcdef12ac42df9b21a10d724e2a50031490db225450d7cb0dc41818b21c50fc4718854e5f461aada2ee030b087819dec789f70e4abfa89c891
data/CHANGELOG.md CHANGED
@@ -1,3 +1,23 @@
+## [1.0.2] - 2025-07-16
+### Added
+- Added method to check for API client availability in Configuration
+
+### Changed
+- Updated ruby-openai dependency to ~> 8.1
+
+### Fixed
+- Fixed gemspec file reference
+
+## [1.0.1] - 2025-06-04
+### Fixed
+- Fixed PromptDeclarations module namespace - now properly namespaced under Raix
+- Removed Rails.logger dependencies from PromptDeclarations for non-Rails environments
+- Fixed documentation example showing incorrect `openai: true` usage (should be model string)
+- Added comprehensive tests for PromptDeclarations module
+
+### Changed
+- Improved error handling in PromptDeclarations to catch StandardError instead of generic rescue
+
 ## [1.0.0] - 2025-06-04
 ### Breaking Changes
 - **Deprecated `loop` parameter in ChatCompletion** - The system now automatically continues conversations after tool calls until the AI provides a text response. The `loop` parameter shows a deprecation warning but still works for backwards compatibility.
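The 1.0.0 breaking change above means tool-calling conversations no longer need an explicit flag: `chat_completion` keeps going after tool calls on its own. A minimal before/after sketch (the class, tool, and prompt are hypothetical, following the `FunctionDispatch` API documented in the gem's README):

```ruby
class WeatherBot
  include Raix::ChatCompletion
  include Raix::FunctionDispatch

  # Hypothetical tool the model can call mid-conversation.
  function :check_weather, "Check the weather for a city", city: { type: "string" } do |arguments|
    "It is sunny in #{arguments[:city]}" # stubbed result for illustration
  end

  def ask
    transcript << { user: "What's the weather in Lisbon?" }
    # Before 1.0.0: chat_completion(loop: true)
    # Since 1.0.0: the loop is automatic; `loop: true` only triggers a deprecation warning.
    chat_completion
  end
end
```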
data/Gemfile CHANGED
@@ -2,7 +2,7 @@
 
 source "https://rubygems.org"
 
-# Specify your gem's dependencies in raix-rails.gemspec
+# Specify your gem's dependencies in raix.gemspec
 gemspec
 
 group :development do
data/Gemfile.lock CHANGED
@@ -1,12 +1,12 @@
 PATH
   remote: .
   specs:
-    raix (0.9.2)
+    raix (1.0.2)
       activesupport (>= 6.0)
       faraday-retry (~> 2.0)
       open_router (~> 0.2)
       ostruct
-      ruby-openai (~> 7)
+      ruby-openai (~> 8.1)
 
 GEM
   remote: https://rubygems.org/
@@ -148,7 +148,7 @@ GEM
       unicode-display_width (>= 2.4.0, < 3.0)
     rubocop-ast (1.31.2)
       parser (>= 3.3.0.4)
-    ruby-openai (7.1.0)
+    ruby-openai (8.1.0)
       event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
data/README.md CHANGED
@@ -460,7 +460,7 @@ class CustomPromptExample
 
   # Prompt using OpenAI directly
   prompt text: "Use OpenAI",
-         openai: true
+         openai: "gpt-4o"
 end
 ```
 
@@ -713,12 +713,27 @@ If bundler is not being used to manage dependencies, install the gem by executing:
 
 If you are using the default OpenRouter API, Raix expects `Raix.configuration.openrouter_client` to be initialized with the OpenRouter API client instance.
 
-You can add an initializer to your application's `config/initializers` directory:
+You can add an initializer to your application's `config/initializers` directory that looks like this example (setting up both providers, OpenRouter and OpenAI):
 
 ```ruby
 # config/initializers/raix.rb
+OpenRouter.configure do |config|
+  config.faraday do |f|
+    f.request :retry, retry_options
+    f.response :logger, Logger.new($stdout), { headers: true, bodies: true, errors: true } do |logger|
+      logger.filter(/(Bearer) (\S+)/, '\1[REDACTED]')
+    end
+  end
+end
+
 Raix.configure do |config|
-  config.openrouter_client = OpenRouter::Client.new
+  config.openrouter_client = OpenRouter::Client.new(access_token: ENV.fetch("OR_ACCESS_TOKEN", nil))
+  config.openai_client = OpenAI::Client.new(access_token: ENV.fetch("OAI_ACCESS_TOKEN", nil)) do |f|
+    f.request :retry, retry_options
+    f.response :logger, Logger.new($stdout), { headers: true, bodies: true, errors: true } do |logger|
+      logger.filter(/(Bearer) (\S+)/, '\1[REDACTED]')
+    end
+  end
 end
 ```
 
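Note that the initializer example above calls `f.request :retry, retry_options` without defining `retry_options`. A plausible definition using standard faraday-retry middleware options might look like this (the values are illustrative assumptions, not part of the README):

```ruby
# Illustrative retry settings for the faraday-retry middleware used above.
retry_options = {
  max: 3,                              # retry a failed request up to 3 times
  interval: 0.5,                       # wait 0.5s before the first retry
  backoff_factor: 2,                   # double the interval on each subsequent retry
  retry_statuses: [429, 500, 502, 503] # rate limits and transient server errors
}
```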
@@ -56,6 +56,10 @@ module Raix
       self.fallback = fallback
     end
 
+    def client?
+      !!(openrouter_client || openai_client)
+    end
+
     private
 
     attr_accessor :fallback
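The new `client?` predicate above reports whether either client is configured, which lets callers fail fast before attempting a completion. A minimal usage sketch (the guard and message are illustrative):

```ruby
# Illustrative guard; Raix.configuration and client? come from the diff above.
unless Raix.configuration.client?
  raise "Raix has no API client configured (set openrouter_client or openai_client)"
end
```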
@@ -5,172 +5,162 @@ require "ostruct"
 # This module provides a way to chain prompts and handle
 # user responses in a serialized manner, with support for
 # functions if the FunctionDispatch module is also included.
-module PromptDeclarations
-  extend ActiveSupport::Concern
+module Raix
+  # The PromptDeclarations module provides a way to chain prompts and handle
+  # user responses in a serialized manner, with support for
+  # functions if the FunctionDispatch module is also included.
+  module PromptDeclarations
+    extend ActiveSupport::Concern
+
+    module ClassMethods # rubocop:disable Style/Documentation
+      # Adds a prompt to the list of prompts. At minimum, provide a `text` or `call` parameter.
+      #
+      # @param system [Proc] A lambda that generates the system message.
+      # @param call [ChatCompletion] A callable class that includes ChatCompletion. Will be passed a context object when initialized.
+      # @param text Accepts 1) a lambda that returns the prompt text, 2) a string, or 3) a symbol that references a method.
+      # @param stream [Proc] A lambda stream handler
+      # @param success [Proc] The block of code to execute when the prompt is answered.
+      # @param params [Hash] Additional parameters for the completion API call
+      # @param if [Proc] A lambda that determines if the prompt should be executed.
+      def prompt(system: nil, call: nil, text: nil, stream: nil, success: nil, params: {}, if: nil, unless: nil, until: nil)
+        name = Digest::SHA256.hexdigest(text.inspect)[0..7]
+        prompts << OpenStruct.new({ name:, system:, call:, text:, stream:, success:, if:, unless:, until:, params: })
+
+        define_method(name) do |response|
+          return response if success.nil?
+          return send(success, response) if success.is_a?(Symbol)
+
+          instance_exec(response, &success)
+        end
+      end
 
-  module ClassMethods # rubocop:disable Style/Documentation
-    # Adds a prompt to the list of prompts. At minimum, provide a `text` or `call` parameter.
-    #
-    # @param system [Proc] A lambda that generates the system message.
-    # @param call [ChatCompletion] A callable class that includes ChatCompletion. Will be passed a context object when initialized.
-    # @param text Accepts 1) a lambda that returns the prompt text, 2) a string, or 3) a symbol that references a method.
-    # @param stream [Proc] A lambda stream handler
-    # @param success [Proc] The block of code to execute when the prompt is answered.
-    # @param params [Hash] Additional parameters for the completion API call
-    # @param if [Proc] A lambda that determines if the prompt should be executed.
-    def prompt(system: nil, call: nil, text: nil, stream: nil, success: nil, params: {}, if: nil, unless: nil, until: nil)
-      name = Digest::SHA256.hexdigest(text.inspect)[0..7]
-      prompts << OpenStruct.new({ name:, system:, call:, text:, stream:, success:, if:, unless:, until:, params: })
-
-      define_method(name) do |response|
-        puts "_" * 80
-        puts "PromptDeclarations#response:"
-        puts "#{text&.source_location} (#{name})"
-        puts response
-        puts "_" * 80
-
-        return response if success.nil?
-        return send(success, response) if success.is_a?(Symbol)
-
-        instance_exec(response, &success)
+      def prompts
+        @prompts ||= []
       end
     end
 
-    def prompts
-      @prompts ||= []
-    end
-  end
+    attr_reader :current_prompt, :last_response
+
+    MAX_LOOP_COUNT = 5
 
-  attr_reader :current_prompt, :last_response
-
-  MAX_LOOP_COUNT = 5
-
-  # Executes the chat completion process based on the class-level declared prompts.
-  # The response to each prompt is added to the transcript automatically and returned.
-  #
-  # Raises an error if there are not enough prompts defined.
-  #
-  # Uses system prompt in following order of priority:
-  # - system lambda specified in the prompt declaration
-  # - system_prompt instance method if defined
-  # - system_prompt class-level declaration if defined
-  #
-  # Prompts require a text lambda to be defined at minimum.
-  # TODO: shortcut syntax passes just a string prompt if no other options are needed.
-  #
-  # @raise [RuntimeError] If no prompts are defined.
-  #
-  # @param prompt [String] The prompt to use for the chat completion.
-  # @param params [Hash] Parameters for the chat completion.
-  # @param raw [Boolean] Whether to return the raw response.
-  #
-  # TODO: SHOULD NOT HAVE A DIFFERENT INTERFACE THAN PARENT
-  def chat_completion(prompt = nil, params: {}, raw: false, openai: false)
-    raise "No prompts defined" unless self.class.prompts.present?
-
-    loop_count = 0
-
-    current_prompts = self.class.prompts.clone
-
-    while (@current_prompt = current_prompts.shift)
-      next if @current_prompt.if.present? && !instance_exec(&@current_prompt.if)
-      next if @current_prompt.unless.present? && instance_exec(&@current_prompt.unless)
-
-      input = case current_prompt.text
-              when Proc
-                instance_exec(&current_prompt.text)
-              when String
-                current_prompt.text
-              when Symbol
-                send(current_prompt.text)
-              else
-                last_response.presence || prompt
-              end
-
-      if current_prompt.call.present?
-        Rails.logger.debug "Calling #{current_prompt.call} with input: #{input}"
-        current_prompt.call.new(self).call(input).tap do |response|
-          if response.present?
-            transcript << { assistant: response }
-            @last_response = send(current_prompt.name, response)
+    # Executes the chat completion process based on the class-level declared prompts.
+    # The response to each prompt is added to the transcript automatically and returned.
+    #
+    # Raises an error if there are not enough prompts defined.
+    #
+    # Uses system prompt in following order of priority:
+    # - system lambda specified in the prompt declaration
+    # - system_prompt instance method if defined
+    # - system_prompt class-level declaration if defined
+    #
+    # Prompts require a text lambda to be defined at minimum.
+    # TODO: shortcut syntax passes just a string prompt if no other options are needed.
+    #
+    # @raise [RuntimeError] If no prompts are defined.
+    #
+    # @param prompt [String] The prompt to use for the chat completion.
+    # @param params [Hash] Parameters for the chat completion.
+    # @param raw [Boolean] Whether to return the raw response.
+    #
+    # TODO: SHOULD NOT HAVE A DIFFERENT INTERFACE THAN PARENT
+    def chat_completion(prompt = nil, params: {}, raw: false, openai: false)
+      raise "No prompts defined" unless self.class.prompts.present?
+
+      loop_count = 0
+
+      current_prompts = self.class.prompts.clone
+
+      while (@current_prompt = current_prompts.shift)
+        next if @current_prompt.if.present? && !instance_exec(&@current_prompt.if)
+        next if @current_prompt.unless.present? && instance_exec(&@current_prompt.unless)
+
+        input = case current_prompt.text
+                when Proc
+                  instance_exec(&current_prompt.text)
+                when String
+                  current_prompt.text
+                when Symbol
+                  send(current_prompt.text)
+                else
+                  last_response.presence || prompt
+                end
+
+        if current_prompt.call.present?
+          current_prompt.call.new(self).call(input).tap do |response|
+            if response.present?
+              transcript << { assistant: response }
+              @last_response = send(current_prompt.name, response)
+            end
           end
+        else
+          __system_prompt = instance_exec(&current_prompt.system) if current_prompt.system.present? # rubocop:disable Lint/UnderscorePrefixedVariableName
+          __system_prompt ||= system_prompt if respond_to?(:system_prompt)
+          __system_prompt ||= self.class.system_prompt.presence
+          transcript << { system: __system_prompt } if __system_prompt
+          transcript << { user: instance_exec(&current_prompt.text) } # text is required
+
+          params = current_prompt.params.merge(params)
+
+          # set the stream if necessary
+          self.stream = instance_exec(&current_prompt.stream) if current_prompt.stream.present?
+
+          execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
         end
-      else
-        __system_prompt = instance_exec(&current_prompt.system) if current_prompt.system.present? # rubocop:disable Lint/UnderscorePrefixedVariableName
-        __system_prompt ||= system_prompt if respond_to?(:system_prompt)
-        __system_prompt ||= self.class.system_prompt.presence
-        transcript << { system: __system_prompt } if __system_prompt
-        transcript << { user: instance_exec(&current_prompt.text) } # text is required
 
-        params = current_prompt.params.merge(params)
+        next unless current_prompt.until.present? && !instance_exec(&current_prompt.until)
 
-        # set the stream if necessary
-        self.stream = instance_exec(&current_prompt.stream) if current_prompt.stream.present?
+        if loop_count >= MAX_LOOP_COUNT
+          warn "Max loop count reached in chat_completion. Forcing return."
 
-        execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+          return last_response
+        else
+          current_prompts.unshift(@current_prompt) # put it back at the front
+          loop_count += 1
+        end
       end
 
-      next unless current_prompt.until.present? && !instance_exec(&current_prompt.until)
-
-      if loop_count >= MAX_LOOP_COUNT
-        Honeybadger.notify(
-          "Max loop count reached in chat_completion. Forcing return.",
-          context: {
-            current_prompts:,
-            prompt:,
-            usage_subject: usage_subject.inspect,
-            last_response: Current.or_response
-          }
-        )
-
-        return last_response
-      else
-        current_prompts.unshift(@current_prompt) # put it back at the front
-        loop_count += 1
-      end
+      last_response
     end
 
-    last_response
-  end
+    def execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+      chat_completion_from_superclass(params:, raw:, openai:).then do |response|
+        transcript << { assistant: response }
+        @last_response = send(current_prompt.name, response)
+        self.stream = nil # clear it again so it's not used for the next prompt
+      end
+    rescue StandardError => e
+      # Bubbles the error up the stack if no loops remain
+      raise e if loop_count >= MAX_LOOP_COUNT
 
-  def execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
-    chat_completion_from_superclass(params:, raw:, openai:).then do |response|
-      transcript << { assistant: response }
-      @last_response = send(current_prompt.name, response)
-      self.stream = nil # clear it again so it's not used for the next prompt
+      sleep 1 # Wait before continuing
     end
-  rescue Conversation::StreamError => e
-    # Bubbles the error up the stack if no loops remain
-    raise Faraday::ServerError.new(nil, { status: e.status, body: e.response }) if loop_count >= MAX_LOOP_COUNT
-
-    sleep 1.second # Wait before continuing
-  end
 
-  # Returns the model parameter of the current prompt or the default model.
-  #
-  # @return [Object] The model parameter of the current prompt or the default model.
-  def model
-    @current_prompt.params[:model] || super
-  end
+    # Returns the model parameter of the current prompt or the default model.
+    #
+    # @return [Object] The model parameter of the current prompt or the default model.
+    def model
+      @current_prompt.params[:model] || super
+    end
 
-  # Returns the temperature parameter of the current prompt or the default temperature.
-  #
-  # @return [Float] The temperature parameter of the current prompt or the default temperature.
-  def temperature
-    @current_prompt.params[:temperature] || super
-  end
+    # Returns the temperature parameter of the current prompt or the default temperature.
+    #
+    # @return [Float] The temperature parameter of the current prompt or the default temperature.
+    def temperature
+      @current_prompt.params[:temperature] || super
+    end
 
-  # Returns the max_tokens parameter of the current prompt or the default max_tokens.
-  #
-  # @return [Integer] The max_tokens parameter of the current prompt or the default max_tokens.
-  def max_tokens
-    @current_prompt.params[:max_tokens] || super
-  end
+    # Returns the max_tokens parameter of the current prompt or the default max_tokens.
+    #
+    # @return [Integer] The max_tokens parameter of the current prompt or the default max_tokens.
+    def max_tokens
+      @current_prompt.params[:max_tokens] || super
+    end
 
-  protected
+    protected
 
-  # workaround for super.chat_completion, which is not available in ruby
-  def chat_completion_from_superclass(*, **kargs)
-    method(:chat_completion).super_method.call(*, **kargs)
+    # workaround for super.chat_completion, which is not available in ruby
+    def chat_completion_from_superclass(*, **kargs)
+      method(:chat_completion).super_method.call(*, **kargs)
+    end
   end
 end
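For context, here is a minimal sketch of declaring chained prompts against the newly namespaced module; the class and prompt text are hypothetical, but `Raix::PromptDeclarations`, `prompt`, `last_response`, and `chat_completion` follow the code above:

```ruby
class TwoStepSummarizer
  include Raix::ChatCompletion
  include Raix::PromptDeclarations # now resolved under the Raix namespace

  # Prompts run in declaration order; each response becomes last_response.
  prompt text: -> { "List the key points of: #{@document}" }
  prompt text: -> { "Rewrite these points as one paragraph: #{last_response}" },
         success: ->(response) { @summary = response }

  def initialize(document)
    @document = document
  end
end

# TwoStepSummarizer.new("...long text...").chat_completion
```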
data/lib/raix/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Raix
-  VERSION = "1.0.0"
+  VERSION = "1.0.2"
 end
data/raix.gemspec CHANGED
@@ -32,5 +32,5 @@ Gem::Specification.new do |spec|
   spec.add_dependency "faraday-retry", "~> 2.0"
   spec.add_dependency "open_router", "~> 0.2"
   spec.add_dependency "ostruct"
-  spec.add_dependency "ruby-openai", "~> 7"
+  spec.add_dependency "ruby-openai", "~> 8.1"
 end
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: raix
 version: !ruby/object:Gem::Version
-  version: 1.0.0
+  version: 1.0.2
 platform: ruby
 authors:
 - Obie Fernandez
 bindir: exe
 cert_chain: []
-date: 1980-01-02 00:00:00.000000000 Z
+date: 2025-07-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -71,14 +71,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '7'
+        version: '8.1'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '7'
+        version: '8.1'
 email:
 - obiefernandez@gmail.com
 executables: []
@@ -134,7 +134,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.6.8
+rubygems_version: 3.6.2
 specification_version: 4
 summary: Ruby AI eXtensions
 test_files: []