raix 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 56a4f5695ac0b5685715aa48e1bbb926efd05c5bd15b2f2198df3acda3ef30e4
- data.tar.gz: e46d6284e3d4201fa6a8bc5b59403b41434d9e92ce131944d5d20d896925402f
+ metadata.gz: 5413df57139a529bf573642931910e4109cd260beae531c3fdce54fdc3961485
+ data.tar.gz: 1feb3f57924489378b7029981bc9a5ad42b131848b6a378c7a1860b1456c4fc5
  SHA512:
- metadata.gz: 07224f3ff3b8ea20b01d77bd149f7aa3afc597c145db34cf65ecf188a4e2fb2f366729cd8b8c671783346ce743106a234bbf8e551b2243f5a5f8fb9f652ed28b
- data.tar.gz: ed03422c1168de36a21729ed75281ff6dcfabead31232cefc22201fb670a18bae57877fc4e440aa8bffaf0705c1288511369f0e040757e92198e702922c407be
+ metadata.gz: 7988de746caf67aafc4e21c1f9da3d1129c3b595723a28d41383dd4208bd7c84414d973c7530275920ff7c0732951e1cddc650aeb4eecfc99f28e3c32c86126a
+ data.tar.gz: b7cff4d03a83f584b4d0b414850b9eb25db02277e1b17e5a22c2415398e01135edd190f7ee249658616921d4ff93a25bb57dbab736b2936cf886763e4d975030
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+ ## [1.0.1] - 2025-06-04
+ ### Fixed
+ - Fixed PromptDeclarations module namespace - now properly namespaced under Raix
+ - Removed Rails.logger dependencies from PromptDeclarations for non-Rails environments
+ - Fixed documentation example showing incorrect `openai: true` usage (should be model string)
+ - Added comprehensive tests for PromptDeclarations module
+
+ ### Changed
+ - Improved error handling in PromptDeclarations to catch StandardError instead of generic rescue
+
  ## [1.0.0] - 2025-06-04
  ### Breaking Changes
  - **Deprecated `loop` parameter in ChatCompletion** - The system now automatically continues conversations after tool calls until the AI provides a text response. The `loop` parameter shows a deprecation warning but still works for backwards compatibility.
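For downstream users, the two visible changes in this release are the new `Raix::` namespace for `PromptDeclarations` and the corrected `openai:` option, which takes a model string rather than `true`. A minimal sketch of 1.0.1 usage, borrowing the `CustomPromptExample` class name from the README diff below (the `include` lines are assumptions, not shown in this diff):

```ruby
require "raix"

class CustomPromptExample
  include Raix::ChatCompletion
  # Top-level `PromptDeclarations` in 1.0.0; namespaced under Raix as of 1.0.1.
  include Raix::PromptDeclarations

  # Prompt using OpenAI directly: pass a model string, not `openai: true`.
  prompt text: "Use OpenAI",
         openai: "gpt-4o"
end
```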
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- raix (0.9.2)
+ raix (1.0.1)
  activesupport (>= 6.0)
  faraday-retry (~> 2.0)
  open_router (~> 0.2)
data/README.md CHANGED
@@ -460,7 +460,7 @@ class CustomPromptExample
 
  # Prompt using OpenAI directly
  prompt text: "Use OpenAI",
- openai: true
+ openai: "gpt-4o"
  end
  ```
 
data/lib/raix/prompt_declarations.rb CHANGED
@@ -5,172 +5,162 @@ require "ostruct"
  # This module provides a way to chain prompts and handle
  # user responses in a serialized manner, with support for
  # functions if the FunctionDispatch module is also included.
- module PromptDeclarations
- extend ActiveSupport::Concern
+ module Raix
+ # The PromptDeclarations module provides a way to chain prompts and handle
+ # user responses in a serialized manner, with support for
+ # functions if the FunctionDispatch module is also included.
+ module PromptDeclarations
+ extend ActiveSupport::Concern
+
+ module ClassMethods # rubocop:disable Style/Documentation
+ # Adds a prompt to the list of prompts. At minimum, provide a `text` or `call` parameter.
+ #
+ # @param system [Proc] A lambda that generates the system message.
+ # @param call [ChatCompletion] A callable class that includes ChatCompletion. Will be passed a context object when initialized.
+ # @param text Accepts 1) a lambda that returns the prompt text, 2) a string, or 3) a symbol that references a method.
+ # @param stream [Proc] A lambda stream handler
+ # @param success [Proc] The block of code to execute when the prompt is answered.
+ # @param params [Hash] Additional parameters for the completion API call
+ # @param if [Proc] A lambda that determines if the prompt should be executed.
+ def prompt(system: nil, call: nil, text: nil, stream: nil, success: nil, params: {}, if: nil, unless: nil, until: nil)
+ name = Digest::SHA256.hexdigest(text.inspect)[0..7]
+ prompts << OpenStruct.new({ name:, system:, call:, text:, stream:, success:, if:, unless:, until:, params: })
+
+ define_method(name) do |response|
+ return response if success.nil?
+ return send(success, response) if success.is_a?(Symbol)
+
+ instance_exec(response, &success)
+ end
+ end

- module ClassMethods # rubocop:disable Style/Documentation
- # Adds a prompt to the list of prompts. At minimum, provide a `text` or `call` parameter.
- #
- # @param system [Proc] A lambda that generates the system message.
- # @param call [ChatCompletion] A callable class that includes ChatCompletion. Will be passed a context object when initialized.
- # @param text Accepts 1) a lambda that returns the prompt text, 2) a string, or 3) a symbol that references a method.
- # @param stream [Proc] A lambda stream handler
- # @param success [Proc] The block of code to execute when the prompt is answered.
- # @param params [Hash] Additional parameters for the completion API call
- # @param if [Proc] A lambda that determines if the prompt should be executed.
- def prompt(system: nil, call: nil, text: nil, stream: nil, success: nil, params: {}, if: nil, unless: nil, until: nil)
- name = Digest::SHA256.hexdigest(text.inspect)[0..7]
- prompts << OpenStruct.new({ name:, system:, call:, text:, stream:, success:, if:, unless:, until:, params: })
-
- define_method(name) do |response|
- puts "_" * 80
- puts "PromptDeclarations#response:"
- puts "#{text&.source_location} (#{name})"
- puts response
- puts "_" * 80
-
- return response if success.nil?
- return send(success, response) if success.is_a?(Symbol)
-
- instance_exec(response, &success)
+ def prompts
+ @prompts ||= []
  end
  end

- def prompts
- @prompts ||= []
- end
- end
+ attr_reader :current_prompt, :last_response
+
+ MAX_LOOP_COUNT = 5

- attr_reader :current_prompt, :last_response
-
- MAX_LOOP_COUNT = 5
-
- # Executes the chat completion process based on the class-level declared prompts.
- # The response to each prompt is added to the transcript automatically and returned.
- #
- # Raises an error if there are not enough prompts defined.
- #
- # Uses system prompt in following order of priority:
- # - system lambda specified in the prompt declaration
- # - system_prompt instance method if defined
- # - system_prompt class-level declaration if defined
- #
- # Prompts require a text lambda to be defined at minimum.
- # TODO: shortcut syntax passes just a string prompt if no other options are needed.
- #
- # @raise [RuntimeError] If no prompts are defined.
- #
- # @param prompt [String] The prompt to use for the chat completion.
- # @param params [Hash] Parameters for the chat completion.
- # @param raw [Boolean] Whether to return the raw response.
- #
- # TODO: SHOULD NOT HAVE A DIFFERENT INTERFACE THAN PARENT
- def chat_completion(prompt = nil, params: {}, raw: false, openai: false)
- raise "No prompts defined" unless self.class.prompts.present?
-
- loop_count = 0
-
- current_prompts = self.class.prompts.clone
-
- while (@current_prompt = current_prompts.shift)
- next if @current_prompt.if.present? && !instance_exec(&@current_prompt.if)
- next if @current_prompt.unless.present? && instance_exec(&@current_prompt.unless)
-
- input = case current_prompt.text
- when Proc
- instance_exec(&current_prompt.text)
- when String
- current_prompt.text
- when Symbol
- send(current_prompt.text)
- else
- last_response.presence || prompt
- end
-
- if current_prompt.call.present?
- Rails.logger.debug "Calling #{current_prompt.call} with input: #{input}"
- current_prompt.call.new(self).call(input).tap do |response|
- if response.present?
- transcript << { assistant: response }
- @last_response = send(current_prompt.name, response)
+ # Executes the chat completion process based on the class-level declared prompts.
+ # The response to each prompt is added to the transcript automatically and returned.
+ #
+ # Raises an error if there are not enough prompts defined.
+ #
+ # Uses system prompt in following order of priority:
+ # - system lambda specified in the prompt declaration
+ # - system_prompt instance method if defined
+ # - system_prompt class-level declaration if defined
+ #
+ # Prompts require a text lambda to be defined at minimum.
+ # TODO: shortcut syntax passes just a string prompt if no other options are needed.
+ #
+ # @raise [RuntimeError] If no prompts are defined.
+ #
+ # @param prompt [String] The prompt to use for the chat completion.
+ # @param params [Hash] Parameters for the chat completion.
+ # @param raw [Boolean] Whether to return the raw response.
+ #
+ # TODO: SHOULD NOT HAVE A DIFFERENT INTERFACE THAN PARENT
+ def chat_completion(prompt = nil, params: {}, raw: false, openai: false)
+ raise "No prompts defined" unless self.class.prompts.present?
+
+ loop_count = 0
+
+ current_prompts = self.class.prompts.clone
+
+ while (@current_prompt = current_prompts.shift)
+ next if @current_prompt.if.present? && !instance_exec(&@current_prompt.if)
+ next if @current_prompt.unless.present? && instance_exec(&@current_prompt.unless)
+
+ input = case current_prompt.text
+ when Proc
+ instance_exec(&current_prompt.text)
+ when String
+ current_prompt.text
+ when Symbol
+ send(current_prompt.text)
+ else
+ last_response.presence || prompt
+ end
+
+ if current_prompt.call.present?
+ current_prompt.call.new(self).call(input).tap do |response|
+ if response.present?
+ transcript << { assistant: response }
+ @last_response = send(current_prompt.name, response)
+ end
  end
+ else
+ __system_prompt = instance_exec(&current_prompt.system) if current_prompt.system.present? # rubocop:disable Lint/UnderscorePrefixedVariableName
+ __system_prompt ||= system_prompt if respond_to?(:system_prompt)
+ __system_prompt ||= self.class.system_prompt.presence
+ transcript << { system: __system_prompt } if __system_prompt
+ transcript << { user: instance_exec(&current_prompt.text) } # text is required
+
+ params = current_prompt.params.merge(params)
+
+ # set the stream if necessary
+ self.stream = instance_exec(&current_prompt.stream) if current_prompt.stream.present?
+
+ execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
  end
- else
- __system_prompt = instance_exec(&current_prompt.system) if current_prompt.system.present? # rubocop:disable Lint/UnderscorePrefixedVariableName
- __system_prompt ||= system_prompt if respond_to?(:system_prompt)
- __system_prompt ||= self.class.system_prompt.presence
- transcript << { system: __system_prompt } if __system_prompt
- transcript << { user: instance_exec(&current_prompt.text) } # text is required

- params = current_prompt.params.merge(params)
+ next unless current_prompt.until.present? && !instance_exec(&current_prompt.until)

- # set the stream if necessary
- self.stream = instance_exec(&current_prompt.stream) if current_prompt.stream.present?
+ if loop_count >= MAX_LOOP_COUNT
+ warn "Max loop count reached in chat_completion. Forcing return."

- execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+ return last_response
+ else
+ current_prompts.unshift(@current_prompt) # put it back at the front
+ loop_count += 1
+ end
  end

- next unless current_prompt.until.present? && !instance_exec(&current_prompt.until)
-
- if loop_count >= MAX_LOOP_COUNT
- Honeybadger.notify(
- "Max loop count reached in chat_completion. Forcing return.",
- context: {
- current_prompts:,
- prompt:,
- usage_subject: usage_subject.inspect,
- last_response: Current.or_response
- }
- )
-
- return last_response
- else
- current_prompts.unshift(@current_prompt) # put it back at the front
- loop_count += 1
- end
+ last_response
  end

- last_response
- end
+ def execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
+ chat_completion_from_superclass(params:, raw:, openai:).then do |response|
+ transcript << { assistant: response }
+ @last_response = send(current_prompt.name, response)
+ self.stream = nil # clear it again so it's not used for the next prompt
+ end
+ rescue StandardError => e
+ # Bubbles the error up the stack if no loops remain
+ raise e if loop_count >= MAX_LOOP_COUNT

- def execute_ai_request(params:, raw:, openai:, transcript:, loop_count:)
- chat_completion_from_superclass(params:, raw:, openai:).then do |response|
- transcript << { assistant: response }
- @last_response = send(current_prompt.name, response)
- self.stream = nil # clear it again so it's not used for the next prompt
+ sleep 1 # Wait before continuing
  end
- rescue Conversation::StreamError => e
- # Bubbles the error up the stack if no loops remain
- raise Faraday::ServerError.new(nil, { status: e.status, body: e.response }) if loop_count >= MAX_LOOP_COUNT
-
- sleep 1.second # Wait before continuing
- end

- # Returns the model parameter of the current prompt or the default model.
- #
- # @return [Object] The model parameter of the current prompt or the default model.
- def model
- @current_prompt.params[:model] || super
- end
+ # Returns the model parameter of the current prompt or the default model.
+ #
+ # @return [Object] The model parameter of the current prompt or the default model.
+ def model
+ @current_prompt.params[:model] || super
+ end

- # Returns the temperature parameter of the current prompt or the default temperature.
- #
- # @return [Float] The temperature parameter of the current prompt or the default temperature.
- def temperature
- @current_prompt.params[:temperature] || super
- end
+ # Returns the temperature parameter of the current prompt or the default temperature.
+ #
+ # @return [Float] The temperature parameter of the current prompt or the default temperature.
+ def temperature
+ @current_prompt.params[:temperature] || super
+ end

- # Returns the max_tokens parameter of the current prompt or the default max_tokens.
- #
- # @return [Integer] The max_tokens parameter of the current prompt or the default max_tokens.
- def max_tokens
- @current_prompt.params[:max_tokens] || super
- end
+ # Returns the max_tokens parameter of the current prompt or the default max_tokens.
+ #
+ # @return [Integer] The max_tokens parameter of the current prompt or the default max_tokens.
+ def max_tokens
+ @current_prompt.params[:max_tokens] || super
+ end

- protected
+ protected

- # workaround for super.chat_completion, which is not available in ruby
- def chat_completion_from_superclass(*, **kargs)
- method(:chat_completion).super_method.call(*, **kargs)
+ # workaround for super.chat_completion, which is not available in ruby
+ def chat_completion_from_superclass(*, **kargs)
+ method(:chat_completion).super_method.call(*, **kargs)
+ end
  end
  end
data/lib/raix/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Raix
- VERSION = "1.0.0"
+ VERSION = "1.0.1"
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: raix
  version: !ruby/object:Gem::Version
- version: 1.0.0
+ version: 1.0.1
  platform: ruby
  authors:
  - Obie Fernandez
@@ -111,7 +111,6 @@ files:
  - lib/raix/prompt_declarations.rb
  - lib/raix/response_format.rb
  - lib/raix/version.rb
- - raix.gemspec
  - sig/raix.rbs
  homepage: https://github.com/OlympiaAI/raix
  licenses:
data/raix.gemspec DELETED
@@ -1,36 +0,0 @@
- # frozen_string_literal: true
-
- require_relative "lib/raix/version"
-
- Gem::Specification.new do |spec|
- spec.name = "raix"
- spec.version = Raix::VERSION
- spec.authors = ["Obie Fernandez"]
- spec.email = ["obiefernandez@gmail.com"]
-
- spec.summary = "Ruby AI eXtensions"
- spec.homepage = "https://github.com/OlympiaAI/raix"
- spec.license = "MIT"
- spec.required_ruby_version = ">= 3.2.2"
-
- spec.metadata["homepage_uri"] = spec.homepage
- spec.metadata["source_code_uri"] = "https://github.com/OlympiaAI/raix"
- spec.metadata["changelog_uri"] = "https://github.com/OlympiaAI/raix/blob/main/CHANGELOG.md"
-
- # Specify which files should be added to the gem when it is released.
- # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
- spec.files = Dir.chdir(__dir__) do
- `git ls-files -z`.split("\x0").reject do |f|
- (File.expand_path(f) == __FILE__) || f.start_with?(*%w[bin/ test/ spec/ features/ .git .circleci appveyor])
- end
- end
- spec.bindir = "exe"
- spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
- spec.require_paths = ["lib"]
-
- spec.add_dependency "activesupport", ">= 6.0"
- spec.add_dependency "faraday-retry", "~> 2.0"
- spec.add_dependency "open_router", "~> 0.2"
- spec.add_dependency "ostruct"
- spec.add_dependency "ruby-openai", "~> 7"
- end