raix 1.0.3 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +46 -0
- data/Gemfile.lock +33 -24
- data/README.md +184 -28
- data/lib/raix/chat_completion.rb +136 -51
- data/lib/raix/completion_context.rb +36 -0
- data/lib/raix/configuration.rb +18 -3
- data/lib/raix/function_tool_adapter.rb +51 -0
- data/lib/{mcp → raix/mcp}/sse_client.rb +1 -2
- data/lib/{mcp → raix/mcp}/stdio_client.rb +0 -1
- data/lib/raix/mcp.rb +0 -3
- data/lib/raix/transcript_adapter.rb +121 -0
- data/lib/raix/version.rb +1 -1
- data/lib/raix.rb +7 -9
- data/raix.gemspec +2 -2
- metadata +21 -18
- /data/lib/{mcp → raix/mcp}/tool.rb +0 -0
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 101c916df93e569b21330ea028dafac8d095021a7c2c27bb91a911fffc55bc29
|
|
4
|
+
data.tar.gz: 1b4498f818350fab8823a66535334f24f2b89ede02d79cadcc23e052e3cd0dcb
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: e5953aa12e7918e5b0266e23017c5c11874d919c4b83a10403c10a75d6998c2d8f2d3f8a11ab3d6abcf996f0ee30d827a564bd9ed5fd5862f673c689c6e8a06a
|
|
7
|
+
data.tar.gz: 41aabaa233555d191a02628055465e5759aea86de69c321ff43145b6ddcc1714d5c2828ebf659d7f4773e6de8d04ec647435c5c7e1a001b196bea7c6dee8c0f7
|
data/CHANGELOG.md
CHANGED
|
@@ -1,3 +1,49 @@
|
|
|
1
|
+
## [2.0.1] - 2026-03-20
|
|
2
|
+
|
|
3
|
+
### Changed
|
|
4
|
+
- Replaced `require_relative` with Zeitwerk autoloading (thanks @seuros, PR #47)
|
|
5
|
+
|
|
6
|
+
## [2.0.0] - 2025-12-17
|
|
7
|
+
|
|
8
|
+
### Breaking Changes
|
|
9
|
+
- **Migrated from OpenRouter/OpenAI gems to RubyLLM** - Raix now uses [RubyLLM](https://github.com/crmne/ruby_llm) as its unified backend for all LLM providers. This provides better multi-provider support and a more consistent API.
|
|
10
|
+
- **Configuration changes** - API keys are now configured through RubyLLM's configuration system instead of separate client instances.
|
|
11
|
+
- **Removed direct client dependencies** - `openrouter` and `ruby-openai` gems are no longer direct dependencies; RubyLLM handles provider connections.
|
|
12
|
+
|
|
13
|
+
### Added
|
|
14
|
+
- **`before_completion` hook** - New hook system for intercepting and modifying chat completion requests before they're sent to the AI provider.
|
|
15
|
+
- Configure at global, class, or instance levels
|
|
16
|
+
- Hooks receive a `CompletionContext` with access to messages, params, and the chat completion instance
|
|
17
|
+
- Messages are mutable for content filtering, PII redaction, adding system prompts, etc.
|
|
18
|
+
- Params can be modified for dynamic model selection, A/B testing, and more
|
|
19
|
+
- Supports any callable object (Proc, Lambda, or object responding to `#call`)
|
|
20
|
+
- Use cases: database-backed configuration, logging, PII redaction, content filtering, cost tracking
|
|
21
|
+
- **`FunctionToolAdapter`** - New adapter for converting Raix function declarations to RubyLLM tool format
|
|
22
|
+
- **`TranscriptAdapter`** - New adapter for bridging Raix's abbreviated message format with standard OpenAI format
|
|
23
|
+
|
|
24
|
+
### Changed
|
|
25
|
+
- Chat completions now use RubyLLM's unified API for all providers (OpenAI, Anthropic, Google, etc.)
|
|
26
|
+
- Improved provider detection based on model name patterns
|
|
27
|
+
- Streamlined internal architecture with dedicated adapters
|
|
28
|
+
|
|
29
|
+
### Migration Guide
|
|
30
|
+
Update your configuration from:
|
|
31
|
+
```ruby
|
|
32
|
+
Raix.configure do |config|
|
|
33
|
+
config.openrouter_client = OpenRouter::Client.new(access_token: "...")
|
|
34
|
+
config.openai_client = OpenAI::Client.new(access_token: "...")
|
|
35
|
+
end
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
To:
|
|
39
|
+
```ruby
|
|
40
|
+
RubyLLM.configure do |config|
|
|
41
|
+
config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
|
|
42
|
+
config.openai_api_key = ENV["OPENAI_API_KEY"]
|
|
43
|
+
# Also supports: anthropic_api_key, gemini_api_key
|
|
44
|
+
end
|
|
45
|
+
```
|
|
46
|
+
|
|
1
47
|
## [1.0.2] - 2025-07-16
|
|
2
48
|
### Added
|
|
3
49
|
- Added method to check for API client availability in Configuration
|
data/Gemfile.lock
CHANGED
|
@@ -1,26 +1,28 @@
|
|
|
1
1
|
PATH
|
|
2
2
|
remote: .
|
|
3
3
|
specs:
|
|
4
|
-
raix (
|
|
4
|
+
raix (2.0.1)
|
|
5
5
|
activesupport (>= 6.0)
|
|
6
6
|
faraday-retry (~> 2.0)
|
|
7
|
-
open_router (~> 0.2)
|
|
8
7
|
ostruct
|
|
9
|
-
|
|
8
|
+
ruby_llm (~> 1.9)
|
|
9
|
+
zeitwerk (~> 2.7)
|
|
10
10
|
|
|
11
11
|
GEM
|
|
12
12
|
remote: https://rubygems.org/
|
|
13
13
|
specs:
|
|
14
|
-
activesupport (7.
|
|
14
|
+
activesupport (7.2.3)
|
|
15
15
|
base64
|
|
16
|
+
benchmark (>= 0.3)
|
|
16
17
|
bigdecimal
|
|
17
|
-
concurrent-ruby (~> 1.0, >= 1.
|
|
18
|
+
concurrent-ruby (~> 1.0, >= 1.3.1)
|
|
18
19
|
connection_pool (>= 2.2.5)
|
|
19
20
|
drb
|
|
20
21
|
i18n (>= 1.6, < 2)
|
|
22
|
+
logger (>= 1.4.2)
|
|
21
23
|
minitest (>= 5.1)
|
|
22
|
-
|
|
23
|
-
tzinfo (~> 2.0)
|
|
24
|
+
securerandom (>= 0.3)
|
|
25
|
+
tzinfo (~> 2.0, >= 2.0.5)
|
|
24
26
|
addressable (2.8.6)
|
|
25
27
|
public_suffix (>= 2.0.2, < 6.0)
|
|
26
28
|
ast (2.4.2)
|
|
@@ -41,14 +43,14 @@ GEM
|
|
|
41
43
|
event_stream_parser (1.0.0)
|
|
42
44
|
faraday (2.9.2)
|
|
43
45
|
faraday-net_http (>= 2.0, < 3.2)
|
|
44
|
-
faraday-multipart (1.0
|
|
45
|
-
multipart-post (~> 2)
|
|
46
|
+
faraday-multipart (1.2.0)
|
|
47
|
+
multipart-post (~> 2.0)
|
|
46
48
|
faraday-net_http (3.1.0)
|
|
47
49
|
net-http
|
|
48
|
-
faraday-retry (2.
|
|
50
|
+
faraday-retry (2.4.0)
|
|
49
51
|
faraday (~> 2.0)
|
|
52
|
+
ffi (1.17.2)
|
|
50
53
|
ffi (1.17.2-arm64-darwin)
|
|
51
|
-
ffi (1.17.2-x86_64-linux-gnu)
|
|
52
54
|
formatador (1.1.0)
|
|
53
55
|
guard (2.18.1)
|
|
54
56
|
formatador (>= 0.2.4)
|
|
@@ -77,27 +79,25 @@ GEM
|
|
|
77
79
|
listen (3.9.0)
|
|
78
80
|
rb-fsevent (~> 0.10, >= 0.10.3)
|
|
79
81
|
rb-inotify (~> 0.9, >= 0.9.10)
|
|
82
|
+
logger (1.7.0)
|
|
80
83
|
lumberjack (1.2.10)
|
|
84
|
+
marcel (1.1.0)
|
|
81
85
|
method_source (1.1.0)
|
|
82
|
-
|
|
86
|
+
mini_portile2 (2.8.9)
|
|
87
|
+
minitest (5.27.0)
|
|
83
88
|
multipart-post (2.4.1)
|
|
84
|
-
mutex_m (0.2.0)
|
|
85
89
|
nenv (0.3.0)
|
|
86
90
|
net-http (0.4.1)
|
|
87
91
|
uri
|
|
88
92
|
netrc (0.11.0)
|
|
89
|
-
nokogiri (1.18.8
|
|
93
|
+
nokogiri (1.18.8)
|
|
94
|
+
mini_portile2 (~> 2.8.2)
|
|
90
95
|
racc (~> 1.4)
|
|
91
|
-
nokogiri (1.18.8-
|
|
96
|
+
nokogiri (1.18.8-arm64-darwin)
|
|
92
97
|
racc (~> 1.4)
|
|
93
98
|
notiffany (0.1.3)
|
|
94
99
|
nenv (~> 0.1)
|
|
95
100
|
shellany (~> 0.0)
|
|
96
|
-
open_router (0.3.3)
|
|
97
|
-
activesupport (>= 6.0)
|
|
98
|
-
dotenv (>= 2)
|
|
99
|
-
faraday (>= 1)
|
|
100
|
-
faraday-multipart (>= 1)
|
|
101
101
|
ostruct (0.6.1)
|
|
102
102
|
parallel (1.24.0)
|
|
103
103
|
parser (3.3.0.5)
|
|
@@ -148,11 +148,19 @@ GEM
|
|
|
148
148
|
unicode-display_width (>= 2.4.0, < 3.0)
|
|
149
149
|
rubocop-ast (1.31.2)
|
|
150
150
|
parser (>= 3.3.0.4)
|
|
151
|
-
ruby-openai (8.1.0)
|
|
152
|
-
event_stream_parser (>= 0.3.0, < 2.0.0)
|
|
153
|
-
faraday (>= 1)
|
|
154
|
-
faraday-multipart (>= 1)
|
|
155
151
|
ruby-progressbar (1.13.0)
|
|
152
|
+
ruby_llm (1.14.0)
|
|
153
|
+
base64
|
|
154
|
+
event_stream_parser (~> 1)
|
|
155
|
+
faraday (>= 1.10.0)
|
|
156
|
+
faraday-multipart (>= 1)
|
|
157
|
+
faraday-net_http (>= 1)
|
|
158
|
+
faraday-retry (>= 1)
|
|
159
|
+
marcel (~> 1)
|
|
160
|
+
ruby_llm-schema (~> 0)
|
|
161
|
+
zeitwerk (~> 2)
|
|
162
|
+
ruby_llm-schema (0.2.5)
|
|
163
|
+
securerandom (0.4.1)
|
|
156
164
|
shellany (0.0.1)
|
|
157
165
|
solargraph (0.50.0)
|
|
158
166
|
backport (~> 1.2)
|
|
@@ -210,6 +218,7 @@ GEM
|
|
|
210
218
|
yard-sorbet (0.8.1)
|
|
211
219
|
sorbet-runtime (>= 0.5)
|
|
212
220
|
yard (>= 0.9)
|
|
221
|
+
zeitwerk (2.7.3)
|
|
213
222
|
|
|
214
223
|
PLATFORMS
|
|
215
224
|
arm64-darwin-21
|
data/README.md
CHANGED
|
@@ -6,7 +6,7 @@ Raix (pronounced "ray" because the x is silent) is a library that gives you ever
|
|
|
6
6
|
|
|
7
7
|
Understanding how to use discrete AI components in otherwise normal code is key to productively leveraging Raix, and the subject of a book written by Raix's author Obie Fernandez, titled [Patterns of Application Development Using AI](https://leanpub.com/patterns-of-application-development-using-ai). You can easily support the ongoing development of this project by buying the book at Leanpub.
|
|
8
8
|
|
|
9
|
-
|
|
9
|
+
Raix 2.0 is powered by [RubyLLM](https://github.com/crmne/ruby_llm), giving you unified access to OpenAI, Anthropic, Google Gemini, and dozens of other providers through OpenRouter. Note that you can use Raix to add AI capabilities to non-Rails applications as long as you include ActiveSupport as a dependency.
|
|
10
10
|
|
|
11
11
|
### Chat Completions
|
|
12
12
|
|
|
@@ -105,6 +105,148 @@ When using JSON mode with non-OpenAI providers, Raix automatically sets the `req
|
|
|
105
105
|
=> { "key": "value" }
|
|
106
106
|
```
|
|
107
107
|
|
|
108
|
+
### before_completion Hook
|
|
109
|
+
|
|
110
|
+
The `before_completion` hook lets you intercept and modify chat completion requests before they're sent to the AI provider. This is useful for dynamic parameter resolution, logging, content filtering, PII redaction, and more.
|
|
111
|
+
|
|
112
|
+
#### Configuration Levels
|
|
113
|
+
|
|
114
|
+
Hooks can be configured at three levels, with later levels overriding earlier ones:
|
|
115
|
+
|
|
116
|
+
```ruby
|
|
117
|
+
# Global level - applies to all chat completions
|
|
118
|
+
Raix.configure do |config|
|
|
119
|
+
config.before_completion = ->(context) {
|
|
120
|
+
# Return a hash of params to merge, or modify context.messages directly
|
|
121
|
+
{ temperature: 0.7 }
|
|
122
|
+
}
|
|
123
|
+
end
|
|
124
|
+
|
|
125
|
+
# Class level - applies to all instances of a class
|
|
126
|
+
class MyAssistant
|
|
127
|
+
include Raix::ChatCompletion
|
|
128
|
+
|
|
129
|
+
configure do |config|
|
|
130
|
+
config.before_completion = ->(context) { { model: "gpt-4o" } }
|
|
131
|
+
end
|
|
132
|
+
end
|
|
133
|
+
|
|
134
|
+
# Instance level - applies to a single instance
|
|
135
|
+
assistant = MyAssistant.new
|
|
136
|
+
assistant.before_completion = ->(context) { { max_tokens: 500 } }
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
When hooks exist at multiple levels, they're called in order (global → class → instance), with returned params merged together. Later hooks override earlier ones for the same parameter.
|
|
140
|
+
|
|
141
|
+
#### The CompletionContext Object
|
|
142
|
+
|
|
143
|
+
Hooks receive a `CompletionContext` object with access to:
|
|
144
|
+
|
|
145
|
+
```ruby
|
|
146
|
+
context.chat_completion # The ChatCompletion instance
|
|
147
|
+
context.messages # Array of messages (mutable, in OpenAI format)
|
|
148
|
+
context.params # Hash of params (mutable)
|
|
149
|
+
context.transcript # The instance's transcript
|
|
150
|
+
context.current_model # Currently configured model
|
|
151
|
+
context.chat_completion_class # The class including ChatCompletion
|
|
152
|
+
context.configuration # The instance's configuration
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
#### Use Cases
|
|
156
|
+
|
|
157
|
+
**Dynamic model selection from database:**
|
|
158
|
+
|
|
159
|
+
```ruby
|
|
160
|
+
Raix.configure do |config|
|
|
161
|
+
config.before_completion = ->(context) {
|
|
162
|
+
settings = TenantSettings.find_by(tenant: Current.tenant)
|
|
163
|
+
{
|
|
164
|
+
model: settings.preferred_model,
|
|
165
|
+
temperature: settings.temperature,
|
|
166
|
+
max_tokens: settings.max_tokens
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
end
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
**PII redaction:**
|
|
173
|
+
|
|
174
|
+
```ruby
|
|
175
|
+
class SecureAssistant
|
|
176
|
+
include Raix::ChatCompletion
|
|
177
|
+
|
|
178
|
+
before_completion = ->(context) {
|
|
179
|
+
context.messages.each do |msg|
|
|
180
|
+
next unless msg[:content].is_a?(String)
|
|
181
|
+
# Redact SSN patterns
|
|
182
|
+
msg[:content] = msg[:content].gsub(/\d{3}-\d{2}-\d{4}/, "[SSN REDACTED]")
|
|
183
|
+
# Redact email addresses
|
|
184
|
+
msg[:content] = msg[:content].gsub(/[\w.-]+@[\w.-]+\.\w+/, "[EMAIL REDACTED]")
|
|
185
|
+
end
|
|
186
|
+
{} # Return empty hash if not modifying params
|
|
187
|
+
}
|
|
188
|
+
end
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
**Request logging:**
|
|
192
|
+
|
|
193
|
+
```ruby
|
|
194
|
+
Raix.configure do |config|
|
|
195
|
+
config.before_completion = ->(context) {
|
|
196
|
+
Rails.logger.info({
|
|
197
|
+
event: "chat_completion_request",
|
|
198
|
+
model: context.current_model,
|
|
199
|
+
message_count: context.messages.length,
|
|
200
|
+
params: context.params.except(:messages)
|
|
201
|
+
}.to_json)
|
|
202
|
+
{} # Return empty hash, just logging
|
|
203
|
+
}
|
|
204
|
+
end
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
**Adding system prompts:**
|
|
208
|
+
|
|
209
|
+
```ruby
|
|
210
|
+
assistant.before_completion = ->(context) {
|
|
211
|
+
context.messages.unshift({
|
|
212
|
+
role: "system",
|
|
213
|
+
content: "Always be helpful and respectful."
|
|
214
|
+
})
|
|
215
|
+
{}
|
|
216
|
+
}
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
**A/B testing models:**
|
|
220
|
+
|
|
221
|
+
```ruby
|
|
222
|
+
Raix.configure do |config|
|
|
223
|
+
config.before_completion = ->(context) {
|
|
224
|
+
if Flipper.enabled?(:new_model, Current.user)
|
|
225
|
+
{ model: "gpt-4o" }
|
|
226
|
+
else
|
|
227
|
+
{ model: "gpt-4o-mini" }
|
|
228
|
+
end
|
|
229
|
+
}
|
|
230
|
+
end
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
Hooks can also be any object that responds to `#call`:
|
|
234
|
+
|
|
235
|
+
```ruby
|
|
236
|
+
class CostTracker
|
|
237
|
+
def call(context)
|
|
238
|
+
# Track estimated cost based on message length
|
|
239
|
+
estimated_tokens = context.messages.sum { |m| m[:content].to_s.length / 4 }
|
|
240
|
+
StatsD.gauge("ai.estimated_input_tokens", estimated_tokens)
|
|
241
|
+
{}
|
|
242
|
+
end
|
|
243
|
+
end
|
|
244
|
+
|
|
245
|
+
Raix.configure do |config|
|
|
246
|
+
config.before_completion = CostTracker.new
|
|
247
|
+
end
|
|
248
|
+
```
|
|
249
|
+
|
|
108
250
|
### Use of Tools/Functions
|
|
109
251
|
|
|
110
252
|
The second (optional) module that you can add to your Ruby classes after `ChatCompletion` is `FunctionDispatch`. It lets you declare and implement functions to be called at the AI's discretion in a declarative, Rails-like "DSL" fashion.
|
|
@@ -711,49 +853,63 @@ If bundler is not being used to manage dependencies, install the gem by executin
|
|
|
711
853
|
|
|
712
854
|
$ gem install raix
|
|
713
855
|
|
|
714
|
-
|
|
856
|
+
### Configuration
|
|
715
857
|
|
|
716
|
-
|
|
858
|
+
Raix 2.0 uses [RubyLLM](https://github.com/crmne/ruby_llm) as its backend for LLM provider connections. Configure your API keys through RubyLLM:
|
|
717
859
|
|
|
718
860
|
```ruby
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
end
|
|
728
|
-
|
|
729
|
-
Raix.configure do |config|
|
|
730
|
-
config.openrouter_client = OpenRouter::Client.new(access_token: ENV.fetch("OR_ACCESS_TOKEN", nil))
|
|
731
|
-
config.openai_client = OpenAI::Client.new(access_token: ENV.fetch("OAI_ACCESS_TOKEN", nil)) do |f|
|
|
732
|
-
f.request :retry, retry_options
|
|
733
|
-
f.response :logger, Logger.new($stdout), { headers: true, bodies: true, errors: true } do |logger|
|
|
734
|
-
logger.filter(/(Bearer) (\S+)/, '\1[REDACTED]')
|
|
735
|
-
end
|
|
736
|
-
end
|
|
737
|
-
end
|
|
861
|
+
# config/initializers/raix.rb
|
|
862
|
+
RubyLLM.configure do |config|
|
|
863
|
+
config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
|
|
864
|
+
config.openai_api_key = ENV["OPENAI_API_KEY"]
|
|
865
|
+
# Optional: configure other providers
|
|
866
|
+
# config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
|
|
867
|
+
# config.gemini_api_key = ENV["GEMINI_API_KEY"]
|
|
868
|
+
end
|
|
738
869
|
```
|
|
739
870
|
|
|
740
|
-
|
|
871
|
+
Raix will automatically use the appropriate provider based on the model name:
|
|
872
|
+
- Models starting with `gpt-` or `o1` use OpenAI directly
|
|
873
|
+
- All other models route through OpenRouter
|
|
741
874
|
|
|
742
|
-
### Global vs
|
|
875
|
+
### Global vs Class-Level Configuration
|
|
743
876
|
|
|
744
|
-
You can
|
|
745
|
-
same syntax:
|
|
877
|
+
You can configure Raix options globally or at the class level:
|
|
746
878
|
|
|
747
879
|
```ruby
|
|
748
|
-
|
|
880
|
+
# Global configuration
|
|
881
|
+
Raix.configure do |config|
|
|
882
|
+
config.temperature = 0.7
|
|
883
|
+
config.max_tokens = 1000
|
|
884
|
+
config.model = "gpt-4o"
|
|
885
|
+
config.max_tool_calls = 25
|
|
886
|
+
end
|
|
887
|
+
|
|
888
|
+
# Class-level configuration (overrides global)
|
|
889
|
+
class MyAssistant
|
|
749
890
|
include Raix::ChatCompletion
|
|
750
891
|
|
|
751
892
|
configure do |config|
|
|
752
|
-
config.
|
|
893
|
+
config.model = "anthropic/claude-3-opus"
|
|
894
|
+
config.temperature = 0.5
|
|
753
895
|
end
|
|
754
896
|
end
|
|
755
897
|
```
|
|
756
898
|
|
|
899
|
+
### Upgrading from Raix 1.x
|
|
900
|
+
|
|
901
|
+
If upgrading from Raix 1.x, update your configuration from:
|
|
902
|
+
|
|
903
|
+
```ruby
|
|
904
|
+
# Old 1.x configuration
|
|
905
|
+
Raix.configure do |config|
|
|
906
|
+
config.openrouter_client = OpenRouter::Client.new(access_token: "...")
|
|
907
|
+
config.openai_client = OpenAI::Client.new(access_token: "...")
|
|
908
|
+
end
|
|
909
|
+
```
|
|
910
|
+
|
|
911
|
+
To the new RubyLLM-based configuration shown above.
|
|
912
|
+
|
|
757
913
|
## Development
|
|
758
914
|
|
|
759
915
|
After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
|
data/lib/raix/chat_completion.rb
CHANGED
|
@@ -3,10 +3,8 @@
|
|
|
3
3
|
require "active_support/concern"
|
|
4
4
|
require "active_support/core_ext/object/blank"
|
|
5
5
|
require "active_support/core_ext/string/filters"
|
|
6
|
-
require "
|
|
7
|
-
require "
|
|
8
|
-
|
|
9
|
-
require_relative "message_adapters/base"
|
|
6
|
+
require "active_support/core_ext/hash/indifferent_access"
|
|
7
|
+
require "ruby_llm"
|
|
10
8
|
|
|
11
9
|
module Raix
|
|
12
10
|
class UndeclaredToolError < StandardError; end
|
|
@@ -40,10 +38,10 @@ module Raix
|
|
|
40
38
|
module ChatCompletion
|
|
41
39
|
extend ActiveSupport::Concern
|
|
42
40
|
|
|
43
|
-
attr_accessor :cache_at, :frequency_penalty, :logit_bias, :logprobs, :loop, :min_p, :model,
|
|
44
|
-
:prediction, :repetition_penalty, :response_format, :stream, :temperature,
|
|
45
|
-
:max_tokens, :seed, :stop, :top_a, :top_k, :top_logprobs, :top_p, :tools,
|
|
46
|
-
:max_tool_calls, :stop_tool_calls_and_respond
|
|
41
|
+
attr_accessor :before_completion, :cache_at, :frequency_penalty, :logit_bias, :logprobs, :loop, :min_p, :model,
|
|
42
|
+
:presence_penalty, :prediction, :repetition_penalty, :response_format, :stream, :temperature,
|
|
43
|
+
:max_completion_tokens, :max_tokens, :seed, :stop, :top_a, :top_k, :top_logprobs, :top_p, :tools,
|
|
44
|
+
:available_tools, :tool_choice, :provider, :max_tool_calls, :stop_tool_calls_and_respond
|
|
47
45
|
|
|
48
46
|
class_methods do
|
|
49
47
|
# Returns the current configuration of this class. Falls back to global configuration for unset values.
|
|
@@ -142,12 +140,12 @@ module Raix
|
|
|
142
140
|
messages = messages.map { |msg| adapter.transform(msg) }.dup
|
|
143
141
|
raise "Can't complete an empty transcript" if messages.blank?
|
|
144
142
|
|
|
143
|
+
# Run before_completion hooks (global -> class -> instance)
|
|
144
|
+
# Hooks can modify params and messages for logging, filtering, PII redaction, etc.
|
|
145
|
+
run_before_completion_hooks(params, messages)
|
|
146
|
+
|
|
145
147
|
begin
|
|
146
|
-
response =
|
|
147
|
-
openai_request(params:, model: openai, messages:)
|
|
148
|
-
else
|
|
149
|
-
openrouter_request(params:, model:, messages:)
|
|
150
|
-
end
|
|
148
|
+
response = ruby_llm_request(params:, model: openai || model, messages:, openai_override: openai)
|
|
151
149
|
retry_count = 0
|
|
152
150
|
content = nil
|
|
153
151
|
|
|
@@ -155,7 +153,7 @@ module Raix
|
|
|
155
153
|
return if stream && response.blank?
|
|
156
154
|
|
|
157
155
|
# tuck the full response into a thread local in case needed
|
|
158
|
-
Thread.current[:chat_completion_response] = response.with_indifferent_access
|
|
156
|
+
Thread.current[:chat_completion_response] = response.is_a?(Hash) ? response.with_indifferent_access : response
|
|
159
157
|
|
|
160
158
|
# TODO: add a standardized callback hook for usage events
|
|
161
159
|
# broadcast(:usage_event, usage_subject, self.class.name.to_s, response, premium?)
|
|
@@ -171,11 +169,7 @@ module Raix
|
|
|
171
169
|
|
|
172
170
|
# Force a final response without tools
|
|
173
171
|
params[:tools] = nil
|
|
174
|
-
response =
|
|
175
|
-
openai_request(params:, model: openai, messages:)
|
|
176
|
-
else
|
|
177
|
-
openrouter_request(params:, model:, messages:)
|
|
178
|
-
end
|
|
172
|
+
response = ruby_llm_request(params:, model: openai || model, messages:, openai_override: openai)
|
|
179
173
|
|
|
180
174
|
# Process the final response
|
|
181
175
|
content = response.dig("choices", 0, "message", "content")
|
|
@@ -217,11 +211,7 @@ module Raix
|
|
|
217
211
|
elsif @stop_tool_calls_and_respond
|
|
218
212
|
# If stop_tool_calls_and_respond was set, force a final response without tools
|
|
219
213
|
params[:tools] = nil
|
|
220
|
-
response =
|
|
221
|
-
openai_request(params:, model: openai, messages:)
|
|
222
|
-
else
|
|
223
|
-
openrouter_request(params:, model:, messages:)
|
|
224
|
-
end
|
|
214
|
+
response = ruby_llm_request(params:, model: openai || model, messages:, openai_override: openai)
|
|
225
215
|
|
|
226
216
|
content = response.dig("choices", 0, "message", "content")
|
|
227
217
|
transcript << { assistant: content } if save_response
|
|
@@ -279,7 +269,23 @@ module Raix
|
|
|
279
269
|
#
|
|
280
270
|
# @return [Array] The transcript array.
|
|
281
271
|
def transcript
|
|
282
|
-
@transcript ||=
|
|
272
|
+
@transcript ||= TranscriptAdapter.new(ruby_llm_chat)
|
|
273
|
+
end
|
|
274
|
+
|
|
275
|
+
# Returns the RubyLLM::Chat instance for this conversation
|
|
276
|
+
def ruby_llm_chat
|
|
277
|
+
@ruby_llm_chat ||= begin
|
|
278
|
+
model_id = model || configuration.model
|
|
279
|
+
|
|
280
|
+
# Determine provider based on model format or explicit openai flag
|
|
281
|
+
provider = if model_id.to_s.start_with?("openai/") || model_id.to_s.match?(/^gpt-/)
|
|
282
|
+
:openai
|
|
283
|
+
else
|
|
284
|
+
:openrouter
|
|
285
|
+
end
|
|
286
|
+
|
|
287
|
+
RubyLLM.chat(model: model_id, provider:, assume_model_exists: true)
|
|
288
|
+
end
|
|
283
289
|
end
|
|
284
290
|
|
|
285
291
|
# Dispatches a tool function call with the given function name and arguments.
|
|
@@ -307,42 +313,121 @@ module Raix
|
|
|
307
313
|
tools.select { |tool| requested_tools.include?(tool.dig(:function, :name).to_sym) }
|
|
308
314
|
end
|
|
309
315
|
|
|
310
|
-
def
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
end
|
|
316
|
+
def run_before_completion_hooks(params, messages)
|
|
317
|
+
hooks = [
|
|
318
|
+
Raix.configuration.before_completion,
|
|
319
|
+
self.class.configuration.before_completion,
|
|
320
|
+
before_completion
|
|
321
|
+
].compact
|
|
317
322
|
|
|
318
|
-
|
|
319
|
-
params[:stream_options] = { include_usage: true } if params[:stream]
|
|
323
|
+
return if hooks.empty?
|
|
320
324
|
|
|
321
|
-
|
|
325
|
+
context = CompletionContext.new(
|
|
326
|
+
chat_completion: self,
|
|
327
|
+
messages:,
|
|
328
|
+
params:
|
|
329
|
+
)
|
|
322
330
|
|
|
323
|
-
|
|
331
|
+
hooks.each do |hook|
|
|
332
|
+
result = hook.call(context) if hook.respond_to?(:call)
|
|
333
|
+
next unless result.is_a?(Hash)
|
|
334
|
+
|
|
335
|
+
# Handle model separately since it's passed as a keyword arg to ruby_llm_request
|
|
336
|
+
self.model = result[:model] if result.key?(:model)
|
|
337
|
+
params.merge!(result.compact)
|
|
338
|
+
end
|
|
324
339
|
end
|
|
325
340
|
|
|
326
|
-
def
|
|
327
|
-
#
|
|
328
|
-
|
|
341
|
+
def ruby_llm_request(params:, model:, messages:, openai_override: nil)
|
|
342
|
+
# Create a temporary chat instance for this request
|
|
343
|
+
provider = determine_provider(model, openai_override)
|
|
344
|
+
chat = RubyLLM.chat(model:, provider:, assume_model_exists: true)
|
|
345
|
+
|
|
346
|
+
# Apply messages to the chat
|
|
347
|
+
# Track if we have a user message to determine how to call ask
|
|
348
|
+
has_user_message = false
|
|
349
|
+
|
|
350
|
+
messages.each do |msg|
|
|
351
|
+
role = msg[:role] || msg["role"]
|
|
352
|
+
content = msg[:content] || msg["content"]
|
|
353
|
+
|
|
354
|
+
case role.to_s
|
|
355
|
+
when "system"
|
|
356
|
+
chat.with_instructions(content)
|
|
357
|
+
when "user"
|
|
358
|
+
has_user_message = true
|
|
359
|
+
chat.add_message(role: :user, content:)
|
|
360
|
+
when "assistant"
|
|
361
|
+
if msg[:tool_calls] || msg["tool_calls"]
|
|
362
|
+
chat.add_message(role: :assistant, content:, tool_calls: msg[:tool_calls] || msg["tool_calls"])
|
|
363
|
+
else
|
|
364
|
+
chat.add_message(role: :assistant, content:)
|
|
365
|
+
end
|
|
366
|
+
when "tool"
|
|
367
|
+
chat.add_message(
|
|
368
|
+
role: :tool,
|
|
369
|
+
content:,
|
|
370
|
+
tool_call_id: msg[:tool_call_id] || msg["tool_call_id"]
|
|
371
|
+
)
|
|
372
|
+
end
|
|
373
|
+
end
|
|
329
374
|
|
|
330
|
-
|
|
375
|
+
# Apply configuration parameters
|
|
376
|
+
chat.with_temperature(params[:temperature]) if params[:temperature]
|
|
331
377
|
|
|
332
|
-
params
|
|
378
|
+
# Apply additional params (RubyLLM with_params expects keyword args)
|
|
379
|
+
additional_params = params.compact.except(:temperature, :tools, :max_tokens, :max_completion_tokens)
|
|
380
|
+
chat.with_params(**additional_params) if additional_params.any?
|
|
333
381
|
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
retry_count += 1
|
|
340
|
-
sleep 1 * retry_count # backoff
|
|
341
|
-
retry if retry_count < 5
|
|
342
|
-
end
|
|
382
|
+
# Handle tools - convert Raix function declarations to RubyLLM tools
|
|
383
|
+
if params[:tools].present? && respond_to?(:class) && self.class.respond_to?(:functions)
|
|
384
|
+
ruby_llm_tools = FunctionToolAdapter.convert_tools_for_ruby_llm(self)
|
|
385
|
+
ruby_llm_tools.each { |tool| chat.with_tool(tool) }
|
|
386
|
+
end
|
|
343
387
|
|
|
344
|
-
|
|
388
|
+
# Execute the completion
|
|
389
|
+
if stream.present?
|
|
390
|
+
# Streaming mode
|
|
391
|
+
if has_user_message
|
|
392
|
+
chat.complete(&stream)
|
|
393
|
+
else
|
|
394
|
+
chat.ask(&stream)
|
|
395
|
+
end
|
|
396
|
+
nil # Return nil for streaming as per original behavior
|
|
397
|
+
else
|
|
398
|
+
# Non-streaming mode - return OpenAI-compatible response format
|
|
399
|
+
response_message = has_user_message ? chat.complete : chat.ask
|
|
400
|
+
|
|
401
|
+
# Convert RubyLLM response to OpenAI format for compatibility
|
|
402
|
+
{
|
|
403
|
+
"choices" => [
|
|
404
|
+
{
|
|
405
|
+
"message" => {
|
|
406
|
+
"role" => "assistant",
|
|
407
|
+
"content" => response_message.content,
|
|
408
|
+
"tool_calls" => response_message.tool_calls
|
|
409
|
+
},
|
|
410
|
+
"finish_reason" => response_message.tool_call? ? "tool_calls" : "stop"
|
|
411
|
+
}
|
|
412
|
+
],
|
|
413
|
+
"usage" => {
|
|
414
|
+
"prompt_tokens" => response_message.input_tokens,
|
|
415
|
+
"completion_tokens" => response_message.output_tokens,
|
|
416
|
+
"total_tokens" => (response_message.input_tokens || 0) + (response_message.output_tokens || 0)
|
|
417
|
+
}
|
|
418
|
+
}
|
|
345
419
|
end
|
|
420
|
+
rescue StandardError => e
|
|
421
|
+
warn "RubyLLM request failed: #{e.message}"
|
|
422
|
+
raise e
|
|
423
|
+
end
|
|
424
|
+
|
|
425
|
+
def determine_provider(model, openai_override)
|
|
426
|
+
return :openai if openai_override
|
|
427
|
+
return :openai if model.to_s.match?(/^gpt-/) || model.to_s.match?(/^o\d/)
|
|
428
|
+
|
|
429
|
+
# Default to openrouter for model IDs with provider prefix
|
|
430
|
+
:openrouter
|
|
346
431
|
end
|
|
347
432
|
end
|
|
348
433
|
end
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Raix
|
|
4
|
+
# Context object passed to before_completion hooks.
|
|
5
|
+
# Provides access to the chat completion instance, messages, and request parameters.
|
|
6
|
+
# Messages can be mutated for content filtering, PII redaction, etc.
|
|
7
|
+
class CompletionContext
|
|
8
|
+
attr_reader :chat_completion, :messages, :params
|
|
9
|
+
|
|
10
|
+
def initialize(chat_completion:, messages:, params:)
|
|
11
|
+
@chat_completion = chat_completion
|
|
12
|
+
@messages = messages # mutable - hooks can modify for filtering, redaction, etc.
|
|
13
|
+
@params = params # mutable - hooks can modify parameters
|
|
14
|
+
end
|
|
15
|
+
|
|
16
|
+
# Convenience accessor for the transcript
|
|
17
|
+
def transcript
|
|
18
|
+
chat_completion.transcript
|
|
19
|
+
end
|
|
20
|
+
|
|
21
|
+
# Get the currently configured model
|
|
22
|
+
def current_model
|
|
23
|
+
chat_completion.model || chat_completion.configuration.model
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
# Get the class that includes ChatCompletion
|
|
27
|
+
def chat_completion_class
|
|
28
|
+
chat_completion.class
|
|
29
|
+
end
|
|
30
|
+
|
|
31
|
+
# Get the current configuration
|
|
32
|
+
def configuration
|
|
33
|
+
chat_completion.configuration
|
|
34
|
+
end
|
|
35
|
+
end
|
|
36
|
+
end
|
data/lib/raix/configuration.rb
CHANGED
|
@@ -30,16 +30,24 @@ module Raix
|
|
|
30
30
|
# is normally set in each class that includes the ChatCompletion module.
|
|
31
31
|
attr_accessor_with_fallback :model
|
|
32
32
|
|
|
33
|
-
#
|
|
33
|
+
# DEPRECATED: Use ruby_llm_config.openrouter_api_key instead
|
|
34
34
|
attr_accessor_with_fallback :openrouter_client
|
|
35
35
|
|
|
36
|
-
#
|
|
36
|
+
# DEPRECATED: Use ruby_llm_config.openai_api_key instead
|
|
37
37
|
attr_accessor_with_fallback :openai_client
|
|
38
38
|
|
|
39
39
|
# The max_tool_calls option determines the maximum number of tool calls
|
|
40
40
|
# before forcing a text response to prevent excessive function invocations.
|
|
41
41
|
attr_accessor_with_fallback :max_tool_calls
|
|
42
42
|
|
|
43
|
+
# Access to RubyLLM configuration
|
|
44
|
+
attr_accessor_with_fallback :ruby_llm_config
|
|
45
|
+
|
|
46
|
+
# A callable hook that runs before each chat completion request.
|
|
47
|
+
# Receives a CompletionContext and can modify params and messages.
|
|
48
|
+
# Use for: dynamic parameter resolution, logging, content filtering, PII redaction, etc.
|
|
49
|
+
attr_accessor_with_fallback :before_completion
|
|
50
|
+
|
|
43
51
|
DEFAULT_MAX_TOKENS = 1000
|
|
44
52
|
DEFAULT_MAX_COMPLETION_TOKENS = 16_384
|
|
45
53
|
DEFAULT_MODEL = "meta-llama/llama-3.3-8b-instruct:free"
|
|
@@ -53,11 +61,18 @@ module Raix
|
|
|
53
61
|
self.max_tokens = DEFAULT_MAX_TOKENS
|
|
54
62
|
self.model = DEFAULT_MODEL
|
|
55
63
|
self.max_tool_calls = DEFAULT_MAX_TOOL_CALLS
|
|
64
|
+
self.ruby_llm_config = RubyLLM.config
|
|
56
65
|
self.fallback = fallback
|
|
57
66
|
end
|
|
58
67
|
|
|
59
68
|
def client?
|
|
60
|
-
|
|
69
|
+
# Support legacy openrouter_client/openai_client or new RubyLLM config
|
|
70
|
+
!!(openrouter_client || openai_client || ruby_llm_configured?)
|
|
71
|
+
end
|
|
72
|
+
|
|
73
|
+
def ruby_llm_configured?
|
|
74
|
+
ruby_llm_config&.openai_api_key || ruby_llm_config&.openrouter_api_key ||
|
|
75
|
+
ruby_llm_config&.anthropic_api_key || ruby_llm_config&.gemini_api_key
|
|
61
76
|
end
|
|
62
77
|
|
|
63
78
|
private
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Raix
  # Adapter to convert Raix function declarations to RubyLLM::Tool instances.
  class FunctionToolAdapter
    # Builds an anonymous RubyLLM::Tool subclass wrapping a single Raix
    # function declaration and returns a configured instance of it.
    #
    # @param function_def [Hash] Raix function declaration (:name, :description, :parameters)
    # @param instance [Object] the Raix object that implements the function
    # @return [RubyLLM::Tool] tool instance whose #name matches the function name
    def self.create_tool_from_function(function_def, instance)
      tool_class = Class.new(RubyLLM::Tool) do
        description function_def[:description] if function_def[:description]

        # Declare parameters from the function definition. `dig` guards
        # against declarations that omit :parameters entirely (previously a
        # NoMethodError on nil).
        required_params = function_def.dig(:parameters, :required) || []
        function_def.dig(:parameters, :properties)&.each do |param_name, param_def|
          # Array#include? yields a strict boolean; the previous
          # `required_array&.include?` form could pass `required: nil`.
          required = required_params.include?(param_name)
          param param_name.to_sym, type: param_def[:type], desc: param_def[:description], required:
        end

        # Capture the owning instance and function name for dispatch.
        define_method(:raix_instance) { instance }
        define_method(:raix_function_name) { function_def[:name] }

        # Route tool execution back to the Raix function implementation.
        define_method(:execute) do |**args|
          raix_instance.public_send(raix_function_name, args.with_indifferent_access, nil)
        end
      end

      # Give the anonymous class a meaningful, debuggable name.
      tool_class.define_singleton_method(:name) do
        "Raix::GeneratedTool::#{function_def[:name].to_s.camelize}"
      end

      tool_instance = tool_class.new

      # The instance reports the original function name so RubyLLM can match
      # the tool call coming back from the AI.
      tool_instance.define_singleton_method(:name) do
        function_def[:name].to_s
      end

      tool_instance
    end

    # Converts every function declared on a Raix instance's class into
    # RubyLLM tool instances. Returns [] when the class declares none.
    def self.convert_tools_for_ruby_llm(raix_instance)
      return [] unless raix_instance.class.respond_to?(:functions)
      return [] if raix_instance.class.functions.blank?

      raix_instance.class.functions.map do |function_def|
        create_tool_from_function(function_def, raix_instance)
      end
    end
  end
end
|
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
require_relative "tool"
|
|
2
1
|
require "json"
|
|
3
2
|
require "securerandom"
|
|
4
3
|
require "faraday"
|
|
@@ -138,7 +137,7 @@ module Raix
|
|
|
138
137
|
# Process SSE buffer for complete events
|
|
139
138
|
def process_sse_buffer
|
|
140
139
|
while (idx = @buffer.index("\n\n"))
|
|
141
|
-
event_text = @buffer.slice!(0..idx + 1)
|
|
140
|
+
event_text = @buffer.slice!(0..(idx + 1))
|
|
142
141
|
event_type, event_data = parse_sse_fields(event_text)
|
|
143
142
|
|
|
144
143
|
case event_type
|
data/lib/raix/mcp.rb
CHANGED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Raix
  # Bridges Raix's hash-based transcript format and RubyLLM's Message
  # objects. Messages appended in Raix format are queued as "pending" until
  # the next chat completion; messages already held by the RubyLLM chat are
  # translated back into Raix hashes on read.
  class TranscriptAdapter
    attr_reader :ruby_llm_chat

    def initialize(ruby_llm_chat)
      @ruby_llm_chat = ruby_llm_chat
      @pending_messages = []
    end

    # Appends a message (or a nested array of messages, as produced by
    # function dispatch) given in Raix hash format. Returns self.
    def <<(entry)
      if entry.is_a?(Array)
        entry.each { |item| self << item }
      elsif entry.is_a?(Hash)
        add_message_from_hash(entry)
      end
      self
    end

    # All messages — RubyLLM history first, then pending — in Raix format.
    def flatten
      history = @ruby_llm_chat.messages.map { |message| message_to_raix_format(message) }
      queued = @pending_messages.map { |message| normalize_message_format(message) }
      (history + queued).flatten
    end

    # Array view of the full transcript, pending messages included.
    def to_a
      flatten
    end

    # Transcript with nil entries removed.
    def compact
      flatten.compact
    end

    # Drops every message, both from the RubyLLM chat and the pending queue.
    def clear
      @ruby_llm_chat.reset_messages!
      @pending_messages.clear
      self
    end

    # Most recent message, in Raix format.
    def last
      flatten.last
    end

    # Number of messages in the transcript.
    def size
      flatten.size
    end

    alias length size

    private

    # Routes one Raix-format hash into the pending queue, registering system
    # prompts with the RubyLLM chat as instructions along the way.
    def add_message_from_hash(hash)
      # Abbreviated Raix formats: { system: }, { user: }, { assistant: }
      # (symbol or string keys).
      role = %i[system user assistant].find { |r| hash.key?(r) || hash.key?(r.to_s) }
      if role
        content = hash[role] || hash[role.to_s]
        # System prompts also become RubyLLM instructions; user/assistant
        # messages wait in the queue until the chat_completion call.
        @ruby_llm_chat.with_instructions(content) if role == :system
        @pending_messages << { role: role.to_s, content: content }
      elsif hash[:role] || hash["role"]
        # Standard OpenAI format (tool messages, assistant with tool_calls, etc.)
        @pending_messages << hash.with_indifferent_access
      end
    end

    # Translates a RubyLLM message object into a Raix hash. Tool-related
    # messages keep the full role/content form; everything else collapses to
    # the abbreviated { role => content } form.
    def message_to_raix_format(message)
      tool_call = message.tool_call?
      tool_result = message.tool_result?
      return { message.role.to_sym => message.content } unless tool_call || tool_result

      formatted = { role: message.role.to_s, content: message.content }
      formatted[:tool_calls] = message.tool_calls if tool_call
      formatted[:tool_call_id] = message.tool_call_id if tool_result
      formatted
    end

    # Converts a pending hash to abbreviated form where possible. Tool
    # messages and unrecognized shapes pass through untouched.
    def normalize_message_format(msg)
      # Already abbreviated (symbol keys, or truthy string-keyed values).
      return msg if %i[system user assistant].any? { |key| msg.key?(key) }
      return msg if %w[system user assistant].any? { |key| msg[key] }

      role = msg[:role] || msg["role"]
      return msg unless role

      # Tool messages stay in full role/content format.
      return msg if msg[:tool_calls] || msg["tool_calls"] || msg[:tool_call_id] || msg["tool_call_id"]

      { role.to_sym => (msg[:content] || msg["content"]) }
    end
  end
end
|
data/lib/raix/version.rb
CHANGED
data/lib/raix.rb
CHANGED
|
@@ -1,15 +1,9 @@
|
|
|
1
1
|
# frozen_string_literal: true
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
require_relative "raix/chat_completion"
|
|
6
|
-
require_relative "raix/function_dispatch"
|
|
7
|
-
require_relative "raix/prompt_declarations"
|
|
8
|
-
require_relative "raix/predicate"
|
|
9
|
-
require_relative "raix/response_format"
|
|
10
|
-
require_relative "raix/mcp"
|
|
3
|
+
require "ruby_llm"
|
|
4
|
+
require "zeitwerk"
|
|
11
5
|
|
|
12
|
-
#
|
|
6
|
+
# Ruby AI eXtensions
|
|
13
7
|
module Raix
|
|
14
8
|
class << self
|
|
15
9
|
attr_writer :configuration
|
|
@@ -25,3 +19,7 @@ module Raix
|
|
|
25
19
|
yield(configuration)
|
|
26
20
|
end
|
|
27
21
|
end
|
|
22
|
+
|
|
23
|
+
loader = Zeitwerk::Loader.for_gem
|
|
24
|
+
loader.inflector.inflect("mcp" => "MCP")
|
|
25
|
+
loader.setup
|
data/raix.gemspec
CHANGED
|
@@ -30,7 +30,7 @@ Gem::Specification.new do |spec|
|
|
|
30
30
|
|
|
31
31
|
spec.add_dependency "activesupport", ">= 6.0"
|
|
32
32
|
spec.add_dependency "faraday-retry", "~> 2.0"
|
|
33
|
-
spec.add_dependency "open_router", "~> 0.2"
|
|
34
33
|
spec.add_dependency "ostruct"
|
|
35
|
-
spec.add_dependency "
|
|
34
|
+
spec.add_dependency "ruby_llm", "~> 1.9"
|
|
35
|
+
spec.add_dependency "zeitwerk", "~> 2.7"
|
|
36
36
|
end
|
metadata
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: raix
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version:
|
|
4
|
+
version: 2.0.1
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Obie Fernandez
|
|
8
8
|
bindir: exe
|
|
9
9
|
cert_chain: []
|
|
10
|
-
date:
|
|
10
|
+
date: 2026-03-20 00:00:00.000000000 Z
|
|
11
11
|
dependencies:
|
|
12
12
|
- !ruby/object:Gem::Dependency
|
|
13
13
|
name: activesupport
|
|
@@ -38,47 +38,47 @@ dependencies:
|
|
|
38
38
|
- !ruby/object:Gem::Version
|
|
39
39
|
version: '2.0'
|
|
40
40
|
- !ruby/object:Gem::Dependency
|
|
41
|
-
name:
|
|
41
|
+
name: ostruct
|
|
42
42
|
requirement: !ruby/object:Gem::Requirement
|
|
43
43
|
requirements:
|
|
44
|
-
- - "
|
|
44
|
+
- - ">="
|
|
45
45
|
- !ruby/object:Gem::Version
|
|
46
|
-
version: '0
|
|
46
|
+
version: '0'
|
|
47
47
|
type: :runtime
|
|
48
48
|
prerelease: false
|
|
49
49
|
version_requirements: !ruby/object:Gem::Requirement
|
|
50
50
|
requirements:
|
|
51
|
-
- - "
|
|
51
|
+
- - ">="
|
|
52
52
|
- !ruby/object:Gem::Version
|
|
53
|
-
version: '0
|
|
53
|
+
version: '0'
|
|
54
54
|
- !ruby/object:Gem::Dependency
|
|
55
|
-
name:
|
|
55
|
+
name: ruby_llm
|
|
56
56
|
requirement: !ruby/object:Gem::Requirement
|
|
57
57
|
requirements:
|
|
58
|
-
- - "
|
|
58
|
+
- - "~>"
|
|
59
59
|
- !ruby/object:Gem::Version
|
|
60
|
-
version: '
|
|
60
|
+
version: '1.9'
|
|
61
61
|
type: :runtime
|
|
62
62
|
prerelease: false
|
|
63
63
|
version_requirements: !ruby/object:Gem::Requirement
|
|
64
64
|
requirements:
|
|
65
|
-
- - "
|
|
65
|
+
- - "~>"
|
|
66
66
|
- !ruby/object:Gem::Version
|
|
67
|
-
version: '
|
|
67
|
+
version: '1.9'
|
|
68
68
|
- !ruby/object:Gem::Dependency
|
|
69
|
-
name:
|
|
69
|
+
name: zeitwerk
|
|
70
70
|
requirement: !ruby/object:Gem::Requirement
|
|
71
71
|
requirements:
|
|
72
72
|
- - "~>"
|
|
73
73
|
- !ruby/object:Gem::Version
|
|
74
|
-
version: '
|
|
74
|
+
version: '2.7'
|
|
75
75
|
type: :runtime
|
|
76
76
|
prerelease: false
|
|
77
77
|
version_requirements: !ruby/object:Gem::Requirement
|
|
78
78
|
requirements:
|
|
79
79
|
- - "~>"
|
|
80
80
|
- !ruby/object:Gem::Version
|
|
81
|
-
version: '
|
|
81
|
+
version: '2.7'
|
|
82
82
|
email:
|
|
83
83
|
- obiefernandez@gmail.com
|
|
84
84
|
executables: []
|
|
@@ -98,18 +98,21 @@ files:
|
|
|
98
98
|
- README.llm
|
|
99
99
|
- README.md
|
|
100
100
|
- Rakefile
|
|
101
|
-
- lib/mcp/sse_client.rb
|
|
102
|
-
- lib/mcp/stdio_client.rb
|
|
103
|
-
- lib/mcp/tool.rb
|
|
104
101
|
- lib/raix.rb
|
|
105
102
|
- lib/raix/chat_completion.rb
|
|
103
|
+
- lib/raix/completion_context.rb
|
|
106
104
|
- lib/raix/configuration.rb
|
|
107
105
|
- lib/raix/function_dispatch.rb
|
|
106
|
+
- lib/raix/function_tool_adapter.rb
|
|
108
107
|
- lib/raix/mcp.rb
|
|
108
|
+
- lib/raix/mcp/sse_client.rb
|
|
109
|
+
- lib/raix/mcp/stdio_client.rb
|
|
110
|
+
- lib/raix/mcp/tool.rb
|
|
109
111
|
- lib/raix/message_adapters/base.rb
|
|
110
112
|
- lib/raix/predicate.rb
|
|
111
113
|
- lib/raix/prompt_declarations.rb
|
|
112
114
|
- lib/raix/response_format.rb
|
|
115
|
+
- lib/raix/transcript_adapter.rb
|
|
113
116
|
- lib/raix/version.rb
|
|
114
117
|
- raix.gemspec
|
|
115
118
|
- sig/raix.rbs
|
|
File without changes
|