ruby_llm 1.3.2beta1 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/README.md +13 -14
  3. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  4. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  5. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  6. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  7. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  8. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  9. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  10. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  12. data/lib/ruby_llm/active_record/acts_as.rb +23 -5
  13. data/lib/ruby_llm/aliases.json +6 -21
  14. data/lib/ruby_llm/chat.rb +46 -3
  15. data/lib/ruby_llm/configuration.rb +2 -0
  16. data/lib/ruby_llm/error.rb +1 -0
  17. data/lib/ruby_llm/message.rb +3 -1
  18. data/lib/ruby_llm/models.json +1942 -1849
  19. data/lib/ruby_llm/provider.rb +12 -6
  20. data/lib/ruby_llm/providers/anthropic/chat.rb +13 -12
  21. data/lib/ruby_llm/providers/anthropic/media.rb +2 -0
  22. data/lib/ruby_llm/providers/anthropic/tools.rb +23 -13
  23. data/lib/ruby_llm/providers/bedrock/chat.rb +4 -5
  24. data/lib/ruby_llm/providers/bedrock/media.rb +2 -0
  25. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +2 -2
  26. data/lib/ruby_llm/providers/gemini/chat.rb +37 -2
  27. data/lib/ruby_llm/providers/gemini/media.rb +2 -0
  28. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  29. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  30. data/lib/ruby_llm/providers/gpustack.rb +36 -0
  31. data/lib/ruby_llm/providers/ollama/media.rb +2 -0
  32. data/lib/ruby_llm/providers/openai/chat.rb +17 -2
  33. data/lib/ruby_llm/providers/openai/media.rb +2 -0
  34. data/lib/ruby_llm/providers/openai/streaming.rb +14 -0
  35. data/lib/ruby_llm/railtie.rb +5 -0
  36. data/lib/ruby_llm/stream_accumulator.rb +3 -2
  37. data/lib/ruby_llm/streaming.rb +25 -7
  38. data/lib/ruby_llm/utils.rb +10 -0
  39. data/lib/ruby_llm/version.rb +1 -1
  40. data/lib/ruby_llm.rb +3 -0
  41. data/lib/tasks/models_docs.rake +2 -1
  42. metadata +13 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: b069393971ae461a63487ba8a13d1f81a791d96fc0f537fceb02de88d9e690d0
- data.tar.gz: c34f0884ffd9cffd3c0311ebbe8d7fbacc151b17512984b999dace24fa0bfe74
+ metadata.gz: cb8de9c3cea5895389517529bcf2a69c9c2f02aadbc6cc299fae6192e7776135
+ data.tar.gz: da3fabd00fbe863009d04d88c0092f03c90d4ab4bea1ec9c727048b479e9b0dd
  SHA512:
- metadata.gz: 5ba83dd19febb3070e0f127eb6dc87fd92664d8afae2f4024240588fdc8624dabdcc5c2b089e5147d22601c3505b966f4bed7383fbf6e36ce8dff5bea568ef87
- data.tar.gz: b7940d1a6b8bf72ba08eb9d63440505935022d372ffdab6aac51599fb23a9e68af49085cb965488f7306d28143f7b39ec9c0138a93ba52ad3ca3ac574e03a442
+ metadata.gz: 23ef0c8b9b4afa983526fa3327d06432bec87dbb0f002a726b9ae44c0d65c390416ac46208a93d5956d3ec8539505c72e3fffc44ca5907faadc14e444d6910d9
+ data.tar.gz: 31be38b7ff4ad1dad3f49e2eb8a27d4027f0970d2da3dcdce55a132887bf00459f7c92fd8271c73c70ff844f3aab9ba8edf19f6b0dd030977fe6447ed8a6df2b
data/README.md CHANGED
@@ -12,6 +12,8 @@
  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/deepseek-text.svg" alt="DeepSeek" class="logo-small">
  &nbsp;
  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/gemini-brand-color.svg" alt="Gemini" class="logo-large">
+ <br>
+ <img src="https://raw.githubusercontent.com/gpustack/gpustack/main/docs/assets/gpustack-logo.png" alt="GPUStack" class="logo-medium" height="16">
  &nbsp;
  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama.svg" alt="Ollama" class="logo-medium">
  <img src="https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/ollama-text.svg" alt="Ollama" class="logo-medium">
@@ -121,34 +123,31 @@ See the [Installation Guide](https://rubyllm.com/installation) for full details.
 
  Add persistence to your chat models effortlessly:
 
+ ```bash
+ # Generate models and migrations (available in v1.4.0)
+ rails generate ruby_llm:install
+ ```
+
  ```ruby
- # app/models/chat.rb
+ # Or add to existing models
  class Chat < ApplicationRecord
    acts_as_chat # Automatically saves messages & tool calls
-   # ... your other model logic ...
  end
 
- # app/models/message.rb
  class Message < ApplicationRecord
    acts_as_message
-   # ...
  end
 
- # app/models/tool_call.rb (if using tools)
  class ToolCall < ApplicationRecord
    acts_as_tool_call
-   # ...
  end
 
- # Now interacting with a Chat record persists the conversation:
- chat_record = Chat.create!(model_id: "gpt-4.1-nano")
- chat_record.ask("Explain Active Record callbacks.") # User & Assistant messages saved
-
- # Works seamlessly with file attachments - types automatically detected
- chat_record.ask("What's in this file?", with: "report.pdf")
- chat_record.ask("Analyze these", with: ["image.jpg", "data.csv", "notes.txt"])
+ # Now chats persist automatically
+ chat = Chat.create!(model_id: "gpt-4.1-nano")
+ chat.ask("What's in this file?", with: "report.pdf")
  ```
- Check the [Rails Integration Guide](https://rubyllm.com/guides/rails) for more.
+
+ See the [Rails Integration Guide](https://rubyllm.com/guides/rails) for details.
 
  ## Learn More
 
data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt ADDED
@@ -0,0 +1,108 @@
+ # RubyLLM Rails Setup Complete!
+
+ Thanks for installing RubyLLM in your Rails application. Here's what was created:
+
+ ## Models
+
+ - `<%= options[:chat_model_name] %>` - Stores chat sessions and their associated model ID
+ - `<%= options[:message_model_name] %>` - Stores individual messages in a chat
+ - `<%= options[:tool_call_model_name] %>` - Stores tool calls made by language models
+
+ **Note:** Do not add `validates :content, presence: true` to your Message model - RubyLLM creates empty assistant messages before API calls for streaming support.
+
+ ## Configuration Options
+
+ The generator supports the following options to customize model names:
+
+ ```bash
+ rails generate ruby_llm:install \
+   --chat-model-name=Conversation \
+   --message-model-name=ChatMessage \
+   --tool-call-model-name=FunctionCall
+ ```
+
+ This is useful when you need to avoid namespace collisions with existing models in your application. Table names will be automatically derived from the model names following Rails conventions.
+
+ ## Next Steps
+
+ 1. **Run migrations:**
+    ```bash
+    rails db:migrate
+    ```
+
+    **Database Note:** The migrations use `jsonb` for PostgreSQL and `json` for MySQL/SQLite automatically.
+
+ 2. **Set your API keys** in `config/initializers/ruby_llm.rb` or using environment variables:
+    ```ruby
+    # config/initializers/ruby_llm.rb
+    RubyLLM.configure do |config|
+      config.openai_api_key = ENV['OPENAI_API_KEY']
+      config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
+      config.gemini_api_key = ENV['GEMINI_API_KEY']
+      # ... add other providers as needed
+    end
+    ```
+
+ 3. **Start using RubyLLM in your code:**
+    ```ruby
+    # Basic usage
+    chat = <%= options[:chat_model_name] %>.create!(model_id: 'gpt-4.1-nano')
+    response = chat.ask("What is Ruby on Rails?")
+
+    # With file attachments (requires ActiveStorage setup)
+    chat.ask("What's in this file?", with: "report.pdf")
+    chat.ask("Analyze these files", with: ["image.jpg", "data.csv", "notes.txt"])
+    ```
+
+ 4. **For streaming responses** with Hotwire/Turbo:
+    ```ruby
+    # app/models/<%= options[:message_model_name].underscore %>.rb
+    class <%= options[:message_model_name] %> < ApplicationRecord
+      acts_as_message
+
+      # Helper to broadcast chunks during streaming
+      def broadcast_append_chunk(chunk_content)
+        broadcast_append_to [ chat, "messages" ],
+          target: dom_id(self, "content"),
+          html: chunk_content
+      end
+    end
+
+    # app/jobs/chat_stream_job.rb
+    class ChatStreamJob < ApplicationJob
+      def perform(chat_id, user_content)
+        chat = <%= options[:chat_model_name] %>.find(chat_id)
+        chat.ask(user_content) do |chunk|
+          assistant_message = chat.messages.last
+          if chunk.content && assistant_message
+            assistant_message.broadcast_append_chunk(chunk.content)
+          end
+        end
+      end
+    end
+
+    # In your controller
+    ChatStreamJob.perform_later(@chat.id, params[:content])
+    ```
+
+ ## Optional: ActiveStorage for Attachments
+
+ If you want to use file attachments (PDFs, images, etc.), set up ActiveStorage:
+
+ ```bash
+ rails active_storage:install
+ rails db:migrate
+ ```
+
+ Then add to your Message model:
+ ```ruby
+ class <%= options[:message_model_name] %> < ApplicationRecord
+   acts_as_message
+   has_many_attached :attachments
+ end
+ ```
+
+ ## Learn More
+
+ - See the [Rails Integration Guide](https://rubyllm.com/guides/rails) for detailed examples
+ - Visit the [RubyLLM Documentation](https://rubyllm.com) for full API reference
data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt ADDED
@@ -0,0 +1,3 @@
+ class <%= options[:chat_model_name] %> < ApplicationRecord
+   <%= acts_as_chat_declaration %>
+ end
data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt ADDED
@@ -0,0 +1,8 @@
+ class Create<%= options[:chat_model_name].pluralize %> < ActiveRecord::Migration<%= migration_version %>
+   def change
+     create_table :<%= options[:chat_model_name].tableize %> do |t|
+       t.string :model_id
+       t.timestamps
+     end
+   end
+ end
data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt ADDED
@@ -0,0 +1,15 @@
+ # Migration for creating messages table with references to chats and tool_calls
+ class Create<%= options[:message_model_name].pluralize %> < ActiveRecord::Migration<%= migration_version %>
+   def change
+     create_table :<%= options[:message_model_name].tableize %> do |t|
+       t.references :<%= options[:chat_model_name].tableize.singularize %>, null: false, foreign_key: true
+       t.string :role
+       t.text :content
+       t.string :model_id
+       t.integer :input_tokens
+       t.integer :output_tokens
+       t.references :<%= options[:tool_call_model_name].tableize.singularize %>
+       t.timestamps
+     end
+   end
+ end
data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt ADDED
@@ -0,0 +1,14 @@
+ <%#- # Migration for creating tool_calls table with database-specific JSON handling -%>
+ class Create<%= options[:tool_call_model_name].pluralize %> < ActiveRecord::Migration<%= migration_version %>
+   def change
+     create_table :<%= options[:tool_call_model_name].tableize %> do |t|
+       t.references :<%= options[:message_model_name].tableize.singularize %>, null: false, foreign_key: true
+       t.string :tool_call_id, null: false
+       t.string :name, null: false
+       t.<%= postgresql? ? 'jsonb' : 'json' %> :arguments, default: {}
+       t.timestamps
+     end
+
+     add_index :<%= options[:tool_call_model_name].tableize %>, :tool_call_id
+   end
+ end
data/lib/generators/ruby_llm/install/templates/initializer.rb.tt ADDED
@@ -0,0 +1,6 @@
+ RubyLLM.configure do |config|
+   config.openai_api_key = ENV["OPENAI_API_KEY"]
+   config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
+
+   # config.default_model = "gpt-4.1-nano"
+ end
data/lib/generators/ruby_llm/install/templates/message_model.rb.tt ADDED
@@ -0,0 +1,3 @@
+ class <%= options[:message_model_name] %> < ApplicationRecord
+   <%= acts_as_message_declaration %>
+ end
data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt ADDED
@@ -0,0 +1,3 @@
+ class <%= options[:tool_call_model_name] %> < ApplicationRecord
+   <%= acts_as_tool_call_declaration %>
+ end
data/lib/generators/ruby_llm/install_generator.rb ADDED
@@ -0,0 +1,121 @@
+ # frozen_string_literal: true
+
+ require 'rails/generators'
+ require 'rails/generators/active_record'
+
+ module RubyLLM
+   # Generator for RubyLLM Rails models and migrations
+   class InstallGenerator < Rails::Generators::Base
+     include Rails::Generators::Migration
+
+     namespace 'ruby_llm:install'
+
+     source_root File.expand_path('install/templates', __dir__)
+
+     class_option :chat_model_name, type: :string, default: 'Chat',
+                  desc: 'Name of the Chat model class'
+     class_option :message_model_name, type: :string, default: 'Message',
+                  desc: 'Name of the Message model class'
+     class_option :tool_call_model_name, type: :string, default: 'ToolCall',
+                  desc: 'Name of the ToolCall model class'
+
+     desc 'Creates model files for Chat, Message, and ToolCall, and creates migrations for RubyLLM Rails integration'
+
+     def self.next_migration_number(dirname)
+       ::ActiveRecord::Generators::Base.next_migration_number(dirname)
+     end
+
+     def migration_version
+       "[#{Rails::VERSION::MAJOR}.#{Rails::VERSION::MINOR}]"
+     end
+
+     def postgresql?
+       ActiveRecord::Base.connection.adapter_name.downcase.include?('postgresql')
+     rescue StandardError
+       false
+     end
+
+     def acts_as_chat_declaration
+       acts_as_chat_params = []
+       if options[:message_model_name] != 'Message'
+         acts_as_chat_params << "message_class: \"#{options[:message_model_name]}\""
+       end
+       if options[:tool_call_model_name] != 'ToolCall'
+         acts_as_chat_params << "tool_call_class: \"#{options[:tool_call_model_name]}\""
+       end
+       if acts_as_chat_params.any?
+         "acts_as_chat #{acts_as_chat_params.join(', ')}"
+       else
+         'acts_as_chat'
+       end
+     end
+
+     def acts_as_message_declaration
+       acts_as_message_params = []
+       acts_as_message_params << "chat_class: \"#{options[:chat_model_name]}\"" if options[:chat_model_name] != 'Chat'
+       if options[:tool_call_model_name] != 'ToolCall'
+         acts_as_message_params << "tool_call_class: \"#{options[:tool_call_model_name]}\""
+       end
+       if acts_as_message_params.any?
+         "acts_as_message #{acts_as_message_params.join(', ')}"
+       else
+         'acts_as_message'
+       end
+     end
+
+     def acts_as_tool_call_declaration
+       acts_as_tool_call_params = []
+       if options[:message_model_name] != 'Message'
+         acts_as_tool_call_params << "message_class: \"#{options[:message_model_name]}\""
+       end
+       if acts_as_tool_call_params.any?
+         "acts_as_tool_call #{acts_as_tool_call_params.join(', ')}"
+       else
+         'acts_as_tool_call'
+       end
+     end
+
+     def create_migration_files
+       # Create migrations with timestamps to ensure proper order
+       # First create chats table
+       migration_template 'create_chats_migration.rb.tt',
+                          "db/migrate/create_#{options[:chat_model_name].tableize}.rb"
+
+       # Then create tool_calls table
+       sleep 1 # Ensure different timestamp
+       migration_template 'create_tool_calls_migration.rb.tt',
+                          "db/migrate/create_#{options[:tool_call_model_name].tableize}.rb"
+
+       # Finally create messages table
+       sleep 1 # Ensure different timestamp
+       migration_template 'create_messages_migration.rb.tt',
+                          "db/migrate/create_#{options[:message_model_name].tableize}.rb"
+     end
+
+     def create_model_files
+       template 'chat_model.rb.tt', "app/models/#{options[:chat_model_name].underscore}.rb"
+       template 'message_model.rb.tt', "app/models/#{options[:message_model_name].underscore}.rb"
+       template 'tool_call_model.rb.tt', "app/models/#{options[:tool_call_model_name].underscore}.rb"
+     end
+
+     def create_initializer
+       template 'initializer.rb.tt', 'config/initializers/ruby_llm.rb'
+     end
+
+     def show_install_info
+       say "\n ✅ RubyLLM installed!", :green
+
+       say "\n Next steps:", :yellow
+       say ' 1. Run: rails db:migrate'
+       say ' 2. Set your API keys in config/initializers/ruby_llm.rb'
+       say " 3. Start chatting: #{options[:chat_model_name]}.create!(model_id: 'gpt-4.1-nano').ask('Hello!')"
+
+       say "\n 📚 Full docs: https://rubyllm.com", :cyan
+
+       say "\n ❤️ Love RubyLLM?", :magenta
+       say ' • ⭐ Star on GitHub: https://github.com/crmne/ruby_llm'
+       say ' • 💖 Sponsor: https://github.com/sponsors/crmne'
+       say "\n"
+     end
+   end
+ end
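To make the declaration helpers concrete: the sketch below shows what the generated model files would contain for the custom names used in the INSTALL_INFO template above. It is derived by hand from the `acts_as_*_declaration` methods, not captured generator output.

```ruby
# rails generate ruby_llm:install --chat-model-name=Conversation \
#   --message-model-name=ChatMessage --tool-call-model-name=FunctionCall

# app/models/conversation.rb (via chat_model.rb.tt)
class Conversation < ApplicationRecord
  acts_as_chat message_class: "ChatMessage", tool_call_class: "FunctionCall"
end

# app/models/chat_message.rb (via message_model.rb.tt)
class ChatMessage < ApplicationRecord
  acts_as_message chat_class: "Conversation", tool_call_class: "FunctionCall"
end

# app/models/function_call.rb (via tool_call_model.rb.tt)
class FunctionCall < ApplicationRecord
  acts_as_tool_call message_class: "ChatMessage"
end
```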
data/lib/ruby_llm/active_record/acts_as.rb CHANGED
@@ -84,8 +84,12 @@ module RubyLLM
    attr_reader :tool_call_class
  end
 
- def to_llm
-   @chat ||= RubyLLM.chat(model: model_id)
+ def to_llm(context: nil)
+   @chat ||= if context
+               context.chat(model: model_id)
+             else
+               RubyLLM.chat(model: model_id)
+             end
    @chat.reset_messages!
 
    messages.each do |msg|
@@ -125,8 +129,18 @@ module RubyLLM
    self
  end
 
- def with_context(...)
-   to_llm.with_context(...)
+ def with_context(context)
+   to_llm(context: context)
+   self
+ end
+
+ def with_params(...)
+   to_llm.with_params(...)
+   self
+ end
+
+ def with_schema(...)
+   to_llm.with_schema(...)
    self
  end
 
@@ -175,9 +189,13 @@ module RubyLLM
    tool_call_id = find_tool_call_id(message.tool_call_id) if message.tool_call_id
 
    transaction do
+     # Convert parsed JSON back to JSON string for storage
+     content = message.content
+     content = content.to_json if content.is_a?(Hash) || content.is_a?(Array)
+
      @message.update!(
        role: message.role,
-       content: message.content,
+       content: content,
        model_id: message.model_id,
        input_tokens: message.input_tokens,
        output_tokens: message.output_tokens
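The `with_context` change means a persisted chat can be built from an isolated configuration rather than the global one. A hedged sketch, assuming `RubyLLM.context` from the 1.3 series returns a context object exposing `#chat` (only `with_context(context)` and `context.chat(model:)` are visible in this diff; the ENV name is illustrative):

```ruby
# Hypothetical per-tenant configuration
tenant_context = RubyLLM.context do |config|
  config.openai_api_key = ENV['TENANT_A_OPENAI_KEY']
end

chat = Chat.create!(model_id: 'gpt-4.1-nano')
chat.with_context(tenant_context).ask('Hello!') # builds the underlying chat via context.chat
```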
data/lib/ruby_llm/aliases.json CHANGED
@@ -3,16 +3,6 @@
    "openai": "chatgpt-4o-latest",
    "openrouter": "openai/chatgpt-4o-latest"
  },
- "claude-2.0": {
-   "anthropic": "claude-2.0",
-   "openrouter": "anthropic/claude-2.0",
-   "bedrock": "anthropic.claude-v2:1:200k"
- },
- "claude-2.1": {
-   "anthropic": "claude-2.1",
-   "openrouter": "anthropic/claude-2.1",
-   "bedrock": "anthropic.claude-v2:1:200k"
- },
  "claude-3-5-haiku": {
    "anthropic": "claude-3-5-haiku-20241022",
    "openrouter": "anthropic/claude-3.5-haiku",
@@ -39,9 +29,8 @@
    "bedrock": "anthropic.claude-3-opus-20240229-v1:0:200k"
  },
  "claude-3-sonnet": {
-   "anthropic": "claude-3-sonnet-20240229",
-   "openrouter": "anthropic/claude-3-sonnet",
-   "bedrock": "anthropic.claude-3-sonnet-20240229-v1:0:200k"
+   "bedrock": "anthropic.claude-3-sonnet-20240229-v1:0",
+   "openrouter": "anthropic/claude-3-sonnet"
  },
  "claude-opus-4": {
    "anthropic": "claude-opus-4-20250514",
@@ -69,6 +58,10 @@
    "gemini": "gemini-2.5-flash",
    "openrouter": "google/gemini-2.5-flash"
  },
+ "gemini-2.5-flash-lite": {
+   "gemini": "gemini-2.5-flash-lite",
+   "openrouter": "google/gemini-2.5-flash-lite"
+ },
  "gemini-2.5-flash-lite-preview-06-17": {
    "gemini": "gemini-2.5-flash-lite-preview-06-17",
    "openrouter": "google/gemini-2.5-flash-lite-preview-06-17"
@@ -181,14 +174,6 @@
    "openai": "o1-mini-2024-09-12",
    "openrouter": "openai/o1-mini-2024-09-12"
  },
- "o1-preview": {
-   "openai": "o1-preview",
-   "openrouter": "openai/o1-preview"
- },
- "o1-preview-2024-09-12": {
-   "openai": "o1-preview-2024-09-12",
-   "openrouter": "openai/o1-preview-2024-09-12"
- },
  "o1-pro": {
    "openai": "o1-pro",
    "openrouter": "openai/o1-pro"
data/lib/ruby_llm/chat.rb CHANGED
@@ -11,7 +11,7 @@ module RubyLLM
  class Chat
    include Enumerable
 
-   attr_reader :model, :messages, :tools
+   attr_reader :model, :messages, :tools, :params, :schema
 
    def initialize(model: nil, provider: nil, assume_model_exists: false, context: nil)
      if assume_model_exists && !provider
@@ -25,9 +25,12 @@ module RubyLLM
      @temperature = 0.7
      @messages = []
      @tools = {}
+     @params = {}
+     @schema = nil
      @on = {
        new_message: nil,
-       end_message: nil
+       end_message: nil,
+       tool_call: nil
      }
    end
 
@@ -78,6 +81,28 @@ module RubyLLM
      self
    end
 
+   def with_params(**params)
+     @params = params
+     self
+   end
+
+   def with_schema(schema, force: false)
+     unless force || @model.structured_output?
+       raise UnsupportedStructuredOutputError, "Model #{@model.id} doesn't support structured output"
+     end
+
+     schema_instance = schema.is_a?(Class) ? schema.new : schema
+
+     # Accept both RubyLLM::Schema instances and plain JSON schemas
+     @schema = if schema_instance.respond_to?(:to_json_schema)
+                 schema_instance.to_json_schema[:schema]
+               else
+                 schema_instance
+               end
+
+     self
+   end
+
    def on_new_message(&block)
      @on[:new_message] = block
      self
@@ -88,21 +113,38 @@ module RubyLLM
      self
    end
 
+   def on_tool_call(&block)
+     @on[:tool_call] = block
+     self
+   end
+
    def each(&)
      messages.each(&)
    end
 
-   def complete(&)
+   def complete(&) # rubocop:disable Metrics/PerceivedComplexity
      response = @provider.complete(
        messages,
        tools: @tools,
        temperature: @temperature,
        model: @model.id,
        connection: @connection,
+       params: @params,
+       schema: @schema,
        &wrap_streaming_block(&)
      )
 
      @on[:new_message]&.call unless block_given?
+
+     # Parse JSON if schema was set
+     if @schema && response.content.is_a?(String)
+       begin
+         response.content = JSON.parse(response.content)
+       rescue JSON::ParserError
+         # If parsing fails, keep content as string
+       end
+     end
+
      add_message response
      @on[:end_message]&.call(response)
 
@@ -145,6 +187,7 @@ module RubyLLM
    def handle_tool_calls(response, &)
      response.tool_calls.each_value do |tool_call|
        @on[:new_message]&.call
+       @on[:tool_call]&.call(tool_call)
        result = execute_tool tool_call
        message = add_message role: :tool, content: result.to_s, tool_call_id: tool_call.id
        @on[:end_message]&.call(message)
data/lib/ruby_llm/configuration.rb CHANGED
@@ -24,6 +24,8 @@ module RubyLLM
    :bedrock_session_token,
    :openrouter_api_key,
    :ollama_api_base,
+   :gpustack_api_base,
+   :gpustack_api_key,
    # Default models
    :default_model,
    :default_embedding_model,
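These two accessors wire up the GPUStack provider added in this release. A minimal configuration sketch; only the accessor names come from the diff, the ENV variable names are illustrative:

```ruby
RubyLLM.configure do |config|
  config.gpustack_api_base = ENV['GPUSTACK_API_BASE'] # your GPUStack server's API endpoint
  config.gpustack_api_key  = ENV['GPUSTACK_API_KEY']
end
```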
data/lib/ruby_llm/error.rb CHANGED
@@ -25,6 +25,7 @@ module RubyLLM
  class ModelNotFoundError < StandardError; end
  class UnsupportedFunctionsError < StandardError; end
  class UnsupportedAttachmentError < StandardError; end
+ class UnsupportedStructuredOutputError < StandardError; end
 
  # Error classes for different HTTP status codes
  class BadRequestError < Error; end
data/lib/ruby_llm/message.rb CHANGED
@@ -7,7 +7,8 @@ module RubyLLM
  class Message
    ROLES = %i[system user assistant tool].freeze
 
-   attr_reader :role, :tool_calls, :tool_call_id, :input_tokens, :output_tokens, :model_id
+   attr_reader :role, :tool_calls, :tool_call_id, :input_tokens, :output_tokens, :model_id, :raw
+   attr_writer :content
 
    def initialize(options = {})
      @role = options.fetch(:role).to_sym
@@ -17,6 +18,7 @@ module RubyLLM
      @output_tokens = options[:output_tokens]
      @model_id = options[:model_id]
      @tool_call_id = options[:tool_call_id]
+     @raw = options[:raw]
 
      ensure_valid_role
    end
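`Message#raw` is only shown being captured from options here; what providers store in it is not visible in this diff. A hedged sketch, assuming (as is conventional for this kind of accessor) it carries the underlying provider HTTP response:

```ruby
response = chat.ask('Hello!')

# `raw` is nil unless the provider populated it; treat its shape as
# provider-specific rather than part of RubyLLM's stable API.
p response.raw if response.raw # e.g. inspect status/headers/body for debugging
```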