llm.rb 4.16.1 → 4.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -13,7 +13,8 @@ module LLM::Sequel
13
13
  # default) or as a structured object (`format: :json` / `:jsonb`) for
14
14
  # databases such as PostgreSQL that can persist JSON natively.
15
15
  # `:json` and `:jsonb` expect a real JSON column type with Sequel handling
16
- # JSON typecasting for the model.
16
+ # JSON typecasting for the model. `provider:`, `context:`, and `tracer:`
17
+ # can also be configured as symbols that are called on the model.
17
18
  module Plugin
18
19
  EMPTY_HASH = {}.freeze
19
20
  DEFAULT_USAGE_COLUMNS = {
@@ -27,6 +28,7 @@ module LLM::Sequel
27
28
  data_column: :data,
28
29
  format: :string,
29
30
  usage_columns: DEFAULT_USAGE_COLUMNS,
31
+ tracer: nil,
30
32
  provider: EMPTY_HASH,
31
33
  context: EMPTY_HASH
32
34
  }.freeze
@@ -59,6 +61,9 @@ module LLM::Sequel
59
61
  # Storage format for the serialized context. Use `:string` for text
60
62
  # columns, or `:json` / `:jsonb` for structured JSON columns with Sequel
61
63
  # JSON typecasting enabled.
64
+ # @option options [Proc, Symbol, LLM::Tracer, nil] :tracer
65
+ # Optional tracer, method name, or proc that resolves to one and is
66
+ # assigned through `llm.tracer = ...` on the resolved provider.
62
67
  # @return [void]
63
68
  def self.configure(model, options = EMPTY_HASH)
64
69
  options = DEFAULTS.merge(options)
@@ -111,6 +116,13 @@ module LLM::Sequel
111
116
  ctx.call(...)
112
117
  end
113
118
 
119
+ ##
120
+ # @see LLM::Context#mode
121
+ # @return [Symbol]
122
+ def mode
123
+ ctx.mode
124
+ end
125
+
114
126
  ##
115
127
  # @see LLM::Context#messages
116
128
  # @return [Array<LLM::Message>]
@@ -134,6 +146,13 @@ module LLM::Sequel
134
146
  ctx.functions
135
147
  end
136
148
 
149
+ ##
150
+ # @see LLM::Context#returns
151
+ # @return [Array<LLM::Function::Return>]
152
+ def returns
153
+ ctx.returns
154
+ end
155
+
137
156
  ##
138
157
  # @see LLM::Context#cost
139
158
  # @return [LLM::Cost]
@@ -161,6 +180,50 @@ module LLM::Sequel
161
180
  )
162
181
  end
163
182
 
183
+ ##
184
+ # @see LLM::Context#interrupt!
185
+ # @return [nil]
186
+ def interrupt!
187
+ ctx.interrupt!
188
+ end
189
+ alias_method :cancel!, :interrupt!
190
+
191
+ ##
192
+ # @see LLM::Context#prompt
193
+ # @return [LLM::Prompt]
194
+ def prompt(&)
195
+ ctx.prompt(&)
196
+ end
197
+ alias_method :build_prompt, :prompt
198
+
199
+ ##
200
+ # @see LLM::Context#image_url
201
+ # @return [LLM::Object]
202
+ def image_url(...)
203
+ ctx.image_url(...)
204
+ end
205
+
206
+ ##
207
+ # @see LLM::Context#local_file
208
+ # @return [LLM::Object]
209
+ def local_file(...)
210
+ ctx.local_file(...)
211
+ end
212
+
213
+ ##
214
+ # @see LLM::Context#remote_file
215
+ # @return [LLM::Object]
216
+ def remote_file(...)
217
+ ctx.remote_file(...)
218
+ end
219
+
220
+ ##
221
+ # @see LLM::Context#tracer
222
+ # @return [LLM::Tracer]
223
+ def tracer
224
+ ctx.tracer
225
+ end
226
+
164
227
  private
165
228
 
166
229
  ##
@@ -170,7 +233,10 @@ module LLM::Sequel
170
233
  options = self.class.llm_plugin_options
171
234
  provider = self[columns[:provider_column]]
172
235
  kwargs = resolve_options(options[:provider])
173
- @llm ||= LLM.method(provider).call(**kwargs)
236
+ return @llm if @llm
237
+ @llm = LLM.method(provider).call(**kwargs)
238
+ @llm.tracer = resolve_option(options[:tracer]) if options[:tracer]
239
+ @llm
174
240
  end
175
241
 
176
242
  ##
@@ -211,6 +277,7 @@ module LLM::Sequel
211
277
  def resolve_option(option)
212
278
  case option
213
279
  when Proc then instance_exec(&option)
280
+ when Symbol then send(option)
214
281
  when Hash then option.dup
215
282
  else option
216
283
  end
@@ -38,6 +38,7 @@ class LLM::Stream
38
38
  # - `:thread`: Use threads
39
39
  # - `:task`: Use async tasks (requires async gem)
40
40
  # - `:fiber`: Use raw fibers
41
+ # - `:ractor`: Use Ruby ractors (class-based tools only; MCP tools are not supported)
41
42
  # @return [Array<LLM::Function::Return>]
42
43
  def wait(strategy)
43
44
  returns, tasks = @items.shift(@items.length).partition { LLM::Function::Return === _1 }
@@ -45,7 +46,8 @@ class LLM::Stream
45
46
  when :thread then LLM::Function::ThreadGroup.new(tasks).wait
46
47
  when :task then LLM::Function::TaskGroup.new(tasks).wait
47
48
  when :fiber then LLM::Function::FiberGroup.new(tasks).wait
48
- else raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
49
+ when :ractor then LLM::Function::Ractor::Group.new(tasks).wait
50
+ else raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, :fiber, or :ractor"
49
51
  end
50
52
  returns.concat fire_hooks(tasks, results)
51
53
  end
data/lib/llm/stream.rb CHANGED
@@ -22,6 +22,13 @@ module LLM
22
22
  class Stream
23
23
  require_relative "stream/queue"
24
24
 
25
+ ##
26
+ # Returns extra context associated with the current streamed request.
27
+ # @return [Hash]
28
+ def extra
29
+ @extra ||= LLM::Object.from({})
30
+ end
31
+
25
32
  ##
26
33
  # Returns a lazily-initialized queue for tool results or spawned work.
27
34
  # @return [LLM::Stream::Queue]
@@ -63,13 +70,16 @@ module LLM
63
70
  # Called when a streamed tool call has been fully constructed.
64
71
  # @note A stream implementation may start tool execution here, for
65
72
  # example by pushing `tool.spawn(:thread)`, `tool.spawn(:fiber)`, or
66
- # `tool.spawn(:task)` onto {#queue}. When a streamed tool cannot be
67
- # resolved, `error` is passed as an {LLM::Function::Return}. It can be
68
- # sent back to the model, allowing the tool-call path to recover and the
69
- # session to continue. Tool resolution depends on
73
+ # `tool.spawn(:task)` onto {#queue}. Mixed strategies can also be
74
+ # selected per tool, such as `tool.mcp? ? tool.spawn(:task) :
75
+ # tool.spawn(:ractor)`. When a streamed tool cannot be resolved, `error`
76
+ # is passed as an {LLM::Function::Return}. It can be sent back to the
77
+ # model, allowing the tool-call path to recover and the session to
78
+ # continue. Tool resolution depends on
70
79
  # {LLM::Function.registry}, which includes {LLM::Tool LLM::Tool}
71
80
  # subclasses, including MCP tools, but not functions defined with
72
- # {LLM.function}.
81
+ # {LLM.function}. The current `:ractor` mode is for class-based tools
82
+ # and does not support MCP tools.
73
83
  # @param [LLM::Function] tool
74
84
  # The parsed tool call.
75
85
  # @param [LLM::Function::Return, nil] error
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
- VERSION = "4.16.1"
4
+ VERSION = "4.18.0"
5
5
  end
data/llm.gemspec CHANGED
@@ -8,25 +8,20 @@ Gem::Specification.new do |spec|
8
8
  spec.authors = ["Antar Azri", "0x1eef", "Christos Maris", "Rodrigo Serrano"]
9
9
  spec.email = ["azantar@proton.me", "0x1eef@hardenedbsd.org"]
10
10
 
11
- spec.summary = "System integration layer for LLMs, tools, MCP, and APIs in Ruby."
11
+ spec.summary = "Lightweight runtime for building capable AI systems in Ruby."
12
12
 
13
13
  spec.description = <<~DESCRIPTION
14
- llm.rb is a runtime for building AI systems that integrate directly with your
15
- application. It is not just an API wrapper. It provides a unified execution
16
- model for providers, tools, MCP servers, streaming, schemas, files, and
17
- state.
18
-
19
- It is built for engineers who want control over how these systems run.
20
- llm.rb stays close to Ruby, runs on the standard library by default, loads
21
- optional pieces only when needed, and remains easy to extend. It also works
22
- well in Rails or ActiveRecord applications, where a small wrapper around
23
- context persistence is enough to save and restore long-lived conversation
24
- state across requests, jobs, or retries.
25
-
26
- Most LLM libraries stop at request/response APIs. Building real systems
27
- means stitching together streaming, tools, state, persistence, and external
28
- services by hand. llm.rb provides a single execution model for all of these,
29
- so they compose naturally instead of becoming separate subsystems.
14
+ llm.rb is a lightweight runtime for building capable AI systems in Ruby.
15
+ It is not just an API wrapper. llm.rb gives you one runtime for providers,
16
+ contexts, agents, tools, MCP servers, streaming, schemas, files, and
17
+ persisted state, so real systems can be built out of one coherent
18
+ execution model instead of a pile of adapters. It stays close to Ruby, runs
19
+ on the standard library by default, loads optional pieces only when needed,
20
+ includes built-in ActiveRecord support through acts_as_llm and
21
+ acts_as_agent, includes built-in Sequel support through plugin :llm,
22
+ and is designed for engineers who want control over long-lived,
23
+ tool-capable, stateful AI workflows instead of just request/response
24
+ helpers.
30
25
  DESCRIPTION
31
26
 
32
27
  spec.license = "0BSD"
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 4.16.1
4
+ version: 4.18.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Antar Azri
@@ -195,22 +195,17 @@ dependencies:
195
195
  - !ruby/object:Gem::Version
196
196
  version: '1.7'
197
197
  description: |
198
- llm.rb is a runtime for building AI systems that integrate directly with your
199
- application. It is not just an API wrapper. It provides a unified execution
200
- model for providers, tools, MCP servers, streaming, schemas, files, and
201
- state.
202
-
203
- It is built for engineers who want control over how these systems run.
204
- llm.rb stays close to Ruby, runs on the standard library by default, loads
205
- optional pieces only when needed, and remains easy to extend. It also works
206
- well in Rails or ActiveRecord applications, where a small wrapper around
207
- context persistence is enough to save and restore long-lived conversation
208
- state across requests, jobs, or retries.
209
-
210
- Most LLM libraries stop at request/response APIs. Building real systems
211
- means stitching together streaming, tools, state, persistence, and external
212
- services by hand. llm.rb provides a single execution model for all of these,
213
- so they compose naturally instead of becoming separate subsystems.
198
+ llm.rb is a lightweight runtime for building capable AI systems in Ruby.
199
+ It is not just an API wrapper. llm.rb gives you one runtime for providers,
200
+ contexts, agents, tools, MCP servers, streaming, schemas, files, and
201
+ persisted state, so real systems can be built out of one coherent
202
+ execution model instead of a pile of adapters. It stays close to Ruby, runs
203
+ on the standard library by default, loads optional pieces only when needed,
204
+ includes built-in ActiveRecord support through acts_as_llm and
205
+ acts_as_agent, includes built-in Sequel support through plugin :llm,
206
+ and is designed for engineers who want control over long-lived,
207
+ tool-capable, stateful AI workflows instead of just request/response
208
+ helpers.
214
209
  email:
215
210
  - azantar@proton.me
216
211
  - 0x1eef@hardenedbsd.org
@@ -229,6 +224,7 @@ files:
229
224
  - data/zai.json
230
225
  - lib/llm.rb
231
226
  - lib/llm/active_record.rb
227
+ - lib/llm/active_record/acts_as_agent.rb
232
228
  - lib/llm/active_record/acts_as_llm.rb
233
229
  - lib/llm/agent.rb
234
230
  - lib/llm/bot.rb
@@ -247,6 +243,11 @@ files:
247
243
  - lib/llm/function.rb
248
244
  - lib/llm/function/array.rb
249
245
  - lib/llm/function/fiber_group.rb
246
+ - lib/llm/function/ractor.rb
247
+ - lib/llm/function/ractor/job.rb
248
+ - lib/llm/function/ractor/mailbox.rb
249
+ - lib/llm/function/ractor/task.rb
250
+ - lib/llm/function/ractor_group.rb
250
251
  - lib/llm/function/registry.rb
251
252
  - lib/llm/function/task.rb
252
253
  - lib/llm/function/task_group.rb
@@ -408,7 +409,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
408
409
  - !ruby/object:Gem::Version
409
410
  version: '0'
410
411
  requirements: []
411
- rubygems_version: 3.6.9
412
+ rubygems_version: 4.0.3
412
413
  specification_version: 4
413
- summary: System integration layer for LLMs, tools, MCP, and APIs in Ruby.
414
+ summary: Lightweight runtime for building capable AI systems in Ruby.
414
415
  test_files: []