llm.rb 4.16.1 → 4.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/llm/agent.rb CHANGED
@@ -6,10 +6,18 @@ module LLM
   # reusable, preconfigured assistants with defaults for model,
   # tools, schema, and instructions.
   #
+  # It wraps the same stateful runtime surface as
+  # {LLM::Context LLM::Context}: message history, usage, persistence,
+  # streaming parameters, and provider-backed requests still flow through
+  # an underlying context. The defining behavior of an agent is that it
+  # automatically resolves pending tool calls for you during `talk` and
+  # `respond`, instead of leaving tool loops to the caller.
+  #
   # **Notes:**
   # * Instructions are injected only on the first request.
-  # * An agent will automatically execute tool calls (unlike {LLM::Context LLM::Context}).
-  # * The idea originally came from RubyLLM and was adapted to llm.rb.
+  # * An agent automatically executes tool loops (unlike {LLM::Context LLM::Context}).
+  # * Tool loop execution can be configured with `concurrency :call`,
+  #   `:thread`, `:task`, `:fiber`, or `:ractor`.
   #
   # @example
   #   class SystemAdmin < LLM::Agent
@@ -72,6 +80,23 @@ module LLM
       @instructions = instructions
     end
 
+    ##
+    # Set or get the tool execution concurrency.
+    #
+    # @param [Symbol, nil] concurrency
+    #  Controls how pending tool loops are executed:
+    #  - `:call`: sequential calls
+    #  - `:thread`: concurrent threads
+    #  - `:task`: concurrent async tasks
+    #  - `:fiber`: concurrent raw fibers
+    #  - `:ractor`: concurrent Ruby ractors for class-based tools; MCP tools are not supported,
+    #    and this mode is especially useful for CPU-bound tool work
+    # @return [Symbol, nil]
+    def self.concurrency(concurrency = nil)
+      return @concurrency if concurrency.nil?
+      @concurrency = concurrency
+    end
+
     ##
     # @param [LLM::Provider] provider
     #  A provider
@@ -82,8 +107,10 @@ module LLM
     # @option params [String] :model Defaults to the provider's default model
     # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
     # @option params [#to_json, nil] :schema Defaults to nil
+    # @option params [Symbol, nil] :concurrency Defaults to the agent class concurrency
     def initialize(llm, params = {})
       defaults = {model: self.class.model, tools: self.class.tools, schema: self.class.schema}.compact
+      @concurrency = params.delete(:concurrency) || self.class.concurrency
       @llm = llm
       @ctx = LLM::Context.new(llm, defaults.merge(params))
     end
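
For orientation, a minimal sketch of how the class-level default and the new per-instance override compose. `FileSearch` is a hypothetical class-based tool, and the `model`/`tools` class macros are assumed to follow the same get-or-set pattern as `concurrency`:

    class SystemAdmin < LLM::Agent
      model "gpt-4o-mini"    # hypothetical model name
      tools [FileSearch]     # hypothetical class-based tool
      concurrency :thread    # class-wide default for tool loops
    end

    llm   = LLM.openai(key: ENV["KEY"])
    agent = SystemAdmin.new(llm, concurrency: :ractor)  # :concurrency param wins over the class default
    agent.concurrency  #=> :ractor
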
@@ -94,7 +121,7 @@ module LLM
     #
     # @param prompt (see LLM::Provider#complete)
     # @param [Hash] params The params passed to the provider, including optional :stream, :tools, :schema etc.
-    # @option params [Integer] :max_tool_rounds The maxinum number of tool call iterations (default 10)
+    # @option params [Integer] :tool_attempts The maximum number of tool call iterations (default 10)
     # @return [LLM::Response] Returns the LLM's response for this turn.
     # @example
     #   llm = LLM.openai(key: ENV["KEY"])
@@ -102,13 +129,13 @@ module LLM
     #   response = agent.talk("Hello, what is your name?")
     #   puts response.choices[0].content
     def talk(prompt, params = {})
-      i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
+      max = Integer(params.delete(:tool_attempts) || 10)
       res = @ctx.talk(apply_instructions(prompt), params)
-      until @ctx.functions.empty?
-        raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
-        res = @ctx.talk @ctx.functions.map(&:call), params
-        i += 1
+      max.times do
+        break if @ctx.functions.empty?
+        res = @ctx.talk(call_functions, params)
       end
+      raise LLM::ToolLoopError, "pending tool calls remain" unless @ctx.functions.empty?
       res
     end
     alias_method :chat, :talk
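
A sketch of bounding the tool loop under the renamed parameter; `FileSearch` is again a hypothetical tool:

    llm   = LLM.openai(key: ENV["KEY"])
    agent = LLM::Agent.new(llm, tools: [FileSearch])
    begin
      res = agent.talk("Summarize today's log files", tool_attempts: 3)
      puts res.choices[0].content
    rescue LLM::ToolLoopError
      warn "the model still had pending tool calls after 3 rounds"
    end
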
@@ -120,7 +147,7 @@ module LLM
     # @note Not all LLM providers support this API
     # @param prompt (see LLM::Provider#complete)
     # @param [Hash] params The params passed to the provider, including optional :stream, :tools, :schema etc.
-    # @option params [Integer] :max_tool_rounds The maxinum number of tool call iterations (default 10)
+    # @option params [Integer] :tool_attempts The maximum number of tool call iterations (default 10)
     # @return [LLM::Response] Returns the LLM's response for this turn.
     # @example
     #   llm = LLM.openai(key: ENV["KEY"])
@@ -128,13 +155,13 @@ module LLM
     #   res = agent.respond("What is the capital of France?")
     #   puts res.output_text
     def respond(prompt, params = {})
-      i, max = 0, Integer(params.delete(:max_tool_rounds) || 10)
+      max = Integer(params.delete(:tool_attempts) || 10)
       res = @ctx.respond(apply_instructions(prompt), params)
-      until @ctx.functions.empty?
-        raise LLM::ToolLoopError, "pending tool calls remain" if i >= max
-        res = @ctx.respond @ctx.functions.map(&:call), params
-        i += 1
+      max.times do
+        break if @ctx.functions.empty?
+        res = @ctx.respond(call_functions, params)
       end
+      raise LLM::ToolLoopError, "pending tool calls remain" unless @ctx.functions.empty?
       res
     end
 
@@ -150,12 +177,41 @@ module LLM
       @ctx.functions
     end
 
+    ##
+    # @see LLM::Context#returns
+    # @return [Array<LLM::Function::Return>]
+    def returns
+      @ctx.returns
+    end
+
+    ##
+    # @see LLM::Context#call
+    # @return [Object]
+    def call(...)
+      @ctx.call(...)
+    end
+
+    ##
+    # @see LLM::Context#wait
+    # @return [Array<LLM::Function::Return>]
+    def wait(...)
+      @ctx.wait(...)
+    end
+
     ##
     # @return [LLM::Object]
     def usage
       @ctx.usage
     end
 
+    ##
+    # Interrupt the active request, if any.
+    # @return [nil]
+    def interrupt!
+      @ctx.interrupt!
+    end
+    alias_method :cancel!, :interrupt!
+
     ##
     # @param (see LLM::Context#prompt)
     # @return (see LLM::Context#prompt)
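
A sketch of cancelling a long turn from another thread; how the interrupted call resolves (return value vs. raised error) is provider-dependent:

    agent = LLM::Agent.new(LLM.openai(key: ENV["KEY"]))
    turn = Thread.new { agent.talk("Write a very long essay") }
    sleep 2
    agent.interrupt!  # or agent.cancel!; aborts the in-flight request, if any
    turn.join
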
@@ -206,6 +262,53 @@ module LLM
       @ctx.model
     end
 
+    ##
+    # @return [Symbol]
+    def mode
+      @ctx.mode
+    end
+
+    ##
+    # Returns the configured tool execution concurrency.
+    # @return [Symbol, nil]
+    def concurrency
+      @concurrency
+    end
+
+    ##
+    # @see LLM::Context#cost
+    # @return [LLM::Cost]
+    def cost
+      @ctx.cost
+    end
+
+    ##
+    # @see LLM::Context#context_window
+    # @return [Integer]
+    def context_window
+      @ctx.context_window
+    end
+
+    ##
+    # @see LLM::Context#to_h
+    # @return [Hash]
+    def to_h
+      @ctx.to_h
+    end
+
+    ##
+    # @return [String]
+    def to_json(...)
+      to_h.to_json(...)
+    end
+
+    ##
+    # @return [String]
+    def inspect
+      "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
+        "@llm=#{@llm.class}, @mode=#{mode.inspect}, @messages=#{messages.inspect}>"
+    end
+
     ##
     # @param (see LLM::Context#serialize)
     # @return (see LLM::Context#serialize)
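
These delegators make an agent as introspectable as a bare context; for example, persisting a conversation snapshot (the exact hash shape comes from LLM::Context#to_h):

    agent = LLM::Agent.new(LLM.openai(key: ENV["KEY"]))
    agent.talk("hello")
    agent.cost            # running LLM::Cost for the conversation
    agent.context_window  # the model's context window, in tokens
    File.write("agent.json", agent.to_json)
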
@@ -230,14 +333,24 @@ module LLM
       instr = self.class.instructions
       return new_prompt unless instr
       if LLM::Prompt === new_prompt
-        @ctx.messages.empty? ? new_prompt.system(instr) : nil
+        new_prompt.system(instr) if @ctx.messages.empty?
         new_prompt
       else
         prompt do
-          @ctx.messages.empty? ? _1.system(instr) : nil
+          _1.system(instr) if @ctx.messages.empty?
           _1.user(new_prompt)
         end
       end
     end
+
+    ##
+    # @return [Array<LLM::Function::Return>]
+    def call_functions
+      case concurrency || :call
+      when :call then call(:functions)
+      when :thread, :task, :fiber, :ractor then wait(concurrency)
+      else raise ArgumentError, "Unknown concurrency: #{concurrency.inspect}. Expected :call, :thread, :task, :fiber, or :ractor"
+      end
+    end
   end
 end
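
Note that `call_functions` validates the symbol lazily: a bad value raises on the first tool round, not at construction. A small sketch, assuming the prompt causes the model to request a tool:

    agent = LLM::Agent.new(llm, concurrency: :threads)  # typo accepted silently here
    agent.talk("Search my files for TODOs")
    # => ArgumentError: Unknown concurrency: :threads. Expected :call, :thread, :task, :fiber, or :ractor
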
data/lib/llm/context.rb CHANGED
@@ -86,6 +86,7 @@ module LLM
       return respond(prompt, params) if mode == :responses
       params = params.merge(messages: @messages.to_a)
       params = @params.merge(params)
+      bind!(params[:stream], params[:model])
       res = @llm.complete(prompt, params)
       role = params[:role] || @llm.user_role
       role = @llm.tool_role if params[:role].nil? && [*prompt].grep(LLM::Function::Return).any?
@@ -110,6 +111,7 @@ module LLM
     #   puts res.output_text
     def respond(prompt, params = {})
       params = @params.merge(params)
+      bind!(params[:stream], params[:model])
       res_id = params[:store] == false ? nil : @messages.find(&:assistant?)&.response&.response_id
       params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
       res = @llm.responses.create(prompt, params)
@@ -356,6 +358,14 @@ module LLM
     rescue LLM::NoSuchModelError, LLM::NoSuchRegistryError
       0
     end
+
+    private
+
+    def bind!(stream, model)
+      return unless LLM::Stream === stream
+      stream.extra[:tracer] = tracer
+      stream.extra[:model] = model
+    end
   end
 
   # Backward-compatible alias
@@ -27,8 +27,9 @@ class LLM::Function
   #  - `:thread`: Use threads
   #  - `:task`: Use async tasks (requires async gem)
   #  - `:fiber`: Use raw fibers
+  #  - `:ractor`: Use Ruby ractors (class-based tools only; MCP tools are not supported)
   #
-  # @return [LLM::Function::ThreadGroup, LLM::Function::TaskGroup, LLM::Function::FiberGroup]
+  # @return [LLM::Function::ThreadGroup, LLM::Function::TaskGroup, LLM::Function::FiberGroup, LLM::Function::Ractor::Group]
   def spawn(strategy)
     case strategy
     when :task
@@ -37,8 +38,10 @@ class LLM::Function
       ThreadGroup.new(map { |fn| fn.spawn(:thread) })
     when :fiber
       FiberGroup.new(map { |fn| fn.spawn(:fiber) })
+    when :ractor
+      Ractor::Group.new(map { |fn| fn.spawn(:ractor) })
     else
-      raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
+      raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, :fiber, or :ractor"
     end
   end
 
@@ -51,6 +54,7 @@ class LLM::Function
   #  - `:thread`: Use threads
   #  - `:task`: Use async tasks (requires async gem)
   #  - `:fiber`: Use raw fibers
+  #  - `:ractor`: Use Ruby ractors (class-based tools only; MCP tools are not supported)
   #
   # @return [Array<LLM::Function::Return>]
   #  Returns values to be reported back to the LLM.
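
A hedged sketch of the new strategy on a pending-function collection, assuming `ctx.functions` returns the collection this hunk patches and that every pending tool is class-based:

    returns = ctx.functions.spawn(:ractor).wait  # LLM::Function::Ractor::Group#wait
    ctx.talk(returns)                            # report the results back to the LLM
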
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  ##
+  # The {LLM::Function::Ractor::Job} class manages execution and mailbox
+  # coordination for a single ractor-backed function call.
+  class Ractor::Job
+    ##
+    # @param [::Ractor] mailbox
+    # @param [Class] runner_class
+    # @param [String, nil] id
+    # @param [String] name
+    # @param [Hash, Array, nil] arguments
+    # @return [LLM::Function::Ractor::Job]
+    def initialize(mailbox, runner_class, id, name, arguments)
+      @mailbox = mailbox
+      @runner_class = runner_class
+      @id = id
+      @name = name
+      @arguments = arguments
+    end
+
+    ##
+    # @return [void]
+    def call
+      spawn
+      wait
+    end
+
+    private
+
+    def wait
+      done = false
+      result = nil
+      waiters = []
+      loop do
+        case ::Ractor.receive
+        in [:done, *result]
+          done = true
+          waiters.each { _1.send(result) }
+          waiters.clear
+        in [:alive?, reply]
+          reply.send(!done)
+        in [:wait, reply]
+          done ? reply.send(result) : waiters << reply
+        end
+      end
+    end
+
+    def spawn
+      ::Ractor.new(@mailbox, @runner_class, @id, @name, @arguments) do |mailbox, runner_class, id, name, arguments|
+        kwargs = Hash === arguments ? arguments.transform_keys(&:to_sym) : arguments
+        mailbox.send([:done, id, name, runner_class.new.call(**kwargs)])
+      rescue => ex
+        mailbox.send([:done, id, name, {error: true, type: ex.class.name, message: ex.message}])
+      end
+    end
+  end
+end
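
For orientation, the job ractor speaks a three-message protocol: [:done, ...] arrives from the worker, while [:alive?, reply] and [:wait, reply] come from clients. A sketch of driving it by hand, mirroring what {LLM::Function::Ractor::Mailbox} does, where `job` stands for the ractor running Job#call:

    reply = ::Ractor.new { ::Ractor.receive }  # one-shot reply channel
    job.send([:alive?, reply])
    reply.respond_to?(:take) ? reply.take : ::Ractor.select(reply).last  #=> true until [:done, ...] arrives
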
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  ##
+  # The {LLM::Function::Ractor::Mailbox} class manages the mailbox protocol
+  # for a single ractor-backed function call.
+  class Ractor::Mailbox
+    ##
+    # @return [::Ractor]
+    attr_reader :task
+
+    ##
+    # @param [::Ractor] task
+    # @return [LLM::Function::Ractor::Mailbox]
+    def initialize(task)
+      @task = task
+    end
+
+    ##
+    # @return [Boolean]
+    def alive?
+      request(:alive?)
+    end
+
+    ##
+    # @return [Array]
+    def wait
+      request(:wait)
+    end
+
+    private
+
+    def request(type)
+      reply = ::Ractor.new { ::Ractor.receive }
+      task.send([type, reply])
+      reply.respond_to?(:take) ? reply.take : ::Ractor.select(reply).last
+    end
+  end
+end
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  ##
+  # The {LLM::Function::Ractor::Task} class wraps a ractor-backed function
+  # call and delegates mailbox coordination to
+  # {LLM::Function::Ractor::Mailbox}.
+  class Ractor::Task
+    ##
+    # @return [LLM::Function::Ractor::Mailbox]
+    attr_reader :mailbox
+
+    ##
+    # @param [Class] runner_class
+    # @param [String, nil] id
+    # @param [String] name
+    # @param [Hash, Array, nil] arguments
+    # @return [LLM::Function::Ractor::Task]
+    def initialize(runner_class, id, name, arguments)
+      @mailbox = Ractor::Mailbox.new(build_task(runner_class, id, name, arguments))
+    end
+
+    ##
+    # @return [Boolean]
+    def alive?
+      mailbox.alive?
+    end
+
+    ##
+    # @return [LLM::Function::Return]
+    def wait
+      id, name, value = mailbox.wait
+      Return.new(id, name, value)
+    end
+    alias_method :value, :wait
+
+    private
+
+    def build_task(runner_class, id, name, arguments)
+      ::Ractor.new(runner_class, id, name, arguments) do |runner_class, id, name, arguments|
+        LLM::Function::Ractor::Job.new(::Ractor.current, runner_class, id, name, arguments).call
+      end
+    end
+  end
+end
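
A sketch of the unit in isolation; `WordCounter` is a hypothetical class-based tool whose instances respond to `call`. Note that Job#spawn symbolizes string argument keys before invoking the runner:

    class WordCounter
      def call(text:)
        text.split.size
      end
    end

    task = LLM::Function::Ractor::Task.new(WordCounter, "call_1", "word_counter", {"text" => "a b c"})
    task.wait  #=> LLM::Function::Return built from the [id, name, value] triple
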
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  module Ractor
+    require_relative "ractor/mailbox"
+    require_relative "ractor/job"
+    require_relative "ractor/task"
+  end
+end
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  ##
+  # The {LLM::Function::Ractor::Group} class wraps an array of
+  # {LLM::Function::Ractor::Task} objects that are running
+  # {LLM::Function} calls concurrently.
+  class Ractor::Group
+    ##
+    # @param [Array<LLM::Function::Task>] tasks
+    # @return [LLM::Function::Ractor::Group]
+    def initialize(tasks)
+      @tasks = tasks
+    end
+
+    ##
+    # @return [Boolean]
+    def alive?
+      @tasks.any?(&:alive?)
+    end
+
+    ##
+    # @return [Array<LLM::Function::Return>]
+    def wait
+      @tasks.map(&:wait)
+    end
+    alias_method :value, :wait
+  end
+end
@@ -14,7 +14,7 @@ class LLM::Function
   attr_reader :function
 
   ##
-  # @param [Thread, Fiber, Async::Task] task
+  # @param [Thread, Fiber, Async::Task, Ractor, LLM::Function::Ractor::Task] task
   # @param [LLM::Function, nil] function
   # @return [LLM::Function::Task]
   def initialize(task, function = nil)
@@ -25,7 +25,8 @@ class LLM::Function
   ##
   # @return [Boolean]
   def alive?
-    task.alive?
+    return task.alive? if task.respond_to?(:alive?)
+    false
   end
 
   ##
data/lib/llm/function.rb CHANGED
@@ -36,6 +36,8 @@ class LLM::Function
   require_relative "function/thread_group"
   require_relative "function/fiber_group"
   require_relative "function/task_group"
+  require_relative "function/ractor"
+  require_relative "function/ractor_group"
 
   extend LLM::Function::Registry
   prepend LLM::Function::Tracing
@@ -174,6 +176,7 @@ class LLM::Function
   #  - `:thread`: Use threads
   #  - `:task`: Use async tasks (requires async gem)
   #  - `:fiber`: Use raw fibers
+  #  - `:ractor`: Use Ruby ractors (class-based tools only; MCP tools are not supported)
   #
   # @return [LLM::Function::Task]
   #  Returns a task whose `#value` is an {LLM::Function::Return}.
@@ -181,17 +184,20 @@ class LLM::Function
     task = case strategy
     when :task
       require "async" unless defined?(::Async)
-      Async { call_function }
+      Async { call }
     when :thread
-      Thread.new { call_function }
+      Thread.new { call }
     when :fiber
       Fiber.new do
-        call_function
+        call
      ensure
        Fiber.yield
      end.tap(&:resume)
+    when :ractor
+      raise ArgumentError, "Ractor concurrency only supports class-based tools" unless Class === @runner
+      Ractor::Task.new(@runner, id, name, arguments)
    else
-      raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
+      raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, :fiber, or :ractor"
    end
    Task.new(task, self)
  ensure
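
A sketch of the class-based constraint, assuming `fn` is a pending LLM::Function whose runner is a class; a block-based or MCP-backed runner would hit the new ArgumentError instead:

    task = fn.spawn(:ractor)  # LLM::Function::Task wrapping a Ractor::Task
    task.value                # => LLM::Function::Return, via Ractor::Task#wait
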
data/lib/llm/provider.rb CHANGED
@@ -271,34 +271,48 @@ class LLM::Provider
 
   ##
   # @return [LLM::Tracer]
-  #  Returns a fiber-local tracer
+  #  Returns the current scoped tracer override or provider default tracer
   def tracer
-    weakmap[self] || LLM::Tracer::Null.new(self)
+    weakmap[self] || @tracer || LLM::Tracer::Null.new(self)
   end
 
   ##
-  # Set a fiber-local tracer
+  # Set the provider's default tracer.
+  # This tracer is shared by the provider instance and becomes the fallback
+  # whenever no scoped override is active.
   # @example
   #   llm = LLM.openai(key: ENV["KEY"])
-  #   Thread.new do
-  #     llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log/1.txt")
-  #   end
-  #   Thread.new do
-  #     llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log/2.txt")
-  #   end
-  #   # ...
+  #   llm.tracer = LLM::Tracer::Logger.new(llm, path: "/path/to/log.txt")
   # @param [LLM::Tracer] tracer
   #  A tracer
   # @return [void]
   def tracer=(tracer)
-    if tracer.nil?
-      if weakmap.respond_to?(:delete)
-        weakmap.delete(self)
-      else
-        weakmap[self] = nil
-      end
+    @tracer = tracer
+  end
+
+  ##
+  # Override the tracer for the current fiber while the block runs.
+  # This is useful when you want per-request or per-turn tracing without
+  # replacing the provider's default tracer.
+  # @example
+  #   llm.with_tracer(LLM::Tracer::Logger.new(llm, io: $stdout)) do
+  #     llm.complete("hello", model: "gpt-5.4-mini")
+  #   end
+  # @param [LLM::Tracer] tracer
+  # @yield
+  # @return [Object]
+  def with_tracer(tracer)
+    had_override = weakmap.key?(self)
+    previous = weakmap[self]
+    weakmap[self] = tracer
+    yield
+  ensure
+    if had_override
+      weakmap[self] = previous
+    elsif weakmap.respond_to?(:delete)
+      weakmap.delete(self)
     else
-      weakmap[self] = tracer
+      weakmap[self] = nil
     end
   end
 
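
A sketch of the resulting precedence (fiber-scoped override, then instance default, then the null tracer); the log path is a placeholder:

    llm = LLM.openai(key: ENV["KEY"])
    llm.tracer = LLM::Tracer::Logger.new(llm, path: "/tmp/default.log")  # instance-wide fallback
    llm.with_tracer(LLM::Tracer::Logger.new(llm, io: $stdout)) do
      llm.complete("hello")  # traced to $stdout via the scoped override
    end
    llm.complete("hello")    # traced to /tmp/default.log again
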
@@ -109,6 +109,8 @@ class LLM::Anthropic
   fn = (registered || LLM::Function.new(tool["name"])).dup.tap do |fn|
     fn.id = tool["id"]
     fn.arguments = LLM::Anthropic.parse_tool_input(tool["input"])
+    fn.tracer = @stream.extra[:tracer]
+    fn.model = @stream.extra[:model]
   end
   [fn, (registered ? nil : @stream.tool_not_found(fn))]
 end
@@ -157,6 +157,8 @@ class LLM::Google
   fn = (registered || LLM::Function.new(call["name"])).dup.tap do |fn|
     fn.id = LLM::Google.tool_id(part:, cindex:, pindex:)
     fn.arguments = call["args"]
+    fn.tracer = @stream.extra[:tracer]
+    fn.model = @stream.extra[:model]
   end
   [fn, (registered ? nil : @stream.tool_not_found(fn))]
 end
@@ -273,6 +273,8 @@ class LLM::OpenAI
   fn = (registered || LLM::Function.new(tool["name"])).dup.tap do |fn|
     fn.id = tool["call_id"]
     fn.arguments = arguments
+    fn.tracer = @stream.extra[:tracer]
+    fn.model = @stream.extra[:model]
   end
   [fn, (registered ? nil : @stream.tool_not_found(fn))]
 end
@@ -189,6 +189,8 @@ class LLM::OpenAI
   fn = (registered || LLM::Function.new(function["name"])).dup.tap do |fn|
     fn.id = tool["id"]
     fn.arguments = arguments
+    fn.tracer = @stream.extra[:tracer]
+    fn.model = @stream.extra[:model]
   end
   [fn, (registered ? nil : @stream.tool_not_found(fn))]
 end
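
Taken together with LLM::Context#bind! above, these four parser changes close the loop for streaming: the context stashes its tracer and the request's model on the stream object, and each provider's stream parser copies them onto every function it surfaces, so tool calls that arrive mid-stream are traced by the same tracer and attributed to the same model as the request that produced them.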