llm.rb 4.11.1 → 4.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/llm/context.rb CHANGED
@@ -103,9 +103,9 @@ module LLM
    #  res = ctx.respond("What is the capital of France?")
    #  puts res.output_text
    def respond(prompt, params = {})
-      res_id = @messages.find(&:assistant?)&.response&.response_id
-      params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
      params = @params.merge(params)
+      res_id = params[:store] == false ? nil : @messages.find(&:assistant?)&.response&.response_id
+      params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
      res = @llm.responses.create(prompt, params)
      role = params[:role] || @llm.user_role
      @messages.concat LLM::Prompt === prompt ? prompt.to_a : [LLM::Message.new(role, prompt)]
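
With this change, `Context#respond` omits `previous_response_id` whenever the caller passes `store: false`, since an unstored response cannot be referenced by id on the next turn. A minimal sketch (assuming `ctx` is an `LLM::Context`, as in the doc example above):

    res = ctx.respond("What is the capital of France?")
    # Opt out of server-side storage: no previous_response_id is sent,
    # and the conversation is carried entirely by the input messages.
    res = ctx.respond("And of Germany?", store: false)
    puts res.output_text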
@@ -9,11 +9,17 @@ class LLM::Function
  # @return [Object]
  attr_reader :task

+  ##
+  # @return [LLM::Function, nil]
+  attr_reader :function
+
  ##
  # @param [Thread, Fiber, Async::Task] task
+  # @param [LLM::Function, nil] function
  # @return [LLM::Function::Task]
-  def initialize(task)
+  def initialize(task, function = nil)
    @task = task
+    @function = function
  end

  ##
data/lib/llm/function.rb CHANGED
@@ -41,6 +41,13 @@ class LLM::Function
  prepend LLM::Function::Tracing

  Return = Struct.new(:id, :name, :value) do
+    ##
+    # Returns true when the return value represents an error.
+    # @return [Boolean]
+    def error?
+      Hash === value && value[:error] == true
+    end
+
    ##
    # Returns a Hash representation of {LLM::Function::Return}
    # @return [Hash]
@@ -186,7 +193,7 @@ class LLM::Function
    else
      raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
    end
-    Task.new(task)
+    Task.new(task, self)
  ensure
    @called = true
  end
@@ -233,7 +240,11 @@ class LLM::Function
    when "LLM::Google"
      {name: @name, description: @description, parameters: @params}.compact
    when "LLM::Anthropic"
-      {name: @name, description: @description, input_schema: @params}.compact
+      {
+        name: @name,
+        description: @description,
+        input_schema: @params || {type: "object", properties: {}}
+      }.compact
    else
      format_openai(provider)
    end
@@ -246,7 +257,7 @@ class LLM::Function
    when "LLM::OpenAI::Responses"
      {
        type: "function", name: @name, description: @description,
-        parameters: @params.to_h.merge(additionalProperties: false), strict: true
+        parameters: (@params || {type: "object", properties: {}}).to_h.merge(additionalProperties: false), strict: false
      }.compact
    else
      {
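
Two themes in this file: `Return#error?` gives callers a uniform check for in-band tool errors, and tools without a declared schema now fall back to an empty object schema instead of sending nil. A sketch of the predicate, using the struct fields from the diff:

    ret = LLM::Function::Return.new("call_1", "get_weather", {error: true, message: "city not found"})
    ret.error? # => true
    ret = LLM::Function::Return.new("call_2", "get_weather", {temperature: 20})
    ret.error? # => false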
data/lib/llm/mcp/error.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 class LLM::MCP
-  class Error < LLM::Error
+  Error = Class.new(LLM::Error) do
    attr_reader :code, :data

    ##
@@ -27,5 +27,35 @@ class LLM::MCP
    end
  end

+  MismatchError = Class.new(Error) do
+    ##
+    # @return [Integer, String]
+    #  The request id the client was waiting for
+    attr_reader :expected_id
+
+    ##
+    # @return [Integer, String]
+    #  The response id received from the server
+    attr_reader :actual_id
+
+    ##
+    # @param [Integer, String] expected_id
+    #  The request id the client was waiting for
+    # @param [Integer, String] actual_id
+    #  The response id received from the server instead
+    def initialize(expected_id:, actual_id:)
+      @expected_id = expected_id
+      @actual_id = actual_id
+      super(message)
+    end
+
+    ##
+    # @return [String]
+    def message
+      "mismatched MCP response id #{actual_id.inspect} " \
+      "while waiting for #{expected_id.inspect}"
+    end
+  end
+
  TimeoutError = Class.new(Error)
 end
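
Code waiting on an MCP response can now tell an out-of-band reply apart from a timeout. A hedged sketch, assuming a connected `mcp` client as in the `LLM.mcp` doc examples below:

    begin
      tools = mcp.tools
    rescue LLM::MCP::MismatchError => ex
      warn "expected response #{ex.expected_id.inspect}, got #{ex.actual_id.inspect}"
    rescue LLM::MCP::TimeoutError
      warn "the MCP server did not reply in time"
    end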
data/lib/llm/mcp/rpc.rb CHANGED
@@ -53,11 +53,14 @@ class LLM::MCP
    poll(timeout:, ex: [IO::WaitReadable]) do
      loop do
        res = transport.read_nonblock
-        next unless res["id"] == id
-        if res["error"]
+        if res["id"] == id && res["error"]
          raise LLM::MCP::Error.from(response: res)
-        else
+        elsif res["id"] == id
          break res["result"]
+        elsif res["method"]
+          next
+        elsif res.key?("id")
+          raise LLM::MCP::MismatchError.new(expected_id: id, actual_id: res["id"])
        end
      end
    end
@@ -101,6 +104,8 @@ class LLM::MCP
  #  The exceptions to retry when raised
  # @yield
  #  The block to run
+  # @raise [LLM::MCP::MismatchError]
+  #  When an unrelated response id is received while waiting
  # @raise [LLM::MCP::TimeoutError]
  #  When the block takes longer than the timeout
  # @return [Object]
@@ -104,7 +104,7 @@ module LLM::MCP::Transport
  # Configures the transport to use a persistent HTTP connection pool
  # via the optional dependency [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent)
  # @example
-  #  mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persist!
+  #  mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persistent
  #  # do something with 'mcp'
  # @return [LLM::MCP::Transport::HTTP]
  def persist!
@@ -119,6 +119,7 @@ module LLM::MCP::Transport
    end
    self
  end
+  alias_method :persistent, :persist!

  private

@@ -84,6 +84,7 @@ module LLM::MCP::Transport
  def persist!
    self
  end
+  alias_method :persistent, :persist!

  private

data/lib/llm/mcp.rb CHANGED
@@ -104,13 +104,14 @@ class LLM::MCP
  # Configures an HTTP MCP transport to use a persistent connection pool
  # via the optional dependency [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent)
  # @example
-  #  mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persist!
+  #  mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persistent
  #  # do something with 'mcp'
  # @return [LLM::MCP]
  def persist!
    transport.persist!
    self
  end
+  alias_method :persistent, :persist!

  ##
  # Returns the tools provided by the MCP process.
@@ -120,6 +121,34 @@ class LLM::MCP
    res["tools"].map { LLM::Tool.mcp(self, _1) }
  end

+  ##
+  # Returns the prompts provided by the MCP process.
+  # @return [Array<LLM::Object>]
+  def prompts
+    res = call(transport, "prompts/list")
+    LLM::Object.from(res["prompts"])
+  end
+
+  ##
+  # Returns a prompt by name.
+  # @param [String] name The prompt name
+  # @param [Hash<String, String>, nil] arguments The prompt arguments
+  # @return [LLM::Object]
+  def find_prompt(name:, arguments: nil)
+    params = {name:}
+    params[:arguments] = arguments if arguments
+    res = call(transport, "prompts/get", params)
+    res["messages"] = [*res["messages"]].map do |message|
+      LLM::Message.new(
+        message["role"],
+        adapt_content(message["content"]),
+        {original_content: message["content"]}
+      )
+    end
+    LLM::Object.from(res)
+  end
+  alias_method :get_prompt, :find_prompt
+
  ##
  # Calls a tool by name with the given arguments
  # @param [String] name The name of the tool to call
@@ -134,6 +163,19 @@ class LLM::MCP

  attr_reader :llm, :command, :transport, :timeout

+  def adapt_content(content)
+    case content
+    when String
+      content
+    when Hash
+      content["type"] == "text" ? content["text"].to_s : LLM::Object.from(content)
+    when Array
+      content.map { adapt_content(_1) }
+    else
+      content
+    end
+  end
+
  def adapt_tool_result(result)
    if result["structuredContent"]
      result["structuredContent"]
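
The new prompt APIs map onto MCP's `prompts/list` and `prompts/get` requests, with each message's content normalized by `adapt_content`. A usage sketch (the prompt name and arguments are hypothetical):

    mcp = LLM.mcp(http: {url: "https://example.com/mcp"})
    mcp.prompts.each { puts _1.name }
    prompt = mcp.find_prompt(name: "code_review", arguments: {"language" => "ruby"})
    prompt.messages.each { |message| puts "[#{message.role}] #{message.content}" }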
data/lib/llm/provider.rb CHANGED
@@ -308,7 +308,7 @@ class LLM::Provider
  # This method configures a provider to use a persistent connection pool
  # via the optional dependency [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent)
  # @example
-  #  llm = LLM.openai(key: ENV["KEY"]).persist!
+  #  llm = LLM.openai(key: ENV["KEY"]).persistent
  #  # do something with 'llm'
  # @return [LLM::Provider]
  def persist!
@@ -317,14 +317,13 @@ class LLM::Provider
      tap { @client = client }
    end
  end
+  alias_method :persistent, :persist!

  ##
  # @param [Object] stream
  # @return [Boolean]
  def streamable?(stream)
-    stream.respond_to?(:on_content) ||
-      stream.respond_to?(:on_reasoning_content) ||
-      stream.respond_to?(:<<)
+    LLM::Stream === stream || stream.respond_to?(:<<)
  end

  private
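
`streamable?` is now stricter: an object that only implements callback methods no longer qualifies on its own; a stream must be an `LLM::Stream` (or subclass) or respond to `#<<`. A sketch, assuming a completion entry point that accepts a `stream:` option as elsewhere in llm.rb:

    llm = LLM.openai(key: ENV["KEY"])
    llm.complete("Hello", stream: $stdout) # streamable: IO responds to #<<

    class MyStream < LLM::Stream
      def on_content(text) = print(text)
    end
    llm.complete("Hello", stream: MyStream.new) # streamable: LLM::Stream subclass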
@@ -28,12 +28,19 @@ module LLM::Anthropic::RequestAdapter

  def adapt_message
    if message.tool_call?
-      {role: message.role, content: message.extra[:original_tool_calls]}
+      {role: message.role, content: adapt_tool_calls}
    else
      {role: message.role, content: adapt_content(content)}
    end
  end

+  def adapt_tool_calls
+    message.extra[:tool_calls].filter_map do |tool|
+      next unless tool[:id] && tool[:name]
+      {type: "tool_use", id: tool[:id], name: tool[:name], input: LLM::Anthropic.parse_tool_input(tool[:arguments])}
+    end
+  end
+
  ##
  # @param [String, URI] content
  #  The content to format
@@ -66,7 +66,8 @@ module LLM::Anthropic::ResponseAdapter
  private

  def adapt_choices
-    texts.map.with_index do |choice, index|
+    source = texts.empty? && tools.any? ? [{"text" => ""}] : texts
+    source.map.with_index do |choice, index|
      extra = {
        index:, response: self,
        tool_calls: adapt_tool_calls(tools), original_tool_calls: tools
@@ -77,7 +78,11 @@ module LLM::Anthropic::ResponseAdapter

  def adapt_tool_calls(tools)
    (tools || []).filter_map do |tool|
-      {id: tool.id, name: tool.name, arguments: tool.input}
+      {
+        id: tool.id,
+        name: tool.name,
+        arguments: LLM::Anthropic.parse_tool_input(tool.input)
+      }
    end
  end

@@ -105,7 +105,7 @@ class LLM::Anthropic
    registered = LLM::Function.find_by_name(tool["name"])
    fn = (registered || LLM::Function.new(tool["name"])).dup.tap do |fn|
      fn.id = tool["id"]
-      fn.arguments = tool["input"]
+      fn.arguments = LLM::Anthropic.parse_tool_input(tool["input"])
    end
    [fn, (registered ? nil : @stream.tool_not_found(fn))]
  end
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+class LLM::Anthropic
+  module Utils
+    ##
+    # Normalizes Anthropic tool input to a Hash suitable for kwargs.
+    # @param input [Hash, String, nil]
+    # @return [Hash]
+    def parse_tool_input(input)
+      case input
+      when Hash then input
+      when String
+        parsed = LLM.json.load(input)
+        Hash === parsed ? parsed : {}
+      when nil then {}
+      else
+        input.respond_to?(:to_h) ? input.to_h : {}
+      end
+    rescue *LLM.json.parser_error
+      {}
+    end
+  end
+end
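
Since `LLM::Anthropic` extends `Utils` (see the next hunk), the normalizer is reachable as a singleton method, and its case analysis implies:

    LLM::Anthropic.parse_tool_input({"city" => "Paris"}) # => {"city" => "Paris"}
    LLM::Anthropic.parse_tool_input('{"city":"Paris"}')  # => {"city" => "Paris"}
    LLM::Anthropic.parse_tool_input("not json")          # => {} (parser error rescued)
    LLM::Anthropic.parse_tool_input(nil)                 # => {}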
@@ -14,6 +14,7 @@ module LLM
  #  ctx.talk ["Tell me about this photo", ctx.local_file("/images/photo.png")]
  #  ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class Anthropic < Provider
+    require_relative "anthropic/utils"
    require_relative "anthropic/error_handler"
    require_relative "anthropic/request_adapter"
    require_relative "anthropic/response_adapter"
@@ -21,6 +22,7 @@ module LLM
    require_relative "anthropic/models"
    require_relative "anthropic/files"
    include RequestAdapter
+    extend Utils

    HOST = "api.anthropic.com"

@@ -79,6 +81,15 @@ module LLM
      "assistant"
    end

+    ##
+    # Anthropic expects tool results to be sent as user messages
+    # containing `tool_result` content blocks rather than a distinct
+    # `tool` role.
+    # @return (see LLM::Provider#tool_role)
+    def tool_role
+      :user
+    end
+
    ##
    # Returns the default model for chat completions
    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-sonnet-4-20250514
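
With `tool_role` defined, replayed tool results are sent under the `user` role, which matches Anthropic's convention of `tool_result` content blocks inside user messages. Roughly, a tool-result turn on the wire looks like this (a hand-written illustration of Anthropic's documented format, not code from the gem):

    {role: "user", content: [{type: "tool_result", tool_use_id: "toolu_123", content: "22C and sunny"}]}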
@@ -15,6 +15,8 @@ module LLM::OpenAI::RequestAdapter
    catch(:abort) do
      if Hash === message
        {role: message[:role], content: adapt_content(message[:content])}
+      elsif message.tool_call?
+        message.extra[:original_tool_calls]
      else
        adapt_message
      end
@@ -23,12 +25,12 @@ module LLM::OpenAI::RequestAdapter

  private

-  def adapt_content(content)
+  def adapt_content(content, role: message.role)
    case content
    when String
-      [{type: :input_text, text: content.to_s}]
+      [{type: text_content_type(role), text: content.to_s}]
    when LLM::Response then adapt_remote_file(content)
-    when LLM::Message then adapt_content(content.content)
+    when LLM::Message then adapt_content(content.content, role: content.role)
    when LLM::Object
      case content.kind
      when :image_url then [{type: :image_url, image_url: {url: content.value.to_s}}]
@@ -46,7 +48,7 @@ module LLM::OpenAI::RequestAdapter
    when Array
      adapt_array
    else
-      {role: message.role, content: adapt_content(content)}
+      {role: message.role, content: adapt_content(content, role: message.role)}
    end
  end

@@ -56,7 +58,7 @@ module LLM::OpenAI::RequestAdapter
    elsif returns.any?
      returns.map { {type: "function_call_output", call_id: _1.id, output: LLM.json.dump(_1.value)} }
    else
-      {role: message.role, content: content.flat_map { adapt_content(_1) }}
+      {role: message.role, content: content.flat_map { adapt_content(_1, role: message.role) }}
    end
  end

@@ -83,5 +85,9 @@ module LLM::OpenAI::RequestAdapter
  def message = @message
  def content = message.content
  def returns = content.grep(LLM::Function::Return)
+
+  def text_content_type(role)
+    role.to_s == "assistant" ? :output_text : :input_text
+  end
  end
 end
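
The net effect of the role-aware typing: replayed assistant text serializes as `output_text` parts while user input stays `input_text`, as the Responses API expects when history is resent. Implied behavior of the private helper (hypothetical direct calls, for illustration):

    adapt_content("hi", role: "user")      # => [{type: :input_text, text: "hi"}]
    adapt_content("hi", role: "assistant") # => [{type: :output_text, text: "hi"}]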
@@ -60,6 +60,13 @@ module LLM::OpenAI::ResponseAdapter
    body.model
  end

+  ##
+  # OpenAI's Responses API does not expose a system fingerprint.
+  # @return [nil]
+  def system_fingerprint
+    nil
+  end
+
  ##
  # Returns the aggregated text content from the response outputs.
  # @return [String]
@@ -88,10 +95,15 @@ module LLM::OpenAI::ResponseAdapter
  private

  def adapt_message
-    message = LLM::Message.new("assistant", +"", {response: self, tool_calls: [], reasoning_content: +""})
+    message = LLM::Message.new(
+      "assistant",
+      +"",
+      {response: self, tool_calls: [], original_tool_calls: [], reasoning_content: +""}
+    )
    output.each do |choice|
      if choice.type == "function_call"
        message.extra[:tool_calls] << adapt_tool(choice)
+        message.extra[:original_tool_calls] << choice
      elsif choice.type == "reasoning"
        (choice.summary || []).each do |summary|
          next unless summary["type"] == "summary_text"
@@ -43,11 +43,19 @@ class LLM::OpenAI
        @body[k] = v
      end
      @body["output"] ||= []
+    when "response.in_progress", "response.completed"
+      response = chunk["response"] || {}
+      response.each do |k, v|
+        next if k == "output" && @body["output"].is_a?(Array) && @body["output"].any?
+        @body[k] = v
+      end
+      @body["output"] ||= response["output"] || []
    when "response.output_item.added"
      output_index = chunk["output_index"]
      item = chunk["item"]
      @body["output"][output_index] = item
      @body["output"][output_index]["content"] ||= []
+      @body["output"][output_index]["summary"] ||= [] if item["type"] == "reasoning"
    when "response.content_part.added"
      output_index = chunk["output_index"]
      content_index = chunk["content_index"]
@@ -55,6 +63,25 @@ class LLM::OpenAI
      @body["output"][output_index] ||= {"content" => []}
      @body["output"][output_index]["content"] ||= []
      @body["output"][output_index]["content"][content_index] = part
+    when "response.reasoning_summary_text.delta"
+      output_item = @body["output"][chunk["output_index"]]
+      if output_item && output_item["type"] == "reasoning"
+        summary_index = chunk["summary_index"] || 0
+        output_item["summary"] ||= []
+        output_item["summary"][summary_index] ||= {"type" => "summary_text", "text" => +""}
+        output_item["summary"][summary_index]["text"] << chunk["delta"]
+        emit_reasoning_content(chunk["delta"])
+      end
+    when "response.reasoning_summary_text.done"
+      output_item = @body["output"][chunk["output_index"]]
+      if output_item && output_item["type"] == "reasoning"
+        summary_index = chunk["summary_index"] || 0
+        output_item["summary"] ||= []
+        output_item["summary"][summary_index] = {
+          "type" => "summary_text",
+          "text" => chunk["text"]
+        }
+      end
    when "response.output_text.delta"
      output_index = chunk["output_index"]
      content_index = chunk["content_index"]
@@ -102,6 +129,10 @@ class LLM::OpenAI
    end
  end

+  def emit_reasoning_content(value)
+    @stream.on_reasoning_content(value) if @stream.respond_to?(:on_reasoning_content)
+  end
+
  def emit_tool(index, tool)
    return unless @stream.respond_to?(:on_tool_call)
    return unless complete_tool?(tool)
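
With these events handled, reasoning summary deltas are accumulated into the response body and forwarded to the stream's `#on_reasoning_content` callback as they arrive. A sketch of a consumer:

    class ReasoningStream < LLM::Stream
      def on_content(text) = print(text)
      def on_reasoning_content(text) = print("\e[2m", text, "\e[0m") # dim the reasoning text
    end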
@@ -8,8 +8,10 @@ class LLM::Stream
  #  returns an array of {LLM::Function::Return} values.
  class Queue
    ##
+    # @param [LLM::Stream] stream
    # @return [LLM::Stream::Queue]
-    def initialize
+    def initialize(stream)
+      @stream = stream
      @items = []
    end

@@ -39,13 +41,24 @@ class LLM::Stream
    # @return [Array<LLM::Function::Return>]
    def wait(strategy)
      returns, tasks = @items.shift(@items.length).partition { LLM::Function::Return === _1 }
-      returns.concat case strategy
+      results = case strategy
      when :thread then LLM::Function::ThreadGroup.new(tasks).wait
      when :task then LLM::Function::TaskGroup.new(tasks).wait
      when :fiber then LLM::Function::FiberGroup.new(tasks).wait
      else raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
      end
+      returns.concat fire_hooks(tasks, results)
    end
    alias_method :value, :wait
+
+    private
+
+    def fire_hooks(tasks, results)
+      results.each_with_index do |ret, idx|
+        tool = tasks[idx]&.function
+        @stream.on_tool_return(tool, ret) if tool
+      end
+      results
+    end
  end
 end
data/lib/llm/stream.rb CHANGED
@@ -5,20 +5,20 @@ module LLM
  # The {LLM::Stream LLM::Stream} class provides the callback interface for
  # streamed model output in llm.rb.
  #
-  # A stream object can be an instance of {LLM::Stream LLM::Stream}, a
-  # subclass that overrides the callbacks it needs, or any other object that
-  # implements some or all of the same interface. {#queue} provides a small
-  # helper for collecting asynchronous tool work started from a callback, and
-  # {#tool_not_found} returns an in-band tool error when a streamed tool
-  # cannot be resolved.
+  # A stream object can be an instance of {LLM::Stream LLM::Stream} or a
+  # subclass that overrides the callbacks it needs. For basic streaming,
+  # llm.rb also accepts any object that implements `#<<`. {#queue} provides
+  # a small helper for collecting asynchronous tool work started from a
+  # callback, and {#tool_not_found} returns an in-band tool error when a
+  # streamed tool cannot be resolved.
  #
  # @note The `on_*` callbacks run inline with the streaming parser. They
  #   therefore block streaming progress and should generally return as
  #   quickly as possible.
  #
-  # The most common callback is {#on_content}, which also maps to {#<<} for
-  # compatibility with `StringIO`-style objects. Providers may also call
-  # {#on_reasoning_content} and {#on_tool_call} when that data is available.
+  # The most common callback is {#on_content}, which also maps to {#<<}.
+  # Providers may also call {#on_reasoning_content} and {#on_tool_call} when
+  # that data is available.
  class Stream
    require_relative "stream/queue"

@@ -26,7 +26,7 @@ module LLM
    # Returns a lazily-initialized queue for tool results or spawned work.
    # @return [LLM::Stream::Queue]
    def queue
-      @queue ||= Queue.new
+      @queue ||= Queue.new(self)
    end

    ##
@@ -79,6 +79,20 @@ module LLM
      nil
    end

+    ##
+    # Called when queued streamed tool work returns.
+    # @note This callback runs when {#wait} resolves work that was queued from
+    #   {#on_tool_call}, such as values returned by `tool.spawn(:thread)`,
+    #   `tool.spawn(:fiber)`, or `tool.spawn(:task)`.
+    # @param [LLM::Function] tool
+    #  The tool that returned.
+    # @param [LLM::Function::Return] ret
+    #  The completed tool return.
+    # @return [nil]
+    def on_tool_return(tool, ret)
+      nil
+    end
+
    # @endgroup

    # @group Error handlers
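
Together with `Queue#fire_hooks` above, a subclass can observe each completed asynchronous tool call. A sketch, assuming work is queued from `#on_tool_call` via `tool.spawn(:thread)` as the @note describes (the `queue <<` push is an assumption based on the Queue docs):

    class ToolStream < LLM::Stream
      def on_tool_call(tool)
        queue << tool.spawn(:thread)
      end

      def on_tool_return(tool, ret)
        warn "#{tool.name} reported an error" if ret.error?
      end
    end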
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module LLM
-  VERSION = "4.11.1"
+  VERSION = "4.13.0"
 end