llm.rb 4.9.0 → 4.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +152 -0
  3. data/README.md +178 -31
  4. data/data/anthropic.json +209 -242
  5. data/data/deepseek.json +15 -15
  6. data/data/google.json +553 -403
  7. data/data/openai.json +740 -535
  8. data/data/xai.json +250 -253
  9. data/data/zai.json +157 -90
  10. data/lib/llm/context/deserializer.rb +2 -1
  11. data/lib/llm/context.rb +58 -2
  12. data/lib/llm/contract/completion.rb +7 -0
  13. data/lib/llm/error.rb +4 -0
  14. data/lib/llm/eventhandler.rb +7 -0
  15. data/lib/llm/function/registry.rb +106 -0
  16. data/lib/llm/function/task.rb +39 -0
  17. data/lib/llm/function.rb +12 -7
  18. data/lib/llm/mcp/transport/http/event_handler.rb +66 -0
  19. data/lib/llm/mcp/transport/http.rb +156 -0
  20. data/lib/llm/mcp/transport/stdio.rb +7 -0
  21. data/lib/llm/mcp.rb +74 -30
  22. data/lib/llm/message.rb +9 -2
  23. data/lib/llm/provider.rb +10 -0
  24. data/lib/llm/providers/anthropic/response_adapter/completion.rb +6 -0
  25. data/lib/llm/providers/anthropic/stream_parser.rb +37 -4
  26. data/lib/llm/providers/anthropic.rb +1 -1
  27. data/lib/llm/providers/google/response_adapter/completion.rb +12 -5
  28. data/lib/llm/providers/google/stream_parser.rb +54 -11
  29. data/lib/llm/providers/google/utils.rb +30 -0
  30. data/lib/llm/providers/google.rb +2 -0
  31. data/lib/llm/providers/ollama/response_adapter/completion.rb +6 -0
  32. data/lib/llm/providers/ollama/stream_parser.rb +10 -4
  33. data/lib/llm/providers/ollama.rb +1 -1
  34. data/lib/llm/providers/openai/response_adapter/completion.rb +7 -0
  35. data/lib/llm/providers/openai/response_adapter/responds.rb +84 -10
  36. data/lib/llm/providers/openai/responses/stream_parser.rb +63 -4
  37. data/lib/llm/providers/openai/responses.rb +1 -1
  38. data/lib/llm/providers/openai/stream_parser.rb +68 -4
  39. data/lib/llm/providers/openai.rb +1 -1
  40. data/lib/llm/schema/all_of.rb +31 -0
  41. data/lib/llm/schema/any_of.rb +31 -0
  42. data/lib/llm/schema/one_of.rb +31 -0
  43. data/lib/llm/schema/parser.rb +36 -0
  44. data/lib/llm/schema.rb +45 -8
  45. data/lib/llm/stream/queue.rb +51 -0
  46. data/lib/llm/stream.rb +102 -0
  47. data/lib/llm/tool.rb +53 -47
  48. data/lib/llm/version.rb +1 -1
  49. data/lib/llm.rb +3 -2
  50. data/llm.gemspec +2 -2
  51. metadata +12 -1
@@ -0,0 +1,39 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Function
4
+ ##
5
+ # The {LLM::Function::Task} class wraps a single concurrent function call and
6
+ # provides a small, uniform interface across threads, fibers, and async tasks.
7
+ class Task
8
+ ##
9
+ # @return [Object]
10
+ attr_reader :task
11
+
12
+ ##
13
+ # @param [Thread, Fiber, Async::Task] task
14
+ # @return [LLM::Function::Task]
15
+ def initialize(task)
16
+ @task = task
17
+ end
18
+
19
+ ##
20
+ # @return [Boolean]
21
+ def alive?
22
+ task.alive?
23
+ end
24
+
25
+ ##
26
+ # @return [LLM::Function::Return]
27
+ def wait
28
+ if Thread === task
29
+ task.value
30
+ elsif Fiber === task
31
+ task.resume if task.alive?
32
+ task.value
33
+ else
34
+ task.wait
35
+ end
36
+ end
37
+ alias_method :value, :wait
38
+ end
39
+ end
data/lib/llm/function.rb CHANGED
@@ -29,12 +29,15 @@
29
29
  # end
30
30
  # end
31
31
  class LLM::Function
32
+ require_relative "function/registry"
32
33
  require_relative "function/tracing"
33
34
  require_relative "function/array"
35
+ require_relative "function/task"
34
36
  require_relative "function/thread_group"
35
37
  require_relative "function/fiber_group"
36
38
  require_relative "function/task_group"
37
39
 
40
+ extend LLM::Function::Registry
38
41
  prepend LLM::Function::Tracing
39
42
 
40
43
  Return = Struct.new(:id, :name, :value) do
@@ -144,7 +147,7 @@ class LLM::Function
144
147
  end
145
148
 
146
149
  ##
147
- # Calls the function in a separate thread.
150
+ # Calls the function concurrently.
148
151
  #
149
152
  # This is the low-level method that powers concurrent tool execution.
150
153
  # Prefer the collection methods on {LLM::Context#functions} for most
@@ -156,8 +159,8 @@ class LLM::Function
156
159
  # ctx.talk(ctx.functions.wait)
157
160
  #
158
161
  # # Direct usage (uncommon)
159
- # thread = tool.spawn
160
- # result = thread.value
162
+ # task = tool.spawn(:thread)
163
+ # result = task.value
161
164
  #
162
165
  # @param [Symbol] strategy
163
166
  # Controls concurrency strategy:
@@ -165,10 +168,10 @@ class LLM::Function
165
168
  # - `:task`: Use async tasks (requires async gem)
166
169
  # - `:fiber`: Use raw fibers
167
170
  #
168
- # @return [Thread, Async::Task, Fiber]
169
- # Returns a thread, async task, or fiber whose `#value` is an {LLM::Function::Return}.
171
+ # @return [LLM::Function::Task]
172
+ # Returns a task whose `#value` is an {LLM::Function::Return}.
170
173
  def spawn(strategy)
171
- case strategy
174
+ task = case strategy
172
175
  when :task
173
176
  require "async" unless defined?(::Async)
174
177
  Async { call_function }
@@ -183,6 +186,7 @@ class LLM::Function
183
186
  else
184
187
  raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
185
188
  end
189
+ Task.new(task)
186
190
  ensure
187
191
  @called = true
188
192
  end
@@ -260,7 +264,8 @@ class LLM::Function
260
264
  # Returns a Return object with either the function result or error information.
261
265
  def call_function
262
266
  runner = ((Class === @runner) ? @runner.new : @runner)
263
- Return.new(id, name, runner.call(**arguments))
267
+ kwargs = Hash === arguments ? arguments.transform_keys(&:to_sym) : arguments
268
+ Return.new(id, name, runner.call(**kwargs))
264
269
  rescue => ex
265
270
  Return.new(id, name, {error: true, type: ex.class.name, message: ex.message})
266
271
  end
@@ -0,0 +1,66 @@
1
# frozen_string_literal: true

module LLM::MCP::Transport
  ##
  # The {LLM::MCP::Transport::HTTP::EventHandler LLM::MCP::Transport::HTTP::EventHandler}
  # class adapts generic server-sent event callbacks into decoded JSON-RPC
  # messages for {LLM::MCP::Transport::HTTP LLM::MCP::Transport::HTTP}.
  # Data lines are buffered until a blank line terminates the current
  # event; the buffered payload is then parsed as JSON and handed to the
  # callback supplied at construction time.
  # @private
  class HTTP::EventHandler
    ##
    # @yieldparam [Hash] message
    #  A decoded JSON-RPC message
    # @return [LLM::MCP::Transport::HTTP::EventHandler]
    def initialize(&on_message)
      @on_message = on_message
      reset
    end

    ##
    # Receives the SSE event name.
    # @param [LLM::EventStream::Event] event
    #  The event stream event
    # @return [void]
    def on_event(event)
      @name = event.value
    end

    ##
    # Receives one line of SSE data.
    # @param [LLM::EventStream::Event] event
    #  The event stream event
    # @return [void]
    def on_data(event)
      @buffer << event.value.to_s
    end

    # The generic event stream parser dispatches one line at a time.
    # A blank line terminates the current SSE event.
    # @param [LLM::EventStream::Event] event
    #  The event stream event
    # @return [void]
    def on_chunk(event)
      return unless event.chunk == "\n"
      flush
    end

    private

    # Decodes the buffered event (if any) and forwards it to the callback.
    # Malformed JSON is silently discarded so one bad event cannot break
    # the stream.
    def flush
      return reset if @buffer.empty? && @name.nil?
      payload = @buffer.join("\n")
      reset
      return if payload.empty? || payload == "[DONE]"
      @on_message.call(LLM.json.load(payload))
    rescue *LLM.json.parser_error
      reset
    end

    # Clears the event name and data buffer for the next SSE event.
    def reset
      @name = nil
      @buffer = []
    end
  end
end
@@ -0,0 +1,156 @@
1
# frozen_string_literal: true

module LLM::MCP::Transport
  ##
  # The {LLM::MCP::Transport::HTTP LLM::MCP::Transport::HTTP} class
  # provides an HTTP transport for {LLM::MCP LLM::MCP}. It sends
  # JSON-RPC messages with HTTP POST requests and buffers response
  # messages for non-blocking reads.
  class HTTP
    require_relative "http/event_handler"

    ##
    # @param [String] url
    #  The URL for the MCP HTTP endpoint
    # @param [Hash] headers
    #  Extra headers to send with requests
    # @param [Integer, nil] timeout
    #  The timeout in seconds. Defaults to nil
    # @return [LLM::MCP::Transport::HTTP]
    def initialize(url:, headers: {}, timeout: nil)
      @uri = URI.parse(url)
      @use_ssl = @uri.scheme == "https"
      @headers = headers
      @timeout = timeout
      @queue = []
      @monitor = Monitor.new
      @running = false
    end

    ##
    # Starts the HTTP transport.
    # @raise [LLM::MCP::Error]
    #  When the transport is already running
    # @return [void]
    def start
      lock do
        raise LLM::MCP::Error, "MCP transport is already running" if running?
        @queue.clear
        @running = true
      end
    end

    ##
    # Stops the HTTP transport and closes the connection.
    # This method is idempotent.
    # @return [void]
    def stop
      lock do
        return nil unless running?
        @running = false
        nil
      end
    end

    ##
    # Writes a JSON-RPC message via HTTP POST.
    # @param [Hash] message
    #  The JSON-RPC message
    # @raise [LLM::MCP::Error]
    #  When the transport is not running or the HTTP request fails
    # @return [void]
    def write(message)
      raise LLM::MCP::Error, "MCP transport is not running" unless running?
      # A URL without a path (e.g. "https://example.com") yields an empty
      # string from URI#path, which Net::HTTP::Post rejects — fall back
      # to "/".
      path = uri.path.empty? ? "/" : uri.path
      req = Net::HTTP::Post.new(path, headers.merge("content-type" => "application/json"))
      req.body = LLM.json.dump(message)
      if (http = persistent_client)
        http.request(uri, req) { |res| handle_response(res) }
      else
        # The block form of Net::HTTP.start closes the socket when the
        # block returns. The non-block form (previously used here) never
        # called #finish and leaked one connection per write.
        Net::HTTP.start(uri.host, uri.port, use_ssl:, open_timeout: timeout, read_timeout: timeout) do |conn|
          conn.request(req) { |res| handle_response(res) }
        end
      end
    end

    ##
    # Reads the next queued message without blocking.
    # @raise [LLM::MCP::Error]
    #  When the transport is not running
    # @raise [IO::WaitReadable]
    #  When no complete message is available to read
    # @return [Hash]
    def read_nonblock
      lock do
        raise LLM::MCP::Error, "MCP transport is not running" unless running?
        raise IO::WaitReadable if @queue.empty?
        @queue.shift
      end
    end

    ##
    # @return [Boolean]
    #  Returns true when the MCP server connection is alive
    def running?
      @running
    end

    ##
    # Configures the transport to use a persistent HTTP connection pool
    # via the optional dependency [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent)
    # @example
    #  mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persist!
    #  # do something with 'mcp'
    # @return [LLM::MCP::Transport::HTTP]
    def persist!
      LLM.lock(:mcp) do
        require "net/http/persistent" unless defined?(Net::HTTP::Persistent)
        unless LLM::MCP.clients.key?(key)
          http = Net::HTTP::Persistent.new(name: self.class.name)
          http.read_timeout = timeout
          http.open_timeout = timeout
          LLM::MCP.clients[key] ||= http
        end
      end
      self
    end

    private

    attr_reader :uri, :use_ssl, :headers, :timeout

    # Raises unless the response is a 2xx, then buffers its messages.
    def handle_response(res)
      unless Net::HTTPSuccess === res
        raise LLM::MCP::Error, "MCP transport write failed with HTTP #{res.code}"
      end
      read(res)
    end

    # Decodes an HTTP response body. SSE responses are fed through the
    # event stream parser line by line; plain responses are parsed as a
    # single JSON document.
    def read(res)
      if res["content-type"].to_s.include?("text/event-stream")
        parser = LLM::EventStream::Parser.new
        parser.register EventHandler.new { enqueue(_1) }
        res.read_body { parser << _1 }
        parser.free
      else
        body = +""
        res.read_body { body << _1 }
        enqueue(LLM.json.load(body)) unless body.empty?
      end
    end

    # Appends a decoded message to the read queue under the monitor.
    def enqueue(message)
      lock { @queue << message }
    end

    # Returns the pooled client registered by #persist!, or nil.
    def persistent_client
      LLM::MCP.clients[key]
    end

    # Pool key: one persistent client per scheme/host/port/timeout tuple.
    def key
      "#{uri.scheme}:#{uri.host}:#{uri.port}:#{timeout}"
    end

    def lock(&)
      @monitor.synchronize(&)
    end
  end
end
@@ -78,6 +78,13 @@ module LLM::MCP::Transport
78
78
  command.wait
79
79
  end
80
80
 
81
+ ##
82
+ # This method is a no-op for stdio transports
83
+ # @return [LLM::MCP::Transport::Stdio]
84
+ def persist!
85
+ self
86
+ end
87
+
81
88
  private
82
89
 
83
90
  attr_reader :command, :stdin, :stdout, :stderr
data/lib/llm/mcp.rb CHANGED
@@ -6,68 +6,118 @@
6
6
  # clients and servers to exchange capabilities such as tools, prompts,
7
7
  # resources, and other structured interactions.
8
8
  #
9
- # In llm.rb, {LLM::MCP LLM::MCP} currently supports stdio servers and
10
- # focuses on discovering tools that can be used through
9
+ # In llm.rb, {LLM::MCP LLM::MCP} currently supports stdio and HTTP
10
+ # transports and focuses on discovering tools that can be used through
11
11
  # {LLM::Context LLM::Context} and {LLM::Agent LLM::Agent}.
12
+ #
13
+ # Like {LLM::Context LLM::Context}, an MCP client is stateful and is
14
+ # expected to remain isolated to a single thread.
12
15
  class LLM::MCP
13
- require "monitor"
14
16
  require_relative "mcp/error"
15
17
  require_relative "mcp/command"
16
18
  require_relative "mcp/rpc"
17
19
  require_relative "mcp/pipe"
20
+ require_relative "mcp/transport/http"
18
21
  require_relative "mcp/transport/stdio"
19
22
 
20
23
  include RPC
21
24
 
25
+ @@clients = {}
26
+
27
+ ##
28
+ # @api private
29
+ def self.clients = @@clients
30
+
31
+ ##
32
+ # Builds an MCP client that uses the stdio transport.
33
+ # @param [LLM::Provider, nil] llm
34
+ # An instance of LLM::Provider. Optional.
35
+ # @param [Hash] stdio
36
+ # The stdio transport configuration
37
+ # @return [LLM::MCP]
38
+ def self.stdio(llm = nil, **stdio)
39
+ new(llm, stdio:)
40
+ end
41
+
42
+ ##
43
+ # Builds an MCP client that uses the HTTP transport.
44
+ # @param [LLM::Provider, nil] llm
45
+ # An instance of LLM::Provider. Optional.
46
+ # @param [Hash] http
47
+ # The HTTP transport configuration
48
+ # @return [LLM::MCP]
49
+ def self.http(llm = nil, **http)
50
+ new(llm, http:)
51
+ end
52
+
22
53
  ##
23
54
  # @param [LLM::Provider, nil] llm
24
55
  # The provider to use for MCP transports that need one
25
- # @param [Hash] stdio The configuration for the stdio transport
56
+ # @param [Hash, nil] stdio The configuration for the stdio transport
26
57
  # @option stdio [Array<String>] :argv
27
58
  # The command to run for the MCP process
28
59
  # @option stdio [Hash] :env
29
60
  # The environment variables to set for the MCP process
30
61
  # @option stdio [String, nil] :cwd
31
62
  # The working directory for the MCP process
32
- # @param [Integer] timeout The maximum amount of time to wait when reading from an MCP process
63
+ # @param [Hash, nil] http The configuration for the HTTP transport
64
+ # @option http [String] :url
65
+ # The URL for the MCP HTTP endpoint
66
+ # @option http [Hash] :headers
67
+ # Extra headers for requests
68
+ # @param [Integer] timeout
69
+ # The maximum amount of time to wait when reading from an MCP process
33
70
  # @return [LLM::MCP] A new MCP instance
34
- def initialize(llm = nil, stdio:, timeout: 30)
71
+ def initialize(llm = nil, stdio: nil, http: nil, timeout: 30)
35
72
  @llm = llm
36
- @command = Command.new(**stdio)
37
- @monitor = Monitor.new
38
- @transport = Transport::Stdio.new(command:)
39
73
  @timeout = timeout
74
+ if stdio && http
75
+ raise ArgumentError, "stdio and http are mutually exclusive"
76
+ elsif stdio
77
+ @command = Command.new(**stdio)
78
+ @transport = Transport::Stdio.new(command:)
79
+ elsif http
80
+ @transport = Transport::HTTP.new(**http, timeout:)
81
+ else
82
+ raise ArgumentError, "stdio or http is required"
83
+ end
40
84
  end
41
85
 
42
86
  ##
43
87
  # Starts the MCP process.
44
88
  # @return [void]
45
89
  def start
46
- lock do
47
- transport.start
48
- call(transport, "initialize", {clientInfo: {name: "llm.rb", version: LLM::VERSION}})
49
- call(transport, "notifications/initialized")
50
- end
90
+ transport.start
91
+ call(transport, "initialize", {clientInfo: {name: "llm.rb", version: LLM::VERSION}})
92
+ call(transport, "notifications/initialized")
51
93
  end
52
94
 
53
95
  ##
54
96
  # Stops the MCP process.
55
97
  # @return [void]
56
98
  def stop
57
- lock do
58
- transport.stop
59
- nil
60
- end
99
+ transport.stop
100
+ nil
101
+ end
102
+
103
+ ##
104
+ # Configures an HTTP MCP transport to use a persistent connection pool
105
+ # via the optional dependency [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent)
106
+ # @example
107
+ # mcp = LLM.mcp(http: {url: "https://example.com/mcp"}).persist!
108
+ # # do something with 'mcp'
109
+ # @return [LLM::MCP]
110
+ def persist!
111
+ transport.persist!
112
+ self
61
113
  end
62
114
 
63
115
  ##
64
116
  # Returns the tools provided by the MCP process.
65
117
  # @return [Array<Class<LLM::Tool>>]
66
118
  def tools
67
- lock do
68
- res = call(transport, "tools/list")
69
- res["tools"].map { LLM::Tool.mcp(self, _1) }
70
- end
119
+ res = call(transport, "tools/list")
120
+ res["tools"].map { LLM::Tool.mcp(self, _1) }
71
121
  end
72
122
 
73
123
  ##
@@ -76,10 +126,8 @@ class LLM::MCP
76
126
  # @param [Hash] arguments The arguments to pass to the tool
77
127
  # @return [Object] The result of the tool call
78
128
  def call_tool(name, arguments = {})
79
- lock do
80
- res = call(transport, "tools/call", {name:, arguments:})
81
- adapt_tool_result(res)
82
- end
129
+ res = call(transport, "tools/call", {name:, arguments:})
130
+ adapt_tool_result(res)
83
131
  end
84
132
 
85
133
  private
@@ -95,8 +143,4 @@ class LLM::MCP
95
143
  result
96
144
  end
97
145
  end
98
-
99
- def lock(&)
100
- @monitor.synchronize(&)
101
- end
102
146
  end
data/lib/llm/message.rb CHANGED
@@ -33,7 +33,7 @@ module LLM
33
33
  # Returns a Hash representation of the message.
34
34
  # @return [Hash]
35
35
  def to_h
36
- {role:, content:,
36
+ {role:, content:, reasoning_content:,
37
37
  tools: extra.tool_calls,
38
38
  usage:,
39
39
  original_tool_calls: extra.original_tool_calls}.compact
@@ -67,6 +67,13 @@ module LLM
67
67
  LLM.json.load(content)
68
68
  end
69
69
 
70
+ ##
71
+ # Returns reasoning content associated with the message
72
+ # @return [String, nil]
73
+ def reasoning_content
74
+ extra.reasoning_content
75
+ end
76
+
70
77
  ##
71
78
  # @return [Array<LLM::Function>]
72
79
  def functions
@@ -158,7 +165,7 @@ module LLM
158
165
  def inspect
159
166
  "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
160
167
  "tool_call=#{tool_calls.any?} role=#{role.inspect} " \
161
- "content=#{content.inspect}>"
168
+ "content=#{content.inspect} reasoning_content=#{reasoning_content.inspect}>"
162
169
  end
163
170
 
164
171
  private
data/lib/llm/provider.rb CHANGED
@@ -318,6 +318,15 @@ class LLM::Provider
318
318
  end
319
319
  end
320
320
 
321
+ ##
322
+ # @param [Object] stream
323
+ # @return [Boolean]
324
+ def streamable?(stream)
325
+ stream.respond_to?(:on_content) ||
326
+ stream.respond_to?(:on_reasoning_content) ||
327
+ stream.respond_to?(:<<)
328
+ end
329
+
321
330
  private
322
331
 
323
332
  attr_reader :client, :base_uri, :host, :port, :timeout, :ssl
@@ -393,6 +402,7 @@ class LLM::Provider
393
402
  res.body = body
394
403
  end
395
404
  ensure
405
+ handler&.free
396
406
  parser&.free
397
407
  end
398
408
  else
@@ -51,6 +51,12 @@ module LLM::Anthropic::ResponseAdapter
51
51
  super
52
52
  end
53
53
 
54
+ ##
55
+ # (see LLM::Contract::Completion#reasoning_content)
56
+ def reasoning_content
57
+ super
58
+ end
59
+
54
60
  ##
55
61
  # (see LLM::Contract::Completion#content!)
56
62
  def content!
@@ -10,11 +10,12 @@ class LLM::Anthropic
10
10
  attr_reader :body
11
11
 
12
12
  ##
13
- # @param [#<<] io An IO-like object
13
+ # @param [#<<, LLM::Stream] stream
14
+ # A stream sink that implements {#<<} or the {LLM::Stream} interface
14
15
  # @return [LLM::Anthropic::StreamParser]
15
- def initialize(io)
16
+ def initialize(stream)
16
17
  @body = {"role" => "assistant", "content" => []}
17
- @io = io
18
+ @stream = stream
18
19
  end
19
20
 
20
21
  ##
@@ -24,6 +25,12 @@ class LLM::Anthropic
24
25
  tap { merge!(chunk) }
25
26
  end
26
27
 
28
+ ##
29
+ # Frees internal parser state used during streaming.
30
+ # @return [void]
31
+ def free
32
+ end
33
+
27
34
  private
28
35
 
29
36
  def merge!(chunk)
@@ -34,7 +41,7 @@ class LLM::Anthropic
34
41
  elsif chunk["type"] == "content_block_delta"
35
42
  if chunk["delta"]["type"] == "text_delta"
36
43
  @body["content"][chunk["index"]]["text"] << chunk["delta"]["text"]
37
- @io << chunk["delta"]["text"] if @io.respond_to?(:<<)
44
+ emit_content(chunk["delta"]["text"])
38
45
  elsif chunk["delta"]["type"] == "input_json_delta"
39
46
  content = @body["content"][chunk["index"]]
40
47
  if Hash === content["input"]
@@ -53,6 +60,9 @@ class LLM::Anthropic
53
60
  if content["input"]
54
61
  content["input"] = LLM.json.load(content["input"])
55
62
  end
63
+ if content["type"] == "tool_use"
64
+ emit_tool(content)
65
+ end
56
66
  end
57
67
  end
58
68
 
@@ -76,5 +86,28 @@ class LLM::Anthropic
76
86
  end
77
87
  end
78
88
  end
89
+
90
+ def emit_content(value)
91
+ if @stream.respond_to?(:on_content)
92
+ @stream.on_content(value)
93
+ elsif @stream.respond_to?(:<<)
94
+ @stream << value
95
+ end
96
+ end
97
+
98
+ def emit_tool(tool)
99
+ return unless @stream.respond_to?(:on_tool_call)
100
+ function, error = resolve_tool(tool)
101
+ @stream.on_tool_call(function, error)
102
+ end
103
+
104
+ def resolve_tool(tool)
105
+ registered = LLM::Function.find_by_name(tool["name"])
106
+ fn = (registered || LLM::Function.new(tool["name"])).dup.tap do |fn|
107
+ fn.id = tool["id"]
108
+ fn.arguments = tool["input"]
109
+ end
110
+ [fn, (registered ? nil : @stream.tool_not_found(fn))]
111
+ end
79
112
  end
80
113
  end
@@ -141,7 +141,7 @@ module LLM
141
141
  tools = resolve_tools(params.delete(:tools))
142
142
  params = [params, adapt_tools(tools)].inject({}, &:merge!).compact
143
143
  role, stream = params.delete(:role), params.delete(:stream)
144
- params[:stream] = true if stream.respond_to?(:<<) || stream == true
144
+ params[:stream] = true if streamable?(stream) || stream == true
145
145
  [params, stream, tools, role]
146
146
  end
147
147