llm.rb 4.12.0 → 4.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/llm/context.rb CHANGED
@@ -62,6 +62,7 @@ module LLM
  @mode = params.delete(:mode) || :completions
  @params = {model: llm.default_model, schema: nil}.compact.merge!(params)
  @messages = LLM::Buffer.new(llm)
+ @owner = Fiber.current
  end

  ##
@@ -103,9 +104,9 @@ module LLM
  # res = ctx.respond("What is the capital of France?")
  # puts res.output_text
  def respond(prompt, params = {})
- res_id = @messages.find(&:assistant?)&.response&.response_id
- params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
  params = @params.merge(params)
+ res_id = params[:store] == false ? nil : @messages.find(&:assistant?)&.response&.response_id
+ params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
  res = @llm.responses.create(prompt, params)
  role = params[:role] || @llm.user_role
  @messages.concat LLM::Prompt === prompt ? prompt.to_a : [LLM::Message.new(role, prompt)]
@@ -184,6 +185,15 @@ module LLM
  end
  end

+ ##
+ # Interrupt the active request, if any.
+ # This is inspired by Go's context cancellation model.
+ # @return [nil]
+ def interrupt!
+ llm.interrupt!(@owner)
+ end
+ alias_method :cancel!, :interrupt!
+
  ##
  # Returns token usage accumulated in this context
  # @note
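
The new interrupt!/cancel! API pairs with the LLM::Interrupt error class introduced below. A minimal usage sketch, assuming the context's owner fiber is blocked inside respond while another thread cancels it; the ctx setup line is an assumption for illustration, and only interrupt!, cancel!, and LLM::Interrupt come from this release:

    ctx = llm.chat("You are a helpful assistant")      # assumed entry point; ctx is the LLM::Context shown above
    canceller = Thread.new { sleep 1; ctx.interrupt! } # or ctx.cancel!
    begin
      ctx.respond("Summarize this very long document ...")
    rescue LLM::Interrupt
      puts "request interrupted"                       # raised when the in-flight request is cancelled
    end
    canceller.join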
data/lib/llm/error.rb CHANGED
@@ -55,6 +55,10 @@ module LLM
  # When stuck in a tool call loop
  ToolLoopError = Class.new(Error)

+ ##
+ # When a request is interrupted
+ Interrupt = Class.new(Error)
+
  ##
  # When a tool call cannot be mapped to a local tool
  NoSuchToolError = Class.new(Error)
@@ -13,13 +13,15 @@ module LLM

  ##
  # "data:" event callback
- # @param [LLM::EventStream::Event] event
+ # @param [LLM::EventStream::Event, String, nil] event
+ # @param [String, nil] chunk
  # @return [void]
- def on_data(event)
- return if event.end?
- chunk = LLM.json.load(event.value)
- return unless chunk
- @parser.parse!(chunk)
+ def on_data(event, chunk = nil)
+ value = chunk ? event : event.value
+ return if value == "[DONE]"
+ payload = LLM.json.load(value)
+ return unless payload
+ @parser.parse!(payload)
  rescue *LLM.json.parser_error
  end

@@ -28,13 +30,15 @@
  # is received, regardless of whether it has
  # a field name or not. Primarily for ollama,
  # which does not emit Server-Sent Events (SSE).
- # @param [LLM::EventStream::Event] event
+ # @param [LLM::EventStream::Event, String, nil] event
+ # @param [String, nil] chunk
  # @return [void]
- def on_chunk(event)
- return if event.end?
- chunk = LLM.json.load(event.chunk)
- return unless chunk
- @parser.parse!(chunk)
+ def on_chunk(event, chunk = nil)
+ raw_chunk = chunk || event&.chunk || event
+ return if raw_chunk == "[DONE]"
+ payload = LLM.json.load(raw_chunk)
+ return unless payload
+ @parser.parse!(payload)
  rescue *LLM.json.parser_error
  end

@@ -4,8 +4,17 @@ module LLM::EventStream
  ##
  # @private
  class Event
- FIELD_REGEXP = /[^:]+/
- VALUE_REGEXP = /(?<=: ).+/
+ UNSET = Object.new.freeze
+
+ def self.parse(chunk)
+ newline = chunk.end_with?("\n") ? chunk.bytesize - 1 : chunk.bytesize
+ separator = chunk.index(":")
+ return [nil, nil] unless separator
+ field = chunk.byteslice(0, separator)
+ value_start = separator + (chunk.getbyte(separator + 1) == 32 ? 2 : 1)
+ value = value_start < newline ? chunk.byteslice(value_start, newline - value_start) : nil
+ [field, value]
+ end

  ##
  # Returns the field name
@@ -25,9 +34,10 @@
  ##
  # @param [String] chunk
  # @return [LLM::EventStream::Event]
- def initialize(chunk)
- @field = chunk[FIELD_REGEXP]
- @value = chunk[VALUE_REGEXP]
+ def initialize(chunk, field: UNSET, value: UNSET)
+ @field, @value = self.class.parse(chunk) if field.equal?(UNSET) || value.equal?(UNSET)
+ @field = field unless field.equal?(UNSET)
+ @value = value unless value.equal?(UNSET)
  @chunk = chunk
  end

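To make the new byte-oriented parsing concrete, here is how Event.parse behaves on typical SSE lines, following the method body above:

    LLM::EventStream::Event.parse("data: {\"ok\":true}\n") # => ["data", "{\"ok\":true}"]
    LLM::EventStream::Event.parse("event:done\n")          # => ["event", "done"] (space after ":" is optional)
    LLM::EventStream::Event.parse("data:\n")               # => ["data", nil]     (empty value)
    LLM::EventStream::Event.parse("[DONE]\n")              # => [nil, nil]        (no ":" separator)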
@@ -4,6 +4,8 @@ module LLM::EventStream
  ##
  # @private
  class Parser
+ COMPACT_THRESHOLD = 4096
+
  ##
  # @return [LLM::EventStream::Parser]
  def initialize
@@ -42,7 +44,8 @@
  # Returns the internal buffer
  # @return [String]
  def body
- @buffer.dup
+ return @buffer.dup if @cursor.zero?
+ @buffer.byteslice(@cursor, @buffer.bytesize - @cursor) || +""
  end

  ##
@@ -55,34 +58,46 @@

  private

- def parse!(event)
- event = Event.new(event)
- dispatch(event)
+ def parse!(chunk)
+ field, value = Event.parse(chunk)
+ dispatch_visitors(field, value, chunk)
+ dispatch_callbacks(field, value, chunk)
+ end
+
+ def dispatch_visitors(field, value, chunk)
+ @visitors.each { dispatch_visitor(_1, field, value, chunk) }
  end

- def dispatch(event)
- @visitors.each { dispatch_visitor(_1, event) }
- @events[event.field].each { _1.call(event) }
+ def dispatch_callbacks(field, value, chunk)
+ callbacks = @events[field]
+ return if callbacks.empty?
+ event = Event.new(chunk, field:, value:)
+ callbacks.each { _1.call(event) }
  end

- def dispatch_visitor(visitor, event)
- method = "on_#{event.field}"
+ def dispatch_visitor(visitor, field, value, chunk)
+ method = "on_#{field}"
  if visitor.respond_to?(method)
- visitor.public_send(method, event)
+ visitor.public_send(method, value, chunk)
  elsif visitor.respond_to?("on_chunk")
- visitor.on_chunk(event)
+ visitor.on_chunk(nil, chunk)
  end
  end

  def each_line
  while (newline = @buffer.index("\n", @cursor))
- line = @buffer[@cursor..newline]
+ line = @buffer.byteslice(@cursor, newline - @cursor + 1)
  @cursor = newline + 1
  yield(line)
  end
  return if @cursor.zero?
- @buffer = @buffer[@cursor..] || +""
- @cursor = 0
+ if @cursor >= @buffer.bytesize
+ @buffer.clear
+ @cursor = 0
+ elsif @cursor >= COMPACT_THRESHOLD
+ @buffer = @buffer.byteslice(@cursor, @buffer.bytesize - @cursor) || +""
+ @cursor = 0
+ end
  end
  end
  end
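
The each_line change replaces per-line string slicing with a byte cursor and only compacts the buffer once at least COMPACT_THRESHOLD consumed bytes have accumulated, so the buffer is reallocated roughly once per 4 KiB instead of once per line. A standalone sketch of the same pattern, with illustrative names that are not part of llm.rb:

    # Cursor-plus-compaction line splitting, as a self-contained example.
    class LineBuffer
      THRESHOLD = 4096

      def initialize
        @buffer = +""
        @cursor = 0
      end

      # Append a chunk and yield each complete line exactly once.
      def feed(chunk)
        @buffer << chunk
        while (newline = @buffer.index("\n", @cursor))
          yield @buffer.byteslice(@cursor, newline - @cursor + 1)
          @cursor = newline + 1
        end
        compact!
      end

      private

      # Reallocate at most once per THRESHOLD consumed bytes, not per line.
      def compact!
        if @cursor >= @buffer.bytesize
          @buffer.clear
          @cursor = 0
        elsif @cursor >= THRESHOLD
          @buffer = @buffer.byteslice(@cursor, @buffer.bytesize - @cursor) || +""
          @cursor = 0
        end
      end
    end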
data/lib/llm/function.rb CHANGED
@@ -257,7 +257,7 @@ class LLM::Function
  when "LLM::OpenAI::Responses"
  {
  type: "function", name: @name, description: @description,
- parameters: @params.to_h.merge(additionalProperties: false), strict: true
+ parameters: (@params || {type: "object", properties: {}}).to_h.merge(additionalProperties: false), strict: false
  }.compact
  else
  {
@@ -74,7 +74,7 @@ class LLM::MCP
  # The IO stream to read from (:stdout, :stderr)
  # @raise [LLM::Error]
  # When the command is not running
- # @raise [IO::WaitReadable]
+ # @raise [IO::EAGAINWaitReadable]
  # When no complete message is available to read
  # @return [String]
  # The next complete line from the specified IO stream
data/lib/llm/mcp/error.rb CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  class LLM::MCP
- class Error < LLM::Error
+ Error = Class.new(LLM::Error) do
  attr_reader :code, :data

  ##
@@ -27,5 +27,35 @@ class LLM::MCP
  end
  end

+ MismatchError = Class.new(Error) do
+ ##
+ # @return [Integer, String]
+ # The request id the client was waiting for
+ attr_reader :expected_id
+
+ ##
+ # @return [Integer, String]
+ # The response id received from the server
+ attr_reader :actual_id
+
+ ##
+ # @param [Integer, String] expected_id
+ # The request id the client was waiting for
+ # @param [Integer, String] actual_id
+ # The response id received from the server instead
+ def initialize(expected_id:, actual_id:)
+ @expected_id = expected_id
+ @actual_id = actual_id
+ super(message)
+ end
+
+ ##
+ # @return [String]
+ def message
+ "mismatched MCP response id #{actual_id.inspect} " \
+ "while waiting for #{expected_id.inspect}"
+ end
+ end
+
  TimeoutError = Class.new(Error)
  end
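
For reference, the message produced by the new error class, following the method body above:

    err = LLM::MCP::MismatchError.new(expected_id: 1, actual_id: 2)
    err.message # => "mismatched MCP response id 2 while waiting for 1"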
data/lib/llm/mcp/mailbox.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ class LLM::MCP
+ ##
+ # A per-request mailbox for routing a JSON-RPC response back to the
+ # caller waiting on that request id.
+ class Mailbox
+ def initialize
+ @queue = Queue.new
+ end
+
+ def <<(message)
+ @queue << message
+ self
+ end
+
+ def pop
+ @queue.pop(true)
+ rescue ThreadError
+ nil
+ end
+ end
+ end
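
The non-blocking contract in brief: Queue#pop(true) raises ThreadError on an empty queue, which Mailbox#pop converts into nil:

    mailbox = LLM::MCP::Mailbox.new
    mailbox.pop                              # => nil (empty; does not block)
    mailbox << {"id" => 7, "result" => "ok"}
    mailbox.pop                              # => {"id" => 7, "result" => "ok"}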
data/lib/llm/mcp/pipe.rb CHANGED
@@ -27,7 +27,7 @@ class LLM::MCP

  ##
  # Reads from the reader end without blocking.
- # @raise [IO::WaitReadable]
+ # @raise [IO::EAGAINWaitReadable]
  # When no data is available to read
  # @return [String]
  def read_nonblock(...)
data/lib/llm/mcp/router.rb ADDED
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ class LLM::MCP
+ ##
+ # Coordinates shared access to a transport by routing JSON-RPC
+ # responses to the mailbox waiting on the matching request id.
+ class Router
+ def initialize
+ @request_id = -1
+ @pending = {}
+ @lock = Monitor.new
+ @writer = Monitor.new
+ @reader = Monitor.new
+ end
+
+ def register
+ @lock.synchronize do
+ @request_id += 1
+ mailbox = LLM::MCP::Mailbox.new
+ @pending[@request_id] = mailbox
+ [@request_id, mailbox]
+ end
+ end
+
+ def clear(id)
+ @lock.synchronize { @pending.delete(id) }
+ end
+
+ def read(transport)
+ @reader.synchronize { transport.read_nonblock }
+ end
+
+ def write(transport, message)
+ @writer.synchronize { transport.write(message) }
+ end
+
+ def route(response)
+ mailbox = @lock.synchronize { @pending[response["id"]] }
+ raise LLM::MCP::MismatchError.new(expected_id: nil, actual_id: response["id"]) unless mailbox
+ mailbox << response
+ nil
+ end
+ end
+ end
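
Putting Router and Mailbox together, the request/response flow looks roughly like this; the transport is omitted and the routed hash stands in for a parsed JSON-RPC response:

    router = LLM::MCP::Router.new
    id, mailbox = router.register                            # => [0, #<LLM::MCP::Mailbox>]
    # ... a request carrying that id goes out via router.write ...
    router.route({"id" => id, "result" => {"tools" => []}})
    mailbox.pop                                              # => {"id"=>0, "result"=>{"tools"=>[]}}
    router.clear(id)                                         # drop the mailbox once the caller is done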
data/lib/llm/mcp/rpc.rb CHANGED
@@ -27,13 +27,15 @@ class LLM::MCP
  def call(transport, method, params = {})
  message = {jsonrpc: "2.0", method:, params: default_params(method).merge(params)}
  if notification?(method)
- transport.write(message)
- nil
- else
- @request_id = (@request_id || -1) + 1
- id = @request_id
- transport.write(message.merge(id:))
- recv(transport, id)
+ router.write(transport, message)
+ return nil
+ end
+ id, mailbox = router.register
+ begin
+ router.write(transport, message.merge(id:))
+ recv(transport, id, mailbox)
+ ensure
+ router.clear(id)
  end
  end

@@ -49,16 +51,12 @@
  # When the MCP process returns an error
  # @return [Object, nil]
  # The result returned by the MCP process
- def recv(transport, id)
+ def recv(transport, id, mailbox)
  poll(timeout:, ex: [IO::WaitReadable]) do
  loop do
- res = transport.read_nonblock
- next unless res["id"] == id
- if res["error"]
- raise LLM::MCP::Error.from(response: res)
- else
- break res["result"]
- end
+ res = mailbox.pop
+ return handle_response(id, res) if res
+ route_response(router.read(transport), id)
  end
  end
  end
@@ -101,6 +99,8 @@ class LLM::MCP
  # The exceptions to retry when raised
  # @yield
  # The block to run
+ # @raise [LLM::MCP::MismatchError]
+ # When an unrelated response id is received while waiting
  # @raise [LLM::MCP::TimeoutError]
  # When the block takes longer than the timeout
  # @return [Object]
@@ -114,5 +114,21 @@
  sleep 0.05
  end
  end
+
+ def handle_response(id, res)
+ raise LLM::MCP::Error.from(response: res) if res["error"]
+ return res["result"] if res["id"] == id
+ raise LLM::MCP::MismatchError.new(expected_id: id, actual_id: res["id"])
+ end
+
+ def route_response(res, id)
+ return nil if res["method"]
+ return router.route(res) if res.key?("id")
+ raise LLM::MCP::MismatchError.new(expected_id: id, actual_id: nil)
+ end
+
+ def router
+ @router ||= LLM::MCP::Router.new
+ end
  end
  end
@@ -21,29 +21,31 @@ module LLM::MCP::Transport

  ##
  # Receives the SSE event name.
- # @param [LLM::EventStream::Event] event
+ # @param [LLM::EventStream::Event, String, nil] event
+ # @param [String, nil] chunk
  # The event stream event
  # @return [void]
- def on_event(event)
- @event = event.value
+ def on_event(event, chunk = nil)
+ @event = chunk ? event : event.value
  end

  ##
  # Receives one line of SSE data.
- # @param [LLM::EventStream::Event] event
+ # @param [LLM::EventStream::Event, String, nil] event
+ # @param [String, nil] chunk
  # The event stream event
  # @return [void]
- def on_data(event)
- @data << event.value.to_s
+ def on_data(event, chunk = nil)
+ @data << (chunk ? event : event.value).to_s
  end

  # The generic event stream parser dispatches one line at a time.
  # A blank line terminates the current SSE event.
- # @param [LLM::EventStream::Event] event
+ # @param [LLM::EventStream::Event, String] event
  # The event stream event
  # @return [void]
- def on_chunk(event)
- flush if event.chunk == "\n"
+ def on_chunk(event, chunk = nil)
+ flush if (chunk || event&.chunk || event) == "\n"
  end

  private
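
Under the new dispatch convention (see the Parser changes above), a visitor's on_<field> callback receives (value, chunk) for named SSE fields, while on_chunk receives (nil, chunk) for lines with no matching handler. A minimal visitor sketch with an illustrative class name:

    # Hypothetical visitor for the two-argument callback convention.
    class DataPrinter
      # Called for "data: ..." lines with the parsed field value.
      def on_data(value, chunk = nil)
        puts(value || chunk)
      end

      # Fallback for lines without a dedicated on_<field> handler.
      def on_chunk(event, chunk = nil)
      end
    end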
@@ -82,13 +82,13 @@ module LLM::MCP::Transport
  # Reads the next queued message without blocking.
  # @raise [LLM::MCP::Error]
  # When the transport is not running
- # @raise [IO::WaitReadable]
+ # @raise [IO::EAGAINWaitReadable]
  # When no complete message is available to read
  # @return [Hash]
  def read_nonblock
  lock do
  raise LLM::MCP::Error, "MCP transport is not running" unless running?
- raise IO::WaitReadable if @queue.empty?
+ raise IO::EAGAINWaitReadable, "no complete message available" if @queue.empty?
  @queue.shift
  end
  end
@@ -57,7 +57,7 @@ module LLM::MCP::Transport
  # Reads a message from the MCP process without blocking.
  # @raise [LLM::Error]
  # When the transport is not running
- # @raise [IO::WaitReadable]
+ # @raise [IO::EAGAINWaitReadable]
  # When no complete message is available to read
  # @return [Hash]
  # The next message from the MCP process
data/lib/llm/mcp.rb CHANGED
@@ -10,11 +10,14 @@
  # transports and focuses on discovering tools that can be used through
  # {LLM::Context LLM::Context} and {LLM::Agent LLM::Agent}.
  #
- # Like {LLM::Context LLM::Context}, an MCP client is stateful and is
- # expected to remain isolated to a single thread.
+ # An MCP client is stateful. Coordinate lifecycle operations such as
+ # {#start} and {#stop}; request methods can be issued concurrently and
+ # responses are matched by JSON-RPC id.
  class LLM::MCP
  require_relative "mcp/error"
  require_relative "mcp/command"
+ require_relative "mcp/mailbox"
+ require_relative "mcp/router"
  require_relative "mcp/rpc"
  require_relative "mcp/pipe"
  require_relative "mcp/transport/http"
@@ -121,6 +124,34 @@
  res["tools"].map { LLM::Tool.mcp(self, _1) }
  end

+ ##
+ # Returns the prompts provided by the MCP process.
+ # @return [Array<LLM::Object>]
+ def prompts
+ res = call(transport, "prompts/list")
+ LLM::Object.from(res["prompts"])
+ end
+
+ ##
+ # Returns a prompt by name.
+ # @param [String] name The prompt name
+ # @param [Hash<String, String>, nil] arguments The prompt arguments
+ # @return [LLM::Object]
+ def find_prompt(name:, arguments: nil)
+ params = {name:}
+ params[:arguments] = arguments if arguments
+ res = call(transport, "prompts/get", params)
+ res["messages"] = [*res["messages"]].map do |message|
+ LLM::Message.new(
+ message["role"],
+ adapt_content(message["content"]),
+ {original_content: message["content"]}
+ )
+ end
+ LLM::Object.from(res)
+ end
+ alias_method :get_prompt, :find_prompt
+
  ##
  # Calls a tool by name with the given arguments
  # @param [String] name The name of the tool to call
@@ -135,6 +166,19 @@

  attr_reader :llm, :command, :transport, :timeout

+ def adapt_content(content)
+ case content
+ when String
+ content
+ when Hash
+ content["type"] == "text" ? content["text"].to_s : LLM::Object.from(content)
+ when Array
+ content.map { adapt_content(_1) }
+ else
+ content
+ end
+ end
+
  def adapt_tool_result(result)
  if result["structuredContent"]
  result["structuredContent"]
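
A short usage sketch for the new prompt discovery API, assuming an already-started MCP client (construction is outside this diff; the prompt name and arguments are illustrative):

    mcp.prompts                          # "prompts/list" => [LLM::Object, ...]
    prompt = mcp.find_prompt(            # or mcp.get_prompt
      name: "summarize",                 # hypothetical prompt name
      arguments: {"style" => "brief"}    # optional; omitted from the call when nil
    )
    prompt["messages"]                   # => [LLM::Message, ...] with adapted content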
@@ -0,0 +1,115 @@
+ # frozen_string_literal: true
+
+ module LLM::Provider::Transport
+ class HTTP
+ ##
+ # Internal HTTP request execution methods for {LLM::Provider}.
+ #
+ # This module handles provider-side HTTP execution, response parsing,
+ # streaming, and request body setup through
+ # {LLM::Provider::Transport::HTTP}.
+ #
+ # @api private
+ module HTTP::Execution
+ private
+
+ ##
+ # Executes a HTTP request
+ # @param [Net::HTTPRequest] request
+ # The request to send
+ # @param [Proc] b
+ # A block to yield the response to (optional)
+ # @return [Net::HTTPResponse]
+ # The response from the server
+ # @raise [LLM::Error::Unauthorized]
+ # When authentication fails
+ # @raise [LLM::Error::RateLimit]
+ # When the rate limit is exceeded
+ # @raise [LLM::Error]
+ # When any other unsuccessful status code is returned
+ # @raise [SystemCallError]
+ # When there is a network error at the operating system level
+ # @return [Net::HTTPResponse]
+ def execute(request:, operation:, stream: nil, stream_parser: self.stream_parser, model: nil, inputs: nil, &b)
+ owner = transport.request_owner
+ tracer = self.tracer
+ span = tracer.on_request_start(operation:, model:, inputs:)
+ res = transport.request(request, owner:) do |http|
+ perform_request(http, request, stream, stream_parser, &b)
+ end
+ [handle_response(res, tracer, span), span, tracer]
+ rescue *LLM::Provider::Transport::HTTP::Interruptible::INTERRUPT_ERRORS
+ raise LLM::Interrupt, "request interrupted" if transport.interrupted?(owner)
+ raise
+ end
+
+ ##
+ # Handles the response from a request
+ # @param [Net::HTTPResponse] res
+ # The response to handle
+ # @param [Object, nil] span
+ # The span
+ # @return [Net::HTTPResponse]
+ def handle_response(res, tracer, span)
+ case res
+ when Net::HTTPOK then res.body = parse_response(res)
+ else error_handler.new(tracer, span, res).raise_error!
+ end
+ res
+ end
+
+ ##
+ # Parse a HTTP response
+ # @param [Net::HTTPResponse] res
+ # @return [LLM::Object, String]
+ def parse_response(res)
+ case res["content-type"]
+ when %r{\Aapplication/json\s*} then LLM::Object.from(LLM.json.load(res.body))
+ else res.body
+ end
+ end
+
+ ##
+ # @param [Net::HTTPRequest] req
+ # The request to set the body stream for
+ # @param [IO] io
+ # The IO object to set as the body stream
+ # @return [void]
+ def set_body_stream(req, io)
+ req.body_stream = io
+ req["transfer-encoding"] = "chunked" unless req["content-length"]
+ end
+
+ ##
+ # Performs the request on the given HTTP connection.
+ # @param [Net::HTTP] http
+ # @param [Net::HTTPRequest] request
+ # @param [Object, nil] stream
+ # @param [Class] stream_parser
+ # @param [Proc, nil] b
+ # @return [Net::HTTPResponse]
+ def perform_request(http, request, stream, stream_parser, &b)
+ if stream
+ http.request(request) do |res|
+ if Net::HTTPSuccess === res
+ parser = StreamDecoder.new(stream_parser.new(stream))
+ res.read_body(parser)
+ body = parser.body
+ res.body = (Hash === body || Array === body) ? LLM::Object.from(body) : body
+ else
+ body = +""
+ res.read_body { body << _1 }
+ res.body = body
+ end
+ ensure
+ parser&.free
+ end
+ elsif b
+ http.request(request) { (Net::HTTPSuccess === _1) ? b.call(_1) : _1 }
+ else
+ http.request(request)
+ end
+ end
+ end
+ end
+ end