swarm_sdk 2.7.9 → 2.7.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/swarm_sdk/ruby_llm_patches/chat_callbacks_patch.rb +284 -0
- data/lib/swarm_sdk/ruby_llm_patches/configuration_patch.rb +41 -0
- data/lib/swarm_sdk/ruby_llm_patches/connection_patch.rb +47 -0
- data/lib/swarm_sdk/ruby_llm_patches/init.rb +41 -0
- data/lib/swarm_sdk/ruby_llm_patches/io_endpoint_patch.rb +40 -0
- data/lib/swarm_sdk/ruby_llm_patches/message_management_patch.rb +23 -0
- data/lib/swarm_sdk/ruby_llm_patches/responses_api_patch.rb +599 -0
- data/lib/swarm_sdk/ruby_llm_patches/tool_concurrency_patch.rb +218 -0
- data/lib/swarm_sdk/swarm/agent_initializer.rb +47 -274
- data/lib/swarm_sdk/swarm/lazy_delegate_chat.rb +372 -0
- data/lib/swarm_sdk/swarm.rb +55 -0
- data/lib/swarm_sdk/tools/delegate.rb +51 -5
- data/lib/swarm_sdk/version.rb +1 -1
- data/lib/swarm_sdk.rb +22 -2
- metadata +13 -18
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: bcabd24d5d88debcf61b86803b61996a60a8d56dac5ca56c1952478df8725079
|
|
4
|
+
data.tar.gz: 1c661fd7c839822ce4cddcfbb499ed8700b3311b45d6f791e94e3b4fb9965dd3
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: eec76f06fd85bbfe4bfc124473760ef9a3a84dd69aae3abb3de018b18b817882b5b1d23e1708cd9dce7f04abff433bec60eb436d7e5939a8925ef84f44a88005
|
|
7
|
+
data.tar.gz: 5b71b6839d87d0010525a5dcf708424a8a585ed59472a384af0d13b5e2992e760f9ee15e9707c63ce0023e3aef3e19c373e333fa1d6dfd3bcb87dc59b9dd76c3
|
|
# frozen_string_literal: true

require "monitor"

# Extends RubyLLM::Chat with:
# - Multi-subscriber callbacks (allows multiple callbacks per event)
# - Subscription objects for unsubscribing
# - around_tool_execution hook for wrapping tool execution
# - around_llm_request hook for wrapping LLM API requests
# - Changed on_tool_result signature to pass (tool_call, result)
#
# Fork Reference: Commits d0912c7, a2a028d, 61cd510, 162189f

module RubyLLM
  class Chat
    # Represents an active subscription to a callback event.
    # Holds a reference to the callback list it lives in so it can remove
    # itself; all state changes are serialized through the shared monitor.
    class Subscription
      attr_reader :tag

      # @param callback_list [Array<Proc>] the event's callback array (shared with Chat)
      # @param callback [Proc] the registered block
      # @param monitor [Monitor] shared lock guarding the callback arrays
      # @param tag [Object, nil] optional label for debugging/inspection
      def initialize(callback_list, callback, monitor:, tag: nil)
        @callback_list = callback_list
        @callback = callback
        @monitor = monitor
        @tag = tag
        @active = true
      end

      # Removes the callback from its event list.
      # @return [Boolean] false if already unsubscribed, true otherwise
      def unsubscribe # rubocop:disable Naming/PredicateMethod
        @monitor.synchronize do
          # Early return exits the method (not just the block) with false.
          return false unless @active

          @callback_list.delete(@callback)
          @active = false
        end
        true
      end

      # @return [Boolean] true while the callback is still registered.
      #   Also checks list membership so clear_callbacks is reflected here.
      def active?
        @monitor.synchronize do
          @active && @callback_list.include?(@callback)
        end
      end

      def inspect
        "#<#{self.class.name} tag=#{@tag.inspect} active=#{active?}>"
      end
    end

    # Module to prepend for multi-subscriber callbacks.
    # Prepending lets these definitions override Chat's own methods while
    # `super` still reaches the original implementations.
    module MultiSubscriberCallbacks
      def initialize(**kwargs)
        super(**kwargs)

        # Replace single callback hash with multi-subscriber arrays
        @callbacks = {
          new_message: [],
          end_message: [],
          tool_call: [],
          tool_result: [],
        }
        @callback_monitor = Monitor.new

        # Initialize around hooks
        @around_tool_execution_hook = nil
        @around_llm_request_hook = nil

        # Keep @on for backward compatibility (read-only)
        @on = nil
      end

      # Subscribe to an event with the given block.
      #
      # @param event [Symbol] one of :new_message, :end_message, :tool_call, :tool_result
      # @param tag [Object, nil] optional label attached to the subscription
      # @raise [ArgumentError] if the event name is unknown
      # @return [Subscription] handle that can be used to unsubscribe
      def subscribe(event, tag: nil, &block)
        @callback_monitor.synchronize do
          unless @callbacks.key?(event)
            raise ArgumentError, "Unknown event: #{event}. Valid events: #{@callbacks.keys.join(", ")}"
          end

          @callbacks[event] << block
          Subscription.new(@callbacks[event], block, monitor: @callback_monitor, tag: tag)
        end
      end

      # Subscribe to an event that automatically unsubscribes after firing once.
      # The wrapper unsubscribes BEFORE invoking the block, so a re-entrant
      # emit from inside the block cannot fire it a second time.
      # @return [Subscription] the one-shot subscription (last expression)
      def once(event, tag: nil, &block)
        subscription = nil
        wrapper = lambda do |*args|
          subscription&.unsubscribe
          block.call(*args)
        end
        subscription = subscribe(event, tag: tag, &wrapper)
      end

      # Override callback registration methods to support multi-subscriber.
      # Each returns self (not the Subscription) to preserve the upstream
      # chaining API; use #subscribe directly if you need to unsubscribe.
      def on_new_message(&block)
        subscribe(:new_message, &block)
        self
      end

      def on_end_message(&block)
        subscribe(:end_message, &block)
        self
      end

      def on_tool_call(&block)
        subscribe(:tool_call, &block)
        self
      end

      def on_tool_result(&block)
        subscribe(:tool_result, &block)
        self
      end

      # Sets a hook to wrap tool execution with custom behavior.
      # Only one hook may be set; a second call replaces the first.
      #
      # @yield [ToolCall, Tool, Proc] Block called for each tool execution
      # @return [self] for chaining
      def around_tool_execution(&block)
        @around_tool_execution_hook = block
        self
      end

      # Sets a hook to wrap LLM API requests with custom behavior.
      # Only one hook may be set; a second call replaces the first.
      #
      # @yield [Array<Message>, Proc] Block called before each LLM request
      # @return [self] for chaining
      def around_llm_request(&block)
        @around_llm_request_hook = block
        self
      end

      # Clears all callbacks for the specified event, or all events if none specified.
      # Existing Subscription objects for cleared events become inactive.
      # @return [self] for chaining
      def clear_callbacks(event = nil)
        @callback_monitor.synchronize do
          if event
            @callbacks[event]&.clear
          else
            @callbacks.each_value(&:clear)
          end
        end
        self
      end

      # Returns the number of callbacks registered for the specified event,
      # or a Hash of counts keyed by event when no event is given.
      def callback_count(event = nil)
        @callback_monitor.synchronize do
          if event
            @callbacks[event]&.size || 0
          else
            @callbacks.transform_values(&:size)
          end
        end
      end

      # Override complete to use emit() and support around_llm_request hook.
      # Recurses (via handle_tool_calls) until the model returns a response
      # with no tool calls, or a Tool::Halt stops the loop.
      def complete(&block)
        # Execute LLM request (potentially wrapped by around_llm_request hook)
        response = execute_llm_request(&block)

        # In streaming mode wrap_streaming_block already emitted :new_message.
        emit(:new_message) unless block_given?

        # NOTE(review): relies on JSON being loaded elsewhere (not required here).
        if @schema && response.content.is_a?(String)
          begin
            response.content = JSON.parse(response.content)
          rescue JSON::ParserError
            # If parsing fails, keep content as string
          end
        end

        # Record the assistant message and notify before branching on tool use.
        add_message(response)
        emit(:end_message, response)
        if response.tool_call?
          # Tool calls requested: run them, then continue the conversation.
          handle_tool_calls(response, &block)
        else
          # Final response: nothing more to do, return it.
          response
        end
      end

      private

      # Emit an event to all registered callbacks.
      # Callbacks are called in FIFO order; the list is snapshotted under the
      # monitor so callbacks may (un)subscribe during emission. Errors from one
      # callback are logged and do not prevent the remaining callbacks.
      def emit(event, *args)
        callbacks = @callback_monitor.synchronize { @callbacks[event]&.dup || [] }

        callbacks.each do |callback|
          callback.call(*args)
        rescue StandardError => e
          RubyLLM.logger.error("[RubyLLM] Callback error for #{event}: #{e.message}")
        end
      end

      # Execute LLM request, potentially wrapped by around_llm_request hook.
      # The hook receives the message list and a continuation; the continuation
      # defaults to the current messages but accepts a prepared replacement.
      def execute_llm_request(&block)
        if @around_llm_request_hook
          @around_llm_request_hook.call(messages) do |prepared_messages = messages|
            perform_llm_request(prepared_messages, &block)
          end
        else
          perform_llm_request(messages, &block)
        end
      end

      # Perform the actual LLM request through the configured provider.
      # ArgumentError from the provider is re-raised with context about which
      # provider/model rejected the parameter (original backtrace preserved).
      def perform_llm_request(messages_to_send, &block)
        @provider.complete(
          messages_to_send,
          tools: @tools,
          temperature: @temperature,
          model: @model,
          params: @params,
          headers: @headers,
          schema: @schema,
          thinking: @thinking,
          &wrap_streaming_block(&block)
        )
      rescue ArgumentError => e
        raise ArgumentError,
              "#{e.message} — provider #{@provider.class.name} does not support this parameter " \
              "(model: #{@model&.id || "unknown"})",
              e.backtrace
      end

      # Override wrap_streaming_block to use emit.
      # Returns nil when no block is given (so &nil passes no block upstream).
      def wrap_streaming_block(&block)
        return unless block_given?

        # Emit once up front; chunks then stream through the caller's block.
        emit(:new_message)

        proc do |chunk|
          block.call(chunk)
        end
      end

      # Override handle_tool_calls to use emit and support around_tool_execution hook.
      # Executes each requested tool, records a :tool message for its result,
      # then either halts (Tool::Halt) or recurses into complete.
      def handle_tool_calls(response, &block)
        halt_result = nil

        response.tool_calls.each_value do |tool_call|
          emit(:new_message)
          emit(:tool_call, tool_call)

          result = execute_tool_with_hook(tool_call)

          # Emit tool_result with both tool_call and result (fork signature)
          emit(:tool_result, tool_call, result)

          # Unwrap Halt results for the transcript; stringify non-content payloads.
          tool_payload = result.is_a?(Tool::Halt) ? result.content : result
          content = content_like?(tool_payload) ? tool_payload : tool_payload.to_s
          message = add_message(role: :tool, content: content, tool_call_id: tool_call.id)
          emit(:end_message, message)

          # Remember the (last) halt; remaining tool calls still execute.
          halt_result = result if result.is_a?(Tool::Halt)
        end

        halt_result || complete(&block)
      end

      # Execute tool with around_tool_execution hook if set.
      # Fork signature: hook receives (tool_call, tool_instance, execute_proc).
      # Note: tool_instance may be nil if tool is not found - the hook/execute_proc
      # should handle this case (will raise NoMethodError, caught by rescue).
      # Any StandardError is converted to an error string returned as the result.
      def execute_tool_with_hook(tool_call)
        tool_instance = tools[tool_call.name.to_sym]
        execute_proc = -> { tool_instance.call(tool_call.arguments) }

        if @around_tool_execution_hook
          @around_tool_execution_hook.call(tool_call, tool_instance, execute_proc)
        else
          execute_proc.call
        end
      rescue StandardError => e
        "Error: #{e.class}: #{e.message}"
      end
    end

    # Prepend the module to override methods
    prepend MultiSubscriberCallbacks
  end
end
# frozen_string_literal: true

# Extends RubyLLM::Configuration with additional options:
# - anthropic_api_base: Configurable Anthropic API base URL
# - read_timeout, open_timeout, write_timeout: Granular timeout configuration
#
# Fork Reference: Commits da6144b, 3daa4fb

module RubyLLM
  class Configuration
    # Add new configuration accessors
    attr_accessor :anthropic_api_base,
                  :read_timeout,
                  :open_timeout,
                  :write_timeout

    # Store original initialize for chaining.
    # Guarded so loading this patch file twice does not re-alias the already
    # patched initialize (which would make initialize call itself forever).
    # initialize is private, so both visibility checks are needed.
    unless method_defined?(:original_initialize_without_patches) ||
           private_method_defined?(:original_initialize_without_patches)
      alias_method :original_initialize_without_patches, :initialize
    end

    # Override initialize to set default values for new options
    def initialize
      original_initialize_without_patches

      # Add new configuration options with defaults
      @anthropic_api_base = nil # Uses default 'https://api.anthropic.com' if not set
      @read_timeout = nil # Defaults to request_timeout if not set
      @open_timeout = 30
      @write_timeout = 30
    end
  end

  # Patch Anthropic provider to use configurable base URL
  module Providers
    class Anthropic
      # Override api_base to use configurable base URL
      # @return [String] the configured base URL, or the public Anthropic endpoint
      def api_base
        @config.anthropic_api_base || "https://api.anthropic.com"
      end
    end
  end
end
# frozen_string_literal: true

# Extends RubyLLM::Connection with:
# - Connection.basic uses net_http adapter for SSL/IPv6 compatibility
# - Granular timeout support (read_timeout, open_timeout, write_timeout)
#
# Fork Reference: Commits cdc6067, 3daa4fb

module RubyLLM
  class Connection
    class << self
      # Builds a minimal Faraday connection for simple API calls.
      # Yields the connection for extra middleware, then installs net_http
      # as the adapter (avoids async-http SSL/IPv6 issues).
      def basic(&)
        Faraday.new do |conn|
          conn.response(
            :logger,
            RubyLLM.logger,
            bodies: false,
            response: false,
            errors: true,
            headers: false,
            log_level: :debug,
          )
          conn.response(:raise_error)
          yield conn if block_given?
          # net_http adapter sidesteps async-http SSL/IPv6 problems for simple calls
          conn.adapter(:net_http)
        end
      end
    end

    private

    # Applies granular timeouts when the configuration supports them.
    # Always sets the overall timeout from request_timeout; open/write are
    # only set when configured, and read falls back to request_timeout.
    def setup_timeout(faraday)
      options = faraday.options
      options.timeout = @config.request_timeout

      if @config.respond_to?(:open_timeout) && @config.open_timeout
        options.open_timeout = @config.open_timeout
      end

      if @config.respond_to?(:write_timeout) && @config.write_timeout
        options.write_timeout = @config.write_timeout
      end

      # read_timeout defaults to request_timeout for streaming support
      options.read_timeout = @config.read_timeout || @config.request_timeout if @config.respond_to?(:read_timeout)
    end
  end
end
# frozen_string_literal: true

# RubyLLM Compatibility Patches
#
# These patches extend upstream ruby_llm to match fork functionality used by SwarmSDK.
# Load order is important - patches are loaded in dependency order.
#
# Features provided by these patches:
# - Multi-subscriber callbacks with Subscription objects
# - around_tool_execution and around_llm_request hooks
# - Concurrent tool execution (async/threads)
# - preserve_system_prompt option in reset_messages!
# - Configurable Anthropic API base URL
# - Granular timeout configuration (read_timeout, open_timeout, write_timeout)
# - OpenAI Responses API support
# - IPv6 fallback fix for io-endpoint
#
# Once upstream ruby_llm adds these features, patches can be disabled.

# Load patches in dependency order

# 1. io-endpoint patch (infrastructure fix, no RubyLLM dependencies;
#    self-skips via rescue LoadError when the gem is absent)
require_relative "io_endpoint_patch"

# 2. Configuration patch (must be loaded before connection/providers)
require_relative "configuration_patch"

# 3. Connection patch (depends on configuration patch)
require_relative "connection_patch"

# 4. Chat callbacks patch (core callback system)
require_relative "chat_callbacks_patch"

# 5. Tool concurrency patch (depends on chat callbacks patch)
require_relative "tool_concurrency_patch"

# 6. Message management patch (simple, no dependencies)
require_relative "message_management_patch"

# 7. Responses API patch (depends on configuration, uses error classes)
require_relative "responses_api_patch"
# frozen_string_literal: true

# Monkey-patch io-endpoint to handle EHOSTUNREACH (IPv6 unreachable)
# This fixes an issue where the async adapter fails on IPv6 without trying IPv4
#
# Fork Reference: Commit cdc6067

begin
  require "io/endpoint"
  require "io/endpoint/host_endpoint"

  # rubocop:disable Style/ClassAndModuleChildren
  # Reopen the existing class (no superclass specified)
  class IO::Endpoint::HostEndpoint
    # Override connect to add EHOSTUNREACH to the rescue list
    # This allows the connection to fall back to IPv4 when IPv6 is unavailable
    #
    # Tries each resolved address in turn; the first successful socket is
    # returned (or yielded and closed when a block is given). Connection-level
    # failures are remembered and re-raised only if every address fails.
    # NOTE(review): &block is declared but dispatch uses yield/block_given? —
    # equivalent at runtime. If the address list is empty and no error was
    # recorded, the method returns nil rather than raising — confirm callers
    # tolerate that.
    def connect(wrapper = self.wrapper, &block)
      last_error = nil

      # Block-level rescue/else (Ruby 2.6+): rescue advances to the next
      # address; else runs only when the connect succeeded.
      Addrinfo.foreach(*@specification) do |address|
        socket = wrapper.connect(address, **@options)
      rescue Errno::ECONNREFUSED, Errno::ENETUNREACH, Errno::EHOSTUNREACH, Errno::EAGAIN => last_error
        # Try next address (IPv4 fallback)
      else
        return socket unless block_given?

        begin
          return yield(socket)
        ensure
          # Close the socket once the caller's block is done (or raises).
          socket.close
        end
      end

      raise last_error if last_error
    end
  end
  # rubocop:enable Style/ClassAndModuleChildren
rescue LoadError
  # io-endpoint gem not available, skip this patch
end
# frozen_string_literal: true

# Extends RubyLLM::Chat with enhanced message management:
# - reset_messages! with preserve_system_prompt option
#
# Fork Reference: Commit e6a34b5

module RubyLLM
  class Chat
    # Clears the conversation history in place.
    #
    # @param preserve_system_prompt [Boolean] If true (default), keeps system messages
    # @return [self] for chaining
    def reset_messages!(preserve_system_prompt: true)
      if preserve_system_prompt
        # Drop everything except system messages
        @messages.reject! { |message| message.role != :system }
      else
        @messages.clear
      end
      self
    end
  end
end