swarm_sdk 2.7.10 → 2.7.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 7863f2b6c3b7c74c13ca47957713e5588167a09a2c95acfebb3c53bab571af7f
4
- data.tar.gz: edf4cc3e12b13327909b8c21a5b0b40dafde5707ff82883762d980b267330e8b
3
+ metadata.gz: 700ee5f9b81c58e030024d664a7c3e27db84c705c876493678c89371f803158a
4
+ data.tar.gz: b2a4e2d7dc72208bff64b9c8d5f72dbe9d2772ae5f2f61d8abf3973d5855867c
5
5
  SHA512:
6
- metadata.gz: df8234592d637315378d078c211b98b61bf3411d11ac49b2c2d82954314cf985686deb927d9d8f41855a341675fb216814415a7be0b7e5e0297fd593b83fc95f
7
- data.tar.gz: 360d76065cbc15d6968d13616cb5e2c29d5a6b3771aa58a7490fca5cf360fc6b0a2bd3b3c7c90b98d89388205ad927ea998b2804fb1076bed5d91aff0d982596
6
+ metadata.gz: 7e375f31f8419422c3b93065f0fa525d63ae063865b2dd62032ac42ff4e9f827111f5cf1dc13cc18e27e215e9e1a0924965791155f553d5af9261f53580680e4
7
+ data.tar.gz: f44a0e73852254048fe7219efed8db620bc41f4d274bfd938997ac5281a79c6cfa575e3be7631e404bbd84acfc3dd25649130a06fa10b85d4cac6a0099007d11
@@ -62,6 +62,7 @@ module SwarmSDK
62
62
  @memory_config = nil
63
63
  @shared_across_delegations = nil # nil = not set (will default to false in Definition)
64
64
  @streaming = nil # nil = not set (will use global config default)
65
+ @thinking = nil # nil = not set (extended thinking disabled)
65
66
  @context_management_config = nil # Context management DSL hooks
66
67
  end
67
68
 
@@ -372,6 +373,38 @@ module SwarmSDK
372
373
  !@streaming.nil?
373
374
  end
374
375
 
376
+ # Configure extended thinking for this agent
377
+ #
378
+ # Extended thinking allows models to reason through complex problems before responding.
379
+ # For Anthropic models, specify a budget (token count). For OpenAI models, specify effort.
380
+ # Both can be specified for cross-provider compatibility.
381
+ #
382
+ # @param effort [Symbol, String, nil] Reasoning effort level (:low, :medium, :high) — used by OpenAI
383
+ # @param budget [Integer, nil] Token budget for thinking — used by Anthropic
384
+ # @return [self] Returns self for method chaining
385
+ #
386
+ # @example Anthropic thinking with budget
387
+ # thinking budget: 10_000
388
+ #
389
+ # @example OpenAI reasoning effort
390
+ # thinking effort: :high
391
+ #
392
+ # @example Cross-provider (both)
393
+ # thinking effort: :high, budget: 10_000
394
+ def thinking(effort: nil, budget: nil)
395
+ raise ArgumentError, "thinking requires :effort or :budget" if effort.nil? && budget.nil?
396
+
397
+ @thinking = { effort: effort, budget: budget }.compact
398
+ self
399
+ end
400
+
401
+ # Check if thinking has been explicitly set
402
+ #
403
+ # @return [Boolean] true if thinking was explicitly configured
404
+ def thinking_set?
405
+ !@thinking.nil?
406
+ end
407
+
375
408
  # Configure context management handlers
376
409
  #
377
410
  # Define custom handlers for context warning thresholds (60%, 80%, 90%).
@@ -552,6 +585,7 @@ module SwarmSDK
552
585
  agent_config[:memory] = @memory_config if @memory_config
553
586
  agent_config[:shared_across_delegations] = @shared_across_delegations unless @shared_across_delegations.nil?
554
587
  agent_config[:streaming] = @streaming unless @streaming.nil?
588
+ agent_config[:thinking] = @thinking if @thinking
555
589
 
556
590
  # Convert DSL hooks to HookDefinition format
557
591
  agent_config[:hooks] = convert_hooks_to_definitions if @hooks.any?
@@ -189,10 +189,11 @@ module SwarmSDK
189
189
  # Try to fetch real model info for accurate context tracking
190
190
  fetch_real_model_info(model_id)
191
191
 
192
- # Configure system prompt, parameters, and headers
192
+ # Configure system prompt, parameters, headers, and thinking
193
193
  configure_system_prompt(system_prompt) if system_prompt
194
194
  configure_parameters(parameters)
195
195
  configure_headers(custom_headers)
196
+ configure_thinking(definition[:thinking])
196
197
 
197
198
  # Setup around_tool_execution hook for SwarmSDK orchestration
198
199
  setup_tool_execution_hook
@@ -986,6 +987,12 @@ module SwarmSDK
986
987
  emit_non_retryable_error(e, "UnknownAPIError")
987
988
  return build_error_message(e)
988
989
 
990
+ # === CATEGORY A (CONTINUED): PROGRAMMING ERRORS ===
991
+ rescue ArgumentError, TypeError, NameError => e
992
+ # Programming errors (wrong keywords, type mismatches) - won't fix by retrying
993
+ emit_non_retryable_error(e, e.class.name)
994
+ return build_error_message(e)
995
+
989
996
  # === CATEGORY C: NETWORK/OTHER ERRORS ===
990
997
  rescue StandardError => e
991
998
  # Network errors, timeouts, unknown errors - retry with delays
@@ -224,6 +224,22 @@ module SwarmSDK
224
224
  RubyLLM.logger.debug("SwarmSDK: Enabled native Responses API support")
225
225
  end
226
226
 
227
+ # Configure extended thinking on the RubyLLM chat instance
228
+ #
229
+ # @param thinking_config [Hash, nil] Thinking configuration with :effort and/or :budget
230
+ # @return [self]
231
+ #
232
+ # @example
233
+ # configure_thinking(budget: 10_000)
234
+ # configure_thinking(effort: :high)
235
+ # configure_thinking(effort: :high, budget: 10_000)
236
+ def configure_thinking(thinking_config)
237
+ return self unless thinking_config
238
+
239
+ @llm_chat.with_thinking(**thinking_config)
240
+ self
241
+ end
242
+
227
243
  # Configure LLM parameters with proper temperature normalization
228
244
  #
229
245
  # @param params [Hash] Parameter hash
@@ -42,7 +42,8 @@ module SwarmSDK
42
42
  :hooks,
43
43
  :plugin_configs,
44
44
  :shared_across_delegations,
45
- :streaming
45
+ :streaming,
46
+ :thinking
46
47
 
47
48
  attr_accessor :bypass_permissions, :max_concurrent_tools
48
49
 
@@ -114,6 +115,9 @@ module SwarmSDK
114
115
  # Streaming configuration (default: true from global config)
115
116
  @streaming = config.fetch(:streaming, SwarmSDK.config.streaming)
116
117
 
118
+ # Extended thinking configuration (nil = disabled)
119
+ @thinking = config[:thinking]
120
+
117
121
  # Build system prompt after directory and memory are set
118
122
  @system_prompt = build_full_system_prompt(config[:system_prompt])
119
123
 
@@ -450,6 +450,10 @@ module SwarmSDK
450
450
  if !all_agents_hash[:streaming].nil? && !agent_builder.streaming_set?
451
451
  agent_builder.streaming(all_agents_hash[:streaming])
452
452
  end
453
+
454
+ if all_agents_hash[:thinking] && !agent_builder.thinking_set?
455
+ agent_builder.thinking(**all_agents_hash[:thinking])
456
+ end
453
457
  end
454
458
 
455
459
  # Validate all_agents filesystem tools
@@ -100,6 +100,7 @@ module SwarmSDK
100
100
  coding_agent(all_agents_cfg[:coding_agent]) unless all_agents_cfg[:coding_agent].nil?
101
101
  disable_default_tools(all_agents_cfg[:disable_default_tools]) unless all_agents_cfg[:disable_default_tools].nil?
102
102
  streaming(all_agents_cfg[:streaming]) unless all_agents_cfg[:streaming].nil?
103
+ thinking(**all_agents_cfg[:thinking]) if all_agents_cfg[:thinking]
103
104
 
104
105
  if all_agents_hks.any?
105
106
  all_agents_hks.each do |event, hook_specs|
@@ -164,6 +165,7 @@ module SwarmSDK
164
165
  disable_default_tools(config[:disable_default_tools]) unless config[:disable_default_tools].nil?
165
166
  shared_across_delegations(config[:shared_across_delegations]) unless config[:shared_across_delegations].nil?
166
167
  streaming(config[:streaming]) unless config[:streaming].nil?
168
+ thinking(**config[:thinking]) if config[:thinking]
167
169
 
168
170
  if config[:tools]&.any?
169
171
  tool_names = config[:tools].map { |t| t.is_a?(Hash) ? t[:name] : t }
@@ -0,0 +1,286 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "monitor"
4
+
5
+ # Extends RubyLLM::Chat with:
6
+ # - Multi-subscriber callbacks (allows multiple callbacks per event)
7
+ # - Subscription objects for unsubscribing
8
+ # - around_tool_execution hook for wrapping tool execution
9
+ # - around_llm_request hook for wrapping LLM API requests
10
+ # - Changed on_tool_result signature to pass (tool_call, result)
11
+ #
12
+ # Fork Reference: Commits d0912c7, a2a028d, 61cd510, 162189f
13
+
14
+ module RubyLLM
15
+ class Chat
16
+ # Represents an active subscription to a callback event
17
+ class Subscription
18
+ attr_reader :tag
19
+
20
+ def initialize(callback_list, callback, monitor:, tag: nil)
21
+ @callback_list = callback_list
22
+ @callback = callback
23
+ @monitor = monitor
24
+ @tag = tag
25
+ @active = true
26
+ end
27
+
28
+ def unsubscribe # rubocop:disable Naming/PredicateMethod
29
+ @monitor.synchronize do
30
+ return false unless @active
31
+
32
+ @callback_list.delete(@callback)
33
+ @active = false
34
+ end
35
+ true
36
+ end
37
+
38
+ def active?
39
+ @monitor.synchronize do
40
+ @active && @callback_list.include?(@callback)
41
+ end
42
+ end
43
+
44
+ def inspect
45
+ "#<#{self.class.name} tag=#{@tag.inspect} active=#{active?}>"
46
+ end
47
+ end
48
+
49
+ # Module to prepend for multi-subscriber callbacks
50
+ module MultiSubscriberCallbacks
51
+ def initialize(**kwargs)
52
+ super(**kwargs)
53
+
54
+ # Replace single callback hash with multi-subscriber arrays
55
+ @callbacks = {
56
+ new_message: [],
57
+ end_message: [],
58
+ tool_call: [],
59
+ tool_result: [],
60
+ }
61
+ @callback_monitor = Monitor.new
62
+
63
+ # Initialize around hooks
64
+ @around_tool_execution_hook = nil
65
+ @around_llm_request_hook = nil
66
+
67
+ # Keep @on for backward compatibility (read-only)
68
+ @on = nil
69
+ end
70
+
71
+ # Subscribe to an event with the given block
72
+ # Returns a Subscription that can be used to unsubscribe
73
+ def subscribe(event, tag: nil, &block)
74
+ @callback_monitor.synchronize do
75
+ unless @callbacks.key?(event)
76
+ raise ArgumentError, "Unknown event: #{event}. Valid events: #{@callbacks.keys.join(", ")}"
77
+ end
78
+
79
+ @callbacks[event] << block
80
+ Subscription.new(@callbacks[event], block, monitor: @callback_monitor, tag: tag)
81
+ end
82
+ end
83
+
84
+ # Subscribe to an event that automatically unsubscribes after firing once
85
+ def once(event, tag: nil, &block)
86
+ subscription = nil
87
+ wrapper = lambda do |*args|
88
+ subscription&.unsubscribe
89
+ block.call(*args)
90
+ end
91
+ subscription = subscribe(event, tag: tag, &wrapper)
92
+ end
93
+
94
+ # Override callback registration methods to support multi-subscriber
95
+ def on_new_message(&block)
96
+ subscribe(:new_message, &block)
97
+ self
98
+ end
99
+
100
+ def on_end_message(&block)
101
+ subscribe(:end_message, &block)
102
+ self
103
+ end
104
+
105
+ def on_tool_call(&block)
106
+ subscribe(:tool_call, &block)
107
+ self
108
+ end
109
+
110
+ def on_tool_result(&block)
111
+ subscribe(:tool_result, &block)
112
+ self
113
+ end
114
+
115
+ # Sets a hook to wrap tool execution with custom behavior
116
+ #
117
+ # @yield [ToolCall, Tool, Proc] Block called for each tool execution
118
+ # @return [self] for chaining
119
+ def around_tool_execution(&block)
120
+ @around_tool_execution_hook = block
121
+ self
122
+ end
123
+
124
+ # Sets a hook to wrap LLM API requests with custom behavior
125
+ #
126
+ # @yield [Array<Message>, Proc] Block called before each LLM request
127
+ # @return [self] for chaining
128
+ def around_llm_request(&block)
129
+ @around_llm_request_hook = block
130
+ self
131
+ end
132
+
133
+ # Clears all callbacks for the specified event, or all events if none specified
134
+ def clear_callbacks(event = nil)
135
+ @callback_monitor.synchronize do
136
+ if event
137
+ @callbacks[event]&.clear
138
+ else
139
+ @callbacks.each_value(&:clear)
140
+ end
141
+ end
142
+ self
143
+ end
144
+
145
+ # Returns the number of callbacks registered for the specified event
146
+ def callback_count(event = nil)
147
+ @callback_monitor.synchronize do
148
+ if event
149
+ @callbacks[event]&.size || 0
150
+ else
151
+ @callbacks.transform_values(&:size)
152
+ end
153
+ end
154
+ end
155
+
156
+ # Override complete to use emit() and support around_llm_request hook
157
+ # Follows fork pattern: tool call handling wraps message addition
158
+ def complete(&block)
159
+ # Execute LLM request (potentially wrapped by around_llm_request hook)
160
+ response = execute_llm_request(&block)
161
+
162
+ emit(:new_message) unless block_given?
163
+
164
+ if @schema && response.content.is_a?(String)
165
+ begin
166
+ response.content = JSON.parse(response.content)
167
+ rescue JSON::ParserError
168
+ # If parsing fails, keep content as string
169
+ end
170
+ end
171
+
172
+ add_message(response)
173
+ emit(:end_message, response)
174
+ if response.tool_call?
175
+ # Tool-call response: message was already added and end_message emitted above; now execute the tools
176
+ handle_tool_calls(response, &block)
177
+ else
178
+ # Final response: message was already added and end_message emitted above; just return it
179
+ response
180
+ end
181
+ end
182
+
183
+ private
184
+
185
+ # Emit an event to all registered callbacks
186
+ # Callbacks are called in FIFO order, errors are isolated
187
+ def emit(event, *args)
188
+ callbacks = @callback_monitor.synchronize { @callbacks[event]&.dup || [] }
189
+
190
+ callbacks.each do |callback|
191
+ callback.call(*args)
192
+ rescue StandardError => e
193
+ RubyLLM.logger.error("[RubyLLM] Callback error for #{event}: #{e.message}")
194
+ end
195
+ end
196
+
197
+ # Execute LLM request, potentially wrapped by around_llm_request hook
198
+ def execute_llm_request(&block)
199
+ if @around_llm_request_hook
200
+ @around_llm_request_hook.call(messages) do |prepared_messages = messages|
201
+ perform_llm_request(prepared_messages, &block)
202
+ end
203
+ else
204
+ perform_llm_request(messages, &block)
205
+ end
206
+ end
207
+
208
+ # Perform the actual LLM request
209
+ def perform_llm_request(messages_to_send, &block)
210
+ kwargs = {
211
+ tools: @tools,
212
+ temperature: @temperature,
213
+ model: @model,
214
+ params: @params,
215
+ headers: @headers,
216
+ schema: @schema,
217
+ }
218
+ # Only pass thinking when explicitly configured via with_thinking
219
+ # to maintain compatibility with providers that don't support this keyword
220
+ kwargs[:thinking] = @thinking if @thinking
221
+
222
+ @provider.complete(messages_to_send, **kwargs, &wrap_streaming_block(&block))
223
+ rescue ArgumentError => e
224
+ raise ArgumentError,
225
+ "#{e.message} — provider #{@provider.class.name} does not support this parameter " \
226
+ "(model: #{@model&.id || "unknown"})",
227
+ e.backtrace
228
+ end
229
+
230
+ # Override wrap_streaming_block to use emit
231
+ def wrap_streaming_block(&block)
232
+ return unless block_given?
233
+
234
+ emit(:new_message)
235
+
236
+ proc do |chunk|
237
+ block.call(chunk)
238
+ end
239
+ end
240
+
241
+ # Override handle_tool_calls to use emit and support around_tool_execution hook
242
+ def handle_tool_calls(response, &block)
243
+ halt_result = nil
244
+
245
+ response.tool_calls.each_value do |tool_call|
246
+ emit(:new_message)
247
+ emit(:tool_call, tool_call)
248
+
249
+ result = execute_tool_with_hook(tool_call)
250
+
251
+ # Emit tool_result with both tool_call and result (fork signature)
252
+ emit(:tool_result, tool_call, result)
253
+
254
+ tool_payload = result.is_a?(Tool::Halt) ? result.content : result
255
+ content = content_like?(tool_payload) ? tool_payload : tool_payload.to_s
256
+ message = add_message(role: :tool, content: content, tool_call_id: tool_call.id)
257
+ emit(:end_message, message)
258
+
259
+ halt_result = result if result.is_a?(Tool::Halt)
260
+ end
261
+
262
+ halt_result || complete(&block)
263
+ end
264
+
265
+ # Execute tool with around_tool_execution hook if set
266
+ # Fork signature: hook receives (tool_call, tool_instance, execute_proc)
267
+ # Note: tool_instance may be nil if tool is not found - the hook/execute_proc
268
+ # should handle this case (will raise NoMethodError, caught by rescue)
269
+ def execute_tool_with_hook(tool_call)
270
+ tool_instance = tools[tool_call.name.to_sym]
271
+ execute_proc = -> { tool_instance.call(tool_call.arguments) }
272
+
273
+ if @around_tool_execution_hook
274
+ @around_tool_execution_hook.call(tool_call, tool_instance, execute_proc)
275
+ else
276
+ execute_proc.call
277
+ end
278
+ rescue StandardError => e
279
+ "Error: #{e.class}: #{e.message}"
280
+ end
281
+ end
282
+
283
+ # Prepend the module to override methods
284
+ prepend MultiSubscriberCallbacks
285
+ end
286
+ end
@@ -0,0 +1,41 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Extends RubyLLM::Configuration with additional options:
4
+ # - anthropic_api_base: Configurable Anthropic API base URL
5
+ # - read_timeout, open_timeout, write_timeout: Granular timeout configuration
6
+ #
7
+ # Fork Reference: Commits da6144b, 3daa4fb
8
+
9
+ module RubyLLM
10
+ class Configuration
11
+ # Add new configuration accessors
12
+ attr_accessor :anthropic_api_base,
13
+ :read_timeout,
14
+ :open_timeout,
15
+ :write_timeout
16
+
17
+ # Store original initialize for chaining
18
+ alias_method :original_initialize_without_patches, :initialize
19
+
20
+ # Override initialize to set default values for new options
21
+ def initialize
22
+ original_initialize_without_patches
23
+
24
+ # Add new configuration options with defaults
25
+ @anthropic_api_base = nil # Uses default 'https://api.anthropic.com' if not set
26
+ @read_timeout = nil # Defaults to request_timeout if not set
27
+ @open_timeout = 30
28
+ @write_timeout = 30
29
+ end
30
+ end
31
+
32
+ # Patch Anthropic provider to use configurable base URL
33
+ module Providers
34
+ class Anthropic
35
+ # Override api_base to use configurable base URL
36
+ def api_base
37
+ @config.anthropic_api_base || "https://api.anthropic.com"
38
+ end
39
+ end
40
+ end
41
+ end
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Extends RubyLLM::Connection with:
4
+ # - Connection.basic uses net_http adapter for SSL/IPv6 compatibility
5
+ # - Granular timeout support (read_timeout, open_timeout, write_timeout)
6
+ #
7
+ # Fork Reference: Commits cdc6067, 3daa4fb
8
+
9
+ module RubyLLM
10
+ class Connection
11
+ class << self
12
+ # Override basic to use net_http adapter
13
+ # This avoids async-http SSL/IPv6 issues for simple API calls
14
+ def basic(&)
15
+ Faraday.new do |f|
16
+ f.response(
17
+ :logger,
18
+ RubyLLM.logger,
19
+ bodies: false,
20
+ response: false,
21
+ errors: true,
22
+ headers: false,
23
+ log_level: :debug,
24
+ )
25
+ f.response(:raise_error)
26
+ yield f if block_given?
27
+ # Use net_http for simple API calls to avoid async-http SSL/IPv6 issues
28
+ f.adapter(:net_http)
29
+ end
30
+ end
31
+ end
32
+
33
+ private
34
+
35
+ # Override setup_timeout to support granular timeouts
36
+ def setup_timeout(faraday)
37
+ faraday.options.timeout = @config.request_timeout
38
+ faraday.options.open_timeout = @config.open_timeout if @config.respond_to?(:open_timeout) && @config.open_timeout
39
+ faraday.options.write_timeout = @config.write_timeout if @config.respond_to?(:write_timeout) && @config.write_timeout
40
+
41
+ # read_timeout defaults to request_timeout for streaming support
42
+ if @config.respond_to?(:read_timeout)
43
+ faraday.options.read_timeout = @config.read_timeout || @config.request_timeout
44
+ end
45
+ end
46
+ end
47
+ end
@@ -0,0 +1,41 @@
1
+ # frozen_string_literal: true
2
+
3
+ # RubyLLM Compatibility Patches
4
+ #
5
+ # These patches extend upstream ruby_llm to match fork functionality used by SwarmSDK.
6
+ # Load order is important - patches are loaded in dependency order.
7
+ #
8
+ # Features provided by these patches:
9
+ # - Multi-subscriber callbacks with Subscription objects
10
+ # - around_tool_execution and around_llm_request hooks
11
+ # - Concurrent tool execution (async/threads)
12
+ # - preserve_system_prompt option in reset_messages!
13
+ # - Configurable Anthropic API base URL
14
+ # - Granular timeout configuration (read_timeout, open_timeout, write_timeout)
15
+ # - OpenAI Responses API support
16
+ # - IPv6 fallback fix for io-endpoint
17
+ #
18
+ # Once upstream ruby_llm adds these features, patches can be disabled.
19
+
20
+ # Load patches in dependency order
21
+
22
+ # 1. io-endpoint patch (infrastructure fix, no RubyLLM dependencies)
23
+ require_relative "io_endpoint_patch"
24
+
25
+ # 2. Configuration patch (must be loaded before connection/providers)
26
+ require_relative "configuration_patch"
27
+
28
+ # 3. Connection patch (depends on configuration patch)
29
+ require_relative "connection_patch"
30
+
31
+ # 4. Chat callbacks patch (core callback system)
32
+ require_relative "chat_callbacks_patch"
33
+
34
+ # 5. Tool concurrency patch (depends on chat callbacks patch)
35
+ require_relative "tool_concurrency_patch"
36
+
37
+ # 6. Message management patch (simple, no dependencies)
38
+ require_relative "message_management_patch"
39
+
40
+ # 7. Responses API patch (depends on configuration, uses error classes)
41
+ require_relative "responses_api_patch"
@@ -0,0 +1,40 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Monkey-patch io-endpoint to handle EHOSTUNREACH (IPv6 unreachable)
4
+ # This fixes an issue where the async adapter fails on IPv6 without trying IPv4
5
+ #
6
+ # Fork Reference: Commit cdc6067
7
+
8
+ begin
9
+ require "io/endpoint"
10
+ require "io/endpoint/host_endpoint"
11
+
12
+ # rubocop:disable Style/ClassAndModuleChildren
13
+ # Reopen the existing class (no superclass specified)
14
+ class IO::Endpoint::HostEndpoint
15
+ # Override connect to add EHOSTUNREACH to the rescue list
16
+ # This allows the connection to fall back to IPv4 when IPv6 is unavailable
17
+ def connect(wrapper = self.wrapper, &block)
18
+ last_error = nil
19
+
20
+ Addrinfo.foreach(*@specification) do |address|
21
+ socket = wrapper.connect(address, **@options)
22
+ rescue Errno::ECONNREFUSED, Errno::ENETUNREACH, Errno::EHOSTUNREACH, Errno::EAGAIN => last_error
23
+ # Try next address (IPv4 fallback)
24
+ else
25
+ return socket unless block_given?
26
+
27
+ begin
28
+ return yield(socket)
29
+ ensure
30
+ socket.close
31
+ end
32
+ end
33
+
34
+ raise last_error if last_error
35
+ end
36
+ end
37
+ # rubocop:enable Style/ClassAndModuleChildren
38
+ rescue LoadError
39
+ # io-endpoint gem not available, skip this patch
40
+ end
@@ -0,0 +1,23 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Extends RubyLLM::Chat with enhanced message management:
4
+ # - reset_messages! with preserve_system_prompt option
5
+ #
6
+ # Fork Reference: Commit e6a34b5
7
+
8
+ module RubyLLM
9
+ class Chat
10
+ # Override reset_messages! to support preserve_system_prompt option
11
+ #
12
+ # @param preserve_system_prompt [Boolean] If true (default), keeps system messages
13
+ # @return [self] for chaining
14
+ def reset_messages!(preserve_system_prompt: true)
15
+ if preserve_system_prompt
16
+ @messages.select! { |m| m.role == :system }
17
+ else
18
+ @messages.clear
19
+ end
20
+ self
21
+ end
22
+ end
23
+ end