swarm_sdk 2.7.14 → 2.7.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/swarm_sdk/agent/chat.rb +1 -0
- data/lib/swarm_sdk/agent/chat_helpers/llm_configuration.rb +4 -0
- data/lib/swarm_sdk/ruby_llm_patches/init.rb +7 -1
- data/lib/swarm_sdk/ruby_llm_patches/openai_thought_signature_patch.rb +98 -0
- data/lib/swarm_sdk/ruby_llm_patches/streaming_error_patch.rb +50 -0
- data/lib/swarm_sdk/version.rb +1 -1
- metadata +3 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: e774808cdf4817c1cd483b3b09972cc1bf8b99d9226308baac8df60315c85ea2
|
|
4
|
+
data.tar.gz: 13a214280887d48dafdfe8b36da95caca7b7788120d92efc71ed3e9cb6da0293
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: b0c7654413b29e405fec0fa66ef6e992c2f84a5fdeaf6be6ce52412ca2f3f00cca0d24865141c97d922a7cd73b12ed00fbd3c0118fa48bee78a5846e0bb89fb7
|
|
7
|
+
data.tar.gz: fa812ed602e16954a23f72dec7bd9c1783afe4df852a93aa29db0358db1b329ede8303d7f59b5bdfe0d7847074ed515c534d0e32d5c2e523d3cef67886f61f56
|
data/lib/swarm_sdk/agent/chat.rb
CHANGED
|
@@ -155,6 +155,10 @@ module SwarmSDK
|
|
|
155
155
|
when "ollama"
|
|
156
156
|
config.ollama_api_base = base_url
|
|
157
157
|
# Ollama doesn't need an API key
|
|
158
|
+
when "gemini"
|
|
159
|
+
config.gemini_api_base = base_url
|
|
160
|
+
api_key = SwarmSDK.config.gemini_api_key
|
|
161
|
+
config.gemini_api_key = api_key if api_key
|
|
158
162
|
when "gpustack"
|
|
159
163
|
config.gpustack_api_base = base_url
|
|
160
164
|
api_key = SwarmSDK.config.gpustack_api_key
|
|
@@ -40,5 +40,11 @@ require_relative "message_management_patch"
|
|
|
40
40
|
# 7. Responses API patch (depends on configuration, uses error classes)
|
|
41
41
|
require_relative "responses_api_patch"
|
|
42
42
|
|
|
43
|
-
# 8. MCP SSL patch (configures SSL for HTTPX connections in ruby_llm-mcp)
|
|
43
|
+
# 8. Streaming error patch (hardens error parsing for non-standard proxy responses)
|
|
44
|
+
require_relative "streaming_error_patch"
|
|
45
|
+
|
|
46
|
+
# 9. OpenAI thought_signature patch (preserves Gemini thought signatures through OpenAI provider)
|
|
47
|
+
require_relative "openai_thought_signature_patch"
|
|
48
|
+
|
|
49
|
+
# 10. MCP SSL patch (configures SSL for HTTPX connections in ruby_llm-mcp)
|
|
44
50
|
require_relative "mcp_ssl_patch"
|
|
@@ -0,0 +1,98 @@
|
|
|
# frozen_string_literal: true

# Patches RubyLLM::Providers::OpenAI::Tools to preserve thought_signature
# through the OpenAI-compatible streaming pipeline.
#
# Vertex AI Gemini 3 models with "thinking" enabled return a thought_signature
# in tool call responses via extra_content.google.thought_signature. This must
# be echoed back in subsequent requests or the API rejects the request with:
#
#   "function call is missing a thought_signature"
#
# The native Gemini provider handles this correctly, but the OpenAI provider
# (used for OpenAI-compatible proxies) drops thought_signature in both
# parse_tool_calls and format_tool_calls. The rest of the pipeline
# (StreamAccumulator, ToolCall) already supports thought_signature.
#
# This patch:
# - Extracts thought_signature from extra_content during tool call parsing
# - Echoes thought_signature back in extra_content during serialization
module RubyLLM
  module Providers
    class OpenAI
      module Tools
        # rubocop:disable Style/ModuleFunction -- required to replace singleton method copy

        module_function

        # Parse tool calls from OpenAI-format response data, carrying any
        # Google thought_signature through to the resulting ToolCall.
        #
        # @param tool_calls [Array<Hash>] Raw tool call data from API response
        # @param parse_arguments [Boolean] Whether to JSON-parse arguments (false during streaming)
        # @return [Hash{String => ToolCall}, nil] Parsed tool calls keyed by ID
        def parse_tool_calls(tool_calls, parse_arguments: true)
          return unless tool_calls&.any?

          tool_calls.each_with_object({}) do |raw, parsed|
            signature = raw.dig("extra_content", "google", "thought_signature")
            arguments =
              if parse_arguments
                parse_tool_call_arguments(raw)
              else
                # During streaming, arguments arrive as partial JSON text;
                # leave them unparsed for the accumulator to assemble.
                raw.dig("function", "arguments")
              end

            parsed[raw["id"]] = ToolCall.new(
              id: raw["id"],
              name: raw.dig("function", "name"),
              arguments: arguments,
              thought_signature: signature,
            )
          end
        end

        # Serialize tool calls into OpenAI-format request data, echoing any
        # thought_signature back under extra_content.google.
        #
        # @param tool_calls [Hash{String => ToolCall}] Tool calls to serialize
        # @return [Array<Hash>, nil] Serialized tool calls for API request
        def format_tool_calls(tool_calls)
          return unless tool_calls&.any?

          tool_calls.values.map do |call|
            payload = {
              id: call.id,
              type: "function",
              function: {
                name: call.name,
                arguments: JSON.generate(call.arguments),
              },
            }

            signature = call.thought_signature
            payload[:extra_content] = { google: { thought_signature: signature } } if signature

            payload
          end
        end

        # Parse tool call arguments from raw hash.
        #
        # @param tool_call [Hash] Raw tool call hash
        # @return [Hash] Parsed arguments ({} when absent or blank)
        def parse_tool_call_arguments(tool_call)
          raw = tool_call.dig("function", "arguments")
          return {} if raw.nil? || raw.empty?

          JSON.parse(raw)
        end
        # rubocop:enable Style/ModuleFunction
      end
    end
  end
end
|
|
@@ -0,0 +1,50 @@
|
|
|
# frozen_string_literal: true

# Hardens RubyLLM::Providers::OpenAI::Streaming#parse_streaming_error against
# non-standard error response shapes returned by OpenAI-compatible proxies
# (e.g. Gemini via Vertex AI).
#
# The upstream implementation assumes `error_data['error']` is always a Hash,
# but some proxies return a bare String ({"error": "message"}) or an Array
# top-level, causing TypeError: no implicit conversion of String into Integer.
# Proxies can also emit non-JSON error bodies (plain text / HTML), which would
# raise JSON::ParserError out of the parser.
#
# This patch adds type and parse guards while preserving the exact original
# behavior for well-formed OpenAI error responses.
#
# Upstream issue: https://github.com/crmne/ruby_llm/issues (TODO: link the filed issue)
module RubyLLM
  module Providers
    class OpenAI
      module Streaming
        # rubocop:disable Style/ModuleFunction -- module_function is required here
        # to replace both the singleton and instance method copies created by the
        # original module_function call in upstream RubyLLM. extend self would only
        # add a delegation layer and not override the existing singleton method.

        module_function

        # Parse a streaming error payload into an HTTP-style [status, message] pair.
        #
        # @param data [String] Raw error payload received on the stream
        # @return [Array(Integer, String), nil] Status code and message, or nil
        #   when the payload carries no parseable structured error
        def parse_streaming_error(data)
          error_data = JSON.parse(data)
          # Guard against non-Hash top-level payloads (e.g. a bare Array).
          return unless error_data.is_a?(Hash)

          error = error_data["error"]
          return unless error

          # Some proxies return {"error": "message"} instead of {"error": {"type": ..., "message": ...}}
          return [500, error.to_s] unless error.is_a?(Hash)

          case error["type"]
          when "server_error"
            [500, error["message"]]
          when "rate_limit_exceeded", "insufficient_quota"
            [429, error["message"]]
          else
            [400, error["message"]]
          end
        rescue JSON::ParserError
          # Non-JSON error bodies (plain text / HTML from a proxy): treat as
          # "no structured error" rather than crashing the stream.
          nil
        end
        # rubocop:enable Style/ModuleFunction
      end
    end
  end
end
|
data/lib/swarm_sdk/version.rb
CHANGED
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: swarm_sdk
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 2.7.14
|
|
4
|
+
version: 2.7.15
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Paulo Arruda
|
|
@@ -183,7 +183,9 @@ files:
|
|
|
183
183
|
- lib/swarm_sdk/ruby_llm_patches/io_endpoint_patch.rb
|
|
184
184
|
- lib/swarm_sdk/ruby_llm_patches/mcp_ssl_patch.rb
|
|
185
185
|
- lib/swarm_sdk/ruby_llm_patches/message_management_patch.rb
|
|
186
|
+
- lib/swarm_sdk/ruby_llm_patches/openai_thought_signature_patch.rb
|
|
186
187
|
- lib/swarm_sdk/ruby_llm_patches/responses_api_patch.rb
|
|
188
|
+
- lib/swarm_sdk/ruby_llm_patches/streaming_error_patch.rb
|
|
187
189
|
- lib/swarm_sdk/ruby_llm_patches/tool_concurrency_patch.rb
|
|
188
190
|
- lib/swarm_sdk/snapshot.rb
|
|
189
191
|
- lib/swarm_sdk/snapshot_from_events.rb
|