claude_swarm 0.3.6 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +26 -0
- data/CLAUDE.md +50 -3
- data/README.md +64 -0
- data/examples/simple-session-hook-swarm.yml +37 -0
- data/lib/claude_swarm/base_executor.rb +133 -0
- data/lib/claude_swarm/claude_code_executor.rb +21 -136
- data/lib/claude_swarm/claude_mcp_server.rb +2 -1
- data/lib/claude_swarm/cli.rb +1 -0
- data/lib/claude_swarm/configuration.rb +1 -0
- data/lib/claude_swarm/openai/chat_completion.rb +15 -15
- data/lib/claude_swarm/openai/executor.rb +46 -160
- data/lib/claude_swarm/openai/responses.rb +27 -27
- data/lib/claude_swarm/orchestrator.rb +166 -166
- data/lib/claude_swarm/settings_generator.rb +54 -0
- data/lib/claude_swarm/version.rb +1 -1
- metadata +4 -1
data/lib/claude_swarm/openai/chat_completion.rb

@@ -5,11 +5,11 @@ module ClaudeSwarm
     class ChatCompletion
       MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite
 
-      def initialize(openai_client:, mcp_client:, available_tools:,
+      def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
         @openai_client = openai_client
         @mcp_client = mcp_client
         @available_tools = available_tools
-        @executor =
+        @executor = executor
         @instance_name = instance_name
         @model = model
         @temperature = temperature
@@ -57,7 +57,7 @@ module ClaudeSwarm
       def process_chat_completion(messages, depth = 0)
         # Prevent infinite recursion
         if depth > MAX_TURNS_WITH_TOOLS
-          @executor.error
+          @executor.logger.error { "Maximum recursion depth reached in tool execution" }
           return "Error: Maximum tool call depth exceeded"
         end
 
@@ -83,7 +83,7 @@ module ClaudeSwarm
         parameters[:tools] = @mcp_client.to_openai_tools if @available_tools&.any? && @mcp_client
 
         # Log the request parameters
-        @executor.info
+        @executor.logger.info { "Chat API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -97,16 +97,16 @@ module ClaudeSwarm
         begin
           response = @openai_client.chat(parameters: parameters)
         rescue StandardError => e
-          @executor.error
-          @executor.error
+          @executor.logger.error { "Chat API error: #{e.class} - #{e.message}" }
+          @executor.logger.error { "Request parameters: #{JSON.pretty_generate(parameters)}" }
 
           # Try to extract and log the response body for better debugging
           if e.respond_to?(:response)
             begin
               error_body = e.response[:body]
-              @executor.error
+              @executor.logger.error { "Error response body: #{error_body}" }
             rescue StandardError => parse_error
-              @executor.error
+              @executor.logger.error { "Could not parse error response: #{parse_error.message}" }
             end
           end
 
@@ -127,7 +127,7 @@ module ClaudeSwarm
         end
 
         # Log the response
-        @executor.info
+        @executor.logger.info { "Chat API Response (depth=#{depth}): #{JSON.pretty_generate(response)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -141,7 +141,7 @@ module ClaudeSwarm
         message = response.dig("choices", 0, "message")
 
         if message.nil?
-          @executor.error
+          @executor.logger.error { "No message in response: #{response.inspect}" }
           return "Error: No response from OpenAI"
         end
 
@@ -169,7 +169,7 @@ module ClaudeSwarm
 
       def execute_and_append_tool_results(tool_calls, messages)
         # Log tool calls
-        @executor.info
+        @executor.logger.info { "Executing tool calls: #{JSON.pretty_generate(tool_calls)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -189,13 +189,13 @@ module ClaudeSwarm
           tool_args = tool_args_str.is_a?(String) ? JSON.parse(tool_args_str) : tool_args_str
 
           # Log tool execution
-          @executor.info
+          @executor.logger.info { "Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}" }
 
           # Execute tool via MCP
           result = @mcp_client.call_tool(tool_name, tool_args)
 
           # Log result
-          @executor.info
+          @executor.logger.info { "Tool result for #{tool_name}: #{result}" }
 
           # Append to session JSON
           append_to_session_json({
@@ -214,8 +214,8 @@ module ClaudeSwarm
             content: result.to_s,
           }
         rescue StandardError => e
-          @executor.error
-          @executor.error
+          @executor.logger.error { "Tool execution failed for #{tool_name}: #{e.message}" }
+          @executor.logger.error { e.backtrace.join("\n") }
 
           # Append error to session JSON
           append_to_session_json({
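The diff above replaces the 0.3.6 wiring, where the executor passed itself as a bare `logger:` and exposed `info`/`error`/`warn`/`debug` delegators; in 0.3.8 each handler receives `executor:` and logs through `@executor.logger` in block form. A minimal sketch of that pattern, using stand-in class names that are not part of the gem:

```ruby
# Sketch only: illustrative stubs showing a handler that receives
# `executor:` and logs through the executor's Logger with block form.
require "logger"
require "json"

class StubExecutor
  attr_reader :logger

  def initialize(debug: false)
    @logger = Logger.new($stdout, level: debug ? Logger::DEBUG : Logger::INFO)
  end
end

class StubChatHandler
  def initialize(executor:)
    @executor = executor
  end

  def execute(prompt)
    # The interpolation inside the block only runs if INFO is enabled.
    @executor.logger.info { "Chat API Request: #{JSON.pretty_generate({ prompt: prompt })}" }
    "ok"
  end
end

StubChatHandler.new(executor: StubExecutor.new).execute("hello")
```

The same `executor:` keyword is added to `OpenAI::Responses#initialize` in the responses.rb hunks further down.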
data/lib/claude_swarm/openai/executor.rb

@@ -2,25 +2,28 @@
 
 module ClaudeSwarm
   module OpenAI
-    class Executor
-      attr_reader :session_id, :last_response, :working_directory, :logger, :session_path
-
+    class Executor < BaseExecutor
       def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
         instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
-        claude_session_id: nil, additional_directories: [],
+        claude_session_id: nil, additional_directories: [], debug: false,
         temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
         base_url: nil, reasoning_effort: nil)
-
-
-
-
-
-
-
-
-
-
-
+        # Call parent initializer for common attributes
+        super(
+          working_directory: working_directory,
+          model: model,
+          mcp_config: mcp_config,
+          vibe: vibe,
+          instance_name: instance_name,
+          instance_id: instance_id,
+          calling_instance: calling_instance,
+          calling_instance_id: calling_instance_id,
+          claude_session_id: claude_session_id,
+          additional_directories: additional_directories,
+          debug: debug
+        )
+
+        # OpenAI-specific attributes
         @temperature = temperature
         @api_version = api_version
         @base_url = base_url
@@ -30,37 +33,14 @@ module ClaudeSwarm
         @conversation_messages = []
         @previous_response_id = nil
 
-        # Setup logging first
-        setup_logging
-
         # Setup OpenAI client
         setup_openai_client(openai_token_env)
 
         # Setup MCP client for tools
         setup_mcp_client
 
-        # Create API
-        @
-          openai_client: @openai_client,
-          mcp_client: @mcp_client,
-          available_tools: @available_tools,
-          logger: self,
-          instance_name: @instance_name,
-          model: @model,
-          temperature: @temperature,
-          reasoning_effort: @reasoning_effort,
-        )
-
-        @responses_handler = OpenAI::Responses.new(
-          openai_client: @openai_client,
-          mcp_client: @mcp_client,
-          available_tools: @available_tools,
-          logger: self,
-          instance_name: @instance_name,
-          model: @model,
-          temperature: @temperature,
-          reasoning_effort: @reasoning_effort,
-        )
+        # Create API handler based on api_version
+        @api_handler = create_api_handler
       end
 
       def execute(prompt, options = {})
@@ -70,12 +50,8 @@ module ClaudeSwarm
         # Start timing
         start_time = Time.now
 
-        # Execute
-        result =
-          @responses_handler.execute(prompt, options)
-        else
-          @chat_completion_handler.execute(prompt, options)
-        end
+        # Execute using the appropriate handler
+        result = @api_handler.execute(prompt, options)
 
         # Calculate duration
         duration_ms = ((Time.now - start_time) * 1000).round
@@ -94,37 +70,14 @@ module ClaudeSwarm
        @last_response = response
        response
      rescue StandardError => e
-
-
+        logger.error { "Unexpected error for #{@instance_name}: #{e.class} - #{e.message}" }
+        logger.error { "Backtrace: #{e.backtrace.join("\n")}" }
        raise
      end
 
      def reset_session
-
-        @
-        @chat_completion_handler&.reset_session
-        @responses_handler&.reset_session
-      end
-
-      def has_session?
-        !@session_id.nil?
-      end
-
-      # Delegate logger methods for the API handlers
-      def info(message)
-        @logger.info(message)
-      end
-
-      def error(message)
-        @logger.error(message)
-      end
-
-      def warn(message)
-        @logger.warn(message)
-      end
-
-      def debug(message)
-        @logger.debug(message)
+        super
+        @api_handler&.reset_session
      end
 
      # Session JSON logger for the API handlers
@@ -197,7 +150,7 @@ module ClaudeSwarm
               stdio_config[:read_timeout] = 1800
               mcp_configs << stdio_config
             when "sse"
-
+              logger.warn { "SSE MCP servers not yet supported for OpenAI instances: #{name}" }
               # TODO: Add SSE support when available in ruby-mcp-client
             end
           end
@@ -215,16 +168,16 @@ module ClaudeSwarm
               # List available tools from all MCP servers
               begin
                 @available_tools = @mcp_client.list_tools
-
+                logger.info { "Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)" }
               rescue StandardError => e
-
+                logger.error { "Failed to load MCP tools: #{e.message}" }
                 @available_tools = []
               end
             end
           end
         end
       rescue StandardError => e
-
+        logger.error { "Failed to setup MCP client: #{e.message}" }
         @mcp_client = nil
         @available_tools = []
       end
@@ -235,96 +188,29 @@ module ClaudeSwarm
         "$0.00"
       end
 
-      def
-
-
-
-
-
-
-
-
-
-
-        # Custom formatter for better readability
-        @logger.formatter = proc do |severity, datetime, _progname, msg|
-          "[#{datetime.strftime("%Y-%m-%d %H:%M:%S.%L")}] [#{severity}] #{msg}\n"
-        end
-
-        return unless @instance_name
-
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info("Started OpenAI executor for instance: #{instance_info}")
-      end
-
-      def log_request(prompt)
-        caller_info = @calling_instance
-        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info("#{caller_info} -> #{instance_info}: \n---\n#{prompt}\n---")
-
-        # Build event hash for JSON logging
-        event = {
-          type: "request",
-          from_instance: @calling_instance,
-          from_instance_id: @calling_instance_id,
-          to_instance: @instance_name,
-          to_instance_id: @instance_id,
-          prompt: prompt,
-          timestamp: Time.now.iso8601,
+      def create_api_handler
+        handler_params = {
+          openai_client: @openai_client,
+          mcp_client: @mcp_client,
+          available_tools: @available_tools,
+          executor: self,
+          instance_name: @instance_name,
+          model: @model,
+          temperature: @temperature,
+          reasoning_effort: @reasoning_effort,
        }
 
-
-
-
-
-        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info(
-          "(#{response["total_cost"]} - #{response["duration_ms"]}ms) #{instance_info} -> #{caller_info}: \n---\n#{response["result"]}\n---",
-        )
+        if @api_version == "responses"
+          OpenAI::Responses.new(**handler_params)
+        else
+          OpenAI::ChatCompletion.new(**handler_params)
+        end
      end
 
      def log_streaming_content(content)
        # Log streaming content similar to ClaudeCodeExecutor
-        instance_info
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.debug("#{instance_info} streaming: #{content}")
-      end
-
-      def append_to_session_json(event)
-        json_filename = "session.log.json"
-        json_path = File.join(@session_path, json_filename)
-
-        # Use file locking to ensure thread-safe writes
-        File.open(json_path, File::WRONLY | File::APPEND | File::CREAT) do |file|
-          file.flock(File::LOCK_EX)
-
-          # Create entry with metadata
-          entry = {
-            instance: @instance_name,
-            instance_id: @instance_id,
-            calling_instance: @calling_instance,
-            calling_instance_id: @calling_instance_id,
-            timestamp: Time.now.iso8601,
-            event: event,
-          }
-
-          # Write as single line JSON (JSONL format)
-          file.puts(entry.to_json)
-
-          file.flock(File::LOCK_UN)
-        end
-      rescue StandardError => e
-        @logger.error("Failed to append to session JSON: #{e.message}")
-        raise
+        logger.debug { "#{instance_info} streaming: #{content}" }
      end
-
-      class ExecutionError < StandardError; end
    end
  end
 end
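The executor diff shows the other half of the refactor: common setup moves into the new `BaseExecutor` (133 added lines, not included in this excerpt), and instead of eagerly building both handlers the executor creates a single `@api_handler` via `create_api_handler`. A rough sketch of that shape follows; the base class body is an assumption, since only its subclass usage is visible here, and all names are stand-ins:

```ruby
# Stand-in names throughout; BaseExecutor's real implementation is not
# shown in this diff, so the base class here is assumed.
require "logger"

class SketchBaseExecutor
  attr_reader :logger

  def initialize(instance_name:, debug: false)
    @instance_name = instance_name
    @logger = Logger.new($stdout, level: debug ? Logger::DEBUG : Logger::INFO)
  end

  def reset_session
    @session_id = nil # assumed shared session bookkeeping
  end
end

class SketchChatHandler
  def execute(prompt)
    "chat_completion: #{prompt}"
  end

  def reset_session; end
end

class SketchResponsesHandler
  def execute(prompt)
    "responses: #{prompt}"
  end

  def reset_session; end
end

class SketchOpenAIExecutor < SketchBaseExecutor
  def initialize(instance_name:, api_version: "chat_completion", debug: false)
    super(instance_name: instance_name, debug: debug)
    @api_version = api_version
    @api_handler = create_api_handler # one handler, chosen once
  end

  def execute(prompt)
    logger.info { "Executing via #{@api_handler.class}" }
    @api_handler.execute(prompt)
  end

  def reset_session
    super
    @api_handler&.reset_session
  end

  private

  def create_api_handler
    @api_version == "responses" ? SketchResponsesHandler.new : SketchChatHandler.new
  end
end

puts SketchOpenAIExecutor.new(instance_name: "demo", api_version: "responses").execute("hi")
```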
data/lib/claude_swarm/openai/responses.rb

@@ -5,11 +5,11 @@ module ClaudeSwarm
     class Responses
       MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite
 
-      def initialize(openai_client:, mcp_client:, available_tools:,
+      def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
         @openai_client = openai_client
         @mcp_client = mcp_client
         @available_tools = available_tools
-        @executor =
+        @executor = executor
         @instance_name = instance_name
         @model = model
         @temperature = temperature
@@ -37,7 +37,7 @@ module ClaudeSwarm
       def process_responses_api(input, conversation_array, previous_response_id, depth = 0)
         # Prevent infinite recursion
         if depth > MAX_TURNS_WITH_TOOLS
-          @executor.error
+          @executor.logger.error { "Maximum recursion depth reached in tool execution" }
           return "Error: Maximum tool call depth exceeded"
         end
 
@@ -72,11 +72,11 @@ module ClaudeSwarm
           parameters[:input] = conversation_array
 
           # Log conversation array to debug duplicates
-          @executor.info
+          @executor.logger.info { "Conversation array size: #{conversation_array.size}" }
           conversation_ids = conversation_array.map do |item|
             item["call_id"] || item["id"] || "no-id-#{item["type"]}"
           end.compact
-          @executor.info
+          @executor.logger.info { "Conversation item IDs: #{conversation_ids.inspect}" }
         end
 
         # Add previous response ID for conversation continuity
@@ -93,11 +93,11 @@ module ClaudeSwarm
               "parameters" => tool.schema || {},
             }
           end
-          @executor.info
+          @executor.logger.info { "Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}" }
         end
 
         # Log the request parameters
-        @executor.info
+        @executor.logger.info { "Responses API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -111,16 +111,16 @@ module ClaudeSwarm
         begin
           response = @openai_client.responses.create(parameters: parameters)
         rescue StandardError => e
-          @executor.error
-          @executor.error
+          @executor.logger.error { "Responses API error: #{e.class} - #{e.message}" }
+          @executor.logger.error { "Request parameters: #{JSON.pretty_generate(parameters)}" }
 
           # Try to extract and log the response body for better debugging
           if e.respond_to?(:response)
             begin
               error_body = e.response[:body]
-              @executor.error
+              @executor.logger.error { "Error response body: #{error_body}" }
             rescue StandardError => parse_error
-              @executor.error
+              @executor.logger.error { "Could not parse error response: #{parse_error.message}" }
             end
           end
 
@@ -140,7 +140,7 @@ module ClaudeSwarm
         end
 
         # Log the full response
-        @executor.info
+        @executor.logger.info { "Responses API Full Response (depth=#{depth}): #{JSON.pretty_generate(response)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -157,7 +157,7 @@ module ClaudeSwarm
         output = response["output"]
 
         if output.nil?
-          @executor.error
+          @executor.logger.error { "No output in response" }
           return "Error: No output in OpenAI response"
         end
 
@@ -185,7 +185,7 @@ module ClaudeSwarm
             extract_text_response(output)
           end
         else
-          @executor.error
+          @executor.logger.error { "Unexpected output format: #{output.inspect}" }
           "Error: Unexpected response format"
         end
       end
@@ -200,12 +200,12 @@ module ClaudeSwarm
 
       def build_conversation_with_outputs(function_calls)
         # Log tool calls
-        @executor.info
+        @executor.logger.info { "Responses API - Handling #{function_calls.size} function calls" }
 
         # Log IDs to check for duplicates
         call_ids = function_calls.map { |fc| fc["call_id"] || fc["id"] }
-        @executor.info
-        @executor.warn
+        @executor.logger.info { "Function call IDs: #{call_ids.inspect}" }
+        @executor.logger.warn { "WARNING: Duplicate function call IDs detected!" } if call_ids.size != call_ids.uniq.size
 
         # Append to session JSON
         append_to_session_json({
@@ -226,20 +226,20 @@ module ClaudeSwarm
           call_id = function_call["call_id"]
 
           # Log both IDs to debug
-          @executor.info
+          @executor.logger.info { "Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}" }
 
           begin
             # Parse arguments
             tool_args = JSON.parse(tool_args_str)
 
             # Log tool execution
-            @executor.info
+            @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}" }
 
             # Execute tool via MCP
             result = @mcp_client.call_tool(tool_name, tool_args)
 
             # Log result
-            @executor.info
+            @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
             # Append to session JSON
             append_to_session_json({
@@ -257,8 +257,8 @@ module ClaudeSwarm
              output: result.to_json, # Must be JSON string
            }
          rescue StandardError => e
-            @executor.error
-            @executor.error
+            @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
+            @executor.logger.error { e.backtrace.join("\n") }
 
            # Append error to session JSON
            append_to_session_json({
@@ -282,8 +282,8 @@ module ClaudeSwarm
          end
        end
 
-        @executor.info
-        @executor.debug
+        @executor.logger.info { "Responses API - Built conversation with #{conversation.size} function outputs" }
+        @executor.logger.debug { "Final conversation structure: #{JSON.pretty_generate(conversation)}" }
        conversation
      end
 
@@ -302,13 +302,13 @@ module ClaudeSwarm
          tool_args = JSON.parse(tool_args_str)
 
          # Log tool execution
-          @executor.info
+          @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}" }
 
          # Execute tool via MCP
          result = @mcp_client.call_tool(tool_name, tool_args)
 
          # Log result
-          @executor.info
+          @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
          # Add function output to conversation
          conversation << {
@@ -317,7 +317,7 @@ module ClaudeSwarm
            output: result.to_json, # Must be JSON string
          }
        rescue StandardError => e
-          @executor.error
+          @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
 
          # Add error output to conversation
          conversation << {