claude_swarm 0.3.2 → 1.0.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in those registries.
@@ -1,26 +1,53 @@
  # frozen_string_literal: true
 
+ require "openai"
+ require "faraday/net_http_persistent"
+ require "faraday/retry"
+
  module ClaudeSwarm
  module OpenAI
- class Executor
- attr_reader :session_id, :last_response, :working_directory, :logger, :session_path
+ class Executor < BaseExecutor
+ # Static configuration for Faraday retry middleware
+ FARADAY_RETRY_CONFIG = {
+ max: 3, # Maximum number of retries
+ interval: 0.5, # Initial delay between retries (in seconds)
+ interval_randomness: 0.5, # Randomness factor for retry intervals
+ backoff_factor: 2, # Exponential backoff factor
+ exceptions: [
+ Faraday::TimeoutError,
+ Faraday::ConnectionFailed,
+ Faraday::ServerError, # Retry on 5xx errors
+ ].freeze,
+ retry_statuses: [429, 500, 502, 503, 504].freeze, # HTTP status codes to retry
+ }.freeze
+
+ # Static configuration for OpenAI client
+ OPENAI_CLIENT_CONFIG = {
+ log_errors: true,
+ request_timeout: 1800, # 30 minutes
+ }.freeze
 
  def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
  instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
- claude_session_id: nil, additional_directories: [],
+ claude_session_id: nil, additional_directories: [], debug: false,
  temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
  base_url: nil, reasoning_effort: nil)
- @working_directory = working_directory
- @additional_directories = additional_directories
- @model = model
- @mcp_config = mcp_config
- @vibe = vibe
- @session_id = claude_session_id
- @last_response = nil
- @instance_name = instance_name
- @instance_id = instance_id
- @calling_instance = calling_instance
- @calling_instance_id = calling_instance_id
+ # Call parent initializer for common attributes
+ super(
+ working_directory: working_directory,
+ model: model,
+ mcp_config: mcp_config,
+ vibe: vibe,
+ instance_name: instance_name,
+ instance_id: instance_id,
+ calling_instance: calling_instance,
+ calling_instance_id: calling_instance_id,
+ claude_session_id: claude_session_id,
+ additional_directories: additional_directories,
+ debug: debug
+ )
+
+ # OpenAI-specific attributes
  @temperature = temperature
  @api_version = api_version
  @base_url = base_url
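
For context, a minimal standalone sketch of how a frozen retry config like FARADAY_RETRY_CONFIG above is typically wired into a Faraday connection with the faraday-retry and faraday-net_http_persistent gems; the URL below is a placeholder, not part of this gem:

  require "faraday"
  require "faraday/retry"
  require "faraday/net_http_persistent"

  RETRY_CONFIG = {
    max: 3,                   # up to 3 retries
    interval: 0.5,            # first retry after roughly 0.5 s
    interval_randomness: 0.5, # add jitter to each delay
    backoff_factor: 2,        # double the delay on each retry
    exceptions: [Faraday::TimeoutError, Faraday::ConnectionFailed, Faraday::ServerError],
    retry_statuses: [429, 500, 502, 503, 504],
  }.freeze

  conn = Faraday.new(url: "https://api.example.com") do |f|
    f.request(:retry, **RETRY_CONFIG)  # retry transient failures with exponential backoff
    f.adapter(:net_http_persistent)    # reuse TCP connections across requests
  end
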
@@ -30,37 +57,14 @@ module ClaudeSwarm
  @conversation_messages = []
  @previous_response_id = nil
 
- # Setup logging first
- setup_logging
-
  # Setup OpenAI client
  setup_openai_client(openai_token_env)
 
  # Setup MCP client for tools
  setup_mcp_client
 
- # Create API handlers
- @chat_completion_handler = OpenAI::ChatCompletion.new(
- openai_client: @openai_client,
- mcp_client: @mcp_client,
- available_tools: @available_tools,
- logger: self,
- instance_name: @instance_name,
- model: @model,
- temperature: @temperature,
- reasoning_effort: @reasoning_effort,
- )
-
- @responses_handler = OpenAI::Responses.new(
- openai_client: @openai_client,
- mcp_client: @mcp_client,
- available_tools: @available_tools,
- logger: self,
- instance_name: @instance_name,
- model: @model,
- temperature: @temperature,
- reasoning_effort: @reasoning_effort,
- )
+ # Create API handler based on api_version
+ @api_handler = create_api_handler
  end
 
  def execute(prompt, options = {})
@@ -70,61 +74,23 @@ module ClaudeSwarm
  # Start timing
  start_time = Time.now
 
- # Execute based on API version
- result = if @api_version == "responses"
- @responses_handler.execute(prompt, options)
- else
- @chat_completion_handler.execute(prompt, options)
- end
+ # Execute using the appropriate handler
+ result = @api_handler.execute(prompt, options)
 
  # Calculate duration
  duration_ms = ((Time.now - start_time) * 1000).round
 
- # Format response similar to ClaudeCodeExecutor
- response = {
- "type" => "result",
- "result" => result,
- "duration_ms" => duration_ms,
- "total_cost" => calculate_cost(result),
- "session_id" => @session_id,
- }
-
- log_response(response)
-
- @last_response = response
- response
+ # Build and return response
+ build_response(result, duration_ms)
  rescue StandardError => e
- @logger.error("Unexpected error for #{@instance_name}: #{e.class} - #{e.message}")
- @logger.error("Backtrace: #{e.backtrace.join("\n")}")
+ logger.error { "Unexpected error for #{@instance_name}: #{e.class} - #{e.message}" }
+ logger.error { "Backtrace: #{e.backtrace.join("\n")}" }
  raise
  end
 
  def reset_session
- @session_id = nil
- @last_response = nil
- @chat_completion_handler&.reset_session
- @responses_handler&.reset_session
- end
-
- def has_session?
- !@session_id.nil?
- end
-
- # Delegate logger methods for the API handlers
- def info(message)
- @logger.info(message)
- end
-
- def error(message)
- @logger.error(message)
- end
-
- def warn(message)
- @logger.warn(message)
- end
-
- def debug(message)
- @logger.debug(message)
+ super
+ @api_handler&.reset_session
  end
 
  # Session JSON logger for the API handlers
@@ -139,14 +105,15 @@ module ClaudeSwarm
  private
 
  def setup_openai_client(token_env)
- config = {
- access_token: ENV.fetch(token_env),
- log_errors: true,
- request_timeout: 1800, # 30 minutes
- }
- config[:uri_base] = @base_url if @base_url
+ openai_client_config = build_openai_client_config(token_env)
+
+ @openai_client = ::OpenAI::Client.new(openai_client_config) do |faraday|
+ # Use persistent HTTP connections for better performance
+ faraday.adapter(:net_http_persistent)
 
- @openai_client = ::OpenAI::Client.new(config)
+ # Add retry middleware with custom configuration
+ faraday.request(:retry, **build_faraday_retry_config)
+ end
  rescue KeyError
  raise ExecutionError, "OpenAI API key not found in environment variable: #{token_env}"
  end
@@ -155,54 +122,26 @@ module ClaudeSwarm
  return unless @mcp_config && File.exist?(@mcp_config)
 
  # Read MCP config to find MCP servers
- mcp_data = JSON.parse(File.read(@mcp_config))
-
- # Create MCP client with all MCP servers from the config
- if mcp_data["mcpServers"] && !mcp_data["mcpServers"].empty?
- mcp_configs = []
-
- mcp_data["mcpServers"].each do |name, server_config|
- case server_config["type"]
- when "stdio"
- # Combine command and args into a single array
- command_array = [server_config["command"]]
- command_array.concat(server_config["args"] || [])
-
- stdio_config = MCPClient.stdio_config(
- command: command_array,
- name: name,
- )
- stdio_config[:read_timeout] = 1800
- mcp_configs << stdio_config
- when "sse"
- @logger.warn("SSE MCP servers not yet supported for OpenAI instances: #{name}")
- # TODO: Add SSE support when available in ruby-mcp-client
- end
- end
-
- if mcp_configs.any?
- # Create MCP client with unbundled environment to avoid bundler conflicts
- # This ensures MCP servers run in a clean environment without inheriting
- # Claude Swarm's BUNDLE_* environment variables
- Bundler.with_unbundled_env do
- @mcp_client = MCPClient.create_client(
- mcp_server_configs: mcp_configs,
- logger: @logger,
- )
-
- # List available tools from all MCP servers
- begin
- @available_tools = @mcp_client.list_tools
- @logger.info("Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)")
- rescue StandardError => e
- @logger.error("Failed to load MCP tools: #{e.message}")
- @available_tools = []
- end
- end
- end
+ mcp_data = JsonHandler.parse_file!(@mcp_config)
+
+ # Build MCP configurations from servers
+ mcp_configs = build_mcp_configs(mcp_data["mcpServers"])
+ return if mcp_configs.empty?
+
+ # Create MCP client with unbundled environment to avoid bundler conflicts
+ # This ensures MCP servers run in a clean environment without inheriting
+ # Claude Swarm's BUNDLE_* environment variables
+ Bundler.with_unbundled_env do
+ @mcp_client = MCPClient.create_client(
+ mcp_server_configs: mcp_configs,
+ logger: @logger,
+ )
+
+ # List available tools from all MCP servers
+ load_mcp_tools(mcp_configs)
  end
  rescue StandardError => e
- @logger.error("Failed to setup MCP client: #{e.message}")
+ logger.error { "Failed to setup MCP client: #{e.message}" }
  @mcp_client = nil
  @available_tools = []
  end
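
For illustration only, the mcpServers hash that build_mcp_configs iterates over has roughly this shape; the server names and command below are made-up examples, not from this gem:

  mcp_servers = {
    "files"  => { "type" => "stdio", "command" => "some-mcp-server", "args" => ["--root", "."] },
    "events" => { "type" => "sse" },
  }

  # "stdio" entries become MCPClient stdio configs (command plus args, 1800 s read timeout);
  # "sse" entries only produce a warning until ruby-mcp-client support is added.
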
@@ -213,96 +152,103 @@ module ClaudeSwarm
  "$0.00"
  end
 
- def setup_logging
- # Use session path from environment (required)
- @session_path = SessionPath.from_env
- SessionPath.ensure_directory(@session_path)
-
- # Create logger with session.log filename
- log_filename = "session.log"
- log_path = File.join(@session_path, log_filename)
- @logger = Logger.new(log_path)
- @logger.level = Logger::INFO
+ def create_api_handler
+ handler_params = {
+ openai_client: @openai_client,
+ mcp_client: @mcp_client,
+ available_tools: @available_tools,
+ executor: self,
+ instance_name: @instance_name,
+ model: @model,
+ temperature: @temperature,
+ reasoning_effort: @reasoning_effort,
+ }
 
- # Custom formatter for better readability
- @logger.formatter = proc do |severity, datetime, _progname, msg|
- "[#{datetime.strftime("%Y-%m-%d %H:%M:%S.%L")}] [#{severity}] #{msg}\n"
+ if @api_version == "responses"
+ OpenAI::Responses.new(**handler_params)
+ else
+ OpenAI::ChatCompletion.new(**handler_params)
  end
-
- return unless @instance_name
-
- instance_info = @instance_name
- instance_info += " (#{@instance_id})" if @instance_id
- @logger.info("Started OpenAI executor for instance: #{instance_info}")
  end
 
- def log_request(prompt)
- caller_info = @calling_instance
- caller_info += " (#{@calling_instance_id})" if @calling_instance_id
- instance_info = @instance_name
- instance_info += " (#{@instance_id})" if @instance_id
- @logger.info("#{caller_info} -> #{instance_info}: \n---\n#{prompt}\n---")
-
- # Build event hash for JSON logging
- event = {
- type: "request",
- from_instance: @calling_instance,
- from_instance_id: @calling_instance_id,
- to_instance: @instance_name,
- to_instance_id: @instance_id,
- prompt: prompt,
- timestamp: Time.now.iso8601,
- }
-
- append_to_session_json(event)
+ def log_streaming_content(content)
+ # Log streaming content similar to ClaudeCodeExecutor
+ logger.debug { "#{instance_info} streaming: #{content}" }
  end
 
- def log_response(response)
- caller_info = @calling_instance
- caller_info += " (#{@calling_instance_id})" if @calling_instance_id
- instance_info = @instance_name
- instance_info += " (#{@instance_id})" if @instance_id
- @logger.info(
- "(#{response["total_cost"]} - #{response["duration_ms"]}ms) #{instance_info} -> #{caller_info}: \n---\n#{response["result"]}\n---",
+ def build_faraday_retry_config
+ FARADAY_RETRY_CONFIG.merge(
+ retry_block: method(:handle_retry_logging),
  )
  end
 
- def log_streaming_content(content)
- # Log streaming content similar to ClaudeCodeExecutor
- instance_info = @instance_name
- instance_info += " (#{@instance_id})" if @instance_id
- @logger.debug("#{instance_info} streaming: #{content}")
+ def handle_retry_logging(env:, options:, retry_count:, exception:, will_retry:)
+ retry_delay = options.interval * (options.backoff_factor**(retry_count - 1))
+ error_info = exception&.message || "HTTP #{env.status}"
+
+ message = if will_retry
+ "Request failed (attempt #{retry_count}/#{options.max}): #{error_info}. Retrying in #{retry_delay} seconds..."
+ else
+ "Request failed after #{retry_count} attempts: #{error_info}. Giving up."
+ end
+
+ @logger.warn(message)
  end
 
- def append_to_session_json(event)
- json_filename = "session.log.json"
- json_path = File.join(@session_path, json_filename)
+ def build_openai_client_config(token_env)
+ OPENAI_CLIENT_CONFIG.merge(access_token: ENV.fetch(token_env)).tap do |config|
+ config[:uri_base] = @base_url if @base_url
+ end
+ end
 
- # Use file locking to ensure thread-safe writes
- File.open(json_path, File::WRONLY | File::APPEND | File::CREAT) do |file|
- file.flock(File::LOCK_EX)
+ def build_stdio_config(name, server_config)
+ # Combine command and args into a single array
+ command_array = [server_config["command"]]
+ command_array.concat(server_config["args"] || [])
 
- # Create entry with metadata
- entry = {
- instance: @instance_name,
- instance_id: @instance_id,
- calling_instance: @calling_instance,
- calling_instance_id: @calling_instance_id,
- timestamp: Time.now.iso8601,
- event: event,
- }
+ MCPClient.stdio_config(
+ command: command_array,
+ name: name,
+ ).tap do |config|
+ config[:read_timeout] = 1800
+ end
+ end
 
- # Write as single line JSON (JSONL format)
- file.puts(entry.to_json)
+ def build_mcp_configs(mcp_servers)
+ return [] if mcp_servers.nil? || mcp_servers.empty?
 
- file.flock(File::LOCK_UN)
+ mcp_servers.filter_map do |name, server_config|
+ case server_config["type"]
+ when "stdio"
+ build_stdio_config(name, server_config)
+ when "sse"
+ logger.warn { "SSE MCP servers not yet supported for OpenAI instances: #{name}" }
+ # TODO: Add SSE support when available in ruby-mcp-client
+ nil
+ end
  end
+ end
+
+ def load_mcp_tools(mcp_configs)
+ @available_tools = @mcp_client.list_tools
+ logger.info { "Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)" }
  rescue StandardError => e
- @logger.error("Failed to append to session JSON: #{e.message}")
- raise
+ logger.error { "Failed to load MCP tools: #{e.message}" }
+ @available_tools = []
  end
 
- class ExecutionError < StandardError; end
+ def build_response(result, duration_ms)
+ {
+ "type" => "result",
+ "result" => result,
+ "duration_ms" => duration_ms,
+ "total_cost" => calculate_cost(result),
+ "session_id" => @session_id,
+ }.tap do |response|
+ log_response(response)
+ @last_response = response
+ end
+ end
  end
  end
  end
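
With the static FARADAY_RETRY_CONFIG above (interval 0.5, backoff_factor 2), the delay that handle_retry_logging reports for each retry works out as follows (before the interval_randomness jitter is applied):

  interval       = 0.5
  backoff_factor = 2
  (1..3).map { |retry_count| interval * backoff_factor**(retry_count - 1) }
  # => [0.5, 1.0, 2.0] seconds reported for retries 1, 2 and 3
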
@@ -5,11 +5,11 @@ module ClaudeSwarm
  class Responses
  MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite
 
- def initialize(openai_client:, mcp_client:, available_tools:, logger:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
+ def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
  @openai_client = openai_client
  @mcp_client = mcp_client
  @available_tools = available_tools
- @executor = logger # This is actually the executor, not a logger
+ @executor = executor
  @instance_name = instance_name
  @model = model
  @temperature = temperature
@@ -37,7 +37,7 @@ module ClaudeSwarm
  def process_responses_api(input, conversation_array, previous_response_id, depth = 0)
  # Prevent infinite recursion
  if depth > MAX_TURNS_WITH_TOOLS
- @executor.error("Maximum recursion depth reached in tool execution")
+ @executor.logger.error { "Maximum recursion depth reached in tool execution" }
  return "Error: Maximum tool call depth exceeded"
  end
 
@@ -72,11 +72,11 @@ module ClaudeSwarm
  parameters[:input] = conversation_array
 
  # Log conversation array to debug duplicates
- @executor.info("Conversation array size: #{conversation_array.size}")
+ @executor.logger.info { "Conversation array size: #{conversation_array.size}" }
  conversation_ids = conversation_array.map do |item|
  item["call_id"] || item["id"] || "no-id-#{item["type"]}"
  end.compact
- @executor.info("Conversation item IDs: #{conversation_ids.inspect}")
+ @executor.logger.info { "Conversation item IDs: #{conversation_ids.inspect}" }
  end
 
  # Add previous response ID for conversation continuity
@@ -93,11 +93,11 @@ module ClaudeSwarm
  "parameters" => tool.schema || {},
  }
  end
- @executor.info("Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}")
+ @executor.logger.info { "Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}" }
  end
 
  # Log the request parameters
- @executor.info("Responses API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}")
+ @executor.logger.info { "Responses API Request (depth=#{depth}): #{JsonHandler.pretty_generate!(parameters)}" }
 
  # Append to session JSON
  append_to_session_json({
@@ -111,16 +111,16 @@ module ClaudeSwarm
  begin
  response = @openai_client.responses.create(parameters: parameters)
  rescue StandardError => e
- @executor.error("Responses API error: #{e.class} - #{e.message}")
- @executor.error("Request parameters: #{JSON.pretty_generate(parameters)}")
+ @executor.logger.error { "Responses API error: #{e.class} - #{e.message}" }
+ @executor.logger.error { "Request parameters: #{JsonHandler.pretty_generate!(parameters)}" }
 
  # Try to extract and log the response body for better debugging
  if e.respond_to?(:response)
  begin
  error_body = e.response[:body]
- @executor.error("Error response body: #{error_body}")
+ @executor.logger.error { "Error response body: #{error_body}" }
  rescue StandardError => parse_error
- @executor.error("Could not parse error response: #{parse_error.message}")
+ @executor.logger.error { "Could not parse error response: #{parse_error.message}" }
  end
  end
 
@@ -140,7 +140,7 @@ module ClaudeSwarm
  end
 
  # Log the full response
- @executor.info("Responses API Full Response (depth=#{depth}): #{JSON.pretty_generate(response)}")
+ @executor.logger.info { "Responses API Full Response (depth=#{depth}): #{JsonHandler.pretty_generate!(response)}" }
 
  # Append to session JSON
  append_to_session_json({
@@ -157,7 +157,7 @@ module ClaudeSwarm
  output = response["output"]
 
  if output.nil?
- @executor.error("No output in response")
+ @executor.logger.error { "No output in response" }
  return "Error: No output in OpenAI response"
  end
 
@@ -185,7 +185,7 @@ module ClaudeSwarm
  extract_text_response(output)
  end
  else
- @executor.error("Unexpected output format: #{output.inspect}")
+ @executor.logger.error { "Unexpected output format: #{output.inspect}" }
  "Error: Unexpected response format"
  end
  end
@@ -200,12 +200,12 @@ module ClaudeSwarm
 
  def build_conversation_with_outputs(function_calls)
  # Log tool calls
- @executor.info("Responses API - Handling #{function_calls.size} function calls")
+ @executor.logger.info { "Responses API - Handling #{function_calls.size} function calls" }
 
  # Log IDs to check for duplicates
  call_ids = function_calls.map { |fc| fc["call_id"] || fc["id"] }
- @executor.info("Function call IDs: #{call_ids.inspect}")
- @executor.warn("WARNING: Duplicate function call IDs detected!") if call_ids.size != call_ids.uniq.size
+ @executor.logger.info { "Function call IDs: #{call_ids.inspect}" }
+ @executor.logger.warn { "WARNING: Duplicate function call IDs detected!" } if call_ids.size != call_ids.uniq.size
 
  # Append to session JSON
  append_to_session_json({
@@ -226,20 +226,20 @@ module ClaudeSwarm
  call_id = function_call["call_id"]
 
  # Log both IDs to debug
- @executor.info("Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}")
+ @executor.logger.info { "Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}" }
 
  begin
  # Parse arguments
- tool_args = JSON.parse(tool_args_str)
+ tool_args = JsonHandler.parse!(tool_args_str)
 
  # Log tool execution
- @executor.info("Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")
+ @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JsonHandler.pretty_generate!(tool_args)}" }
 
  # Execute tool via MCP
  result = @mcp_client.call_tool(tool_name, tool_args)
 
  # Log result
- @executor.info("Responses API - Tool result for #{tool_name}: #{result}")
+ @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
  # Append to session JSON
  append_to_session_json({
@@ -257,8 +257,8 @@ module ClaudeSwarm
  output: result.to_json, # Must be JSON string
  }
  rescue StandardError => e
- @executor.error("Responses API - Tool execution failed for #{tool_name}: #{e.message}")
- @executor.error(e.backtrace.join("\n"))
+ @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
+ @executor.logger.error { e.backtrace.join("\n") }
 
  # Append error to session JSON
  append_to_session_json({
@@ -282,8 +282,8 @@ module ClaudeSwarm
  end
  end
 
- @executor.info("Responses API - Built conversation with #{conversation.size} function outputs")
- @executor.debug("Final conversation structure: #{JSON.pretty_generate(conversation)}")
+ @executor.logger.info { "Responses API - Built conversation with #{conversation.size} function outputs" }
+ @executor.logger.debug { "Final conversation structure: #{JsonHandler.pretty_generate!(conversation)}" }
  conversation
  end
 
@@ -299,16 +299,16 @@ module ClaudeSwarm
 
  begin
  # Parse arguments
- tool_args = JSON.parse(tool_args_str)
+ tool_args = JsonHandler.parse!(tool_args_str)
 
  # Log tool execution
- @executor.info("Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")
+ @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JsonHandler.pretty_generate!(tool_args)}" }
 
  # Execute tool via MCP
  result = @mcp_client.call_tool(tool_name, tool_args)
 
  # Log result
- @executor.info("Responses API - Tool result for #{tool_name}: #{result}")
+ @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
  # Add function output to conversation
  conversation << {
@@ -317,7 +317,7 @@ module ClaudeSwarm
  output: result.to_json, # Must be JSON string
  }
  rescue StandardError => e
- @executor.error("Responses API - Tool execution failed for #{tool_name}: #{e.message}")
+ @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
 
  # Add error output to conversation
  conversation << {