claude_swarm 0.3.6 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,25 +2,28 @@
 
 module ClaudeSwarm
   module OpenAI
-    class Executor
-      attr_reader :session_id, :last_response, :working_directory, :logger, :session_path
-
+    class Executor < BaseExecutor
       def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
         instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
-        claude_session_id: nil, additional_directories: [],
+        claude_session_id: nil, additional_directories: [], debug: false,
         temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
         base_url: nil, reasoning_effort: nil)
-        @working_directory = working_directory
-        @additional_directories = additional_directories
-        @model = model
-        @mcp_config = mcp_config
-        @vibe = vibe
-        @session_id = claude_session_id
-        @last_response = nil
-        @instance_name = instance_name
-        @instance_id = instance_id
-        @calling_instance = calling_instance
-        @calling_instance_id = calling_instance_id
+        # Call parent initializer for common attributes
+        super(
+          working_directory: working_directory,
+          model: model,
+          mcp_config: mcp_config,
+          vibe: vibe,
+          instance_name: instance_name,
+          instance_id: instance_id,
+          calling_instance: calling_instance,
+          calling_instance_id: calling_instance_id,
+          claude_session_id: claude_session_id,
+          additional_directories: additional_directories,
+          debug: debug
+        )
+
+        # OpenAI-specific attributes
         @temperature = temperature
         @api_version = api_version
         @base_url = base_url
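The BaseExecutor parent introduced here is not part of this diff. Judging only by what the subclass now delegates (the super call above, plus the inherited logger and reset_session used later in this file), its shape is presumably something like the sketch below; everything not visible in the diff is an assumption.

require "logger"

# Hypothetical sketch of the shared parent -- the real BaseExecutor ships
# elsewhere in the gem and may differ in detail.
module ClaudeSwarm
  class BaseExecutor
    attr_reader :session_id, :last_response, :working_directory, :logger

    def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
      instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
      claude_session_id: nil, additional_directories: [], debug: false)
      @working_directory = working_directory
      @additional_directories = additional_directories
      @model = model
      @mcp_config = mcp_config
      @vibe = vibe
      @instance_name = instance_name
      @instance_id = instance_id
      @calling_instance = calling_instance
      @calling_instance_id = calling_instance_id
      @session_id = claude_session_id
      @last_response = nil

      # Assumed: the real class logs to session.log under a session path,
      # much like the setup_logging removed from the subclass below.
      @logger = Logger.new($stdout)
      @logger.level = debug ? Logger::DEBUG : Logger::INFO
    end

    def reset_session
      @session_id = nil
      @last_response = nil
    end

    def has_session?
      !@session_id.nil?
    end
  end
end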
@@ -30,37 +33,14 @@ module ClaudeSwarm
         @conversation_messages = []
         @previous_response_id = nil
 
-        # Setup logging first
-        setup_logging
-
         # Setup OpenAI client
         setup_openai_client(openai_token_env)
 
         # Setup MCP client for tools
         setup_mcp_client
 
-        # Create API handlers
-        @chat_completion_handler = OpenAI::ChatCompletion.new(
-          openai_client: @openai_client,
-          mcp_client: @mcp_client,
-          available_tools: @available_tools,
-          logger: self,
-          instance_name: @instance_name,
-          model: @model,
-          temperature: @temperature,
-          reasoning_effort: @reasoning_effort,
-        )
-
-        @responses_handler = OpenAI::Responses.new(
-          openai_client: @openai_client,
-          mcp_client: @mcp_client,
-          available_tools: @available_tools,
-          logger: self,
-          instance_name: @instance_name,
-          model: @model,
-          temperature: @temperature,
-          reasoning_effort: @reasoning_effort,
-        )
+        # Create API handler based on api_version
+        @api_handler = create_api_handler
       end
 
       def execute(prompt, options = {})
@@ -70,12 +50,8 @@ module ClaudeSwarm
         # Start timing
         start_time = Time.now
 
-        # Execute based on API version
-        result = if @api_version == "responses"
-          @responses_handler.execute(prompt, options)
-        else
-          @chat_completion_handler.execute(prompt, options)
-        end
+        # Execute using the appropriate handler
+        result = @api_handler.execute(prompt, options)
 
         # Calculate duration
         duration_ms = ((Time.now - start_time) * 1000).round
@@ -94,37 +70,14 @@ module ClaudeSwarm
         @last_response = response
         response
       rescue StandardError => e
-        @logger.error("Unexpected error for #{@instance_name}: #{e.class} - #{e.message}")
-        @logger.error("Backtrace: #{e.backtrace.join("\n")}")
+        logger.error { "Unexpected error for #{@instance_name}: #{e.class} - #{e.message}" }
+        logger.error { "Backtrace: #{e.backtrace.join("\n")}" }
         raise
       end
 
       def reset_session
-        @session_id = nil
-        @last_response = nil
-        @chat_completion_handler&.reset_session
-        @responses_handler&.reset_session
-      end
-
-      def has_session?
-        !@session_id.nil?
-      end
-
-      # Delegate logger methods for the API handlers
-      def info(message)
-        @logger.info(message)
-      end
-
-      def error(message)
-        @logger.error(message)
-      end
-
-      def warn(message)
-        @logger.warn(message)
-      end
-
-      def debug(message)
-        @logger.debug(message)
+        super
+        @api_handler&.reset_session
       end
 
       # Session JSON logger for the API handlers
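A recurring change in this release: log calls move from eager string arguments (@logger.error("...")) to the block form (logger.error { "..." }) of Ruby's stdlib Logger. The block is evaluated only when the severity is enabled, so expensive interpolations such as JSON.pretty_generate or joined backtraces cost nothing when the message would be filtered out anyway. A standalone illustration, not code from the gem:

require "json"
require "logger"

big = { "items" => Array.new(10_000) { |i| { "id" => i } } }

logger = Logger.new($stdout)
logger.level = Logger::INFO

# Eager form: the string (and JSON.pretty_generate) is built first, then the
# DEBUG message is discarded because it is below the current level.
logger.debug("payload: #{JSON.pretty_generate(big)}")

# Block form: the block never runs at INFO level, so the serialization is skipped.
logger.debug { "payload: #{JSON.pretty_generate(big)}" }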
@@ -197,7 +150,7 @@ module ClaudeSwarm
                 stdio_config[:read_timeout] = 1800
                 mcp_configs << stdio_config
               when "sse"
-                @logger.warn("SSE MCP servers not yet supported for OpenAI instances: #{name}")
+                logger.warn { "SSE MCP servers not yet supported for OpenAI instances: #{name}" }
                 # TODO: Add SSE support when available in ruby-mcp-client
               end
             end
@@ -215,16 +168,16 @@ module ClaudeSwarm
               # List available tools from all MCP servers
               begin
                 @available_tools = @mcp_client.list_tools
-                @logger.info("Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)")
+                logger.info { "Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)" }
               rescue StandardError => e
-                @logger.error("Failed to load MCP tools: #{e.message}")
+                logger.error { "Failed to load MCP tools: #{e.message}" }
                 @available_tools = []
               end
             end
           end
         end
       rescue StandardError => e
-        @logger.error("Failed to setup MCP client: #{e.message}")
+        logger.error { "Failed to setup MCP client: #{e.message}" }
         @mcp_client = nil
         @available_tools = []
       end
@@ -235,96 +188,29 @@ module ClaudeSwarm
         "$0.00"
       end
 
-      def setup_logging
-        # Use session path from environment (required)
-        @session_path = SessionPath.from_env
-        SessionPath.ensure_directory(@session_path)
-
-        # Create logger with session.log filename
-        log_filename = "session.log"
-        log_path = File.join(@session_path, log_filename)
-        @logger = Logger.new(log_path)
-        @logger.level = Logger::INFO
-
-        # Custom formatter for better readability
-        @logger.formatter = proc do |severity, datetime, _progname, msg|
-          "[#{datetime.strftime("%Y-%m-%d %H:%M:%S.%L")}] [#{severity}] #{msg}\n"
-        end
-
-        return unless @instance_name
-
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info("Started OpenAI executor for instance: #{instance_info}")
-      end
-
-      def log_request(prompt)
-        caller_info = @calling_instance
-        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info("#{caller_info} -> #{instance_info}: \n---\n#{prompt}\n---")
-
-        # Build event hash for JSON logging
-        event = {
-          type: "request",
-          from_instance: @calling_instance,
-          from_instance_id: @calling_instance_id,
-          to_instance: @instance_name,
-          to_instance_id: @instance_id,
-          prompt: prompt,
-          timestamp: Time.now.iso8601,
+      def create_api_handler
+        handler_params = {
+          openai_client: @openai_client,
+          mcp_client: @mcp_client,
+          available_tools: @available_tools,
+          executor: self,
+          instance_name: @instance_name,
+          model: @model,
+          temperature: @temperature,
+          reasoning_effort: @reasoning_effort,
         }
 
-        append_to_session_json(event)
-      end
-
-      def log_response(response)
-        caller_info = @calling_instance
-        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.info(
-          "(#{response["total_cost"]} - #{response["duration_ms"]}ms) #{instance_info} -> #{caller_info}: \n---\n#{response["result"]}\n---",
-        )
+        if @api_version == "responses"
+          OpenAI::Responses.new(**handler_params)
+        else
+          OpenAI::ChatCompletion.new(**handler_params)
+        end
       end
 
       def log_streaming_content(content)
         # Log streaming content similar to ClaudeCodeExecutor
-        instance_info = @instance_name
-        instance_info += " (#{@instance_id})" if @instance_id
-        @logger.debug("#{instance_info} streaming: #{content}")
-      end
-
-      def append_to_session_json(event)
-        json_filename = "session.log.json"
-        json_path = File.join(@session_path, json_filename)
-
-        # Use file locking to ensure thread-safe writes
-        File.open(json_path, File::WRONLY | File::APPEND | File::CREAT) do |file|
-          file.flock(File::LOCK_EX)
-
-          # Create entry with metadata
-          entry = {
-            instance: @instance_name,
-            instance_id: @instance_id,
-            calling_instance: @calling_instance,
-            calling_instance_id: @calling_instance_id,
-            timestamp: Time.now.iso8601,
-            event: event,
-          }
-
-          # Write as single line JSON (JSONL format)
-          file.puts(entry.to_json)
-
-          file.flock(File::LOCK_UN)
-        end
-      rescue StandardError => e
-        @logger.error("Failed to append to session JSON: #{e.message}")
-        raise
+        logger.debug { "#{instance_info} streaming: #{content}" }
       end
-
-      class ExecutionError < StandardError; end
     end
   end
 end
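Taken together, the executor now builds exactly one handler up front from api_version instead of instantiating both and branching on every call. A hypothetical usage sketch follows; the keyword names come from the initialize signature above, while the values (and any environment prerequisites, such as the session path now presumably handled by BaseExecutor) are illustrative assumptions.

executor = ClaudeSwarm::OpenAI::Executor.new(
  working_directory: Dir.pwd,
  model: "gpt-4o",                  # illustrative model name
  instance_name: "researcher",
  api_version: "responses",         # any other value selects the ChatCompletion handler
  openai_token_env: "OPENAI_API_KEY",
  debug: true,                      # new in 0.3.7, forwarded to BaseExecutor
)

result = executor.execute("Summarize the open issues")
executor.reset_session              # resets base state, then the single @api_handler

Since the handler is chosen in the constructor, api_version is fixed for the lifetime of the executor; the old per-call branch allowed switching in principle but never used it.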
@@ -5,11 +5,11 @@ module ClaudeSwarm
     class Responses
       MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite
 
-      def initialize(openai_client:, mcp_client:, available_tools:, logger:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
+      def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
         @openai_client = openai_client
         @mcp_client = mcp_client
         @available_tools = available_tools
-        @executor = logger # This is actually the executor, not a logger
+        @executor = executor
         @instance_name = instance_name
         @model = model
         @temperature = temperature
@@ -37,7 +37,7 @@ module ClaudeSwarm
       def process_responses_api(input, conversation_array, previous_response_id, depth = 0)
         # Prevent infinite recursion
         if depth > MAX_TURNS_WITH_TOOLS
-          @executor.error("Maximum recursion depth reached in tool execution")
+          @executor.logger.error { "Maximum recursion depth reached in tool execution" }
           return "Error: Maximum tool call depth exceeded"
         end
 
@@ -72,11 +72,11 @@ module ClaudeSwarm
           parameters[:input] = conversation_array
 
           # Log conversation array to debug duplicates
-          @executor.info("Conversation array size: #{conversation_array.size}")
+          @executor.logger.info { "Conversation array size: #{conversation_array.size}" }
           conversation_ids = conversation_array.map do |item|
             item["call_id"] || item["id"] || "no-id-#{item["type"]}"
           end.compact
-          @executor.info("Conversation item IDs: #{conversation_ids.inspect}")
+          @executor.logger.info { "Conversation item IDs: #{conversation_ids.inspect}" }
         end
 
         # Add previous response ID for conversation continuity
@@ -93,11 +93,11 @@ module ClaudeSwarm
              "parameters" => tool.schema || {},
            }
          end
-          @executor.info("Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}")
+          @executor.logger.info { "Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}" }
         end
 
         # Log the request parameters
-        @executor.info("Responses API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}")
+        @executor.logger.info { "Responses API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -111,16 +111,16 @@ module ClaudeSwarm
         begin
           response = @openai_client.responses.create(parameters: parameters)
         rescue StandardError => e
-          @executor.error("Responses API error: #{e.class} - #{e.message}")
-          @executor.error("Request parameters: #{JSON.pretty_generate(parameters)}")
+          @executor.logger.error { "Responses API error: #{e.class} - #{e.message}" }
+          @executor.logger.error { "Request parameters: #{JSON.pretty_generate(parameters)}" }
 
           # Try to extract and log the response body for better debugging
           if e.respond_to?(:response)
             begin
               error_body = e.response[:body]
-              @executor.error("Error response body: #{error_body}")
+              @executor.logger.error { "Error response body: #{error_body}" }
             rescue StandardError => parse_error
-              @executor.error("Could not parse error response: #{parse_error.message}")
+              @executor.logger.error { "Could not parse error response: #{parse_error.message}" }
             end
           end
 
@@ -140,7 +140,7 @@ module ClaudeSwarm
         end
 
         # Log the full response
-        @executor.info("Responses API Full Response (depth=#{depth}): #{JSON.pretty_generate(response)}")
+        @executor.logger.info { "Responses API Full Response (depth=#{depth}): #{JSON.pretty_generate(response)}" }
 
         # Append to session JSON
         append_to_session_json({
@@ -157,7 +157,7 @@ module ClaudeSwarm
         output = response["output"]
 
         if output.nil?
-          @executor.error("No output in response")
+          @executor.logger.error { "No output in response" }
           return "Error: No output in OpenAI response"
         end
 
@@ -185,7 +185,7 @@ module ClaudeSwarm
             extract_text_response(output)
           end
         else
-          @executor.error("Unexpected output format: #{output.inspect}")
+          @executor.logger.error { "Unexpected output format: #{output.inspect}" }
           "Error: Unexpected response format"
         end
       end
@@ -200,12 +200,12 @@
 
       def build_conversation_with_outputs(function_calls)
         # Log tool calls
-        @executor.info("Responses API - Handling #{function_calls.size} function calls")
+        @executor.logger.info { "Responses API - Handling #{function_calls.size} function calls" }
 
         # Log IDs to check for duplicates
         call_ids = function_calls.map { |fc| fc["call_id"] || fc["id"] }
-        @executor.info("Function call IDs: #{call_ids.inspect}")
-        @executor.warn("WARNING: Duplicate function call IDs detected!") if call_ids.size != call_ids.uniq.size
+        @executor.logger.info { "Function call IDs: #{call_ids.inspect}" }
+        @executor.logger.warn { "WARNING: Duplicate function call IDs detected!" } if call_ids.size != call_ids.uniq.size
 
         # Append to session JSON
         append_to_session_json({
@@ -226,20 +226,20 @@
           call_id = function_call["call_id"]
 
           # Log both IDs to debug
-          @executor.info("Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}")
+          @executor.logger.info { "Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}" }
 
           begin
             # Parse arguments
             tool_args = JSON.parse(tool_args_str)
 
             # Log tool execution
-            @executor.info("Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")
+            @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}" }
 
             # Execute tool via MCP
             result = @mcp_client.call_tool(tool_name, tool_args)
 
             # Log result
-            @executor.info("Responses API - Tool result for #{tool_name}: #{result}")
+            @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
             # Append to session JSON
             append_to_session_json({
@@ -257,8 +257,8 @@
               output: result.to_json, # Must be JSON string
             }
           rescue StandardError => e
-            @executor.error("Responses API - Tool execution failed for #{tool_name}: #{e.message}")
-            @executor.error(e.backtrace.join("\n"))
+            @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
+            @executor.logger.error { e.backtrace.join("\n") }
 
             # Append error to session JSON
             append_to_session_json({
@@ -282,8 +282,8 @@
           end
         end
 
-        @executor.info("Responses API - Built conversation with #{conversation.size} function outputs")
-        @executor.debug("Final conversation structure: #{JSON.pretty_generate(conversation)}")
+        @executor.logger.info { "Responses API - Built conversation with #{conversation.size} function outputs" }
+        @executor.logger.debug { "Final conversation structure: #{JSON.pretty_generate(conversation)}" }
         conversation
       end
 
@@ -302,13 +302,13 @@
             tool_args = JSON.parse(tool_args_str)
 
             # Log tool execution
-            @executor.info("Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")
+            @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}" }
 
             # Execute tool via MCP
             result = @mcp_client.call_tool(tool_name, tool_args)
 
             # Log result
-            @executor.info("Responses API - Tool result for #{tool_name}: #{result}")
+            @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }
 
             # Add function output to conversation
             conversation << {
@@ -317,7 +317,7 @@
               output: result.to_json, # Must be JSON string
             }
           rescue StandardError => e
-            @executor.error("Responses API - Tool execution failed for #{tool_name}: #{e.message}")
+            @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
 
             # Add error output to conversation
             conversation << {
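On the handler side, the only contract change visible in this diff is the keyword rename (logger: to executor:) plus the fact that every log call now goes through @executor.logger. For unit tests, a stub executor exposing a stdlib-style logger is enough to satisfy what this file uses; apart from the keyword names shown in the diff, everything below is a hypothetical sketch.

require "logger"
require "claude_swarm" # assumed require name for the gem

# Hypothetical test double: only the `logger` reader used by this file is modeled.
class StubExecutor
  attr_reader :logger

  def initialize
    @logger = Logger.new(File::NULL) # discard log output in tests
  end
end

openai_stub = Object.new # stands in for an OpenAI client; never called in this snippet
mcp_stub = Object.new    # stands in for an MCP client; never called in this snippet

handler = ClaudeSwarm::OpenAI::Responses.new(
  openai_client: openai_stub,
  mcp_client: mcp_stub,
  available_tools: [],
  executor: StubExecutor.new, # was passed as `logger:` in 0.3.6 despite holding the executor
  instance_name: "researcher",
  model: "gpt-4o",
)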