claude_swarm 0.1.20 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +9 -66
  3. data/.rubocop_todo.yml +11 -0
  4. data/CHANGELOG.md +106 -0
  5. data/CLAUDE.md +61 -0
  6. data/README.md +174 -16
  7. data/Rakefile +1 -1
  8. data/examples/mixed-provider-swarm.yml +23 -0
  9. data/lib/claude_swarm/claude_code_executor.rb +7 -12
  10. data/lib/claude_swarm/claude_mcp_server.rb +26 -12
  11. data/lib/claude_swarm/cli.rb +293 -165
  12. data/lib/claude_swarm/commands/ps.rb +22 -24
  13. data/lib/claude_swarm/commands/show.rb +45 -63
  14. data/lib/claude_swarm/configuration.rb +161 -8
  15. data/lib/claude_swarm/mcp_generator.rb +39 -14
  16. data/lib/claude_swarm/openai/chat_completion.rb +264 -0
  17. data/lib/claude_swarm/openai/executor.rb +301 -0
  18. data/lib/claude_swarm/openai/responses.rb +338 -0
  19. data/lib/claude_swarm/orchestrator.rb +205 -39
  20. data/lib/claude_swarm/process_tracker.rb +7 -7
  21. data/lib/claude_swarm/session_cost_calculator.rb +93 -0
  22. data/lib/claude_swarm/session_path.rb +3 -5
  23. data/lib/claude_swarm/system_utils.rb +1 -3
  24. data/lib/claude_swarm/tools/reset_session_tool.rb +24 -0
  25. data/lib/claude_swarm/tools/session_info_tool.rb +24 -0
  26. data/lib/claude_swarm/tools/task_tool.rb +43 -0
  27. data/lib/claude_swarm/version.rb +1 -1
  28. data/lib/claude_swarm/worktree_manager.rb +39 -22
  29. data/lib/claude_swarm.rb +23 -10
  30. data/single.yml +481 -6
  31. metadata +54 -14
  32. data/claude-swarm.yml +0 -64
  33. data/lib/claude_swarm/reset_session_tool.rb +0 -22
  34. data/lib/claude_swarm/session_info_tool.rb +0 -22
  35. data/lib/claude_swarm/task_tool.rb +0 -39
  36. /data/{example → examples}/claude-swarm.yml +0 -0
  37. /data/{example → examples}/microservices-team.yml +0 -0
  38. /data/{example → examples}/session-restoration-demo.yml +0 -0
  39. /data/{example → examples}/test-generation.yml +0 -0
@@ -0,0 +1,264 @@
1
# frozen_string_literal: true

module ClaudeSwarm
  module OpenAI
    # Drives a conversation through the OpenAI chat-completions API,
    # recursively executing MCP tool calls requested by the model until a
    # plain text answer is produced. Conversation history is kept in memory
    # between calls to #execute and cleared by #reset_session.
    class ChatCompletion
      # Upper bound on recursive tool-call rounds (virtually infinite).
      MAX_TURNS_WITH_TOOLS = 100_000

      # @param openai_client [Object] client responding to #chat(parameters:)
      # @param mcp_client [Object, nil] MCP client used to execute tools
      # @param available_tools [Array, nil] tools advertised to the model
      # @param logger [Object] the owning executor (despite the name); must
      #   respond to #info/#error and, optionally, #log for session events
      # @param instance_name [String, nil] name of this swarm instance
      # @param model [String] OpenAI model identifier
      # @param temperature [Float, nil] sampling temperature (ignored for o-series)
      # @param reasoning_effort [String, nil] o-series-only reasoning effort
      def initialize(openai_client:, mcp_client:, available_tools:, logger:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
        @openai_client = openai_client
        @mcp_client = mcp_client
        @available_tools = available_tools
        @executor = logger # This is actually the executor, not a logger
        @instance_name = instance_name
        @model = model
        @temperature = temperature
        @reasoning_effort = reasoning_effort
        @conversation_messages = []
      end

      # Sends +prompt+ to the model, resolving any tool calls, and returns
      # the final assistant text.
      #
      # @param prompt [String] the user message
      # @param options [Hash] may contain :system_prompt (used only on the
      #   first turn of a session)
      # @return [String] final assistant response, or an error description
      def execute(prompt, options = {})
        # Build messages array for this turn
        messages = build_messages(prompt, options)

        # Process chat with recursive tool handling
        result = process_chat_completion(messages)

        # Persist the (mutated) transcript for the next turn
        @conversation_messages = messages

        result
      end

      # Clears all conversation history, starting a fresh session.
      def reset_session
        @conversation_messages = []
      end

      private

      # Builds the messages array: system prompt on the very first turn,
      # otherwise the accumulated history, plus the new user message.
      def build_messages(prompt, options)
        messages = []

        system_prompt = options[:system_prompt]
        if system_prompt && @conversation_messages.empty?
          messages << { role: "system", content: system_prompt }
        elsif !@conversation_messages.empty?
          # Continue the existing conversation; a system prompt only applies
          # when the conversation is empty.
          messages = @conversation_messages.dup
        end

        # Add user message
        messages << { role: "user", content: prompt }

        messages
      end

      # Performs one chat-completions round trip; when the model requests
      # tool calls, executes them and recurses with the results appended.
      # Mutates +messages+ in place so the caller sees the full transcript.
      def process_chat_completion(messages, depth = 0)
        # Hard stop against runaway tool loops
        if depth > MAX_TURNS_WITH_TOOLS
          @executor.error("Maximum recursion depth reached in tool execution")
          return "Error: Maximum tool call depth exceeded"
        end

        # Build request parameters
        parameters = {
          model: @model,
          messages: messages,
        }

        # Only add temperature for non-o-series models
        # O-series models don't support temperature parameter
        if @temperature && !@model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:temperature] = @temperature
        end

        # Only add reasoning_effort for o-series models
        # reasoning_effort is only supported by o-series models: o1, o1-mini, o1-pro, o3, o3-mini, o3-pro, o4-mini, etc.
        if @reasoning_effort && @model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:reasoning_effort] = @reasoning_effort
        end

        # Add tools if available
        parameters[:tools] = @mcp_client.to_openai_tools if @available_tools&.any? && @mcp_client

        # Log the request parameters
        @executor.info("Chat API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}")

        # Append to session JSON
        append_to_session_json({
          type: "openai_request",
          api: "chat",
          depth: depth,
          parameters: parameters,
        })

        # Make the API call without streaming
        begin
          response = @openai_client.chat(parameters: parameters)
        rescue StandardError => e
          @executor.error("Chat API error: #{e.class} - #{e.message}")
          @executor.error("Request parameters: #{JSON.pretty_generate(parameters)}")

          # Try to extract and log the response body for better debugging
          if e.respond_to?(:response)
            begin
              error_body = e.response[:body]
              @executor.error("Error response body: #{error_body}")
            rescue StandardError => parse_error
              @executor.error("Could not parse error response: #{parse_error.message}")
            end
          end

          # Log error to session JSON
          append_to_session_json({
            type: "openai_error",
            api: "chat",
            depth: depth,
            error: {
              class: e.class.to_s,
              message: e.message,
              response_body: e.respond_to?(:response) ? e.response[:body] : nil,
              backtrace: e.backtrace.first(5),
            },
          })

          return "Error calling OpenAI chat API: #{e.message}"
        end

        # Log the response
        @executor.info("Chat API Response (depth=#{depth}): #{JSON.pretty_generate(response)}")

        # Append to session JSON
        append_to_session_json({
          type: "openai_response",
          api: "chat",
          depth: depth,
          response: response,
        })

        # Extract the message from the response
        message = response.dig("choices", 0, "message")

        if message.nil?
          @executor.error("No message in response: #{response.inspect}")
          return "Error: No response from OpenAI"
        end

        # Guard with #any? so an empty tool_calls array (truthy in Ruby) is
        # treated as a final text response instead of triggering a recursion
        # with an invalid assistant message.
        if message["tool_calls"]&.any?
          # Add the assistant message with tool calls
          messages << {
            role: "assistant",
            content: nil,
            tool_calls: message["tool_calls"],
          }

          # Execute tools and collect results
          execute_and_append_tool_results(message["tool_calls"], messages)

          # Recursively process the next response
          process_chat_completion(messages, depth + 1)
        else
          # Regular text response - this is the final response
          response_text = message["content"] || ""
          messages << { role: "assistant", content: response_text }
          response_text
        end
      end

      # Executes all requested tool calls (in parallel threads) and appends
      # one role:"tool" message per call to +messages+. Failures are reported
      # back to the model as the tool output rather than raised.
      def execute_and_append_tool_results(tool_calls, messages)
        # Log tool calls
        @executor.info("Executing tool calls: #{JSON.pretty_generate(tool_calls)}")

        # Append to session JSON
        append_to_session_json({
          type: "tool_calls",
          api: "chat",
          tool_calls: tool_calls,
        })

        # Execute tool calls in parallel threads
        threads = tool_calls.map do |tool_call|
          Thread.new do
            tool_name = tool_call.dig("function", "name")
            tool_args_str = tool_call.dig("function", "arguments")

            begin
              # Arguments may arrive as a JSON string or an already-parsed hash
              tool_args = tool_args_str.is_a?(String) ? JSON.parse(tool_args_str) : tool_args_str

              # Log tool execution
              @executor.info("Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")

              # Execute tool via MCP
              result = @mcp_client.call_tool(tool_name, tool_args)

              # Log result
              @executor.info("Tool result for #{tool_name}: #{result}")

              # Append to session JSON
              append_to_session_json({
                type: "tool_execution",
                tool_name: tool_name,
                arguments: tool_args,
                result: result.to_s,
              })

              # Return success result
              {
                success: true,
                tool_call_id: tool_call["id"],
                role: "tool",
                name: tool_name,
                content: result.to_s,
              }
            rescue StandardError => e
              @executor.error("Tool execution failed for #{tool_name}: #{e.message}")
              @executor.error(e.backtrace.join("\n"))

              # Append error to session JSON
              append_to_session_json({
                type: "tool_error",
                tool_name: tool_name,
                arguments: tool_args,
                error: {
                  class: e.class.to_s,
                  message: e.message,
                  backtrace: e.backtrace.first(5),
                },
              })

              # Return error result so the model can react to the failure
              {
                success: false,
                tool_call_id: tool_call["id"],
                role: "tool",
                name: tool_name,
                content: "Error: #{e.message}",
              }
            end
          end
        end

        # Thread#value joins each thread and returns its result
        tool_results = threads.map(&:value)

        # Add all tool results to messages
        tool_results.each do |result|
          messages << {
            tool_call_id: result[:tool_call_id],
            role: result[:role],
            name: result[:name],
            content: result[:content],
          }
        end
      end

      # Forwards a structured event to the executor's session JSON log,
      # silently skipping when the executor does not expose #log.
      def append_to_session_json(event)
        @executor.log(event) if @executor.respond_to?(:log)
      end
    end
  end
end
@@ -0,0 +1,301 @@
1
# frozen_string_literal: true

module ClaudeSwarm
  module OpenAI
    # Executor for OpenAI-backed swarm instances. Sets up file logging, the
    # OpenAI API client and an MCP client for tools, then delegates prompts
    # to either the chat-completions or the responses API handler and wraps
    # the result in a ClaudeCodeExecutor-style response hash.
    class Executor
      attr_reader :session_id, :last_response, :working_directory, :logger, :session_path

      # @param working_directory [String] directory the instance operates in
      # @param model [String, nil] OpenAI model identifier
      # @param mcp_config [String, nil] path to an MCP servers JSON config
      # @param claude_session_id [String, nil] resumes an existing session id
      # @param api_version [String] "chat_completion" (default) or "responses"
      # @param openai_token_env [String] env var name holding the API key
      # @param base_url [String, nil] override for the OpenAI endpoint
      # @param reasoning_effort [String, nil] o-series reasoning effort
      # Requires the session path environment consumed by SessionPath.from_env.
      def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
        instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
        claude_session_id: nil, additional_directories: [],
        temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
        base_url: nil, reasoning_effort: nil)
        @working_directory = working_directory
        @additional_directories = additional_directories
        @model = model
        @mcp_config = mcp_config
        @vibe = vibe
        @session_id = claude_session_id
        @last_response = nil
        @instance_name = instance_name
        @instance_id = instance_id
        @calling_instance = calling_instance
        @calling_instance_id = calling_instance_id
        @temperature = temperature
        @api_version = api_version
        @base_url = base_url
        @reasoning_effort = reasoning_effort

        # Conversation state for maintaining context
        @conversation_messages = []
        @previous_response_id = nil

        # Setup logging first (later setup steps log through @logger)
        setup_logging

        # Setup OpenAI client
        setup_openai_client(openai_token_env)

        # Setup MCP client for tools
        setup_mcp_client

        # Create API handlers.
        # NOTE: the handlers' logger: keyword receives this executor, which
        # exposes logger-like methods (#info/#error/#warn/#debug) and #log.
        @chat_completion_handler = OpenAI::ChatCompletion.new(
          openai_client: @openai_client,
          mcp_client: @mcp_client,
          available_tools: @available_tools,
          logger: self,
          instance_name: @instance_name,
          model: @model,
          temperature: @temperature,
          reasoning_effort: @reasoning_effort,
        )

        @responses_handler = OpenAI::Responses.new(
          openai_client: @openai_client,
          mcp_client: @mcp_client,
          available_tools: @available_tools,
          logger: self,
          instance_name: @instance_name,
          model: @model,
          temperature: @temperature,
          reasoning_effort: @reasoning_effort,
        )
      end

      # Executes +prompt+ via the configured API handler.
      #
      # @param prompt [String] the user message
      # @param options [Hash] forwarded to the handler (e.g. :system_prompt)
      # @return [Hash] result hash with "type", "result", "duration_ms",
      #   "total_cost" and "session_id" keys (ClaudeCodeExecutor-compatible)
      # @raise [StandardError] unexpected errors are logged and re-raised
      def execute(prompt, options = {})
        # Log the request
        log_request(prompt)

        # Start timing
        start_time = Time.now

        # Execute based on API version
        result = if @api_version == "responses"
          @responses_handler.execute(prompt, options)
        else
          @chat_completion_handler.execute(prompt, options)
        end

        # Calculate duration
        duration_ms = ((Time.now - start_time) * 1000).round

        # Format response similar to ClaudeCodeExecutor
        response = {
          "type" => "result",
          "result" => result,
          "duration_ms" => duration_ms,
          "total_cost" => calculate_cost(result),
          "session_id" => @session_id,
        }

        log_response(response)

        @last_response = response
        response
      rescue StandardError => e
        @logger.error("Unexpected error for #{@instance_name}: #{e.class} - #{e.message}")
        @logger.error("Backtrace: #{e.backtrace.join("\n")}")
        raise
      end

      # Clears session state on this executor and both API handlers.
      def reset_session
        @session_id = nil
        @last_response = nil
        @chat_completion_handler&.reset_session
        @responses_handler&.reset_session
      end

      # Whether a session id is currently tracked.
      def has_session?
        !@session_id.nil?
      end

      # Delegate logger methods for the API handlers
      def info(message)
        @logger.info(message)
      end

      def error(message)
        @logger.error(message)
      end

      def warn(message)
        @logger.warn(message)
      end

      def debug(message)
        @logger.debug(message)
      end

      # Session JSON logger for the API handlers
      def session_json_logger
        self
      end

      # Appends a structured event to the session JSONL log (handler hook).
      def log(event)
        append_to_session_json(event)
      end

      private

      # Builds the OpenAI client from the given env var.
      # @raise [ExecutionError] when the env var is not set
      def setup_openai_client(token_env)
        config = {
          access_token: ENV.fetch(token_env),
          log_errors: true,
          request_timeout: 1800, # 30 minutes
        }
        config[:uri_base] = @base_url if @base_url

        @openai_client = ::OpenAI::Client.new(config)
      rescue KeyError
        raise ExecutionError, "OpenAI API key not found in environment variable: #{token_env}"
      end

      # Reads the MCP JSON config (if present), connects to each stdio
      # server and caches the list of available tools. All failures degrade
      # to an executor without tools instead of raising.
      def setup_mcp_client
        return unless @mcp_config && File.exist?(@mcp_config)

        # Read MCP config to find MCP servers
        mcp_data = JSON.parse(File.read(@mcp_config))

        # Create MCP client with all MCP servers from the config
        if mcp_data["mcpServers"] && !mcp_data["mcpServers"].empty?
          mcp_configs = []

          mcp_data["mcpServers"].each do |name, server_config|
            case server_config["type"]
            when "stdio"
              # Combine command and args into a single array
              command_array = [server_config["command"]]
              command_array.concat(server_config["args"] || [])

              mcp_configs << MCPClient.stdio_config(
                command: command_array,
                name: name,
              )
            when "sse"
              @logger.warn("SSE MCP servers not yet supported for OpenAI instances: #{name}")
              # TODO: Add SSE support when available in ruby-mcp-client
            end
          end

          if mcp_configs.any?
            @mcp_client = MCPClient.create_client(
              mcp_server_configs: mcp_configs,
              logger: @logger,
            )

            # List available tools from all MCP servers
            begin
              @available_tools = @mcp_client.list_tools
              @logger.info("Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)")
            rescue StandardError => e
              @logger.error("Failed to load MCP tools: #{e.message}")
              @available_tools = []
            end
          end
        end
      rescue StandardError => e
        @logger.error("Failed to setup MCP client: #{e.message}")
        @mcp_client = nil
        @available_tools = []
      end

      # Placeholder cost figure; real token-based accounting is a TODO.
      def calculate_cost(_result)
        # Simplified cost calculation
        # In reality, we'd need to track token usage
        "$0.00"
      end

      # Creates session.log under the session path taken from the
      # environment and installs a human-readable formatter.
      def setup_logging
        # Use session path from environment (required)
        @session_path = SessionPath.from_env
        SessionPath.ensure_directory(@session_path)

        # Create logger with session.log filename
        log_filename = "session.log"
        log_path = File.join(@session_path, log_filename)
        @logger = Logger.new(log_path)
        @logger.level = Logger::INFO

        # Custom formatter for better readability
        @logger.formatter = proc do |severity, datetime, _progname, msg|
          "[#{datetime.strftime("%Y-%m-%d %H:%M:%S.%L")}] [#{severity}] #{msg}\n"
        end

        return unless @instance_name

        instance_info = @instance_name
        instance_info += " (#{@instance_id})" if @instance_id
        @logger.info("Started OpenAI executor for instance: #{instance_info}")
      end

      # Logs an incoming prompt to the text log and the session JSON.
      def log_request(prompt)
        caller_info = @calling_instance
        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
        instance_info = @instance_name
        instance_info += " (#{@instance_id})" if @instance_id
        @logger.info("#{caller_info} -> #{instance_info}: \n---\n#{prompt}\n---")

        # Build event hash for JSON logging
        event = {
          type: "request",
          from_instance: @calling_instance,
          from_instance_id: @calling_instance_id,
          to_instance: @instance_name,
          to_instance_id: @instance_id,
          prompt: prompt,
          timestamp: Time.now.iso8601,
        }

        append_to_session_json(event)
      end

      # Logs the formatted result hash produced by #execute.
      def log_response(response)
        caller_info = @calling_instance
        caller_info += " (#{@calling_instance_id})" if @calling_instance_id
        instance_info = @instance_name
        instance_info += " (#{@instance_id})" if @instance_id
        @logger.info(
          "(#{response["total_cost"]} - #{response["duration_ms"]}ms) #{instance_info} -> #{caller_info}: \n---\n#{response["result"]}\n---",
        )
      end

      # Debug-logs a streaming fragment (not called from within this class).
      def log_streaming_content(content)
        # Log streaming content similar to ClaudeCodeExecutor
        instance_info = @instance_name
        instance_info += " (#{@instance_id})" if @instance_id
        @logger.debug("#{instance_info} streaming: #{content}")
      end

      # Appends one JSONL entry (event wrapped with instance metadata) to
      # session.log.json, holding an exclusive flock for thread-safe writes.
      # Failures are logged and re-raised.
      def append_to_session_json(event)
        json_filename = "session.log.json"
        json_path = File.join(@session_path, json_filename)

        # Use file locking to ensure thread-safe writes
        File.open(json_path, File::WRONLY | File::APPEND | File::CREAT) do |file|
          file.flock(File::LOCK_EX)

          # Create entry with metadata
          entry = {
            instance: @instance_name,
            instance_id: @instance_id,
            calling_instance: @calling_instance,
            calling_instance_id: @calling_instance_id,
            timestamp: Time.now.iso8601,
            event: event,
          }

          # Write as single line JSON (JSONL format)
          file.puts(entry.to_json)

          file.flock(File::LOCK_UN)
        end
      rescue StandardError => e
        @logger.error("Failed to append to session JSON: #{e.message}")
        raise
      end

      # Raised when the executor cannot be configured (e.g. missing API key).
      class ExecutionError < StandardError; end
    end
  end
end