claude_swarm 0.1.19 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +9 -63
- data/.rubocop_todo.yml +11 -0
- data/CHANGELOG.md +110 -0
- data/CLAUDE.md +64 -2
- data/README.md +190 -28
- data/Rakefile +1 -1
- data/examples/mixed-provider-swarm.yml +23 -0
- data/examples/monitoring-demo.yml +4 -4
- data/lib/claude_swarm/claude_code_executor.rb +7 -13
- data/lib/claude_swarm/claude_mcp_server.rb +26 -17
- data/lib/claude_swarm/cli.rb +384 -265
- data/lib/claude_swarm/commands/ps.rb +22 -24
- data/lib/claude_swarm/commands/show.rb +45 -63
- data/lib/claude_swarm/configuration.rb +137 -8
- data/lib/claude_swarm/mcp_generator.rb +39 -15
- data/lib/claude_swarm/openai/chat_completion.rb +264 -0
- data/lib/claude_swarm/openai/executor.rb +301 -0
- data/lib/claude_swarm/openai/responses.rb +338 -0
- data/lib/claude_swarm/orchestrator.rb +221 -45
- data/lib/claude_swarm/process_tracker.rb +7 -7
- data/lib/claude_swarm/session_cost_calculator.rb +93 -0
- data/lib/claude_swarm/session_path.rb +3 -5
- data/lib/claude_swarm/system_utils.rb +16 -0
- data/lib/claude_swarm/templates/generation_prompt.md.erb +230 -0
- data/lib/claude_swarm/tools/reset_session_tool.rb +24 -0
- data/lib/claude_swarm/tools/session_info_tool.rb +24 -0
- data/lib/claude_swarm/tools/task_tool.rb +43 -0
- data/lib/claude_swarm/version.rb +1 -1
- data/lib/claude_swarm/worktree_manager.rb +145 -48
- data/lib/claude_swarm.rb +34 -12
- data/llms.txt +2 -2
- data/single.yml +482 -6
- data/team.yml +344 -0
- metadata +65 -14
- data/claude-swarm.yml +0 -64
- data/lib/claude_swarm/reset_session_tool.rb +0 -22
- data/lib/claude_swarm/session_info_tool.rb +0 -22
- data/lib/claude_swarm/task_tool.rb +0 -39
- /data/{example → examples}/claude-swarm.yml +0 -0
- /data/{example → examples}/microservices-team.yml +0 -0
- /data/{example → examples}/session-restoration-demo.yml +0 -0
- /data/{example → examples}/test-generation.yml +0 -0
# frozen_string_literal: true

module ClaudeSwarm
  module OpenAI
    # Executes prompts against the OpenAI Responses API, recursively resolving
    # function (tool) calls through an MCP client until the model produces a
    # final text answer.
    class Responses
      MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite

      # @param openai_client [Object] client exposing `responses.create(parameters:)`
      # @param mcp_client [Object] client exposing `call_tool(name, args)`
      # @param available_tools [Array, nil] tool objects responding to #name, #description, #schema
      # @param logger [Object] NOTE: despite the keyword name this is the executor
      #   (receives #info/#warn/#error/#debug and optionally #log); the keyword is
      #   kept as-is for backward compatibility with existing callers.
      # @param instance_name [String] name of this swarm instance
      # @param model [String] OpenAI model identifier
      # @param temperature [Float, nil] sampling temperature (ignored for o-series models)
      # @param reasoning_effort [String, nil] reasoning effort (o-series models only)
      def initialize(openai_client:, mcp_client:, available_tools:, logger:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
        @openai_client = openai_client
        @mcp_client = mcp_client
        @available_tools = available_tools
        @executor = logger # This is actually the executor, not a logger
        @instance_name = instance_name
        @model = model
        @temperature = temperature
        @reasoning_effort = reasoning_effort
        @system_prompt = nil
      end

      # Runs a prompt through the Responses API.
      #
      # @param prompt [String] the user prompt
      # @param options [Hash] :system_prompt is stored and prepended on the
      #   first call of a session
      # @return [String] the model's final text response, or an error string
      def execute(prompt, options = {})
        # Store system prompt for first call
        @system_prompt = options[:system_prompt] if options[:system_prompt]

        # Process with recursive tool handling - start with empty conversation
        process_responses_api(prompt, [], nil)
      end

      # Clears the stored system prompt so the next execute starts fresh.
      def reset_session
        @system_prompt = nil
      end

      private

      # Core request loop. Sends +input+ (or the accumulated +conversation_array+
      # of function outputs), executes any returned function calls via MCP, and
      # recurses with the outputs until the model returns plain text.
      #
      # @return [String] final text, or an error string on failure
      def process_responses_api(input, conversation_array, previous_response_id, depth = 0)
        # Prevent infinite recursion
        if depth > MAX_TURNS_WITH_TOOLS
          @executor.error("Maximum recursion depth reached in tool execution")
          return "Error: Maximum tool call depth exceeded"
        end

        parameters = { model: @model }

        # O-series models don't support the temperature parameter.
        # NOTE(review): when @temperature is nil this still sends "temperature": null,
        # matching the original behavior — confirm the API accepts an explicit null.
        unless @model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:temperature] = @temperature
        end

        # reasoning is only supported by o-series models: o1, o1-mini, o1-pro,
        # o3, o3-mini, o3-pro, o4-mini, deep-research variants, etc.
        if @reasoning_effort && @model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:reasoning] = { effort: @reasoning_effort }
        end

        # First call: string input (optionally prefixed with the system prompt).
        # Follow-up calls: array input carrying function_call_output entries.
        if conversation_array.empty?
          parameters[:input] = if depth.zero? && @system_prompt
            "#{@system_prompt}\n\n#{input}"
          else
            input
          end
        else
          parameters[:input] = conversation_array

          # Log conversation array to debug duplicates.
          # Entries built locally use symbol keys; check both forms so the IDs
          # actually show up (string-only lookup always missed them).
          @executor.info("Conversation array size: #{conversation_array.size}")
          conversation_ids = conversation_array.map do |item|
            item[:call_id] || item["call_id"] || item[:id] || item["id"] ||
              "no-id-#{item[:type] || item["type"]}"
          end
          @executor.info("Conversation item IDs: #{conversation_ids.inspect}")
        end

        # Chain onto the previous response for conversation continuity
        parameters[:previous_response_id] = previous_response_id if previous_response_id

        # Advertise MCP tools in the Responses API function format
        if @available_tools&.any?
          parameters[:tools] = @available_tools.map do |tool|
            {
              "type" => "function",
              "name" => tool.name,
              "description" => tool.description,
              "parameters" => tool.schema || {},
            }
          end
          @executor.info("Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}")
        end

        @executor.info("Responses API Request (depth=#{depth}): #{JSON.pretty_generate(parameters)}")
        append_to_session_json({
          type: "openai_request",
          api: "responses",
          depth: depth,
          parameters: parameters,
        })

        # Make the API call without streaming
        begin
          response = @openai_client.responses.create(parameters: parameters)
        rescue StandardError => e
          return handle_api_error(e, parameters)
        end

        @executor.info("Responses API Full Response (depth=#{depth}): #{JSON.pretty_generate(response)}")
        append_to_session_json({
          type: "openai_response",
          api: "responses",
          depth: depth,
          response: response,
        })

        response_id = response["id"]
        output = response["output"]

        if output.nil?
          @executor.error("No output in response")
          return "Error: No output in OpenAI response"
        end

        unless output.is_a?(Array) && !output.empty?
          @executor.error("Unexpected output format: #{output.inspect}")
          return "Error: Unexpected response format"
        end

        function_calls = output.select { |item| item["type"] == "function_call" }
        return extract_text_response(output) if function_calls.empty?

        new_conversation =
          if conversation_array.empty?
            # First depth - build new conversation
            build_conversation_with_outputs(function_calls)
          else
            # Subsequent depth - append only the new outputs; the API already
            # knows about the function calls themselves.
            conversation_array.dup.tap { |conv| append_new_outputs(function_calls, conv) }
          end

        # Recursively process with updated conversation
        process_responses_api(nil, new_conversation, response_id, depth + 1)
      end

      # Logs an API failure (including the response body when available),
      # records it in the session JSON, and returns the error string that
      # process_responses_api surfaces to the caller.
      def handle_api_error(error, parameters)
        @executor.error("Responses API error: #{error.class} - #{error.message}")
        @executor.error("Request parameters: #{JSON.pretty_generate(parameters)}")

        # Try to extract and log the response body for better debugging
        if error.respond_to?(:response)
          begin
            @executor.error("Error response body: #{error.response[:body]}")
          rescue StandardError => parse_error
            @executor.error("Could not parse error response: #{parse_error.message}")
          end
        end

        append_to_session_json({
          type: "openai_error",
          api: "responses",
          error: {
            class: error.class.to_s,
            message: error.message,
            response_body: error.respond_to?(:response) ? error.response[:body] : nil,
            backtrace: error.backtrace.first(5),
          },
        })

        "Error calling OpenAI responses API: #{error.message}"
      end

      # Pulls the first text fragment out of a Responses API output array.
      # Returns "" when no text content is present.
      def extract_text_response(output)
        text_output = output.find { |item| item["content"] }
        return "" unless text_output && text_output["content"].is_a?(Array)

        text_content = text_output["content"].find { |item| item["text"] }
        text_content ? text_content["text"] : ""
      end

      # First tool round: logs the batch of function calls, records them in the
      # session JSON, executes each via MCP, and returns a fresh conversation
      # array containing only the function outputs (the API already knows about
      # the calls from the previous response).
      def build_conversation_with_outputs(function_calls)
        @executor.info("Responses API - Handling #{function_calls.size} function calls")

        # Call IDs must be unique or outputs cannot be matched back to calls
        call_ids = function_calls.map { |fc| fc["call_id"] || fc["id"] }
        @executor.info("Function call IDs: #{call_ids.inspect}")
        @executor.warn("WARNING: Duplicate function call IDs detected!") if call_ids.size != call_ids.uniq.size

        append_to_session_json({
          type: "tool_calls",
          api: "responses",
          tool_calls: function_calls,
        })

        conversation = []
        function_calls.each do |function_call|
          execute_function_call(function_call, conversation, detailed_logging: true)
        end

        @executor.info("Responses API - Built conversation with #{conversation.size} function outputs")
        @executor.debug("Final conversation structure: #{JSON.pretty_generate(conversation)}")
        conversation
      end

      # Subsequent tool rounds: executes each call and appends only its output
      # to the existing +conversation+ (lighter logging than the first round).
      def append_new_outputs(function_calls, conversation)
        function_calls.each do |function_call|
          execute_function_call(function_call, conversation, detailed_logging: false)
        end
      end

      # Executes one function call via MCP and appends its function_call_output
      # entry to +conversation+. On failure the error message is returned to the
      # model as the output instead of raising, so one bad tool call does not
      # abort the whole turn.
      #
      # @param detailed_logging [Boolean] when true (first round) also logs the
      #   id/call_id pairing, session JSON events, and the full backtrace
      def execute_function_call(function_call, conversation, detailed_logging:)
        tool_name = function_call["name"]
        tool_args_str = function_call["arguments"]
        # call_id is the field the API matches against function outputs
        call_id = function_call["call_id"]

        @executor.info("Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}") if detailed_logging

        begin
          tool_args = JSON.parse(tool_args_str)

          @executor.info("Responses API - Executing tool: #{tool_name} with args: #{JSON.pretty_generate(tool_args)}")
          result = @mcp_client.call_tool(tool_name, tool_args)
          @executor.info("Responses API - Tool result for #{tool_name}: #{result}")

          if detailed_logging
            append_to_session_json({
              type: "tool_execution",
              api: "responses",
              tool_name: tool_name,
              arguments: tool_args,
              result: result.to_s,
            })
          end

          conversation << {
            type: "function_call_output",
            call_id: call_id,
            output: result.to_json, # Must be JSON string
          }
        rescue StandardError => e
          @executor.error("Responses API - Tool execution failed for #{tool_name}: #{e.message}")

          if detailed_logging
            @executor.error(e.backtrace.join("\n"))
            append_to_session_json({
              type: "tool_error",
              api: "responses",
              tool_name: tool_name,
              arguments: tool_args_str,
              error: {
                class: e.class.to_s,
                message: e.message,
                backtrace: e.backtrace.first(5),
              },
            })
          end

          conversation << {
            type: "function_call_output",
            call_id: call_id,
            output: { error: e.message }.to_json,
          }
        end
      end

      # Forwards a structured event to the executor's session log, when the
      # executor supports logging.
      def append_to_session_json(event)
        @executor.log(event) if @executor.respond_to?(:log)
      end
    end
  end
end