swarm_memory 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/lib/claude_swarm/base_executor.rb +133 -0
- data/lib/claude_swarm/claude_code_executor.rb +349 -0
- data/lib/claude_swarm/claude_mcp_server.rb +77 -0
- data/lib/claude_swarm/cli.rb +712 -0
- data/lib/claude_swarm/commands/ps.rb +216 -0
- data/lib/claude_swarm/commands/show.rb +139 -0
- data/lib/claude_swarm/configuration.rb +363 -0
- data/lib/claude_swarm/hooks/session_start_hook.rb +42 -0
- data/lib/claude_swarm/json_handler.rb +91 -0
- data/lib/claude_swarm/mcp_generator.rb +248 -0
- data/lib/claude_swarm/openai/chat_completion.rb +264 -0
- data/lib/claude_swarm/openai/executor.rb +254 -0
- data/lib/claude_swarm/openai/responses.rb +338 -0
- data/lib/claude_swarm/orchestrator.rb +879 -0
- data/lib/claude_swarm/process_tracker.rb +78 -0
- data/lib/claude_swarm/session_cost_calculator.rb +209 -0
- data/lib/claude_swarm/session_path.rb +42 -0
- data/lib/claude_swarm/settings_generator.rb +77 -0
- data/lib/claude_swarm/system_utils.rb +46 -0
- data/lib/claude_swarm/templates/generation_prompt.md.erb +230 -0
- data/lib/claude_swarm/tools/reset_session_tool.rb +24 -0
- data/lib/claude_swarm/tools/session_info_tool.rb +24 -0
- data/lib/claude_swarm/tools/task_tool.rb +63 -0
- data/lib/claude_swarm/version.rb +5 -0
- data/lib/claude_swarm/worktree_manager.rb +475 -0
- data/lib/claude_swarm/yaml_loader.rb +22 -0
- data/lib/claude_swarm.rb +69 -0
- data/lib/swarm_cli/cli.rb +201 -0
- data/lib/swarm_cli/command_registry.rb +61 -0
- data/lib/swarm_cli/commands/mcp_serve.rb +130 -0
- data/lib/swarm_cli/commands/mcp_tools.rb +148 -0
- data/lib/swarm_cli/commands/migrate.rb +55 -0
- data/lib/swarm_cli/commands/run.rb +173 -0
- data/lib/swarm_cli/config_loader.rb +97 -0
- data/lib/swarm_cli/formatters/human_formatter.rb +711 -0
- data/lib/swarm_cli/formatters/json_formatter.rb +51 -0
- data/lib/swarm_cli/interactive_repl.rb +918 -0
- data/lib/swarm_cli/mcp_serve_options.rb +44 -0
- data/lib/swarm_cli/mcp_tools_options.rb +59 -0
- data/lib/swarm_cli/migrate_options.rb +54 -0
- data/lib/swarm_cli/migrator.rb +132 -0
- data/lib/swarm_cli/options.rb +151 -0
- data/lib/swarm_cli/ui/components/agent_badge.rb +33 -0
- data/lib/swarm_cli/ui/components/content_block.rb +120 -0
- data/lib/swarm_cli/ui/components/divider.rb +57 -0
- data/lib/swarm_cli/ui/components/panel.rb +62 -0
- data/lib/swarm_cli/ui/components/usage_stats.rb +70 -0
- data/lib/swarm_cli/ui/formatters/cost.rb +49 -0
- data/lib/swarm_cli/ui/formatters/number.rb +58 -0
- data/lib/swarm_cli/ui/formatters/text.rb +77 -0
- data/lib/swarm_cli/ui/formatters/time.rb +73 -0
- data/lib/swarm_cli/ui/icons.rb +59 -0
- data/lib/swarm_cli/ui/renderers/event_renderer.rb +188 -0
- data/lib/swarm_cli/ui/state/agent_color_cache.rb +45 -0
- data/lib/swarm_cli/ui/state/depth_tracker.rb +40 -0
- data/lib/swarm_cli/ui/state/spinner_manager.rb +170 -0
- data/lib/swarm_cli/ui/state/usage_tracker.rb +62 -0
- data/lib/swarm_cli/version.rb +5 -0
- data/lib/swarm_cli.rb +45 -0
- data/lib/swarm_memory/adapters/base.rb +140 -0
- data/lib/swarm_memory/adapters/filesystem_adapter.rb +789 -0
- data/lib/swarm_memory/chat_extension.rb +34 -0
- data/lib/swarm_memory/cli/commands.rb +306 -0
- data/lib/swarm_memory/core/entry.rb +37 -0
- data/lib/swarm_memory/core/frontmatter_parser.rb +108 -0
- data/lib/swarm_memory/core/metadata_extractor.rb +68 -0
- data/lib/swarm_memory/core/path_normalizer.rb +75 -0
- data/lib/swarm_memory/core/semantic_index.rb +244 -0
- data/lib/swarm_memory/core/storage.rb +286 -0
- data/lib/swarm_memory/core/storage_read_tracker.rb +63 -0
- data/lib/swarm_memory/dsl/builder_extension.rb +40 -0
- data/lib/swarm_memory/dsl/memory_config.rb +113 -0
- data/lib/swarm_memory/embeddings/embedder.rb +36 -0
- data/lib/swarm_memory/embeddings/informers_embedder.rb +152 -0
- data/lib/swarm_memory/errors.rb +21 -0
- data/lib/swarm_memory/integration/cli_registration.rb +30 -0
- data/lib/swarm_memory/integration/configuration.rb +43 -0
- data/lib/swarm_memory/integration/registration.rb +31 -0
- data/lib/swarm_memory/integration/sdk_plugin.rb +531 -0
- data/lib/swarm_memory/optimization/analyzer.rb +244 -0
- data/lib/swarm_memory/optimization/defragmenter.rb +863 -0
- data/lib/swarm_memory/prompts/memory.md.erb +109 -0
- data/lib/swarm_memory/prompts/memory_assistant.md.erb +139 -0
- data/lib/swarm_memory/prompts/memory_researcher.md.erb +201 -0
- data/lib/swarm_memory/prompts/memory_retrieval.md.erb +76 -0
- data/lib/swarm_memory/search/semantic_search.rb +112 -0
- data/lib/swarm_memory/search/text_search.rb +40 -0
- data/lib/swarm_memory/search/text_similarity.rb +80 -0
- data/lib/swarm_memory/skills/meta/deep-learning.md +101 -0
- data/lib/swarm_memory/skills/meta/deep-learning.yml +14 -0
- data/lib/swarm_memory/tools/load_skill.rb +313 -0
- data/lib/swarm_memory/tools/memory_defrag.rb +382 -0
- data/lib/swarm_memory/tools/memory_delete.rb +99 -0
- data/lib/swarm_memory/tools/memory_edit.rb +185 -0
- data/lib/swarm_memory/tools/memory_glob.rb +145 -0
- data/lib/swarm_memory/tools/memory_grep.rb +209 -0
- data/lib/swarm_memory/tools/memory_multi_edit.rb +281 -0
- data/lib/swarm_memory/tools/memory_read.rb +123 -0
- data/lib/swarm_memory/tools/memory_write.rb +215 -0
- data/lib/swarm_memory/utils.rb +50 -0
- data/lib/swarm_memory/version.rb +5 -0
- data/lib/swarm_memory.rb +166 -0
- data/lib/swarm_sdk/agent/RETRY_LOGIC.md +127 -0
- data/lib/swarm_sdk/agent/builder.rb +461 -0
- data/lib/swarm_sdk/agent/chat/context_tracker.rb +314 -0
- data/lib/swarm_sdk/agent/chat/hook_integration.rb +372 -0
- data/lib/swarm_sdk/agent/chat/logging_helpers.rb +116 -0
- data/lib/swarm_sdk/agent/chat/system_reminder_injector.rb +152 -0
- data/lib/swarm_sdk/agent/chat.rb +1144 -0
- data/lib/swarm_sdk/agent/context.rb +112 -0
- data/lib/swarm_sdk/agent/context_manager.rb +309 -0
- data/lib/swarm_sdk/agent/definition.rb +556 -0
- data/lib/swarm_sdk/claude_code_agent_adapter.rb +205 -0
- data/lib/swarm_sdk/configuration.rb +296 -0
- data/lib/swarm_sdk/context_compactor/metrics.rb +147 -0
- data/lib/swarm_sdk/context_compactor/token_counter.rb +106 -0
- data/lib/swarm_sdk/context_compactor.rb +340 -0
- data/lib/swarm_sdk/hooks/adapter.rb +359 -0
- data/lib/swarm_sdk/hooks/context.rb +197 -0
- data/lib/swarm_sdk/hooks/definition.rb +80 -0
- data/lib/swarm_sdk/hooks/error.rb +29 -0
- data/lib/swarm_sdk/hooks/executor.rb +146 -0
- data/lib/swarm_sdk/hooks/registry.rb +147 -0
- data/lib/swarm_sdk/hooks/result.rb +150 -0
- data/lib/swarm_sdk/hooks/shell_executor.rb +254 -0
- data/lib/swarm_sdk/hooks/tool_call.rb +35 -0
- data/lib/swarm_sdk/hooks/tool_result.rb +62 -0
- data/lib/swarm_sdk/log_collector.rb +51 -0
- data/lib/swarm_sdk/log_stream.rb +69 -0
- data/lib/swarm_sdk/markdown_parser.rb +75 -0
- data/lib/swarm_sdk/model_aliases.json +5 -0
- data/lib/swarm_sdk/models.json +1 -0
- data/lib/swarm_sdk/models.rb +120 -0
- data/lib/swarm_sdk/node/agent_config.rb +49 -0
- data/lib/swarm_sdk/node/builder.rb +439 -0
- data/lib/swarm_sdk/node/transformer_executor.rb +248 -0
- data/lib/swarm_sdk/node_context.rb +170 -0
- data/lib/swarm_sdk/node_orchestrator.rb +384 -0
- data/lib/swarm_sdk/permissions/config.rb +239 -0
- data/lib/swarm_sdk/permissions/error_formatter.rb +121 -0
- data/lib/swarm_sdk/permissions/path_matcher.rb +35 -0
- data/lib/swarm_sdk/permissions/validator.rb +173 -0
- data/lib/swarm_sdk/permissions_builder.rb +122 -0
- data/lib/swarm_sdk/plugin.rb +147 -0
- data/lib/swarm_sdk/plugin_registry.rb +101 -0
- data/lib/swarm_sdk/prompts/base_system_prompt.md.erb +243 -0
- data/lib/swarm_sdk/providers/openai_with_responses.rb +582 -0
- data/lib/swarm_sdk/result.rb +97 -0
- data/lib/swarm_sdk/swarm/agent_initializer.rb +334 -0
- data/lib/swarm_sdk/swarm/all_agents_builder.rb +140 -0
- data/lib/swarm_sdk/swarm/builder.rb +586 -0
- data/lib/swarm_sdk/swarm/mcp_configurator.rb +151 -0
- data/lib/swarm_sdk/swarm/tool_configurator.rb +416 -0
- data/lib/swarm_sdk/swarm.rb +982 -0
- data/lib/swarm_sdk/tools/bash.rb +274 -0
- data/lib/swarm_sdk/tools/clock.rb +44 -0
- data/lib/swarm_sdk/tools/delegate.rb +164 -0
- data/lib/swarm_sdk/tools/document_converters/base_converter.rb +83 -0
- data/lib/swarm_sdk/tools/document_converters/docx_converter.rb +99 -0
- data/lib/swarm_sdk/tools/document_converters/html_converter.rb +101 -0
- data/lib/swarm_sdk/tools/document_converters/pdf_converter.rb +78 -0
- data/lib/swarm_sdk/tools/document_converters/xlsx_converter.rb +194 -0
- data/lib/swarm_sdk/tools/edit.rb +150 -0
- data/lib/swarm_sdk/tools/glob.rb +158 -0
- data/lib/swarm_sdk/tools/grep.rb +228 -0
- data/lib/swarm_sdk/tools/image_extractors/docx_image_extractor.rb +43 -0
- data/lib/swarm_sdk/tools/image_extractors/pdf_image_extractor.rb +163 -0
- data/lib/swarm_sdk/tools/image_formats/tiff_builder.rb +65 -0
- data/lib/swarm_sdk/tools/multi_edit.rb +232 -0
- data/lib/swarm_sdk/tools/path_resolver.rb +43 -0
- data/lib/swarm_sdk/tools/read.rb +251 -0
- data/lib/swarm_sdk/tools/registry.rb +93 -0
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_list.rb +96 -0
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_read.rb +76 -0
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_write.rb +91 -0
- data/lib/swarm_sdk/tools/stores/read_tracker.rb +61 -0
- data/lib/swarm_sdk/tools/stores/scratchpad_storage.rb +224 -0
- data/lib/swarm_sdk/tools/stores/storage.rb +148 -0
- data/lib/swarm_sdk/tools/stores/todo_manager.rb +65 -0
- data/lib/swarm_sdk/tools/think.rb +95 -0
- data/lib/swarm_sdk/tools/todo_write.rb +216 -0
- data/lib/swarm_sdk/tools/web_fetch.rb +261 -0
- data/lib/swarm_sdk/tools/write.rb +117 -0
- data/lib/swarm_sdk/utils.rb +50 -0
- data/lib/swarm_sdk/version.rb +5 -0
- data/lib/swarm_sdk.rb +167 -0
- metadata +313 -0
data/lib/claude_swarm/openai/executor.rb

@@ -0,0 +1,254 @@

# frozen_string_literal: true

require "openai"
require "faraday/net_http_persistent"
require "faraday/retry"

module ClaudeSwarm
  module OpenAI
    class Executor < BaseExecutor
      # Static configuration for Faraday retry middleware
      FARADAY_RETRY_CONFIG = {
        max: 3, # Maximum number of retries
        interval: 0.5, # Initial delay between retries (in seconds)
        interval_randomness: 0.5, # Randomness factor for retry intervals
        backoff_factor: 2, # Exponential backoff factor
        exceptions: [
          Faraday::TimeoutError,
          Faraday::ConnectionFailed,
          Faraday::ServerError, # Retry on 5xx errors
        ].freeze,
        retry_statuses: [429, 500, 502, 503, 504].freeze, # HTTP status codes to retry
      }.freeze

      # Static configuration for OpenAI client
      OPENAI_CLIENT_CONFIG = {
        log_errors: true,
        request_timeout: 1800, # 30 minutes
      }.freeze

      def initialize(working_directory: Dir.pwd, model: nil, mcp_config: nil, vibe: false,
        instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
        claude_session_id: nil, additional_directories: [], debug: false,
        temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
        base_url: nil, reasoning_effort: nil)
        # Call parent initializer for common attributes
        super(
          working_directory: working_directory,
          model: model,
          mcp_config: mcp_config,
          vibe: vibe,
          instance_name: instance_name,
          instance_id: instance_id,
          calling_instance: calling_instance,
          calling_instance_id: calling_instance_id,
          claude_session_id: claude_session_id,
          additional_directories: additional_directories,
          debug: debug
        )

        # OpenAI-specific attributes
        @temperature = temperature
        @api_version = api_version
        @base_url = base_url
        @reasoning_effort = reasoning_effort

        # Conversation state for maintaining context
        @conversation_messages = []
        @previous_response_id = nil

        # Setup OpenAI client
        setup_openai_client(openai_token_env)

        # Setup MCP client for tools
        setup_mcp_client

        # Create API handler based on api_version
        @api_handler = create_api_handler
      end

      def execute(prompt, options = {})
        # Log the request
        log_request(prompt)

        # Start timing
        start_time = Time.now

        # Execute using the appropriate handler
        result = @api_handler.execute(prompt, options)

        # Calculate duration
        duration_ms = ((Time.now - start_time) * 1000).round

        # Build and return response
        build_response(result, duration_ms)
      rescue StandardError => e
        logger.error { "Unexpected error for #{@instance_name}: #{e.class} - #{e.message}" }
        logger.error { "Backtrace: #{e.backtrace.join("\n")}" }
        raise
      end

      def reset_session
        super
        @api_handler&.reset_session
      end

      # Session JSON logger for the API handlers
      def session_json_logger
        self
      end

      def log(event)
        append_to_session_json(event)
      end

      private

      def setup_openai_client(token_env)
        openai_client_config = build_openai_client_config(token_env)

        @openai_client = ::OpenAI::Client.new(openai_client_config) do |faraday|
          # Use persistent HTTP connections for better performance
          faraday.adapter(:net_http_persistent)

          # Add retry middleware with custom configuration
          faraday.request(:retry, **build_faraday_retry_config)
        end
      rescue KeyError
        raise ExecutionError, "OpenAI API key not found in environment variable: #{token_env}"
      end

      def setup_mcp_client
        return unless @mcp_config && File.exist?(@mcp_config)

        # Read MCP config to find MCP servers
        mcp_data = JsonHandler.parse_file!(@mcp_config)

        # Build MCP configurations from servers
        mcp_configs = build_mcp_configs(mcp_data["mcpServers"])
        return if mcp_configs.empty?

        # Create MCP client with unbundled environment to avoid bundler conflicts
        # This ensures MCP servers run in a clean environment without inheriting
        # Claude Swarm's BUNDLE_* environment variables
        Bundler.with_unbundled_env do
          @mcp_client = MCPClient.create_client(
            mcp_server_configs: mcp_configs,
            logger: @logger,
          )

          # List available tools from all MCP servers
          load_mcp_tools(mcp_configs)
        end
      rescue StandardError => e
        logger.error { "Failed to setup MCP client: #{e.message}" }
        @mcp_client = nil
        @available_tools = []
      end

      def calculate_cost(_result)
        # Simplified cost calculation
        # In reality, we'd need to track token usage
        "$0.00"
      end

      def create_api_handler
        handler_params = {
          openai_client: @openai_client,
          mcp_client: @mcp_client,
          available_tools: @available_tools,
          executor: self,
          instance_name: @instance_name,
          model: @model,
          temperature: @temperature,
          reasoning_effort: @reasoning_effort,
        }

        if @api_version == "responses"
          OpenAI::Responses.new(**handler_params)
        else
          OpenAI::ChatCompletion.new(**handler_params)
        end
      end

      def log_streaming_content(content)
        # Log streaming content similar to ClaudeCodeExecutor
        logger.debug { "#{instance_info} streaming: #{content}" }
      end

      def build_faraday_retry_config
        FARADAY_RETRY_CONFIG.merge(
          retry_block: method(:handle_retry_logging),
        )
      end

      def handle_retry_logging(env:, options:, retry_count:, exception:, will_retry:)
        retry_delay = options.interval * (options.backoff_factor**(retry_count - 1))
        error_info = exception&.message || "HTTP #{env.status}"

        message = if will_retry
          "Request failed (attempt #{retry_count}/#{options.max}): #{error_info}. Retrying in #{retry_delay} seconds..."
        else
          "Request failed after #{retry_count} attempts: #{error_info}. Giving up."
        end

        @logger.warn(message)
      end

      def build_openai_client_config(token_env)
        OPENAI_CLIENT_CONFIG.merge(access_token: ENV.fetch(token_env)).tap do |config|
          config[:uri_base] = @base_url if @base_url
        end
      end

      def build_stdio_config(name, server_config)
        # Combine command and args into a single array
        command_array = [server_config["command"]]
        command_array.concat(server_config["args"] || [])

        MCPClient.stdio_config(
          command: command_array,
          name: name,
        ).tap do |config|
          config[:read_timeout] = 1800
        end
      end

      def build_mcp_configs(mcp_servers)
        return [] if mcp_servers.nil? || mcp_servers.empty?

        mcp_servers.filter_map do |name, server_config|
          case server_config["type"]
          when "stdio"
            build_stdio_config(name, server_config)
          when "sse"
            logger.warn { "SSE MCP servers not yet supported for OpenAI instances: #{name}" }
            # TODO: Add SSE support when available in ruby-mcp-client
            nil
          end
        end
      end

      def load_mcp_tools(mcp_configs)
        @available_tools = @mcp_client.list_tools
        logger.info { "Loaded #{@available_tools.size} tools from #{mcp_configs.size} MCP server(s)" }
      rescue StandardError => e
        logger.error { "Failed to load MCP tools: #{e.message}" }
        @available_tools = []
      end

      def build_response(result, duration_ms)
        {
          "type" => "result",
          "result" => result,
          "duration_ms" => duration_ms,
          "total_cost" => calculate_cost(result),
          "session_id" => @session_id,
        }.tap do |response|
          log_response(response)
          @last_response = response
        end
      end
    end
  end
end
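Retries in the executor are driven entirely by FARADAY_RETRY_CONFIG; handle_retry_logging reports each delay as interval * backoff_factor ** (retry_count - 1). Below is a minimal standalone sketch, using only the constants shown above, of the delays that formula yields (the jitter contributed by interval_randomness is left out):

```ruby
# Illustrative sketch only -- not part of the gem. It reproduces the delay
# formula used in handle_retry_logging with the values from FARADAY_RETRY_CONFIG;
# the random jitter that interval_randomness adds on top is ignored here.
RETRY_CONFIG = { max: 3, interval: 0.5, backoff_factor: 2 }.freeze

(1..RETRY_CONFIG[:max]).each do |retry_count|
  delay = RETRY_CONFIG[:interval] * (RETRY_CONFIG[:backoff_factor]**(retry_count - 1))
  puts "attempt #{retry_count}: retrying in ~#{delay} seconds"
end
# attempt 1: retrying in ~0.5 seconds
# attempt 2: retrying in ~1.0 seconds
# attempt 3: retrying in ~2.0 seconds
```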
data/lib/claude_swarm/openai/responses.rb

@@ -0,0 +1,338 @@

# frozen_string_literal: true

module ClaudeSwarm
  module OpenAI
    class Responses
      MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite

      def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
        @openai_client = openai_client
        @mcp_client = mcp_client
        @available_tools = available_tools
        @executor = executor
        @instance_name = instance_name
        @model = model
        @temperature = temperature
        @reasoning_effort = reasoning_effort
        @system_prompt = nil
      end

      def execute(prompt, options = {})
        # Store system prompt for first call
        @system_prompt = options[:system_prompt] if options[:system_prompt]

        # Start with initial prompt
        initial_input = prompt

        # Process with recursive tool handling - start with empty conversation
        process_responses_api(initial_input, [], nil)
      end

      def reset_session
        @system_prompt = nil
      end

      private

      def process_responses_api(input, conversation_array, previous_response_id, depth = 0)
        # Prevent infinite recursion
        if depth > MAX_TURNS_WITH_TOOLS
          @executor.logger.error { "Maximum recursion depth reached in tool execution" }
          return "Error: Maximum tool call depth exceeded"
        end

        # Build parameters
        parameters = {
          model: @model,
        }

        # Only add temperature for non-o-series models
        # O-series models don't support temperature parameter
        unless @model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:temperature] = @temperature
        end

        # Only add reasoning effort for o-series models
        # reasoning is only supported by o-series models: o1, o1 Preview, o1-mini, o1-pro, o3, o3-mini, o3-pro, o3-deep-research, o4-mini, o4-mini-deep-research, etc.
        if @reasoning_effort && @model.match?(ClaudeSwarm::Configuration::O_SERIES_MODEL_PATTERN)
          parameters[:reasoning] = { effort: @reasoning_effort }
        end

        # On first call, use string input (can include system prompt)
        # On subsequent calls with function results, use array input
        if conversation_array.empty?
          # Initial call - string input
          parameters[:input] = if depth.zero? && @system_prompt
            "#{@system_prompt}\n\n#{input}"
          else
            input
          end
        else
          # Follow-up call with conversation array (function calls + outputs)
          parameters[:input] = conversation_array

          # Log conversation array to debug duplicates
          @executor.logger.info { "Conversation array size: #{conversation_array.size}" }
          conversation_ids = conversation_array.map do |item|
            item["call_id"] || item["id"] || "no-id-#{item["type"]}"
          end.compact
          @executor.logger.info { "Conversation item IDs: #{conversation_ids.inspect}" }
        end

        # Add previous response ID for conversation continuity
        parameters[:previous_response_id] = previous_response_id if previous_response_id

        # Add tools if available
        if @available_tools&.any?
          # Convert tools to responses API format
          parameters[:tools] = @available_tools.map do |tool|
            {
              "type" => "function",
              "name" => tool.name,
              "description" => tool.description,
              "parameters" => tool.schema || {},
            }
          end
          @executor.logger.info { "Available tools for responses API: #{parameters[:tools].map { |t| t["name"] }.join(", ")}" }
        end

        # Log the request parameters
        @executor.logger.info { "Responses API Request (depth=#{depth}): #{JsonHandler.pretty_generate!(parameters)}" }

        # Append to session JSON
        append_to_session_json({
          type: "openai_request",
          api: "responses",
          depth: depth,
          parameters: parameters,
        })

        # Make the API call without streaming
        begin
          response = @openai_client.responses.create(parameters: parameters)
        rescue StandardError => e
          @executor.logger.error { "Responses API error: #{e.class} - #{e.message}" }
          @executor.logger.error { "Request parameters: #{JsonHandler.pretty_generate!(parameters)}" }

          # Try to extract and log the response body for better debugging
          if e.respond_to?(:response)
            begin
              error_body = e.response[:body]
              @executor.logger.error { "Error response body: #{error_body}" }
            rescue StandardError => parse_error
              @executor.logger.error { "Could not parse error response: #{parse_error.message}" }
            end
          end

          # Log error to session JSON
          append_to_session_json({
            type: "openai_error",
            api: "responses",
            error: {
              class: e.class.to_s,
              message: e.message,
              response_body: e.respond_to?(:response) ? e.response[:body] : nil,
              backtrace: e.backtrace.first(5),
            },
          })

          return "Error calling OpenAI responses API: #{e.message}"
        end

        # Log the full response
        @executor.logger.info { "Responses API Full Response (depth=#{depth}): #{JsonHandler.pretty_generate!(response)}" }

        # Append to session JSON
        append_to_session_json({
          type: "openai_response",
          api: "responses",
          depth: depth,
          response: response,
        })

        # Extract response details
        response_id = response["id"]

        # Handle response based on output structure
        output = response["output"]

        if output.nil?
          @executor.logger.error { "No output in response" }
          return "Error: No output in OpenAI response"
        end

        # Check if output is an array (as per documentation)
        if output.is_a?(Array) && !output.empty?
          # Check if there are function calls
          function_calls = output.select { |item| item["type"] == "function_call" }

          if function_calls.any?
            # Check if we already have a conversation going
            if conversation_array.empty?
              # First depth - build new conversation
              new_conversation = build_conversation_with_outputs(function_calls)
            else
              # Subsequent depth - append to existing conversation
              # Don't re-add function calls, just add the new ones and their outputs
              new_conversation = conversation_array.dup
              append_new_outputs(function_calls, new_conversation)
            end

            # Recursively process with updated conversation
            process_responses_api(nil, new_conversation, response_id, depth + 1)
          else
            # Look for text response
            extract_text_response(output)
          end
        else
          @executor.logger.error { "Unexpected output format: #{output.inspect}" }
          "Error: Unexpected response format"
        end
      end

      def extract_text_response(output)
        text_output = output.find { |item| item["content"] }
        return "" unless text_output && text_output["content"].is_a?(Array)

        text_content = text_output["content"].find { |item| item["text"] }
        text_content ? text_content["text"] : ""
      end

      def build_conversation_with_outputs(function_calls)
        # Log tool calls
        @executor.logger.info { "Responses API - Handling #{function_calls.size} function calls" }

        # Log IDs to check for duplicates
        call_ids = function_calls.map { |fc| fc["call_id"] || fc["id"] }
        @executor.logger.info { "Function call IDs: #{call_ids.inspect}" }
        @executor.logger.warn { "WARNING: Duplicate function call IDs detected!" } if call_ids.size != call_ids.uniq.size

        # Append to session JSON
        append_to_session_json({
          type: "tool_calls",
          api: "responses",
          tool_calls: function_calls,
        })

        # Build conversation array with function outputs only
        # The API already knows about the function calls from the previous response
        conversation = []

        # Then execute tools and add outputs
        function_calls.each do |function_call|
          tool_name = function_call["name"]
          tool_args_str = function_call["arguments"]
          # Use the call_id field for matching with function outputs
          call_id = function_call["call_id"]

          # Log both IDs to debug
          @executor.logger.info { "Function call has id=#{function_call["id"]}, call_id=#{function_call["call_id"]}" }

          begin
            # Parse arguments
            tool_args = JsonHandler.parse!(tool_args_str)

            # Log tool execution
            @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JsonHandler.pretty_generate!(tool_args)}" }

            # Execute tool via MCP
            result = @mcp_client.call_tool(tool_name, tool_args)

            # Log result
            @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }

            # Append to session JSON
            append_to_session_json({
              type: "tool_execution",
              api: "responses",
              tool_name: tool_name,
              arguments: tool_args,
              result: result.to_s,
            })

            # Add function output to conversation
            conversation << {
              type: "function_call_output",
              call_id: call_id,
              output: result.to_json, # Must be JSON string
            }
          rescue StandardError => e
            @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }
            @executor.logger.error { e.backtrace.join("\n") }

            # Append error to session JSON
            append_to_session_json({
              type: "tool_error",
              api: "responses",
              tool_name: tool_name,
              arguments: tool_args_str,
              error: {
                class: e.class.to_s,
                message: e.message,
                backtrace: e.backtrace.first(5),
              },
            })

            # Add error output to conversation
            conversation << {
              type: "function_call_output",
              call_id: call_id,
              output: { error: e.message }.to_json,
            }
          end
        end

        @executor.logger.info { "Responses API - Built conversation with #{conversation.size} function outputs" }
        @executor.logger.debug { "Final conversation structure: #{JsonHandler.pretty_generate!(conversation)}" }
        conversation
      end

      def append_new_outputs(function_calls, conversation)
        # Only add the new function outputs
        # Don't add function calls - the API already knows about them

        function_calls.each do |fc|
          # Execute and add output only
          tool_name = fc["name"]
          tool_args_str = fc["arguments"]
          call_id = fc["call_id"]

          begin
            # Parse arguments
            tool_args = JsonHandler.parse!(tool_args_str)

            # Log tool execution
            @executor.logger.info { "Responses API - Executing tool: #{tool_name} with args: #{JsonHandler.pretty_generate!(tool_args)}" }

            # Execute tool via MCP
            result = @mcp_client.call_tool(tool_name, tool_args)

            # Log result
            @executor.logger.info { "Responses API - Tool result for #{tool_name}: #{result}" }

            # Add function output to conversation
            conversation << {
              type: "function_call_output",
              call_id: call_id,
              output: result.to_json, # Must be JSON string
            }
          rescue StandardError => e
            @executor.logger.error { "Responses API - Tool execution failed for #{tool_name}: #{e.message}" }

            # Add error output to conversation
            conversation << {
              type: "function_call_output",
              call_id: call_id,
              output: { error: e.message }.to_json,
            }
          end
        end
      end

      def append_to_session_json(event)
        # Delegate to the executor's log method
        @executor.log(event) if @executor.respond_to?(:log)
      end
    end
  end
end
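On follow-up turns, process_responses_api sends only function_call_output items (keyed by call_id, with the tool result serialized to a JSON string) and relies on previous_response_id for continuity instead of resending the earlier function calls. A hypothetical sketch of that follow-up payload shape, with made-up model name, IDs, and tool output:

```ruby
# Hypothetical sketch only -- names, IDs, and the tool output are invented.
# It shows the shape of the follow-up request parameters that
# process_responses_api builds after executing a tool call.
require "json"

tool_result = { entries: ["memory://notes/example"] } # assumed tool output

conversation = [
  {
    type: "function_call_output",
    call_id: "call_abc123",      # must match the call_id from the previous response
    output: tool_result.to_json, # the output field must be a JSON string
  },
]

follow_up_parameters = {
  model: "gpt-4o",                            # assumed model name
  input: conversation,                        # array input on follow-up turns
  previous_response_id: "resp_previous_turn", # links this turn to the prior response
}

puts JSON.pretty_generate(follow_up_parameters)
```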