swarm_sdk 2.1.2 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/swarm_sdk/agent/builder.rb +33 -0
- data/lib/swarm_sdk/agent/chat/context_tracker.rb +33 -0
- data/lib/swarm_sdk/agent/chat/hook_integration.rb +41 -0
- data/lib/swarm_sdk/agent/chat/system_reminder_injector.rb +11 -27
- data/lib/swarm_sdk/agent/chat.rb +198 -51
- data/lib/swarm_sdk/agent/context.rb +6 -2
- data/lib/swarm_sdk/agent/context_manager.rb +6 -0
- data/lib/swarm_sdk/agent/definition.rb +15 -22
- data/lib/swarm_sdk/agent/llm_instrumentation_middleware.rb +180 -0
- data/lib/swarm_sdk/configuration.rb +420 -103
- data/lib/swarm_sdk/events_to_messages.rb +181 -0
- data/lib/swarm_sdk/log_collector.rb +31 -5
- data/lib/swarm_sdk/log_stream.rb +37 -8
- data/lib/swarm_sdk/model_aliases.json +4 -1
- data/lib/swarm_sdk/node/agent_config.rb +33 -8
- data/lib/swarm_sdk/node/builder.rb +39 -18
- data/lib/swarm_sdk/node_orchestrator.rb +293 -26
- data/lib/swarm_sdk/proc_helpers.rb +53 -0
- data/lib/swarm_sdk/prompts/base_system_prompt.md.erb +0 -126
- data/lib/swarm_sdk/providers/openai_with_responses.rb +22 -15
- data/lib/swarm_sdk/restore_result.rb +65 -0
- data/lib/swarm_sdk/snapshot.rb +156 -0
- data/lib/swarm_sdk/snapshot_from_events.rb +386 -0
- data/lib/swarm_sdk/state_restorer.rb +491 -0
- data/lib/swarm_sdk/state_snapshot.rb +369 -0
- data/lib/swarm_sdk/swarm/agent_initializer.rb +360 -55
- data/lib/swarm_sdk/swarm/all_agents_builder.rb +28 -1
- data/lib/swarm_sdk/swarm/builder.rb +208 -12
- data/lib/swarm_sdk/swarm/swarm_registry_builder.rb +67 -0
- data/lib/swarm_sdk/swarm/tool_configurator.rb +46 -11
- data/lib/swarm_sdk/swarm.rb +367 -90
- data/lib/swarm_sdk/swarm_loader.rb +145 -0
- data/lib/swarm_sdk/swarm_registry.rb +136 -0
- data/lib/swarm_sdk/tools/delegate.rb +92 -7
- data/lib/swarm_sdk/tools/read.rb +17 -5
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_list.rb +23 -2
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_read.rb +23 -2
- data/lib/swarm_sdk/tools/scratchpad/scratchpad_write.rb +21 -4
- data/lib/swarm_sdk/tools/stores/read_tracker.rb +47 -12
- data/lib/swarm_sdk/tools/stores/scratchpad_storage.rb +45 -0
- data/lib/swarm_sdk/tools/stores/storage.rb +4 -4
- data/lib/swarm_sdk/tools/think.rb +4 -1
- data/lib/swarm_sdk/tools/todo_write.rb +20 -8
- data/lib/swarm_sdk/utils.rb +18 -0
- data/lib/swarm_sdk/validation_result.rb +33 -0
- data/lib/swarm_sdk/version.rb +1 -1
- data/lib/swarm_sdk.rb +362 -21
- metadata +17 -5
data/lib/swarm_sdk/agent/definition.rb

@@ -44,13 +44,21 @@ module SwarmSDK
         :agent_permissions,
         :assume_model_exists,
         :hooks,
-        :memory
+        :memory,
+        :shared_across_delegations
 
       attr_accessor :bypass_permissions, :max_concurrent_tools
 
       def initialize(name, config = {})
         @name = name.to_sym
 
+        # Validate name doesn't contain '@' (reserved for delegation instances)
+        if @name.to_s.include?("@")
+          raise ConfigurationError,
+            "Agent names cannot contain '@' character (reserved for delegation instance naming). " \
+              "Agent: #{@name}"
+        end
+
         # BREAKING CHANGE: Hard error for plural form
         if config[:directories]
           raise ConfigurationError,
@@ -96,6 +104,9 @@ module SwarmSDK
         # (memory prompt needs to be appended if memory is enabled)
         @memory = parse_memory_config(config[:memory])
 
+        # Delegation isolation mode (default: false = isolated instances per delegation)
+        @shared_across_delegations = config[:shared_across_delegations] || false
+
         # Build system prompt after directory and memory are set
         @system_prompt = build_full_system_prompt(config[:system_prompt])
 
@@ -111,7 +122,7 @@ module SwarmSDK
         # Inject default write restrictions for security
         @tools = inject_default_write_permissions(@tools)
 
-        @delegates_to = Array(config[:delegates_to] || []).map(&:to_sym)
+        @delegates_to = Array(config[:delegates_to] || []).map(&:to_sym).uniq
         @mcp_servers = Array(config[:mcp_servers] || [])
 
         # Parse hooks configuration
@@ -181,6 +192,7 @@ module SwarmSDK
         assume_model_exists: @assume_model_exists,
         max_concurrent_tools: @max_concurrent_tools,
         hooks: @hooks,
+        shared_across_delegations: @shared_across_delegations,
         # Permissions are core SDK functionality (not plugin-specific)
         default_permissions: @default_permissions,
         permissions: @agent_permissions,
@@ -358,7 +370,7 @@ module SwarmSDK
 
       def render_non_coding_base_prompt
         # Simplified base prompt for non-coding agents
-        # Includes environment info
+        # Includes environment info only
         # Does not steer towards coding tasks
         cwd = @directory || Dir.pwd
         platform = RUBY_PLATFORM
@@ -383,25 +395,6 @@ module SwarmSDK
         Platform: #{platform}
         OS Version: #{os_version}
         </env>
-
-        # Task Management
-
-        You have access to the TodoWrite tool to help you manage and plan tasks. Use this tool to track your progress and give visibility into your work.
-
-        When working on multi-step tasks:
-        1. Create a todo list with all known tasks before starting work
-        2. Mark each task as in_progress when you start it
-        3. Mark each task as completed IMMEDIATELY after finishing it
-        4. Complete ALL pending todos before finishing your response
-
-        # Scratchpad Storage
-
-        You have access to Scratchpad tools for storing and retrieving information:
-        - **ScratchpadWrite**: Store detailed outputs, analysis, or results that are too long for direct responses
-        - **ScratchpadRead**: Retrieve previously stored content
-        - **ScratchpadList**: List available scratchpad entries
-
-        Use the scratchpad to share information that would otherwise clutter your responses.
       PROMPT
     end
 
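The definition.rb hunks above add a `shared_across_delegations` flag, dedupe `delegates_to`, and reject agent names containing `@`. A minimal caller-side sketch of those three changes, assuming the class is `SwarmSDK::Agent::Definition` with readers matching the attr list; this isolated snippet is illustrative and may omit other config keys the constructor requires:

```ruby
require "swarm_sdk"

# Hypothetical sketch: the config keys shown in the diff are real,
# but the class name and reader calls here are assumptions.
reviewer = SwarmSDK::Agent::Definition.new(
  :reviewer,
  system_prompt: "Review incoming changes.",
  delegates_to: [:tester, :tester],   # duplicates now collapse via .uniq
  shared_across_delegations: true,    # 2.2.0: share one instance across delegations (default: false)
)

reviewer.delegates_to              # => [:tester]
reviewer.shared_across_delegations # => true

# '@' in agent names is now rejected (reserved for delegation instance naming):
SwarmSDK::Agent::Definition.new(:"helper@v2")
# => raises ConfigurationError
```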
data/lib/swarm_sdk/agent/llm_instrumentation_middleware.rb (new file)

@@ -0,0 +1,180 @@
+# frozen_string_literal: true
+
+module SwarmSDK
+  module Agent
+    # Faraday middleware for capturing LLM API requests and responses
+    #
+    # This middleware intercepts HTTP calls to LLM providers and emits
+    # structured events via LogStream for logging and monitoring.
+    #
+    # Events emitted:
+    # - llm_api_request: Before sending request to LLM API
+    # - llm_api_response: After receiving response from LLM API
+    #
+    # The middleware is injected at runtime into the provider's Faraday
+    # connection stack (see Agent::Chat#inject_llm_instrumentation).
+    class LLMInstrumentationMiddleware < Faraday::Middleware
+      # Initialize middleware
+      #
+      # @param app [Faraday::Connection] Faraday app
+      # @param on_request [Proc] Callback for request events
+      # @param on_response [Proc] Callback for response events
+      # @param provider_name [String] Provider name for logging
+      def initialize(app, on_request:, on_response:, provider_name:)
+        super(app)
+        @on_request = on_request
+        @on_response = on_response
+        @provider_name = provider_name
+      end
+
+      # Intercept HTTP call
+      #
+      # @param env [Faraday::Env] Request environment
+      # @return [Faraday::Response] HTTP response
+      def call(env)
+        start_time = Time.now
+
+        # Emit request event
+        emit_request_event(env, start_time)
+
+        # Execute request
+        @app.call(env).on_complete do |response_env|
+          end_time = Time.now
+          duration = end_time - start_time
+
+          # Emit response event
+          emit_response_event(response_env, start_time, end_time, duration)
+        end
+      end
+
+      private
+
+      # Emit request event
+      #
+      # @param env [Faraday::Env] Request environment
+      # @param timestamp [Time] Request timestamp
+      # @return [void]
+      def emit_request_event(env, timestamp)
+        request_data = {
+          provider: @provider_name,
+          body: parse_body(env.body),
+          timestamp: timestamp.utc.iso8601,
+        }
+
+        @on_request.call(request_data)
+      rescue StandardError => e
+        # Don't let logging errors break the request
+        RubyLLM.logger.error("LLM instrumentation request error: #{e.message}")
+      end
+
+      # Emit response event
+      #
+      # @param env [Faraday::Env] Response environment
+      # @param start_time [Time] Request start time
+      # @param end_time [Time] Request end time
+      # @param duration [Float] Request duration in seconds
+      # @return [void]
+      def emit_response_event(env, start_time, end_time, duration)
+        response_data = {
+          provider: @provider_name,
+          body: parse_body(env.body),
+          duration_seconds: duration.round(3),
+          timestamp: end_time.utc.iso8601,
+        }
+
+        # Extract usage information from response body if available
+        if env.body.is_a?(String) && !env.body.empty?
+          begin
+            parsed = JSON.parse(env.body)
+            response_data[:usage] = extract_usage(parsed) if parsed.is_a?(Hash)
+            response_data[:model] = parsed["model"] if parsed.is_a?(Hash)
+            response_data[:finish_reason] = extract_finish_reason(parsed) if parsed.is_a?(Hash)
+          rescue JSON::ParserError
+            # Not JSON, skip usage extraction
+          end
+        end
+
+        @on_response.call(response_data)
+      rescue StandardError => e
+        # Don't let logging errors break the response
+        RubyLLM.logger.error("LLM instrumentation response error: #{e.message}")
+      end
+
+      # Sanitize headers by removing sensitive data
+      #
+      # @param headers [Hash] HTTP headers
+      # @return [Hash] Sanitized headers
+      def sanitize_headers(headers)
+        return {} unless headers
+
+        headers.transform_keys(&:to_s).transform_values do |value|
+          # Redact authorization headers
+          if value.to_s.match?(/bearer|token|key/i)
+            "[REDACTED]"
+          else
+            value.to_s
+          end
+        end
+      rescue StandardError
+        {}
+      end
+
+      # Parse request/response body
+      #
+      # @param body [String, Hash, nil] HTTP body
+      # @return [Hash, String, nil] Parsed body
+      def parse_body(body)
+        return if body.nil? || body == ""
+
+        # Already parsed
+        return body if body.is_a?(Hash)
+
+        # Try to parse JSON
+        JSON.parse(body)
+      rescue JSON::ParserError
+        # Return truncated string if not JSON
+        body.to_s[0..1000]
+      rescue StandardError
+        nil
+      end
+
+      # Extract usage statistics from response
+      #
+      # Handles different provider formats (OpenAI, Anthropic, etc.)
+      #
+      # @param parsed [Hash] Parsed response body
+      # @return [Hash, nil] Usage statistics
+      def extract_usage(parsed)
+        usage = parsed["usage"] || parsed.dig("usage")
+        return unless usage
+
+        {
+          input_tokens: usage["input_tokens"] || usage["prompt_tokens"],
+          output_tokens: usage["output_tokens"] || usage["completion_tokens"],
+          total_tokens: usage["total_tokens"],
+        }.compact
+      rescue StandardError
+        nil
+      end
+
+      # Extract finish reason from response
+      #
+      # Handles different provider formats
+      #
+      # @param parsed [Hash] Parsed response body
+      # @return [String, nil] Finish reason
+      def extract_finish_reason(parsed)
+        # Anthropic format
+        return parsed["stop_reason"] if parsed["stop_reason"]
+
+        # OpenAI format
+        choices = parsed["choices"]
+        return unless choices&.is_a?(Array) && !choices.empty?
+
+        choices.first["finish_reason"]
+      rescue StandardError
+        nil
+      end
+    end
+  end
+end