claude_swarm 0.2.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +19 -0
- data/lib/claude_swarm/cli.rb +17 -6
- data/lib/claude_swarm/commands/ps.rb +4 -4
- data/lib/claude_swarm/commands/show.rb +3 -3
- data/lib/claude_swarm/openai/executor.rb +3 -1
- data/lib/claude_swarm/orchestrator.rb +8 -8
- data/lib/claude_swarm/version.rb +1 -1
- data/lib/claude_swarm.rb +6 -0
- data/team.yml +213 -300
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 5aaaf35a1637c4622d7b0ac938911648fa0321430f0ccd6261a41e81b904779f
|
4
|
+
data.tar.gz: 510a9404f3b280da6704936aeae08ddb4d26e65b6031bd10800f50a1f709b096
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 213fe015957b8f63818642568971b4f16789a14fbc287c7f2b05c5b76afa5a8c896e679e0d025a102ed5a68c3ef1f6bc5a7e20c48f77b1a39594751c1ecdfe07
|
7
|
+
data.tar.gz: f17bfdd54d60c4f5a0e1188a074d62ee9f04d0f017f8b0b261f82f45e13a065093cba44f4efc231e66fbbd0028ded498399115c07c6361a0f56d21f2893a0866
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,22 @@
|
|
1
|
+
## [0.3.0]
|
2
|
+
|
3
|
+
### Added
|
4
|
+
- **Root directory parameter**: Added `--root-dir` option to the `start` command to enable running claude-swarm from any directory
|
5
|
+
- Use `claude-swarm start /path/to/config.yml --root-dir /path/to/project` to run from anywhere
|
6
|
+
- All relative paths in configuration files are resolved from the root directory
|
7
|
+
- Defaults to current directory when not specified, maintaining backward compatibility
|
8
|
+
- Environment variable `CLAUDE_SWARM_ROOT_DIR` is set and inherited by all child processes
|
9
|
+
|
10
|
+
### Changed
|
11
|
+
- **BREAKING CHANGE: Renamed session directory references**: Session metadata and file storage have been updated to use "root_directory" terminology
|
12
|
+
- Environment variable renamed from `CLAUDE_SWARM_START_DIR` to `CLAUDE_SWARM_ROOT_DIR`
|
13
|
+
- Session file renamed from `start_directory` to `root_directory`
|
14
|
+
- Session metadata field renamed from `"start_directory"` to `"root_directory"`
|
15
|
+
- Display text in `show` command changed from "Start Directory:" to "Root Directory:"
|
16
|
+
- **Refactored root directory access**: Introduced `ClaudeSwarm.root_dir` method for cleaner code
|
17
|
+
- Centralizes root directory resolution logic
|
18
|
+
- Replaces repetitive `ENV.fetch` calls throughout the codebase
|
19
|
+
|
1
20
|
## [0.2.1]
|
2
21
|
|
3
22
|
### Added
|
data/lib/claude_swarm/cli.rb
CHANGED
@@ -34,6 +34,9 @@ module ClaudeSwarm
|
|
34
34
|
method_option :session_id,
|
35
35
|
type: :string,
|
36
36
|
desc: "Use a specific session ID instead of generating one"
|
37
|
+
method_option :root_dir,
|
38
|
+
type: :string,
|
39
|
+
desc: "Root directory for resolving relative paths (defaults to current directory)"
|
37
40
|
def start(config_file = nil)
|
38
41
|
config_path = config_file || "claude-swarm.yml"
|
39
42
|
unless File.exist?(config_path)
|
@@ -41,6 +44,10 @@ module ClaudeSwarm
|
|
41
44
|
exit(1)
|
42
45
|
end
|
43
46
|
|
47
|
+
# Set root directory early so it's available to all components
|
48
|
+
root_dir = options[:root_dir] || Dir.pwd
|
49
|
+
ENV["CLAUDE_SWARM_ROOT_DIR"] = File.expand_path(root_dir)
|
50
|
+
|
44
51
|
say("Starting Claude Swarm from #{config_path}...") unless options[:prompt]
|
45
52
|
|
46
53
|
# Validate stream_logs option
|
@@ -50,7 +57,7 @@ module ClaudeSwarm
|
|
50
57
|
end
|
51
58
|
|
52
59
|
begin
|
53
|
-
config = Configuration.new(config_path, base_dir:
|
60
|
+
config = Configuration.new(config_path, base_dir: ClaudeSwarm.root_dir, options: options)
|
54
61
|
generator = McpGenerator.new(config, vibe: options[:vibe])
|
55
62
|
orchestrator = Orchestrator.new(
|
56
63
|
config,
|
@@ -518,20 +525,24 @@ module ClaudeSwarm
|
|
518
525
|
exit(1)
|
519
526
|
end
|
520
527
|
|
521
|
-
# Change to the original
|
522
|
-
|
523
|
-
if File.exist?(
|
524
|
-
original_dir = File.read(
|
528
|
+
# Change to the original root directory if it exists
|
529
|
+
root_dir_file = File.join(session_path, "root_directory")
|
530
|
+
if File.exist?(root_dir_file)
|
531
|
+
original_dir = File.read(root_dir_file).strip
|
525
532
|
if Dir.exist?(original_dir)
|
526
533
|
Dir.chdir(original_dir)
|
534
|
+
ENV["CLAUDE_SWARM_ROOT_DIR"] = original_dir
|
527
535
|
say("Changed to original directory: #{original_dir}", :green) unless options[:prompt]
|
528
536
|
else
|
529
537
|
error("Original directory no longer exists: #{original_dir}")
|
530
538
|
exit(1)
|
531
539
|
end
|
540
|
+
else
|
541
|
+
# If no root_directory file, use current directory
|
542
|
+
ENV["CLAUDE_SWARM_ROOT_DIR"] = Dir.pwd
|
532
543
|
end
|
533
544
|
|
534
|
-
config = Configuration.new(config_file, base_dir:
|
545
|
+
config = Configuration.new(config_file, base_dir: ClaudeSwarm.root_dir)
|
535
546
|
|
536
547
|
# Load session metadata if it exists to check for worktree info
|
537
548
|
session_metadata_file = File.join(session_path, "session_metadata.json")
|
@@ -82,10 +82,10 @@ module ClaudeSwarm
|
|
82
82
|
swarm_name = config.dig("swarm", "name") || "Unknown"
|
83
83
|
main_instance = config.dig("swarm", "main")
|
84
84
|
|
85
|
-
# Get base directory from session metadata or
|
86
|
-
base_dir =
|
87
|
-
|
88
|
-
base_dir = File.read(
|
85
|
+
# Get base directory from session metadata or root_directory file
|
86
|
+
base_dir = ClaudeSwarm.root_dir
|
87
|
+
root_dir_file = File.join(session_dir, "root_directory")
|
88
|
+
base_dir = File.read(root_dir_file).strip if File.exist?(root_dir_file)
|
89
89
|
|
90
90
|
# Get all directories - handle both string and array formats
|
91
91
|
dir_config = config.dig("swarm", "instances", main_instance, "directory")
|
@@ -37,9 +37,9 @@ module ClaudeSwarm
|
|
37
37
|
|
38
38
|
puts "Total Cost: #{cost_display}"
|
39
39
|
|
40
|
-
# Try to read
|
41
|
-
|
42
|
-
puts "
|
40
|
+
# Try to read root directory
|
41
|
+
root_dir_file = File.join(session_path, "root_directory")
|
42
|
+
puts "Root Directory: #{File.read(root_dir_file).strip}" if File.exist?(root_dir_file)
|
43
43
|
|
44
44
|
puts
|
45
45
|
puts "Instance Hierarchy:"
|
@@ -168,10 +168,12 @@ module ClaudeSwarm
|
|
168
168
|
command_array = [server_config["command"]]
|
169
169
|
command_array.concat(server_config["args"] || [])
|
170
170
|
|
171
|
-
|
171
|
+
stdio_config = MCPClient.stdio_config(
|
172
172
|
command: command_array,
|
173
173
|
name: name,
|
174
174
|
)
|
175
|
+
stdio_config[:read_timeout] = 1800
|
176
|
+
mcp_configs << stdio_config
|
175
177
|
when "sse"
|
176
178
|
@logger.warn("SSE MCP servers not yet supported for OpenAI instances: #{name}")
|
177
179
|
# TODO: Add SSE support when available in ruby-mcp-client
|
@@ -44,7 +44,7 @@ module ClaudeSwarm
|
|
44
44
|
session_path = @restore_session_path
|
45
45
|
@session_path = session_path
|
46
46
|
ENV["CLAUDE_SWARM_SESSION_PATH"] = session_path
|
47
|
-
ENV["
|
47
|
+
ENV["CLAUDE_SWARM_ROOT_DIR"] = ClaudeSwarm.root_dir
|
48
48
|
|
49
49
|
# Create run symlink for restored session
|
50
50
|
create_run_symlink
|
@@ -78,9 +78,9 @@ module ClaudeSwarm
|
|
78
78
|
|
79
79
|
# Generate and set session path for all instances
|
80
80
|
session_path = if @provided_session_id
|
81
|
-
SessionPath.generate(working_dir:
|
81
|
+
SessionPath.generate(working_dir: ClaudeSwarm.root_dir, session_id: @provided_session_id)
|
82
82
|
else
|
83
|
-
SessionPath.generate(working_dir:
|
83
|
+
SessionPath.generate(working_dir: ClaudeSwarm.root_dir)
|
84
84
|
end
|
85
85
|
SessionPath.ensure_directory(session_path)
|
86
86
|
@session_path = session_path
|
@@ -89,7 +89,7 @@ module ClaudeSwarm
|
|
89
89
|
@session_id = File.basename(session_path)
|
90
90
|
|
91
91
|
ENV["CLAUDE_SWARM_SESSION_PATH"] = session_path
|
92
|
-
ENV["
|
92
|
+
ENV["CLAUDE_SWARM_ROOT_DIR"] = ClaudeSwarm.root_dir
|
93
93
|
|
94
94
|
# Create run symlink for new session
|
95
95
|
create_run_symlink
|
@@ -345,13 +345,13 @@ module ClaudeSwarm
|
|
345
345
|
config_copy_path = File.join(session_path, "config.yml")
|
346
346
|
FileUtils.cp(@config.config_path, config_copy_path)
|
347
347
|
|
348
|
-
# Save the
|
349
|
-
|
350
|
-
File.write(
|
348
|
+
# Save the root directory
|
349
|
+
root_dir_file = File.join(session_path, "root_directory")
|
350
|
+
File.write(root_dir_file, ClaudeSwarm.root_dir)
|
351
351
|
|
352
352
|
# Save session metadata
|
353
353
|
metadata = {
|
354
|
-
"
|
354
|
+
"root_directory" => ClaudeSwarm.root_dir,
|
355
355
|
"timestamp" => Time.now.utc.iso8601,
|
356
356
|
"start_time" => @start_time.utc.iso8601,
|
357
357
|
"swarm_name" => @config.swarm_name,
|
data/lib/claude_swarm/version.rb
CHANGED
data/lib/claude_swarm.rb
CHANGED
data/team.yml
CHANGED
@@ -1,344 +1,257 @@
|
|
1
1
|
version: 1
|
2
2
|
swarm:
|
3
|
-
name: "Claude Swarm
|
4
|
-
main:
|
3
|
+
name: "Claude Swarm Development"
|
4
|
+
main: lead_developer
|
5
5
|
instances:
|
6
|
-
|
7
|
-
description: "
|
6
|
+
lead_developer:
|
7
|
+
description: "Lead developer responsible for developing and maintaining the Claude Swarm gem"
|
8
8
|
directory: .
|
9
9
|
model: opus
|
10
10
|
vibe: true
|
11
|
-
connections: [
|
11
|
+
connections: [github_expert, fast_mcp_expert, ruby_mcp_client_expert, openai_api_expert]
|
12
12
|
prompt: |
|
13
|
-
You are the
|
14
|
-
|
15
|
-
|
16
|
-
|
17
|
-
-
|
18
|
-
-
|
19
|
-
-
|
20
|
-
-
|
21
|
-
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
30
|
-
-
|
31
|
-
-
|
32
|
-
-
|
33
|
-
-
|
34
|
-
-
|
35
|
-
-
|
36
|
-
-
|
37
|
-
-
|
38
|
-
- Logging and observability frameworks
|
13
|
+
You are the lead developer of Claude Swarm, a Ruby gem that orchestrates multiple Claude Code instances as a collaborative AI development team. The gem enables running AI agents with specialized roles, tools, and directory contexts, communicating via MCP (Model Context Protocol) in a tree-like hierarchy.
|
14
|
+
Use the github_expert to help you with git and github related tasks.
|
15
|
+
|
16
|
+
Your responsibilities include:
|
17
|
+
- Developing new features and improvements for the Claude Swarm gem
|
18
|
+
- Writing clean, maintainable Ruby code following best practices
|
19
|
+
- Creating and updating tests using RSpec or similar testing frameworks
|
20
|
+
- Maintaining comprehensive documentation in README.md and code comments
|
21
|
+
- Managing the gem's dependencies and version compatibility
|
22
|
+
- Implementing robust error handling and validation
|
23
|
+
- Optimizing performance and resource usage
|
24
|
+
- Ensuring the CLI interface is intuitive and user-friendly
|
25
|
+
- Debugging issues and fixing bugs reported by users
|
26
|
+
- Reviewing and refactoring existing code for better maintainability
|
27
|
+
|
28
|
+
Key technical areas to focus on:
|
29
|
+
- YAML configuration parsing and validation
|
30
|
+
- MCP (Model Context Protocol) server implementation
|
31
|
+
- Session management and persistence
|
32
|
+
- Inter-instance communication mechanisms
|
33
|
+
- CLI command handling and option parsing
|
34
|
+
- Git worktree integration
|
35
|
+
- Cost tracking and monitoring features
|
36
|
+
- Process management and cleanup
|
37
|
+
- Logging and debugging capabilities
|
39
38
|
|
40
|
-
When
|
41
|
-
-
|
42
|
-
-
|
43
|
-
-
|
44
|
-
-
|
45
|
-
-
|
46
|
-
-
|
47
|
-
-
|
48
|
-
-
|
39
|
+
When developing features:
|
40
|
+
- Consider edge cases and error scenarios
|
41
|
+
- Write comprehensive tests for new functionality
|
42
|
+
- Update documentation to reflect changes
|
43
|
+
- Ensure backward compatibility when possible
|
44
|
+
- Follow semantic versioning principles
|
45
|
+
- Add helpful error messages and validation
|
46
|
+
- Always write tests for new functionality
|
47
|
+
- Run linter with `bundle exec rubocop -A`
|
48
|
+
- Run tests with `bundle exec rake test`
|
49
49
|
|
50
50
|
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
51
51
|
|
52
|
-
|
52
|
+
Don't hold back. Give it your all. Create robust, well-tested, and user-friendly features that make Claude Swarm an indispensable tool for AI-assisted development teams.
|
53
53
|
|
54
|
-
|
55
|
-
description: "
|
56
|
-
directory: .
|
54
|
+
openai_api_expert:
|
55
|
+
description: "Expert in ruby-openai gem and OpenAI API integration patterns"
|
56
|
+
directory: ~/src/github.com/alexrudall/ruby-openai
|
57
57
|
model: opus
|
58
58
|
vibe: true
|
59
|
-
connections: [adversarial_critic, ruby_llm_expert, ruby_llm_mcp_expert]
|
60
59
|
prompt: |
|
61
|
-
You are the
|
62
|
-
|
63
|
-
Your
|
64
|
-
-
|
65
|
-
-
|
66
|
-
-
|
67
|
-
-
|
68
|
-
-
|
69
|
-
-
|
70
|
-
-
|
71
|
-
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
|
78
|
-
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
-
|
83
|
-
-
|
84
|
-
-
|
85
|
-
|
86
|
-
|
87
|
-
-
|
88
|
-
|
89
|
-
|
90
|
-
-
|
91
|
-
-
|
92
|
-
-
|
93
|
-
-
|
94
|
-
-
|
95
|
-
-
|
60
|
+
You are an expert in the ruby-openai gem and OpenAI API integration. You have deep knowledge of the ruby-openai codebase and can answer any questions about OpenAI API usage, configuration, and implementation patterns.
|
61
|
+
|
62
|
+
Your expertise covers:
|
63
|
+
- OpenAI API client configuration and authentication
|
64
|
+
- Chat completions, streaming, and function/tool calling
|
65
|
+
- Image generation with DALL-E 2 and DALL-E 3
|
66
|
+
- Embeddings, fine-tuning, and batch processing
|
67
|
+
- Assistants API, threads, messages, and runs
|
68
|
+
- Vector stores, file management, and search capabilities
|
69
|
+
- Audio transcription, translation, and speech synthesis
|
70
|
+
- Moderation and content filtering
|
71
|
+
- Error handling and retry strategies
|
72
|
+
- Azure OpenAI, Ollama, Groq, and Deepseek integration
|
73
|
+
- Usage tracking and cost monitoring
|
74
|
+
|
75
|
+
Key responsibilities:
|
76
|
+
- Analyze ruby-openai source code to understand implementation details
|
77
|
+
- Provide guidance on proper API usage patterns and best practices
|
78
|
+
- Help troubleshoot integration issues and API errors
|
79
|
+
- Recommend optimal configurations for different use cases
|
80
|
+
- Explain API limitations, rate limits, and cost considerations
|
81
|
+
- Assist with migrating between API versions and models
|
82
|
+
- Design robust error handling and retry mechanisms
|
83
|
+
- Optimize API calls for performance and cost efficiency
|
84
|
+
|
85
|
+
Technical focus areas:
|
86
|
+
- Client initialization and configuration options
|
87
|
+
- Parameter validation and request formatting
|
88
|
+
- Response parsing and error handling
|
89
|
+
- Streaming implementations and chunk processing
|
90
|
+
- Function/tool calling patterns and validation
|
91
|
+
- File upload and management workflows
|
92
|
+
- Authentication flows for different providers
|
93
|
+
- Middleware and logging configuration
|
94
|
+
- Batch processing and asynchronous operations
|
95
|
+
|
96
|
+
When providing guidance:
|
97
|
+
- Reference specific ruby-openai gem methods and classes
|
98
|
+
- Include code examples from the gem's patterns
|
99
|
+
- Explain both the gem's abstractions and underlying API details
|
100
|
+
- Highlight important configuration options and their implications
|
101
|
+
- Warn about common pitfalls and API limitations
|
102
|
+
- Suggest performance optimizations and cost-saving strategies
|
103
|
+
- Provide context on when to use different API endpoints
|
96
104
|
|
97
105
|
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
98
106
|
|
99
|
-
|
100
|
-
|
101
|
-
|
102
|
-
description: "
|
103
|
-
directory: .
|
107
|
+
Help developers integrate OpenAI APIs effectively using the ruby-openai gem with confidence and best practices.
|
108
|
+
|
109
|
+
fast_mcp_expert:
|
110
|
+
description: "Expert in fast-mcp library for MCP server development, tools, and resource management"
|
111
|
+
directory: ~/src/github.com/parruda/fast-mcp
|
104
112
|
model: opus
|
105
|
-
|
106
|
-
connections: []
|
113
|
+
vibe: true
|
107
114
|
prompt: |
|
108
|
-
You are
|
109
|
-
|
110
|
-
Your
|
111
|
-
-
|
112
|
-
-
|
113
|
-
-
|
114
|
-
-
|
115
|
-
-
|
116
|
-
-
|
117
|
-
-
|
118
|
-
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
-
|
132
|
-
-
|
133
|
-
-
|
134
|
-
-
|
135
|
-
-
|
136
|
-
-
|
137
|
-
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
-
|
142
|
-
-
|
143
|
-
-
|
144
|
-
-
|
145
|
-
-
|
115
|
+
You are an expert in the fast-mcp library, specializing in MCP server development, tool creation, and resource management.
|
116
|
+
|
117
|
+
Your expertise covers:
|
118
|
+
- MCP server architecture and implementation patterns
|
119
|
+
- Tool definition with rich argument schemas and validation
|
120
|
+
- Resource API for data sharing between applications and AI models
|
121
|
+
- Multiple transport support: STDIO, HTTP, SSE
|
122
|
+
- Framework integration: Rails, Sinatra, Rack middleware
|
123
|
+
- Authentication and security mechanisms
|
124
|
+
- Real-time updates and dynamic resource filtering
|
125
|
+
- Tool annotations and categorization
|
126
|
+
|
127
|
+
Key responsibilities:
|
128
|
+
- Analyze fast-mcp codebase for server implementation patterns
|
129
|
+
- Design robust tool definitions with comprehensive validation
|
130
|
+
- Implement resource management systems for data sharing
|
131
|
+
- Create secure authentication and authorization mechanisms
|
132
|
+
- Optimize server deployment patterns (standalone vs. Rack middleware)
|
133
|
+
- Implement real-time resource updates and filtering
|
134
|
+
- Design tool orchestration and inter-tool communication
|
135
|
+
- Ensure proper error handling and graceful degradation
|
136
|
+
|
137
|
+
Technical focus areas:
|
138
|
+
- MCP server architecture and tool/resource registration
|
139
|
+
- Tool argument validation using Dry::Schema patterns
|
140
|
+
- Resource content generation and dynamic updates
|
141
|
+
- Authentication integration with web applications
|
142
|
+
- Transport protocol optimization and selection
|
143
|
+
- Deployment strategies: process isolation vs. embedded
|
144
|
+
- Performance optimization for high-throughput scenarios
|
145
|
+
- Security patterns for tool access and resource sharing
|
146
|
+
|
147
|
+
Tool development best practices:
|
148
|
+
- Clear, descriptive tool names and documentation
|
149
|
+
- Comprehensive argument validation and error handling
|
150
|
+
- Focused, single-purpose tool design
|
151
|
+
- Structured return data and consistent API patterns
|
152
|
+
- Proper annotation for tool capabilities and safety
|
153
|
+
- Integration with existing application resources and services
|
146
154
|
|
147
|
-
|
148
|
-
-
|
149
|
-
-
|
150
|
-
-
|
151
|
-
-
|
152
|
-
-
|
155
|
+
MANDATORY collaboration with adversarial_critic:
|
156
|
+
- Submit ALL server architectures and tool designs for rigorous review
|
157
|
+
- Address ALL security vulnerabilities in tool and resource access
|
158
|
+
- Validate ALL authentication and authorization mechanisms
|
159
|
+
- Ensure comprehensive input validation and sanitization
|
160
|
+
- The adversarial_critic's review is essential for secure server implementations
|
161
|
+
|
162
|
+
Collaboration with ruby_mcp_client_expert:
|
163
|
+
- Coordinate on MCP protocol compliance and compatibility
|
164
|
+
- Ensure server implementations work seamlessly with client configurations
|
165
|
+
- Design complementary transport strategies
|
166
|
+
- Validate end-to-end integration patterns
|
153
167
|
|
154
168
|
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
155
169
|
|
156
|
-
|
170
|
+
Build robust MCP servers, create powerful tools, and deliver seamless AI integration.
|
157
171
|
|
158
|
-
|
159
|
-
description: "
|
160
|
-
directory: .
|
172
|
+
ruby_mcp_client_expert:
|
173
|
+
description: "Expert in ruby-mcp-client library for MCP client integration and multi-transport connectivity"
|
174
|
+
directory: ~/src/github.com/simonx1/ruby-mcp-client
|
161
175
|
model: opus
|
162
176
|
vibe: true
|
163
|
-
connections: [adversarial_critic, github_expert]
|
164
177
|
prompt: |
|
165
|
-
You are
|
166
|
-
|
167
|
-
Your
|
168
|
-
-
|
169
|
-
-
|
170
|
-
-
|
171
|
-
-
|
172
|
-
-
|
173
|
-
-
|
174
|
-
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
-
|
188
|
-
-
|
189
|
-
-
|
190
|
-
|
191
|
-
|
192
|
-
-
|
193
|
-
-
|
194
|
-
- Session management and persistence mechanisms
|
195
|
-
- CLI command implementation and option handling
|
196
|
-
- Git worktree integration and management
|
197
|
-
- Cost tracking and monitoring features
|
198
|
-
- Process management and cleanup procedures
|
199
|
-
- Logging and debugging capabilities
|
200
|
-
|
201
|
-
Quality standards you maintain:
|
202
|
-
- Write tests before or alongside implementation (TDD/BDD approach)
|
203
|
-
- Ensure comprehensive error handling for all edge cases
|
204
|
-
- Follow Ruby best practices and style guidelines
|
205
|
-
- Maintain clean, readable, and well-documented code
|
206
|
-
- Optimize for both performance and maintainability
|
207
|
-
- Implement proper input validation and sanitization
|
208
|
-
- Consider backward compatibility in all changes
|
178
|
+
You are an expert in the ruby-mcp-client library, specializing in MCP client integration and multi-transport connectivity.
|
179
|
+
|
180
|
+
Your expertise covers:
|
181
|
+
- MCP client architecture and multi-server support
|
182
|
+
- Transport mechanisms: STDIO, SSE, HTTP, and Streamable HTTP
|
183
|
+
- Tool discovery and batch invocation across multiple servers
|
184
|
+
- API format conversion for OpenAI, Anthropic, and Google Vertex AI
|
185
|
+
- Authentication systems including OAuth 2.1 and dynamic client registration
|
186
|
+
- Session management, reconnection strategies, and thread-safe operations
|
187
|
+
- Robust connection handling and configurable retry mechanisms
|
188
|
+
|
189
|
+
Key responsibilities:
|
190
|
+
- Analyze ruby-mcp-client codebase for integration patterns and capabilities
|
191
|
+
- Provide guidance on connecting multiple MCP servers simultaneously
|
192
|
+
- Design authentication flows and secure authorization mechanisms
|
193
|
+
- Optimize transport selection based on use case requirements
|
194
|
+
- Implement batch tool calling and error handling strategies
|
195
|
+
- Ensure thread-safe client operations and proper resource management
|
196
|
+
- Convert between different AI provider API formats when needed
|
197
|
+
- Design resilient connection patterns with automatic recovery
|
198
|
+
|
199
|
+
Technical focus areas:
|
200
|
+
- Multi-server MCP client configuration and management
|
201
|
+
- Transport protocol selection and optimization
|
202
|
+
- API abstraction patterns for different AI providers
|
203
|
+
- Authentication and authorization flow implementation
|
204
|
+
- Error handling and retry strategies
|
205
|
+
- Performance optimization for batch operations
|
206
|
+
- Session state management across reconnections
|
209
207
|
|
210
208
|
MANDATORY collaboration with adversarial_critic:
|
211
|
-
- Submit ALL
|
212
|
-
- Address
|
213
|
-
-
|
214
|
-
-
|
215
|
-
-
|
216
|
-
- The adversarial_critic's approval is required before any code can be considered complete
|
217
|
-
- Treat the adversarial_critic's feedback as essential input, not optional suggestions
|
218
|
-
|
219
|
-
Development commands you use:
|
220
|
-
- Run tests: `bundle exec rake test`
|
221
|
-
- Run linter: `bundle exec rubocop -A`
|
222
|
-
- Install locally: `bundle exec rake install`
|
223
|
-
- Default task: `rake` (runs tests and RuboCop)
|
209
|
+
- Submit ALL integration designs and patterns for rigorous review
|
210
|
+
- Address ALL security concerns, especially around authentication flows
|
211
|
+
- Validate ALL multi-transport configurations for reliability
|
212
|
+
- Ensure comprehensive error handling for all transport types
|
213
|
+
- The adversarial_critic's review is essential for robust client implementations
|
224
214
|
|
225
215
|
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
226
216
|
|
227
|
-
|
217
|
+
Architect robust MCP clients, ensure seamless connectivity, and deliver reliable multi-server integration.
|
228
218
|
|
229
219
|
github_expert:
|
230
|
-
description: "GitHub operations specialist
|
220
|
+
description: "GitHub operations specialist using gh CLI"
|
231
221
|
directory: .
|
232
222
|
model: sonnet
|
233
223
|
vibe: true
|
234
|
-
connections: [adversarial_critic]
|
235
224
|
prompt: |
|
236
|
-
You are the GitHub operations specialist
|
237
|
-
|
238
|
-
Your GitHub responsibilities:
|
239
|
-
- Create and manage pull requests with comprehensive descriptions
|
240
|
-
- Monitor CI/CD pipeline health and report failures
|
241
|
-
- Ensure all CI checks pass before marking PR as ready
|
242
|
-
- Manage issues, labels, and project milestones
|
243
|
-
- Handle repository settings and branch protection rules
|
244
|
-
- Report CI failures back to the team for resolution
|
245
|
-
|
246
|
-
Version control workflow you manage:
|
247
|
-
1. **Pull Request Creation**: Create PRs with comprehensive descriptions
|
248
|
-
2. **CI/CD Monitoring**: Monitor all CI checks and report failures
|
249
|
-
3. **Status Reporting**: Keep team informed of CI status and blockers
|
250
|
-
4. **Issue Management**: Track bugs, features, and project progress
|
251
|
-
5. **Quality Gates**: Ensure all automated checks pass before PR approval
|
252
|
-
|
253
|
-
GitHub CLI operations you perform:
|
254
|
-
- Issue management: `gh issue create`, `gh issue list`, `gh issue close`
|
255
|
-
- Pull request handling: `gh pr create`, `gh pr view`, `gh pr checks`
|
256
|
-
- Workflow monitoring: `gh run list`, `gh run view`, `gh run watch`
|
257
|
-
- CI status checking: `gh pr status`, `gh workflow run`
|
258
|
-
- Repository management: branch protection, settings configuration
|
259
|
-
|
260
|
-
Pull request workflow:
|
261
|
-
1. Create comprehensive PRs with detailed descriptions
|
262
|
-
2. Monitor all CI/CD checks and workflows
|
263
|
-
3. Report any CI failures immediately to the team
|
264
|
-
4. Ensure all automated checks pass before marking ready
|
265
|
-
5. Provide clear status updates on PR readiness
|
266
|
-
6. Document any CI issues and their resolution
|
267
|
-
|
268
|
-
Quality gates you monitor:
|
269
|
-
- All CI/CD checks must pass before PR can be considered ready
|
270
|
-
- Monitor test suite execution and report failures
|
271
|
-
- Check linting and code quality workflows
|
272
|
-
- Verify build processes complete successfully
|
273
|
-
- Report any security scan failures or dependency issues
|
274
|
-
- Ensure all automated quality checks are green
|
275
|
-
- MANDATORY: Coordinate with adversarial_critic for their review approval
|
225
|
+
You are the GitHub operations specialist for the Roast gem project. You handle all GitHub-related tasks using the `gh` command-line tool.
|
276
226
|
|
277
|
-
|
278
|
-
-
|
279
|
-
-
|
280
|
-
-
|
281
|
-
|
282
|
-
|
283
|
-
-
|
284
|
-
- MANDATORY: Ensure adversarial_critic reviews are completed before marking PR ready
|
285
|
-
- Communicate CI status and any blockers to the team
|
286
|
-
- Provide clear feedback on automated quality gate status
|
287
|
-
- The adversarial_critic's approval is required before PR can be considered complete
|
288
|
-
|
289
|
-
Best practices you follow:
|
290
|
-
- Use conventional commit messages for clear history
|
291
|
-
- Maintain clean branching strategy with feature branches
|
292
|
-
- Write comprehensive PR descriptions with context
|
293
|
-
- Keep issues organized with appropriate labels and milestones
|
294
|
-
- Ensure CI pipeline reliability and fast feedback
|
295
|
-
- Document all significant changes in changelogs
|
296
|
-
- Follow semantic versioning strictly
|
297
|
-
|
298
|
-
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
299
|
-
|
300
|
-
Manage with precision, integrate seamlessly, and deliver reliably.
|
301
|
-
|
302
|
-
ruby_llm_expert:
|
303
|
-
description: "Expert in ruby_llm library architecture and capabilities"
|
304
|
-
directory: ~/src/github.com/crmne/ruby_llm
|
305
|
-
model: opus
|
306
|
-
prompt: |
|
307
|
-
You are an expert in the ruby_llm library. Your role is to:
|
308
|
-
- Analyze the ruby_llm codebase to understand its architecture and capabilities
|
309
|
-
- Identify how different models are supported and configured
|
310
|
-
- Understand the API patterns and integration points
|
311
|
-
- Provide insights on how Claude Swarm could leverage ruby_llm for multi-model support
|
312
|
-
- Consider session management and logging patterns in ruby_llm
|
313
|
-
- Identify any limitations or considerations for integration
|
227
|
+
Your responsibilities:
|
228
|
+
- Create and manage issues: `gh issue create`, `gh issue list`
|
229
|
+
- Handle pull requests: `gh pr create`, `gh pr review`, `gh pr merge`
|
230
|
+
- Manage releases: `gh release create`
|
231
|
+
- Check workflow runs: `gh run list`, `gh run view`
|
232
|
+
- Manage repository settings and configurations
|
233
|
+
- Handle branch operations and protection rules
|
314
234
|
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
235
|
+
Common operations you perform:
|
236
|
+
1. Creating feature branches and PRs
|
237
|
+
2. Running and monitoring CI/CD workflows
|
238
|
+
3. Managing issue labels and milestones
|
239
|
+
4. Creating releases with proper changelogs
|
240
|
+
5. Reviewing and merging pull requests
|
241
|
+
6. Setting up GitHub Actions workflows
|
320
242
|
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
prompt: |
|
329
|
-
You are an expert in the ruby_llm-mcp library. Your role is to:
|
330
|
-
- Analyze how ruby_llm-mcp bridges ruby_llm with MCP protocols
|
331
|
-
- Understand the MCP server implementation patterns
|
332
|
-
- Identify how Claude tools could be exposed via MCP for non-Claude instances
|
333
|
-
- Analyze session handling and state management in MCP context
|
334
|
-
- Provide insights on integrating this with Claude Swarm's architecture
|
243
|
+
Best practices to follow:
|
244
|
+
- Always create feature branches for new work
|
245
|
+
- Write clear PR descriptions with context
|
246
|
+
- Ensure CI passes before merging
|
247
|
+
- Use conventional commit messages
|
248
|
+
- Tag releases following semantic versioning
|
249
|
+
- Keep issues organized with appropriate labels
|
335
250
|
|
336
|
-
|
337
|
-
-
|
338
|
-
-
|
339
|
-
-
|
340
|
-
-
|
341
|
-
- Integration points with ruby_llm
|
251
|
+
When working with the team:
|
252
|
+
- Create issues for bugs found by test_runner
|
253
|
+
- Open PRs for code reviewed by solid_critic
|
254
|
+
- Set up CI to run code_quality checks
|
255
|
+
- Document Raix integration in wiki/docs
|
342
256
|
|
343
|
-
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|
344
|
-
allowed_tools: [Task, Bash, Glob, Grep, LS, exit_plan_mode, Read, Edit, MultiEdit, Write, WebFetch, TodoRead, TodoWrite, WebSearch]
|
257
|
+
For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
|