claude-dev-kit 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. package/.claude/agents/angelic-workshop-energy-clearing.md +113 -0
  2. package/.claude/agents/angelic-workshop-intake.md +84 -0
  3. package/.claude/agents/angelic-workshop-integration.md +140 -0
  4. package/.claude/agents/angelic-workshop-invocation.md +92 -0
  5. package/.claude/agents/angelic-workshop-lead.md +225 -0
  6. package/.claude/agents/angelic-workshop-transmission.md +108 -0
  7. package/.claude/agents/deep-think-partner.md +41 -0
  8. package/.claude/agents/dev-backend.md +74 -0
  9. package/.claude/agents/dev-e2e.md +101 -0
  10. package/.claude/agents/dev-frontend.md +82 -0
  11. package/.claude/agents/dev-lead.md +144 -0
  12. package/.claude/agents/dev-reviewer.md +122 -0
  13. package/.claude/agents/dev-test.md +88 -0
  14. package/.claude/agents/documentation-manager.md +73 -0
  15. package/.claude/agents/haiku-executor.md +8 -0
  16. package/.claude/agents/pm-groomer.md +98 -0
  17. package/.claude/agents/pm-prp-writer.md +144 -0
  18. package/.claude/agents/pm-sizer.md +84 -0
  19. package/.claude/agents/project-manager.md +91 -0
  20. package/.claude/agents/system-architect.md +98 -0
  21. package/.claude/agents/validation-gates.md +121 -0
  22. package/.claude/agents/workflow-builder.md +416 -0
  23. package/.claude/commands/ai/detect.md +117 -0
  24. package/.claude/commands/ai/route.md +128 -0
  25. package/.claude/commands/ai/switch.md +121 -0
  26. package/.claude/commands/bs/brainstorm_full.md +149 -0
  27. package/.claude/commands/bs/claude.md +37 -0
  28. package/.claude/commands/bs/codex.md +37 -0
  29. package/.claude/commands/bs/gemini.md +37 -0
  30. package/.claude/commands/bs/glm.md +37 -0
  31. package/.claude/commands/bs/grok.md +37 -0
  32. package/.claude/commands/bs/kimi.md +37 -0
  33. package/.claude/commands/bs/minimax.md +37 -0
  34. package/.claude/commands/bs/ollama.md +71 -0
  35. package/.claude/commands/code/build-and-fix.md +80 -0
  36. package/.claude/commands/code/simplify.md +77 -0
  37. package/.claude/commands/dev/backend.md +47 -0
  38. package/.claude/commands/dev/e2e.md +49 -0
  39. package/.claude/commands/dev/frontend.md +45 -0
  40. package/.claude/commands/dev/review.md +48 -0
  41. package/.claude/commands/dev/test.md +54 -0
  42. package/.claude/commands/dev-epic.md +121 -0
  43. package/.claude/commands/dev-issue.md +79 -0
  44. package/.claude/commands/dev.md +134 -0
  45. package/.claude/commands/execute-prp.md +113 -0
  46. package/.claude/commands/fix-github-issue.md +14 -0
  47. package/.claude/commands/generate-prp.md +73 -0
  48. package/.claude/commands/git/status.md +14 -0
  49. package/.claude/commands/haiku.md +13 -0
  50. package/.claude/commands/improve.md +178 -0
  51. package/.claude/commands/init.md +311 -0
  52. package/.claude/commands/pm/groom.md +58 -0
  53. package/.claude/commands/pm/plan-epic.md +74 -0
  54. package/.claude/commands/pm/size.md +46 -0
  55. package/.claude/commands/pm.md +47 -0
  56. package/.claude/commands/primer.md +16 -0
  57. package/.claude/commands/self-improve.md +243 -0
  58. package/.claude/commands/think.md +68 -0
  59. package/.claude/commands/workflow/angelic-workshop.md +89 -0
  60. package/.claude/commands/workflow/build.md +91 -0
  61. package/.claude/hooks/pre-tool-use/block-dangerous-commands.js +196 -0
  62. package/.claude/hooks/skill-activation-prompt/package-lock.json +560 -0
  63. package/.claude/hooks/skill-activation-prompt/package.json +16 -0
  64. package/.claude/hooks/skill-activation-prompt/skill-activation-prompt.ts +135 -0
  65. package/.claude/hooks/skill-activation-prompt/skill-rules.json +50 -0
  66. package/.claude/hooks/stop/context_monitor.py +155 -0
  67. package/.claude/hooks/stop/learning_logger.py +218 -0
  68. package/.claude/skills/ai-router/SKILL.md +119 -0
  69. package/.claude/skills/build-and-fix/SKILL.md +271 -0
  70. package/.claude/skills/build-and-fix/examples/javascript-lint-fix.md +37 -0
  71. package/.claude/skills/build-and-fix/language-configs/javascript.yaml +139 -0
  72. package/.claude/skills/build-and-fix/references/config-schema.md +120 -0
  73. package/.claude/skills/build-and-fix/references/error-patterns.md +273 -0
  74. package/.claude/skills/code-investigator/SKILL.md +299 -0
  75. package/.claude/skills/code-investigator/references/investigation-workflows.md +542 -0
  76. package/.claude/skills/code-investigator/references/language-specific.md +761 -0
  77. package/.claude/skills/code-investigator/references/search-patterns.md +258 -0
  78. package/.claude/skills/code-investigator/references/serena-patterns.md +328 -0
  79. package/.claude/skills/stack-detector/SKILL.md +153 -0
  80. package/.claude/skills/verification-before-completion/SKILL.md +143 -0
  81. package/.claude/templates/claude-md-template.md +56 -0
  82. package/.claude/templates/stacks/express-node.md +134 -0
  83. package/.claude/templates/stacks/fastapi.md +152 -0
  84. package/.claude/templates/stacks/generic.md +101 -0
  85. package/.claude/templates/stacks/nextjs-prisma.md +235 -0
  86. package/README.md +499 -0
  87. package/bin/claude-dev-kit.js +11 -0
  88. package/package.json +31 -0
  89. package/scripts/install.sh +448 -0
@@ -0,0 +1,117 @@
1
+ ---
2
+ description: "Detect which AI CLI tools are installed and update providers.json with availability status."
3
+ argument-hint: []
4
+ ---
5
+
6
+ # /ai:detect — AI Provider Detection
7
+
8
+ Scan the system for installed AI CLI tools, update `.claude/providers.json` with current availability, and report what's ready to use.
9
+
10
+ ## Steps
11
+
12
+ ### 1. Read current providers.json
13
+
14
+ ```bash
15
+ cat .claude/providers.json
16
+ ```
17
+
18
+ Extract the list of providers and their `cli` fields.
19
+
20
+ ### 2. Check each CLI's availability
21
+
22
+ Run these checks **in parallel**:
23
+
24
+ ```bash
25
+ # Claude (always available inside Claude Code — check for standalone CLI too)
26
+ which claude 2>/dev/null && claude --version 2>/dev/null | head -1 || echo "NOT_FOUND"
27
+
28
+ # Gemini
29
+ which gemini 2>/dev/null && gemini --version 2>/dev/null | head -1 || echo "NOT_FOUND"
30
+
31
+ # OpenCode (covers Codex, Grok, Kimi via --model flag)
32
+ which opencode 2>/dev/null && opencode --version 2>/dev/null | head -1 || echo "NOT_FOUND"
33
+
34
+ # Ollama (local LLMs)
35
+ which ollama 2>/dev/null && ollama --version 2>/dev/null | head -1 || echo "NOT_FOUND"
36
+
37
+ # GLM wrapper
38
+ which cczy 2>/dev/null || echo "NOT_FOUND"
39
+
40
+ # MiniMax wrapper
41
+ which ccmy 2>/dev/null || echo "NOT_FOUND"
42
+ ```
43
+
44
+ ### 2b. If Ollama is found, list available models
45
+
46
+ ```bash
47
+ ollama list 2>/dev/null | tail -n +2 | awk '{print $1}'
48
+ ```
49
+
50
+ Use this list to:
51
+ - Confirm Ollama is running (if the command fails with a connection error, set a warning that `ollama serve` may not be running)
52
+ - Set the `model` field in providers.json to the first available coding model found, using this preference order: `qwen2.5-coder`, `codellama`, `deepseek-coder-v2`, `llama3.2`, `llama3.1`, `mistral`, `gemma3` — or the first model in the list if none match
53
+
54
+ ### 3. Build detection results
55
+
56
+ For each provider, mark:
57
+ - `"available": true` — CLI found and responds
58
+ - `"available": false` — CLI not found
59
+ - Note: `opencode` covers codex, grok, kimi, and opencode providers
60
+
61
+ Special rules:
62
+ - `claude`: always `true` (we are running inside Claude Code)
63
+ - `codex`, `grok`, `kimi`: set to `true` if `opencode` is available
64
+ - `glm`: set to `true` if `cczy` is available
65
+ - `minimax`: set to `true` if `ccmy` is available
66
+ - `ollama`: set to `true` if `ollama` binary found AND at least one model is installed; also update the `model` field with the best available model (see step 2b)
67
+
68
+ ### 4. Update providers.json
69
+
70
+ Read the current providers.json and update each provider's `"available"` field with the detection results.
71
+
72
+ Write the updated JSON back to `.claude/providers.json`.
73
+
74
+ ### 5. Report results
75
+
76
+ Print a table:
77
+
78
+ ```
79
+ ## AI Provider Detection Results
80
+
81
+ | Provider | CLI | Available | Strengths |
82
+ |-------------|------------|-----------|-----------------------------------|
83
+ | claude | claude | ✓ | reasoning, agents, coding |
84
+ | gemini | gemini | ✓ / ✗ | large-context, web-search |
85
+ | codex | opencode | ✓ / ✗ | coding, code-completion |
86
+ | grok | opencode | ✓ / ✗ | speed, reasoning |
87
+ | kimi | opencode | ✓ / ✗ | coding, math |
88
+ | ollama | ollama | ✓ / ✗ | privacy, offline, no-cost [LOCAL] |
89
+ | glm | cczy | ✓ / ✗ | multilingual |
90
+ | minimax | ccmy | ✓ / ✗ | multimodal |
91
+ | opencode | opencode | ✓ / ✗ | coding, multi-model |
92
+
93
+ ## Ollama Models Installed
94
+ <list from `ollama list`, or "none" if not installed>
95
+ Active model: <value of providers.ollama.model>
96
+
97
+ ## Current Default: <value of "default" field>
98
+
99
+ ## Routing Configuration
100
+ Large context tasks → <routing.large_context>
101
+ Speed tasks → <routing.speed>
102
+ Coding tasks → <routing.coding>
103
+ Privacy/offline tasks → ollama (if available)
104
+
105
+ ## Tips
106
+ - Install Gemini CLI: https://github.com/google-gemini/gemini-cli
107
+ - Install OpenCode: https://opencode.ai/docs/installation
108
+ - Install Ollama (local LLMs): https://ollama.ai — then run `ollama pull llama3.2`
109
+ - Switch Ollama model: /ai:switch ollama:codellama
110
+ - Run /ai:switch <provider> to change the default provider
111
+ ```
112
+
113
+ ## Notes
114
+
115
+ - This command is safe to re-run at any time
116
+ - providers.json is updated in-place with only the `available` fields changed
117
+ - Other fields (run_cmd, strengths, notes) are preserved exactly
@@ -0,0 +1,128 @@
1
+ ---
2
+ description: "Route a task to the best available AI CLI tool based on task type. Claude orchestrates and returns the result."
3
+ argument-hint: [task description]
4
+ ---
5
+
6
+ # /ai:route — Intelligent AI Task Routing
7
+
8
+ Analyze the task, select the optimal AI provider based on available CLIs and task type, execute the task, and return the result. Claude Code always orchestrates — it calls the chosen AI as a subprocess and synthesizes the response.
9
+
10
+ ## Steps
11
+
12
+ ### 1. Get the task
13
+
14
+ ```
15
+ TASK = $ARGUMENTS
16
+ ```
17
+
18
+ If empty, ask the user: "What task do you want to route to the best AI? Describe the task."
19
+
20
+ ### 2. Read providers configuration
21
+
22
+ ```bash
23
+ cat .claude/providers.json
24
+ ```
25
+
26
+ Identify:
27
+ - `routing` preferences object
28
+ - Which providers have `"available": true`
29
+
30
+ ### 3. Determine task type and select provider
31
+
32
+ Analyze `TASK` against these patterns:
33
+
34
+ **Large context / full codebase:**
35
+ - Keywords: "entire codebase", "all files", "full project scan", "whole repo", "scan everything"
36
+ - → Use `routing.large_context` provider (default: gemini)
37
+
38
+ **Speed-first:**
39
+ - Keywords: "quick", "fast", "briefly", "tldr", "one sentence", "just tell me"
40
+ - → Use `routing.speed` provider (default: grok)
41
+
42
+ **Code generation / implementation:**
43
+ - Keywords: "implement", "write a function", "create a class", "code that", "build a"
44
+ - → Use `routing.coding` provider (default: claude)
45
+
46
+ **Multi-AI synthesis:**
47
+ - Keywords: "brainstorm", "multiple perspectives", "compare approaches", "what do different AIs think"
48
+ - → Suggest `/bs:brainstorm_full` instead
49
+
50
+ **Math / algorithms:**
51
+ - Keywords: "algorithm", "complexity", "O(n)", "optimize", "calculate", "math"
52
+ - → Use kimi if available, otherwise claude
53
+
54
+ **Privacy / local / sensitive:**
55
+ - Keywords: "private", "don't send to cloud", "local only", "confidential", "sensitive data", "air-gapped", "offline", "no cloud"
56
+ - → Use ollama if available (data stays on machine), otherwise warn the user and use claude
57
+
58
+ **Default:** Use the `"default"` provider from providers.json.
59
+
60
+ If selected provider is unavailable, fall back to claude.
61
+
62
+ ### 4. Show routing decision
63
+
64
+ ```
65
+ Routing to: <provider_name> (<reason>)
66
+ Context window: <context_window> tokens
67
+ ```
68
+
69
+ ### 5. Execute with selected provider
70
+
71
+ Read `run_cmd` from providers.json for the selected provider.
72
+ Substitute template variables in `run_cmd`:
73
+ - Replace `{prompt}` with the properly shell-escaped task content
74
+ - Replace `{model}` with the value of `providers.<name>.model` (used by Ollama)
75
+
76
+ **For type: cli:**
77
+ ```bash
78
+ <run_cmd with {prompt} and {model} substituted>
79
+ ```
80
+
81
+ **For type: piped:**
82
+ ```bash
83
+ echo '<escaped_task>' | <run_cmd with {prompt} removed>
84
+ ```
85
+ (The piped CLI reads the prompt from stdin)
86
+
87
+ **For Ollama specifically:** Before running, verify the Ollama service is reachable:
88
+ ```bash
89
+ ollama list 2>/dev/null | head -1 || echo "OLLAMA_DOWN"
90
+ ```
91
+ If `OLLAMA_DOWN`, inform the user: "Ollama is installed but not running. Start it with: `ollama serve`"
92
+
93
+ Run in background if long-running:
94
+ - `run_in_background: true` for tasks that may take > 30 seconds
95
+
96
+ Wait for completion (no timeout — let it run as long as needed).
97
+
98
+ ### 6. Return result
99
+
100
+ ```
101
+ ## Result from <provider_name>
102
+
103
+ <output>
104
+
105
+ ---
106
+ Routed by: ai-router | Provider: <provider_name> | Task type: <detected_type>
107
+ ```
108
+
109
+ ### 7. Optionally compare
110
+
111
+ If the user's task looks like it would benefit from comparison (e.g., architecture decisions, complex tradeoffs), after returning the primary result ask:
112
+
113
+ > "Want me to also get a second opinion from [another available provider]?"
114
+
115
+ ## Error Handling
116
+
117
+ If the selected provider fails (exit non-zero):
118
+ 1. Report the error clearly
119
+ 2. Automatically fall back to claude
120
+ 3. Re-run the task with claude
121
+ 4. Note which provider failed
122
+
123
+ ## Notes
124
+
125
+ - Claude Code is always the orchestrator — it never "becomes" another AI
126
+ - Other AIs are called as subprocess CLIs and their output is returned here
127
+ - Sensitive data (API keys, .env contents) should never be included in the task prompt sent to external providers
128
+ - The routing configuration in providers.json can be customized with `/ai:switch`
@@ -0,0 +1,121 @@
1
+ ---
2
+ description: "Switch the default AI provider used for tasks. Run /ai:detect first to see available providers. Use 'ollama:<model>' to change the active Ollama model."
3
+ argument-hint: [claude | gemini | codex | grok | kimi | ollama | ollama:<model> | glm | minimax | opencode]
4
+ ---
5
+
6
+ # /ai:switch — Switch Default AI Provider
7
+
8
+ Change which AI CLI tool is used as the default for tasks. This updates the `"default"` field in `.claude/providers.json`.
9
+
10
+ ## Steps
11
+
12
+ ### 1. Parse the requested provider
13
+
14
+ ```
15
+ REQUESTED = $ARGUMENTS (trimmed, lowercased)
16
+ ```
17
+
18
+ If `$ARGUMENTS` is empty, skip to the listing step below.
19
+
20
+ **Special case — Ollama model switch:**
21
+ If `REQUESTED` starts with `ollama:` (e.g. `ollama:codellama`):
22
+ - Extract the model name after the colon: `MODEL = everything after "ollama:"`
23
+ - Read providers.json
24
+ - Verify `ollama` provider exists
25
+ - Update `providers.ollama.model` to `MODEL`
26
+ - Write updated providers.json
27
+ - Report: `Ollama model set to: <MODEL>` and stop
28
+
29
+ ### 2. Read providers.json
30
+
31
+ ```bash
32
+ cat .claude/providers.json
33
+ ```
34
+
35
+ ### 3. Validate the requested provider
36
+
37
+ Check that `REQUESTED` is a key in the `providers` object.
38
+
39
+ If not found:
40
+ ```
41
+ Unknown provider: "<REQUESTED>"
42
+
43
+ Available providers:
44
+ claude, gemini, codex, grok, kimi, ollama, glm, minimax, opencode
45
+
46
+ For Ollama, you can also set the active model:
47
+ /ai:switch ollama:codellama
48
+ /ai:switch ollama:qwen2.5-coder
49
+ /ai:switch ollama:deepseek-coder-v2
50
+
51
+ Run /ai:detect to check which are installed and list Ollama models.
52
+ ```
53
+ Stop.
54
+
55
+ ### 4. Check availability
56
+
57
+ If `providers[REQUESTED].available` is `false`:
58
+ ```
59
+ Warning: <REQUESTED> is not currently available (CLI not detected).
60
+ Run /ai:detect to refresh availability, or install the CLI first.
61
+
62
+ Continue anyway? (Note: tasks will fail at runtime if the CLI is missing.)
63
+ ```
64
+
65
+ Ask the user with AskUserQuestion whether to proceed despite unavailability.
66
+
67
+ If user says no, stop.
68
+
69
+ ### 5. Update providers.json
70
+
71
+ Set `"default": "<REQUESTED>"` in providers.json.
72
+
73
+ Write the updated JSON back.
74
+
75
+ ### 6. Report success
76
+
77
+ ```
78
+ ## Default AI switched to: <provider_name>
79
+
80
+ Provider: <name>
81
+ CLI: <cli>
82
+ Strengths: <strengths list>
83
+ Context Window: <context_window> tokens
84
+
85
+ The routing configuration still applies:
86
+ Large context → <routing.large_context>
87
+ Speed → <routing.speed>
88
+ Coding → <routing.coding>
89
+
90
+ Run /ai:route <task> to intelligently route a specific task.
91
+ Run /ai:detect to refresh which providers are available.
92
+ ```
93
+
94
+ ---
95
+
96
+ ## If $ARGUMENTS is empty — show current state
97
+
98
+ ```
99
+ ## Current AI Configuration
100
+
101
+ Default provider: <default>
102
+
103
+ All providers:
104
+ ✓ claude — reasoning, agents, coding [always available]
105
+ ✓/✗ gemini — large-context, web-search [installed / not installed]
106
+ ✓/✗ codex — coding, code-completion [requires opencode]
107
+ ✓/✗ grok — speed, reasoning [requires opencode]
108
+ ✓/✗ kimi — coding, math [requires opencode]
109
+ ✓/✗ ollama — privacy, offline, no-cost [LOCAL] [requires ollama]
110
+ ✓/✗ glm — multilingual [requires cczy]
111
+ ✓/✗ minimax — multimodal [requires ccmy]
112
+ ✓/✗ opencode — coding, multi-model [requires opencode]
113
+
114
+ Usage:
115
+ /ai:switch gemini — use Gemini as default
116
+ /ai:switch ollama — use Ollama (local) as default
117
+ /ai:switch ollama:codellama — switch to codellama model in Ollama
118
+ /ai:switch claude — revert to Claude
119
+ /ai:detect — refresh availability + list Ollama models
120
+ /ai:route <task> — route specific task to best AI
121
+ ```
@@ -0,0 +1,149 @@
1
+ ---
2
+ description: Brainstorm using 7 different LLMs (four-step flow)
3
+ ---
4
+
5
+ # Multi-LLM Brainstorming (Mode)
6
+
7
+ You are orchestrating a multi-LLM brainstorming workflow using background tasks and explicit output retrieval.
8
+
9
+ This workflow has four phases:
10
+
11
+ - Phase 0: Determine the brainstorm prompt
12
+ - Phase 1: Launch 7 LLMs in parallel (background)
13
+ - Phase 2: Collect results
14
+ - Phase 3: Synthesize results
15
+
16
+ ---
17
+
18
+ ## Phase 0 — Get the prompt
19
+
20
+ 1. If `$ARGUMENTS` is **not empty**, then:
21
+ - Set `PROMPT = $ARGUMENTS`
22
+ - Skip asking the user anything and go straight to **Phase 1**.
23
+
24
+ 2. If `$ARGUMENTS` is **empty**, then:
25
+ - Say **only**:
26
+ > Multi-LLM brainstorm mode activated. Send me your brainstorm question in the next message.
27
+ - Then **wait** for the user's next message.
28
+ - When the user replies, set:
29
+ - `PROMPT =` the full content of that next user message.
30
+ - Immediately continue with **Phase 1** using `PROMPT`.
31
+
32
+ ---
33
+
34
+ ## Phase 1 — Launch 7 LLMs in parallel
35
+
36
+ Launch ALL 7 LLMs **in a single message** using background execution.
37
+ Append to `PROMPT`: "Do **not** make any changes in code. Do **not** modify any files."
38
+
39
+ Let each tool run as long as necessary. Don't enforce any timeouts, and don't terminate tasks when a timeout elapses.
40
+ **IMPORTANT** — don't stop any process. Let each LLM work as long as necessary, even if it takes 24 hours.
41
+ **NEVER** check the intermediate output of running background shells. Let them run to completion, however long that takes.
42
+
43
+ `PROMPT_ESCAPED` is properly shell-escaped `PROMPT`
44
+
45
+ **CRITICAL**: Use the actual **Bash** tool, NOT the Skill tool. Launch all 7 in ONE message with these exact tool calls:
46
+
47
+ 1. **Claude**
48
+ Use **Bash** tool:
49
+ - `command`: `echo "PROMPT_ESCAPED" | env -u CLAUDECODE claude -p --agent deep-think-partner`
50
+ - `run_in_background`: `true`
51
+ - `description`: `Claude brainstorm`
52
+
53
+ 2. **Codex**
54
+ Use **Bash** tool:
55
+ - `command`: `opencode run --model openai/gpt-5.3-codex "PROMPT_ESCAPED"`
56
+ - `run_in_background`: `true`
57
+ - `description`: `Codex brainstorm`
58
+
59
+ 3. **Gemini**
60
+ Use **Bash** tool:
61
+ - `command`: `gemini -p "PROMPT_ESCAPED"`
62
+ - `run_in_background`: `true`
63
+ - `description`: `Gemini brainstorm`
64
+
65
+ 4. **Grok**
66
+ Use **Bash** tool:
67
+ - `command`: `opencode run --model openrouter/x-ai/grok-4.1-fast "PROMPT_ESCAPED"`
68
+ - `run_in_background`: `true`
69
+ - `description`: `Grok brainstorm`
70
+
71
+ 5. **Glm**
72
+ Use **Bash** tool:
73
+ - `command`: `echo "PROMPT_ESCAPED" | env -u CLAUDECODE cczy -p --agent deep-think-partner`
74
+ - `run_in_background`: `true`
75
+ - `description`: `Glm brainstorm`
76
+
77
+ 6. **MiniMax**
78
+ Use **Bash** tool:
79
+ - `command`: `echo "PROMPT_ESCAPED" | env -u CLAUDECODE ccmy -p --agent deep-think-partner`
80
+ - `run_in_background`: `true`
81
+ - `description`: `MiniMax brainstorm`
82
+
83
+ 7. **Kimi**
84
+ Use **Bash** tool:
85
+ - `command`: `opencode run --model openrouter/moonshotai/kimi-k2.5 "PROMPT_ESCAPED"`
86
+ - `run_in_background`: `true`
87
+ - `description`: `Kimi brainstorm`
88
+
89
+ After launching, you will receive task IDs for each background process.
90
+
91
+ ---
92
+
93
+ ## Phase 2 — Collect results
94
+
95
+ Use **TaskOutput** tool to retrieve each result. Call TaskOutput for each task_id with `block: true` to wait for completion.
96
+
97
+ **IMPORTANT**: Let each task run as long as needed. Do NOT impose any timeouts.
98
+
99
+ As each result comes in, display it clearly labeled:
100
+
101
+ ```
102
+ ### Claude
103
+ [paste Claude's response here]
104
+
105
+ ### Codex
106
+ [paste Codex's response here]
107
+
108
+ ### Gemini
109
+ [paste Gemini's response here]
110
+
111
+ ### Grok
112
+ [paste Grok's response here]
113
+
114
+ ### Glm
115
+ [paste Glm's response here]
116
+
117
+ ### MiniMax
118
+ [paste MiniMax's response here]
119
+
120
+ ### Kimi
121
+ [paste Kimi's response here]
122
+ ```
123
+
124
+ If any task fails, note which one failed and continue with the remaining ones.
125
+
126
+ ---
127
+
128
+ ## Phase 3 — Synthesize
129
+
130
+ After all available model responses are collected:
131
+
132
+ Provide a concise analysis with these sections:
133
+
134
+ 1. **Consensus** – What do at least 4 models broadly agree on?
135
+ 2. **Unique Insights** – What valuable, distinct perspective did each model add?
136
+ 3. **Contradictions** – Where do they disagree, and what might explain the difference?
137
+ 4. **Recommendation** – Your synthesized best approach / action plan.
138
+
139
+ Keep the synthesis **concise and actionable**.
140
+
141
+ ---
142
+
143
+ ## Behavior Notes
144
+
145
+ - Do **not** re-ask for the prompt once `PROMPT` is set, unless the user clearly changes the question.
146
+ - Always use the **Bash** and **TaskOutput** tools directly, NOT the Skill tool, for invoking LLMs.
147
+ - Use `run_in_background: true` for all 7 launches, then retrieve with `TaskOutput`.
148
+ - Do **not** make any changes in code. Do **not** modify any files.
149
+ - If the user gives a new message after the synthesis clearly changing the topic, treat that as a request to **rerun the whole workflow** with the new `PROMPT`.
@@ -0,0 +1,37 @@
1
+ ---
2
+ description: Run the Claude CLI to execute the current prompt
3
+ ---
4
+
5
+ # Run Claude LLM
6
+
7
+ Execute the prompt using Claude CLI in background mode.
8
+
9
+ ## Instructions
10
+
11
+ 1. If `$ARGUMENTS` is **empty**, ask the user for their prompt and wait for their response.
12
+
13
+ 2. Set `PROMPT` to `$ARGUMENTS` (or the user's response if arguments were empty).
14
+
15
+ `PROMPT_ESCAPED` is properly shell-escaped `PROMPT`.
16
+
17
+ 3. **Launch in background** using **Bash** tool:
18
+ - `command`: `echo "PROMPT_ESCAPED" | env -u CLAUDECODE claude -p`
19
+ - `run_in_background`: `true`
20
+ - `description`: `Claude execution`
21
+
22
+ 4. **Wait for completion** using **TaskOutput** tool:
23
+ - `task_id`: the task ID returned from step 3
24
+ - `block`: `true`
25
+ - Do NOT set any timeout - let it run as long as necessary
26
+
27
+ 5. Display the result clearly:
28
+ ```
29
+ ### Claude Response
30
+ [paste the response here]
31
+ ```
32
+
33
+ ## Behavior Notes
34
+
35
+ - Do **not** impose any timeouts. Let the LLM work as long as necessary.
36
+ - Do **not** check intermediate output. Wait for full completion.
37
+ - Do **not** make any code changes or modify files unless the response explicitly requires it and user confirms.
@@ -0,0 +1,37 @@
1
+ ---
2
+ description: Run the Codex CLI to execute the current prompt
3
+ ---
4
+
5
+ # Run Codex LLM
6
+
7
+ Execute the prompt using Codex CLI in background mode.
8
+
9
+ ## Instructions
10
+
11
+ 1. If `$ARGUMENTS` is **empty**, ask the user for their prompt and wait for their response.
12
+
13
+ 2. Set `PROMPT` to `$ARGUMENTS` (or the user's response if arguments were empty).
14
+
15
+ `PROMPT_ESCAPED` is properly shell-escaped `PROMPT`.
16
+
17
+ 3. **Launch in background** using **Bash** tool:
18
+ - `command`: `opencode run --model openai/gpt-5.3-codex "PROMPT_ESCAPED"`
19
+ - `run_in_background`: `true`
20
+ - `description`: `Codex execution`
21
+
22
+ 4. **Wait for completion** using **TaskOutput** tool:
23
+ - `task_id`: the task ID returned from step 3
24
+ - `block`: `true`
25
+ - Do NOT set any timeout - let it run as long as necessary
26
+
27
+ 5. Display the result clearly:
28
+ ```
29
+ ### Codex Response
30
+ [paste the response here]
31
+ ```
32
+
33
+ ## Behavior Notes
34
+
35
+ - Do **not** impose any timeouts. Let the LLM work as long as necessary.
36
+ - Do **not** check intermediate output. Wait for full completion.
37
+ - Do **not** make any code changes or modify files unless the response explicitly requires it and user confirms.
@@ -0,0 +1,37 @@
1
+ ---
2
+ description: Run the Gemini CLI to execute the current prompt
3
+ ---
4
+
5
+ # Run Gemini LLM
6
+
7
+ Execute the prompt using Gemini CLI in background mode.
8
+
9
+ ## Instructions
10
+
11
+ 1. If `$ARGUMENTS` is **empty**, ask the user for their prompt and wait for their response.
12
+
13
+ 2. Set `PROMPT` to `$ARGUMENTS` (or the user's response if arguments were empty).
14
+
15
+ `PROMPT_ESCAPED` is properly shell-escaped `PROMPT`.
16
+
17
+ 3. **Launch in background** using **Bash** tool:
18
+ - `command`: `gemini -p "PROMPT_ESCAPED"`
19
+ - `run_in_background`: `true`
20
+ - `description`: `Gemini execution`
21
+
22
+ 4. **Wait for completion** using **TaskOutput** tool:
23
+ - `task_id`: the task ID returned from step 3
24
+ - `block`: `true`
25
+ - Do NOT set any timeout - let it run as long as necessary
26
+
27
+ 5. Display the result clearly:
28
+ ```
29
+ ### Gemini Response
30
+ [paste the response here]
31
+ ```
32
+
33
+ ## Behavior Notes
34
+
35
+ - Do **not** impose any timeouts. Let the LLM work as long as necessary.
36
+ - Do **not** check intermediate output. Wait for full completion.
37
+ - Do **not** make any code changes or modify files unless the response explicitly requires it and user confirms.
@@ -0,0 +1,37 @@
1
+ ---
2
+ description: Run the Claude CLI with the GLM LLM to execute the current prompt
3
+ ---
4
+
5
+ # Run GLM LLM (via Claude CLI)
6
+
7
+ Execute the prompt using Claude CLI with GLM LLM in background mode.
8
+
9
+ ## Instructions
10
+
11
+ 1. If `$ARGUMENTS` is **empty**, ask the user for their prompt and wait for their response.
12
+
13
+ 2. Set `PROMPT` to `$ARGUMENTS` (or the user's response if arguments were empty).
14
+
15
+ `PROMPT_ESCAPED` is properly shell-escaped `PROMPT`.
16
+
17
+ 3. **Launch in background** using **Bash** tool:
18
+ - `command`: `echo "PROMPT_ESCAPED" | env -u CLAUDECODE cczy -p`
19
+ - `run_in_background`: `true`
20
+   - `description`: `GLM execution`
21
+
22
+ 4. **Wait for completion** using **TaskOutput** tool:
23
+ - `task_id`: the task ID returned from step 3
24
+ - `block`: `true`
25
+ - Do NOT set any timeout - let it run as long as necessary
26
+
27
+ 5. Display the result clearly:
28
+ ```
29
+ ### Claude with GLM LLM Response
30
+ [paste the response here]
31
+ ```
32
+
33
+ ## Behavior Notes
34
+
35
+ - Do **not** impose any timeouts. Let the LLM work as long as necessary.
36
+ - Do **not** check intermediate output. Wait for full completion.
37
+ - Do **not** make any code changes or modify files unless the response explicitly requires it and user confirms.