abelworkflow 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/.gitignore +13 -0
  2. package/.skill-lock.json +29 -0
  3. package/AGENTS.md +45 -0
  4. package/README.md +147 -0
  5. package/bin/abelworkflow.mjs +2 -0
  6. package/commands/oc/diagnose.md +63 -0
  7. package/commands/oc/implementation.md +157 -0
  8. package/commands/oc/init.md +27 -0
  9. package/commands/oc/plan.md +88 -0
  10. package/commands/oc/research.md +126 -0
  11. package/lib/cli.mjs +222 -0
  12. package/package.json +23 -0
  13. package/skills/confidence-check/SKILL.md +124 -0
  14. package/skills/confidence-check/confidence.ts +335 -0
  15. package/skills/context7-auto-research/.env +4 -0
  16. package/skills/context7-auto-research/.env.example +4 -0
  17. package/skills/context7-auto-research/SKILL.md +83 -0
  18. package/skills/context7-auto-research/context7-api.js +283 -0
  19. package/skills/dev-browser/SKILL.md +225 -0
  20. package/skills/dev-browser/bun.lock +443 -0
  21. package/skills/dev-browser/package-lock.json +2988 -0
  22. package/skills/dev-browser/package.json +31 -0
  23. package/skills/dev-browser/references/scraping.md +155 -0
  24. package/skills/dev-browser/resolve-skill-dir.sh +35 -0
  25. package/skills/dev-browser/scripts/start-relay.ts +32 -0
  26. package/skills/dev-browser/scripts/start-server.ts +117 -0
  27. package/skills/dev-browser/server.sh +24 -0
  28. package/skills/dev-browser/src/client.ts +474 -0
  29. package/skills/dev-browser/src/index.ts +287 -0
  30. package/skills/dev-browser/src/relay.ts +731 -0
  31. package/skills/dev-browser/src/snapshot/browser-script.ts +877 -0
  32. package/skills/dev-browser/src/snapshot/index.ts +14 -0
  33. package/skills/dev-browser/src/snapshot/inject.ts +13 -0
  34. package/skills/dev-browser/src/types.ts +34 -0
  35. package/skills/dev-browser/tsconfig.json +36 -0
  36. package/skills/dev-browser/vitest.config.ts +12 -0
  37. package/skills/git-commit/SKILL.md +124 -0
  38. package/skills/grok-search/.env.example +24 -0
  39. package/skills/grok-search/SKILL.md +114 -0
  40. package/skills/grok-search/requirements.txt +2 -0
  41. package/skills/grok-search/scripts/groksearch_cli.py +1214 -0
  42. package/skills/grok-search/scripts/groksearch_entry.py +116 -0
  43. package/skills/prompt-enhancer/ADVANCED.md +74 -0
  44. package/skills/prompt-enhancer/SKILL.md +71 -0
  45. package/skills/prompt-enhancer/TEMPLATE.md +91 -0
  46. package/skills/prompt-enhancer/scripts/enhance.py +142 -0
  47. package/skills/sequential-think/SKILL.md +198 -0
  48. package/skills/sequential-think/scripts/.env.example +5 -0
  49. package/skills/sequential-think/scripts/sequential_think_cli.py +253 -0
  50. package/skills/time/SKILL.md +116 -0
  51. package/skills/time/scripts/time_cli.py +104 -0
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env python3
2
+ """Cross-platform bootstrap entrypoint for GrokSearch CLI."""
3
+
4
+ import os
5
+ import shutil
6
+ import subprocess
7
+ import sys
8
+ from pathlib import Path
9
+ from typing import Optional
10
+
11
+
12
+ ROOT_DIR = Path(__file__).resolve().parent.parent
13
+ VENV_DIR = Path(os.environ.get("GROKSEARCH_VENV_DIR", str(ROOT_DIR / ".venv")))
14
+ REQ_FILE = ROOT_DIR / "requirements.txt"
15
+ CLI_PY = ROOT_DIR / "scripts" / "groksearch_cli.py"
16
+
17
+
18
def venv_python() -> Optional[Path]:
    """Return the interpreter inside VENV_DIR, or None if none exists yet."""
    if sys.platform == "win32":
        # Windows venvs put the interpreter under Scripts/; check both names.
        relative_paths = ("Scripts/python.exe", "Scripts/python", "bin/python")
    else:
        relative_paths = ("bin/python",)
    for rel in relative_paths:
        interpreter = VENV_DIR / rel
        if interpreter.is_file():
            return interpreter
    return None
32
+
33
+
34
def python_spec() -> Optional[str]:
    """Return an explicit interpreter spec from the environment, if any.

    GROKSEARCH_PYTHON takes precedence over AGENTS_SKILLS_PYTHON; empty
    values are treated as unset.
    """
    return next(
        (
            os.environ[name]
            for name in ("GROKSEARCH_PYTHON", "AGENTS_SKILLS_PYTHON")
            if os.environ.get(name)
        ),
        None,
    )
40
+
41
+
42
def has_uv() -> bool:
    """True when the ``uv`` tool is available on PATH."""
    return bool(shutil.which("uv"))
44
+
45
+
46
def find_system_python() -> Optional[str]:
    """Locate a usable system interpreter.

    Preference order: AGENTS_SKILLS_PYTHON override, the currently running
    interpreter, then python3/python found on PATH. Returns None if nothing
    usable is found.
    """
    override = os.environ.get("AGENTS_SKILLS_PYTHON")
    if override and Path(override).is_file():
        return override
    current = sys.executable
    if current and Path(current).is_file():
        return current
    for name in ("python3", "python"):
        located = shutil.which(name)
        if located:
            return located
    return None
57
+
58
+
59
def create_venv() -> None:
    """Create VENV_DIR, preferring ``uv venv`` over the stdlib ``venv`` module.

    Exits the process with status 1 when no interpreter can be found to
    build the environment with.
    """
    if has_uv():
        cmd = ["uv", "venv"]
        requested = python_spec()
        if requested:
            cmd += ["--python", requested]
        cmd.append(str(VENV_DIR))
        subprocess.run(cmd, check=True)
        return
    interpreter = find_system_python()
    if not interpreter:
        print("Error: No usable uv or python found. Cannot create virtual environment.", file=sys.stderr)
        sys.exit(1)
    subprocess.run([interpreter, "-m", "venv", str(VENV_DIR)], check=True)
73
+
74
+
75
def install_deps(python_bin: Path) -> None:
    """Ensure httpx and tenacity are importable by *python_bin*; install if not.

    Prefers ``uv pip`` when available, falls back to pip with the
    requirements file, and finally installs the two packages directly.
    """
    probe = subprocess.run(
        [str(python_bin), "-c", "import httpx, tenacity"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if probe.returncode == 0:
        return  # dependencies already present; nothing to do
    req_exists = REQ_FILE.is_file()
    if req_exists and has_uv():
        subprocess.run(
            ["uv", "pip", "install", "--python", str(python_bin), "-r", str(REQ_FILE)],
            check=True,
        )
    elif req_exists:
        subprocess.run(
            [str(python_bin), "-m", "pip", "install", "-r", str(REQ_FILE)],
            check=True,
        )
    else:
        subprocess.run(
            [str(python_bin), "-m", "pip", "install", "httpx", "tenacity"],
            check=True,
        )
90
+
91
+
92
def validate_venv_dir() -> None:
    """Abort (exit 1) when VENV_DIR exists but cannot serve as a virtualenv."""
    if VENV_DIR.exists() and not VENV_DIR.is_dir():
        print(f"Error: {VENV_DIR} exists but is not a directory.", file=sys.stderr)
        sys.exit(1)
    # A directory counts as a venv if it has pyvenv.cfg or a python binary.
    looks_like_venv = (VENV_DIR / "pyvenv.cfg").exists() or venv_python() is not None
    if VENV_DIR.is_dir() and not looks_like_venv:
        print(f"Error: {VENV_DIR} exists but is not a valid venv.", file=sys.stderr)
        sys.exit(1)
99
+
100
+
101
def main() -> None:
    """Bootstrap the venv, install dependencies, then run the real CLI.

    Exits with the CLI's return code, or 1 when the venv cannot be
    created or located.
    """
    validate_venv_dir()
    interpreter = venv_python()
    if interpreter is None:
        create_venv()
        interpreter = venv_python()
        if interpreter is None:
            print("Error: Failed to locate python in venv after creation.", file=sys.stderr)
            sys.exit(1)
    install_deps(interpreter)
    # Forward all arguments unchanged to the actual CLI script.
    proc = subprocess.run([str(interpreter), str(CLI_PY), *sys.argv[1:]])
    sys.exit(proc.returncode)
113
+
114
+
115
+ if __name__ == "__main__":
116
+ main()
@@ -0,0 +1,74 @@
1
+ # Advanced Usage
2
+
3
+ ## Using the Python Script Directly
4
+
5
+ The `scripts/enhance.py` script can be used standalone:
6
+
7
+ ```bash
8
+ # Basic usage
9
+ python3 scripts/enhance.py "your prompt here"
10
+
11
+ # With custom model (if API key is set)
12
+ ANTHROPIC_API_KEY=sk-ant-... python3 scripts/enhance.py "your prompt"
13
+ ```
14
+
15
+ ## Environment Variables
16
+
17
+ | Variable | Description | Default |
18
+ |----------|-------------|---------|
19
+ | `ANTHROPIC_API_KEY` | Anthropic API key for Claude | - |
20
+ | `OPENAI_API_KEY` | OpenAI API key (fallback) | - |
21
+ | `PE_MODEL` | Model to use | `claude-sonnet-4-20250514` |
22
+
23
+ ## Integration with Other Tools
24
+
25
+ ### Piping Output
26
+
27
+ ```bash
28
+ # Pipe to clipboard (macOS)
29
+ python3 scripts/enhance.py "my prompt" | pbcopy
30
+
31
+ # Pipe to file
32
+ python3 scripts/enhance.py "my prompt" > enhanced.md
33
+
34
+ # Chain with other commands
35
+ python3 scripts/enhance.py "my prompt" | claude -p
36
+ ```
37
+
38
+ ### In Shell Scripts
39
+
40
+ ```bash
41
+ #!/bin/bash
42
+ ENHANCED=$(python3 ~/.agents/skills/prompt-enhancer/scripts/enhance.py "$1")
43
+ echo "$ENHANCED"
44
+ ```
45
+
46
+ ## Manual Enhancement (No API)
47
+
48
+ If no API key is available, you can manually apply the enhancement principles:
49
+
50
+ 1. Read the user's prompt
51
+ 2. Apply the template from [TEMPLATE.md](TEMPLATE.md)
52
+ 3. Structure the output with:
53
+ - Context section
54
+ - Objective section
55
+ - Step-by-step instructions
56
+ - Constraints
57
+
58
+ ## Troubleshooting
59
+
60
+ ### Script Not Found
61
+ Ensure the skill is installed in the correct location:
62
+ ```bash
63
+ ls ~/.agents/skills/prompt-enhancer/scripts/enhance.py
64
+ ```
65
+
66
+ ### Permission Denied
67
+ Make the script executable:
68
+ ```bash
69
+ chmod +x ~/.agents/skills/prompt-enhancer/scripts/enhance.py
70
+ ```
71
+
72
+ ### No API Key
73
+ The script will fall back to a local template-based enhancement if no API key is found.
74
+
@@ -0,0 +1,71 @@
1
+ ---
2
+ name: prompt-enhancer
3
+ description: Enhance and refine prompts for AI coding agents using Chain-of-Thought reasoning. Use when user asks to improve, optimize, rewrite, or enhance a prompt. Transforms vague requests into structured, high-context instructions for Claude Code, Codex, or Gemini CLI.
4
+ allowed-tools: Bash(python3:*), Bash(pe:*), Read, Grep
5
+ ---
6
+
7
+ # Prompt Enhancer Skill
8
+
9
+ Transforms vague or simple prompts into structured, high-context instructions optimized for AI coding agents.
10
+
11
+ ## When to Use
12
+
13
+ - User asks to "improve my prompt" or "make this prompt better"
14
+ - User wants to "optimize this instruction for Claude/Codex"
15
+ - User needs help writing a better prompt for an AI agent
16
+ - User mentions "prompt engineering" or "rewrite this"
17
+
18
+ ## Quick Start
19
+
20
+ Run the enhance script with the user's prompt:
21
+
22
+ ```bash
23
+ python3 ~/.agents/skills/prompt-enhancer/scripts/enhance.py "user's raw prompt here"
24
+ ```
25
+
26
+ ## How It Works
27
+
28
+ The enhancer applies these principles:
29
+
30
+ 1. **Add Context**: What project/tech stack is involved?
31
+ 2. **Clarify Objective**: What exactly should be accomplished?
32
+ 3. **Chain of Thought**: Add step-by-step reasoning instructions
33
+ 4. **Define Constraints**: What are the boundaries and requirements?
34
+ 5. **Specify Output**: What format should the result be in?
35
+
36
+ ## Output Format
37
+
38
+ The enhanced prompt follows this structure:
39
+
40
+ ```markdown
41
+ # Context
42
+ [Refined context description]
43
+
44
+ # Objective
45
+ [Precise task definition]
46
+
47
+ # Step-by-Step Instructions
48
+ 1. [Step 1]
49
+ 2. [Step 2]
50
+ ...
51
+
52
+ # Constraints
53
+ - [Constraint 1]
54
+ - [Constraint 2]
55
+ ```
56
+
57
+ ## Additional Resources
58
+
59
+ - For advanced usage patterns, see [ADVANCED.md](ADVANCED.md)
60
+ - For the system prompt template, see [TEMPLATE.md](TEMPLATE.md)
61
+
62
+ ## Alternative: Use `pe` CLI
63
+
64
+ If you have the `pe` CLI installed globally:
65
+
66
+ ```bash
67
+ pe "user's raw prompt here"
68
+ ```
69
+
70
+ Install via: `npm install -g prompt-enhancer` (or clone repo and `npm link`)
71
+
@@ -0,0 +1,91 @@
1
+ # Prompt Enhancement Template
2
+
3
+ This is the system prompt used to enhance user prompts.
4
+
5
+ ## System Role
6
+
7
+ You are an expert Prompt Engineer for Coding Agents (Claude Code, Codex, Gemini CLI).
8
+ Your goal is to rewrite the user's raw input into a structured, high-context prompt that maximizes the agent's effectiveness.
9
+
10
+ ## Guidelines
11
+
12
+ 1. **Structure**: Use a clear Markdown structure with headers.
13
+ 2. **Chain of Thought**: Explicitly ask the agent to "Think step-by-step" or "Analyze the file structure first".
14
+ 3. **Context**: If the user's prompt is vague, add placeholders like "[Insert relevant file(s)]" or "[Specify tech stack]" in the rewritten prompt, or simply infer them if obvious.
15
+ 4. **Format**:
16
+ - **Context**: What is the current state? What files are involved?
17
+ - **Objective**: What exactly should be done?
18
+ - **Constraints**: Specific libraries, coding styles, or "no placeholders".
19
+ - **Response Format**: e.g., "Return only the code block" or "Explain step-by-step".
20
+
21
+ ## Output Template
22
+
23
+ ```markdown
24
+ # Context
25
+ [Refined context description]
26
+
27
+ # Objective
28
+ [Precise task definition]
29
+
30
+ # Step-by-Step Instructions
31
+ 1. [Step 1]
32
+ 2. [Step 2]
33
+ ...
34
+
35
+ # Constraints
36
+ - [Constraint 1]
37
+ - [Constraint 2]
38
+ ```
39
+
40
+ ## Examples
41
+
42
+ ### Example 1: Vague Input
43
+
44
+ **Input:** "Write a login component"
45
+
46
+ **Enhanced Output:**
47
+ ```markdown
48
+ # Context
49
+ Building a React application. Need secure authentication UI component.
50
+
51
+ # Objective
52
+ Create a functional Login component with form validation.
53
+
54
+ # Step-by-Step Instructions
55
+ 1. Create LoginForm component with email/password fields
56
+ 2. Add client-side validation (email format, password min length)
57
+ 3. Handle form submission with loading state
58
+ 4. Display error messages for failed attempts
59
+ 5. Provide success callback for navigation
60
+
61
+ # Constraints
62
+ - Use controlled components for form inputs
63
+ - Follow accessibility best practices (ARIA labels, focus management)
64
+ - Handle both validation errors and API errors distinctly
65
+ ```
66
+
67
+ ### Example 2: Ambiguous Bug Fix
68
+
69
+ **Input:** "fix the sorting bug"
70
+
71
+ **Enhanced Output:**
72
+ ```markdown
73
+ # Context
74
+ There is a reported bug related to sorting functionality. Need to investigate and fix.
75
+
76
+ # Objective
77
+ Identify and fix the sorting bug in the codebase.
78
+
79
+ # Step-by-Step Instructions
80
+ 1. First, identify files containing sorting logic (search for "sort", "order", "compare")
81
+ 2. Review the sorting implementation and identify the issue
82
+ 3. Check edge cases: empty arrays, single items, duplicate values
83
+ 4. Implement the fix with proper type handling
84
+ 5. Add or update tests to cover the bug scenario
85
+
86
+ # Constraints
87
+ - Maintain backward compatibility
88
+ - Do not change the function signature unless necessary
89
+ - Add inline comments explaining the fix
90
+ ```
91
+
@@ -0,0 +1,142 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Prompt Enhancer Script - Standalone Python script for enhancing prompts.
4
+ Can be used with or without an API key.
5
+ """
6
+
7
+ import sys
8
+ import os
9
+ import subprocess
10
+
11
+ SYSTEM_PROMPT = """
12
+ You are an expert Prompt Engineer for Coding Agents (Claude Code, Codex, Gemini CLI).
13
+ Your goal is to rewrite the user's raw input into a structured, high-context prompt that maximizes the agent's effectiveness.
14
+
15
+ Guidelines:
16
+ 1. Structure: Use a clear Markdown structure with headers.
17
+ 2. Chain of Thought: Explicitly ask the agent to "Think step-by-step" or "Analyze the file structure first".
18
+ 3. Context: If the user's prompt is vague, add placeholders like "[Insert relevant file(s)]" or "[Specify tech stack]" in the rewritten prompt, or simply infer them if obvious.
19
+ 4. Format:
20
+ - Context: What is the current state? What files are involved?
21
+ - Objective: What exactly should be done?
22
+ - Constraints: specific libraries, coding styles, or "no placeholders".
23
+ - Response Format: e.g., "Return only the code block" or "Explain step-by-step".
24
+
25
+ Output Template:
26
+
27
+ # Context
28
+ [Refined context description]
29
+
30
+ # Objective
31
+ [Precise task definition]
32
+
33
+ # Step-by-Step Instructions
34
+ 1. [Step 1]
35
+ 2. [Step 2]
36
+ ...
37
+
38
+ # Constraints
39
+ - [Constraint 1]
40
+ - [Constraint 2]
41
+ """
42
+
43
+
44
def enhance_with_anthropic(prompt: str, api_key: str) -> str:
    """Enhance prompt using the Anthropic API, installing the SDK on demand."""
    try:
        import anthropic
    except ImportError:
        # Best-effort bootstrap: quietly install the SDK, then retry the import.
        print("Installing anthropic package...", file=sys.stderr)
        subprocess.run(
            [sys.executable, "-m", "pip", "install", "anthropic", "-q"],
            check=True
        )
        import anthropic

    response = anthropic.Anthropic(api_key=api_key).messages.create(
        model=os.environ.get("PE_MODEL", "claude-sonnet-4-20250514"),
        max_tokens=2048,
        system=SYSTEM_PROMPT,
        messages=[{"role": "user", "content": prompt}]
    )
    return response.content[0].text
64
+
65
+
66
def enhance_with_openai(prompt: str, api_key: str) -> str:
    """Enhance prompt using the OpenAI API, installing the SDK on demand."""
    try:
        import openai
    except ImportError:
        # Best-effort bootstrap: quietly install the SDK, then retry the import.
        print("Installing openai package...", file=sys.stderr)
        subprocess.run(
            [sys.executable, "-m", "pip", "install", "openai", "-q"],
            check=True
        )
        import openai

    completion = openai.OpenAI(api_key=api_key).chat.completions.create(
        model=os.environ.get("PE_MODEL", "gpt-4o"),
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt}
        ]
    )
    return completion.choices[0].message.content
87
+
88
+
89
def enhance_locally(prompt: str) -> str:
    """Enhance prompt using a local template (no API call).

    Returns a Markdown document embedding *prompt* in the Context and
    Objective sections, with generic step/constraint boilerplate.
    """
    template_lines = [
        "# Context",
        f"[Analyze the context for: {prompt}]",
        "",
        "# Objective",
        prompt,
        "",
        "# Step-by-Step Instructions",
        "1. First, understand the current state and requirements",
        "2. Identify the key components involved",
        "3. Plan the implementation approach",
        "4. Execute the changes step by step",
        "5. Verify the results",
        "",
        "# Constraints",
        "- Follow existing code style and conventions",
        "- Ensure backward compatibility",
        "- Add appropriate error handling",
        "",  # trailing empty entry preserves the final newline
    ]
    return "\n".join(template_lines)
109
+
110
+
111
def main():
    """CLI entry point: enhance the argv prompt via the best available backend.

    Tries Anthropic first, then OpenAI, and degrades to the offline
    template when no API key exists or an API call fails.
    """
    if len(sys.argv) < 2:
        print("Usage: enhance.py <prompt>", file=sys.stderr)
        print("Example: enhance.py 'Write a login component'", file=sys.stderr)
        sys.exit(1)

    prompt = " ".join(sys.argv[1:])

    anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
    openai_key = os.environ.get("OPENAI_API_KEY")

    try:
        if anthropic_key:
            enhanced = enhance_with_anthropic(prompt, anthropic_key)
        elif openai_key:
            enhanced = enhance_with_openai(prompt, openai_key)
        else:
            # No API key configured - stay fully offline.
            print("Note: No API key found, using local template.", file=sys.stderr)
            enhanced = enhance_locally(prompt)
        print(enhanced)
    except Exception as e:
        # Any backend failure degrades gracefully to the offline template.
        print(f"Error: {e}", file=sys.stderr)
        print("\nFalling back to local template...", file=sys.stderr)
        print(enhance_locally(prompt))
138
+
139
+
140
+ if __name__ == "__main__":
141
+ main()
142
+
@@ -0,0 +1,198 @@
1
+ ---
2
+ name: sequential-think
3
+ description: |
4
+ Multi-step reasoning engine for complex analysis and systematic problem solving. Use when: (1) Complex debugging scenarios with multiple layers, (2) Architectural analysis and system design, (3) Problems requiring hypothesis testing and validation, (4) Multi-component failure investigation, (5) Performance bottleneck identification. Triggers: "--think", "--think-hard", "--ultrathink", "analyze step by step", "break down this problem", "systematic analysis". IMPORTANT: Do NOT use for simple single-step tasks.
5
+ ---
6
+
7
+ # Sequential Think
8
+
9
+ Structured iterative thinking for complex problem-solving. Supports both MCP server and standalone CLI.
10
+
11
+ ## Execution Methods
12
+
13
+ ### Method 1: MCP Tools (if available)
14
+ Use `mcp__sequential-thinking__sequentialthinking` tool directly.
15
+
16
+ ### Method 2: CLI Script (no MCP dependency)
17
+ Run `scripts/sequential_think_cli.py` via Bash:
18
+
19
+ ```bash
20
+ # Process a thought
21
+ python scripts/sequential_think_cli.py think \
22
+ --thought "First, let me analyze the problem structure..." \
23
+ --thought-number 1 \
24
+ --total-thoughts 5
25
+
26
+ # Continue thinking chain
27
+ python scripts/sequential_think_cli.py think \
28
+ --thought "Based on step 1, I hypothesize that..." \
29
+ --thought-number 2 \
30
+ --total-thoughts 5
31
+
32
+ # Revise a previous thought
33
+ python scripts/sequential_think_cli.py think \
34
+ --thought "Reconsidering step 1, I realize..." \
35
+ --thought-number 3 \
36
+ --total-thoughts 5 \
37
+ --is-revision \
38
+ --revises-thought 1
39
+
40
+ # Branch into alternative path
41
+ python scripts/sequential_think_cli.py think \
42
+ --thought "Alternative approach: what if we..." \
43
+ --thought-number 4 \
44
+ --total-thoughts 6 \
45
+ --branch-from 2 \
46
+ --branch-id "alt-approach"
47
+
48
+ # Final thought (complete chain)
49
+ python scripts/sequential_think_cli.py think \
50
+ --thought "Conclusion: the solution is..." \
51
+ --thought-number 5 \
52
+ --total-thoughts 5 \
53
+ --no-next
54
+
55
+ # View thought history
56
+ python scripts/sequential_think_cli.py history [--format json|text]
57
+
58
+ # Clear thought history
59
+ python scripts/sequential_think_cli.py clear
60
+ ```
61
+
62
+ ## Core Principles
63
+
64
+ ### Iterative Thinking Process
65
+ - Each tool call = one "thought" in the chain
66
+ - Build upon, question, or revise previous thoughts
67
+ - Express uncertainty when it exists
68
+
69
+ ### Dynamic Thought Count
70
+ - Start with initial estimate of `totalThoughts`
71
+ - Adjust up/down as understanding evolves
72
+ - Add more thoughts even after reaching initial end
73
+
74
+ ### Hypothesis-Driven Approach
75
+ 1. Generate hypotheses as potential solutions emerge
76
+ 2. Verify hypotheses based on chain-of-thought steps
77
+ 3. Repeat until satisfied with solution
78
+
79
+ ### Completion Criteria
80
+ - Only set `nextThoughtNeeded: false` when truly finished
81
+ - Must have satisfactory, verified answer
82
+ - Don't rush to conclusion
83
+
84
+ ## When to Use
85
+
86
+ | Scenario | Use Sequential Think |
87
+ |----------|---------------------|
88
+ | Complex debugging (3+ layers) | ✅ Yes |
89
+ | Architectural analysis | ✅ Yes |
90
+ | Multi-component investigation | ✅ Yes |
91
+ | Performance bottleneck analysis | ✅ Yes |
92
+ | Root cause analysis | ✅ Yes |
93
+ | Simple explanation | ❌ No |
94
+ | Single-file change | ❌ No |
95
+ | Straightforward fix | ❌ No |
96
+
97
+ ## Parameters
98
+
99
+ | Parameter | Type | Required | Description |
100
+ |-----------|------|----------|-------------|
101
+ | `thought` | string | Yes | Current thinking step content |
102
+ | `thoughtNumber` | int | Yes | Current position in sequence (1-based) |
103
+ | `totalThoughts` | int | Yes | Estimated total thoughts needed |
104
+ | `nextThoughtNeeded` | bool | No | Whether more thinking needed (default: true) |
105
+ | `isRevision` | bool | No | Whether this revises previous thinking |
106
+ | `revisesThought` | int | No | Which thought number is being reconsidered |
107
+ | `branchFromThought` | int | No | Branching point thought number |
108
+ | `branchId` | string | No | Identifier for current branch |
109
+ | `needsMoreThoughts` | bool | No | Signal that more thoughts needed beyond estimate |
110
+
111
+ ## Output Format
112
+
113
+ ```json
114
+ {
115
+ "thoughtNumber": 3,
116
+ "totalThoughts": 5,
117
+ "nextThoughtNeeded": true,
118
+ "branches": ["alt-approach"],
119
+ "thoughtHistoryLength": 3
120
+ }
121
+ ```
122
+
123
+ ## Workflow Pattern
124
+
125
+ ### Phase 1: Problem Decomposition
126
+ ```
127
+ Thought 1: Identify problem scope and constraints
128
+ Thought 2: Break into sub-problems
129
+ Thought 3: Identify dependencies between sub-problems
130
+ ```
131
+
132
+ ### Phase 2: Hypothesis Generation
133
+ ```
134
+ Thought 4: Generate initial hypothesis
135
+ Thought 5: Identify evidence needed to verify
136
+ ```
137
+
138
+ ### Phase 3: Verification & Iteration
139
+ ```
140
+ Thought 6: Test hypothesis against evidence
141
+ Thought 7: Revise if needed (isRevision=true)
142
+ Thought 8: Branch if alternative path promising
143
+ ```
144
+
145
+ ### Phase 4: Conclusion
146
+ ```
147
+ Final Thought: Synthesize findings, provide answer (nextThoughtNeeded=false)
148
+ ```
149
+
150
+ ## Best Practices
151
+
152
+ 1. **Start with estimate, adjust as needed**
153
+ - Initial `totalThoughts` is just a guess
154
+ - Increase if problem more complex than expected
155
+ - Decrease if solution found early
156
+
157
+ 2. **Use revisions for course correction**
158
+ - Mark `isRevision=true` when reconsidering
159
+ - Reference `revisesThought` for clarity
160
+
161
+ 3. **Branch for alternative approaches**
162
+ - Use `branchFromThought` to explore alternatives
163
+ - Give meaningful `branchId` names
164
+
165
+ 4. **Filter irrelevant information**
166
+ - Each thought should advance toward solution
167
+ - Ignore tangential details
168
+
169
+ 5. **Don't rush completion**
170
+ - Only `nextThoughtNeeded=false` when truly done
171
+ - Verify hypothesis before concluding
172
+
173
+ ## Anti-Patterns
174
+
175
+ | Prohibited | Correct |
176
+ |------------|---------|
177
+ | Use for simple tasks | Reserve for complex multi-step problems |
178
+ | Skip thought numbers | Always increment correctly |
179
+ | Conclude without verification | Verify hypothesis before final thought |
180
+ | Ignore previous thoughts | Build upon or explicitly revise |
181
+ | Fixed totalThoughts | Adjust as understanding evolves |
182
+
183
+ ## Integration with Other Tools
184
+
185
+ ### With augment-context-engine
186
+ ```
187
+ augment-context-engine → align current state → Sequential Think → analyze and plan
188
+ ```
189
+
190
+ ### With Context7
191
+ ```
192
+ Sequential Think → coordinate analysis → Context7 → provide official patterns
193
+ ```
194
+
195
+ ### With Serena
196
+ ```
197
+ Serena → symbol-level exploration → Sequential Think → systematic analysis
198
+ ```
@@ -0,0 +1,5 @@
1
+ # Sequential Think Configuration
2
+ # This skill uses local storage only - no external API required.
3
+
4
+ # Optional: Custom history file location (default: ~/.config/sequential-think/)
5
+ # SEQUENTIAL_THINK_CONFIG_DIR=/path/to/custom/dir